L4Re Operating System Framework
Interface and Usage Documentation
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
vfs_impl.h
1/*
2 * (c) 2008-2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
3 * Alexander Warg <warg@os.inf.tu-dresden.de>,
4 * Björn Döbel <doebel@os.inf.tu-dresden.de>
5 * economic rights: Technische Universität Dresden (Germany)
6 *
7 * License: see LICENSE.spdx (in this directory or the directories above)
8 */
9
10#include "fd_store.h"
11#include "vcon_stream.h"
12#include "ns_fs.h"
13
14#include <l4/bid_config.h>
15#include <l4/re/env>
16#include <l4/re/rm>
17#include <l4/re/dataspace>
18#include <l4/sys/assert.h>
19#include <l4/cxx/hlist>
20#include <l4/cxx/pair>
21#include <l4/cxx/std_alloc>
22
23#include <l4/l4re_vfs/backend>
24#include <l4/re/shared_cap>
25
26#include <unistd.h>
27#include <stdarg.h>
28#include <errno.h>
29#include <sys/uio.h>
30
// Compile-time switch for mmap debug tracing: flip the "#if 0" to enable
// kernel-debugger output via outstring()/outhex32().
#if 0
#include <l4/sys/kdebug.h>
static int debug_mmap = 1;
#define DEBUG_LOG(level, dbg...) do { if (level) dbg } while (0)
#else
// Tracing disabled: DEBUG_LOG expands to an empty statement.
#define DEBUG_LOG(level, dbg...) do { } while (0)
#endif

// Use one large pooled dataspace for small anonymous-memory requests
// (see Vfs::alloc_anon_mem() for the pool/chunk sizes this selects).
#define USE_BIG_ANON_DS
46using L4Re::Rm;
47
48namespace {
49
50using cxx::Ref_ptr;
51
// Process-wide file-descriptor table. Derives from the generic
// L4Re::Core::Fd_store only to add a constructor that pre-populates
// descriptors 0-2 with the Vcon-backed stdio stream (see below).
class Fd_store : public L4Re::Core::Fd_store
{
public:
  Fd_store() noexcept;
};
57
// for internal Vcon_streams we want to have a placement new operator, so
// inherit and add one
// (the placement form is the compiler-provided global one; deriving gives
// us a distinct type that Fd_store's constructor can construct in static
// storage)
class Std_stream : public L4Re::Core::Vcon_stream
{
public:
  Std_stream(L4::Cap<L4::Vcon> c) : L4Re::Core::Vcon_stream(c) {}
};
65
66Fd_store::Fd_store() noexcept
67{
68 // use this strange way to prevent deletion of the stdio object
69 // this depends on Fd_store to being a singleton !!!
70 static char m[sizeof(Std_stream)] __attribute__((aligned(sizeof(long))));
71 Std_stream *s = new (m) Std_stream(L4Re::Env::env()->log());
72 // make sure that we never delete the static io stream thing
73 s->add_ref();
74 set(0, cxx::ref_ptr(s)); // stdin
75 set(1, cxx::ref_ptr(s)); // stdout
76 set(2, cxx::ref_ptr(s)); // stderr
77}
78
// Root node of the global mount tree. It is embedded in the Vfs singleton,
// so operator delete is a no-op to keep ref-counting from ever freeing it.
class Root_mount_tree : public L4Re::Vfs::Mount_tree
{
public:
  Root_mount_tree() : L4Re::Vfs::Mount_tree(0) {}
  void operator delete (void *) {}
};
85
86class Vfs : public L4Re::Vfs::Ops
87{
88private:
89 bool _early_oom;
90
91public:
92 Vfs()
93 : _early_oom(true), _root_mount(), _root(L4Re::Env::env())
94 {
95 _root_mount.add_ref();
96 _root.add_ref();
97 _root_mount.mount(cxx::ref_ptr(&_root));
98 _cwd = cxx::ref_ptr(&_root);
99
100#if 0
101 Ref_ptr<L4Re::Vfs::File> rom;
102 _root.openat("rom", 0, 0, &rom);
103
104 _root_mount.create_tree("lib/foo", rom);
105
106 _root.openat("lib", 0, 0, &_cwd);
107
108#endif
109 }
110
111 int alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) noexcept override;
112 Ref_ptr<L4Re::Vfs::File> free_fd(int fd) noexcept override;
113 Ref_ptr<L4Re::Vfs::File> get_root() noexcept override;
114 Ref_ptr<L4Re::Vfs::File> get_cwd() noexcept override;
115 void set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) noexcept override;
116 Ref_ptr<L4Re::Vfs::File> get_file(int fd) noexcept override;
117 cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
118 set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f = Ref_ptr<>::Nil) noexcept
119 override;
120
121 int mmap2(void *start, size_t len, int prot, int flags, int fd,
122 off_t offset, void **ptr) noexcept override;
123
124 int munmap(void *start, size_t len) noexcept override;
125 int mremap(void *old, size_t old_sz, size_t new_sz, int flags,
126 void **new_addr) noexcept override;
127 int mprotect(const void *a, size_t sz, int prot) noexcept override;
128 int msync(void *addr, size_t len, int flags) noexcept override;
129 int madvise(void *addr, size_t len, int advice) noexcept override;
130
131 int register_file_system(L4Re::Vfs::File_system *f) noexcept override;
132 int unregister_file_system(L4Re::Vfs::File_system *f) noexcept override;
133 L4Re::Vfs::File_system *get_file_system(char const *fstype) noexcept override;
134 L4Re::Vfs::File_system_list file_system_list() noexcept override;
135
136 int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
137 int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
138 Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) noexcept override;
139 Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) noexcept override;
140 int mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) noexcept override;
141
142 void operator delete (void *) {}
143
144 void *malloc(size_t size) noexcept override { return Vfs_config::malloc(size); }
145 void free(void *m) noexcept override { Vfs_config::free(m); }
146
147private:
148 Root_mount_tree _root_mount;
149 L4Re::Core::Env_dir _root;
150 Ref_ptr<L4Re::Vfs::File> _cwd;
151 Fd_store fds;
152
153 L4Re::Vfs::File_system *_fs_registry;
154
155 struct File_factory_item : cxx::H_list_item_t<File_factory_item>
156 {
158 explicit File_factory_item(cxx::Ref_ptr<L4Re::Vfs::File_factory> const &f)
159 : f(f) {};
160
161 File_factory_item() = default;
162 File_factory_item(File_factory_item const &) = delete;
163 File_factory_item &operator = (File_factory_item const &) = delete;
164 };
165
166 cxx::H_list_t<File_factory_item> _file_factories;
167
168 l4_addr_t _anon_offset;
170
171 int alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds);
172 int alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
173 l4_addr_t *offset);
174
175 void align_mmap_start_and_length(void **start, size_t *length);
176 int munmap_regions(void *start, size_t len);
177
178 L4Re::Vfs::File_system *find_fs_from_type(char const *fstype) noexcept;
179};
180
// Exact C-string equality, equivalent to strcmp(a, b) == 0.
static inline bool strequal(char const *a, char const *b)
{
  while (*a && *a == *b)
    {
      ++a;
      ++b;
    }
  return *a == *b;
}
187
188int
189Vfs::register_file_system(L4Re::Vfs::File_system *f) noexcept
190{
192
193 if (!f)
194 return -EINVAL;
195
196 for (File_system *c = _fs_registry; c; c = c->next())
197 if (strequal(c->type(), f->type()))
198 return -EEXIST;
199
200 f->next(_fs_registry);
201 _fs_registry = f;
202
203 return 0;
204}
205
206int
207Vfs::unregister_file_system(L4Re::Vfs::File_system *f) noexcept
208{
210
211 if (!f)
212 return -EINVAL;
213
214 File_system **p = &_fs_registry;
215
216 for (; *p; p = &(*p)->next())
217 if (*p == f)
218 {
219 *p = f->next();
220 f->next() = 0;
221 return 0;
222 }
223
224 return -ENOENT;
225}
226
228Vfs::find_fs_from_type(char const *fstype) noexcept
229{
230 L4Re::Vfs::File_system_list fsl(_fs_registry);
231 for (L4Re::Vfs::File_system_list::Iterator c = fsl.begin();
232 c != fsl.end(); ++c)
233 if (strequal(c->type(), fstype))
234 return *c;
235 return 0;
236}
237
// Expose the registry head as an iterable list of registered file systems.
L4Re::Vfs::File_system_list
Vfs::file_system_list() noexcept
{
  return L4Re::Vfs::File_system_list(_fs_registry);
}
243
245Vfs::get_file_system(char const *fstype) noexcept
246{
248 if ((fs = find_fs_from_type(fstype)))
249 return fs;
250
251 // Try to load a file system module dynamically
252 int res = Vfs_config::load_module(fstype);
253 if (res < 0)
254 return 0;
255
256 // Try again
257 return find_fs_from_type(fstype);
258}
259
260int
261Vfs::register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
262{
263 if (!f)
264 return -EINVAL;
265
266 void *x = this->malloc(sizeof(File_factory_item));
267 if (!x)
268 return -ENOMEM;
269
270 auto ff = new (x, cxx::Nothrow()) File_factory_item(f);
271 _file_factories.push_front(ff);
272 return 0;
273}
274
275int
276Vfs::unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
277{
278 for (auto p: _file_factories)
279 {
280 if (p->f == f)
281 {
282 _file_factories.remove(p);
283 p->~File_factory_item();
284 this->free(p);
285 return 0;
286 }
287 }
288 return -ENOENT;
289}
290
291Ref_ptr<L4Re::Vfs::File_factory>
292Vfs::get_file_factory(int proto) noexcept
293{
294 for (auto p: _file_factories)
295 if (p->f->proto() == proto)
296 return p->f;
297
298 return Ref_ptr<L4Re::Vfs::File_factory>();
299}
300
301Ref_ptr<L4Re::Vfs::File_factory>
302Vfs::get_file_factory(char const *proto_name) noexcept
303{
304 for (auto p: _file_factories)
305 {
306 auto n = p->f->proto_name();
307 if (n)
308 {
309 char const *a = n;
310 char const *b = proto_name;
311 for (; *a && *b && *a == *b; ++a, ++b)
312 ;
313
314 if ((*a == 0) && (*b == 0))
315 return p->f;
316 }
317 }
318
319 return Ref_ptr<L4Re::Vfs::File_factory>();
320}
321
322int
323Vfs::alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) noexcept
324{
325 int fd = fds.alloc();
326 if (fd < 0)
327 return -EMFILE;
328
329 if (f)
330 fds.set(fd, f);
331
332 return fd;
333}
334
335Ref_ptr<L4Re::Vfs::File>
336Vfs::free_fd(int fd) noexcept
337{
338 Ref_ptr<L4Re::Vfs::File> f = fds.get(fd);
339
340 if (!f)
341 return Ref_ptr<>::Nil;
342
343 fds.free(fd);
344 return f;
345}
346
347
// Return the root directory of the VFS (the embedded Env_dir).
Ref_ptr<L4Re::Vfs::File>
Vfs::get_root() noexcept
{
  return cxx::ref_ptr(&_root);
}
353
// Return the current working directory (initially the root).
Ref_ptr<L4Re::Vfs::File>
Vfs::get_cwd() noexcept
{
  return _cwd;
}
359
// Set the current working directory; a null Ref_ptr is silently ignored.
void
Vfs::set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) noexcept
{
  // FIXME: check for is dir
  if (dir)
    _cwd = dir;
}
367
// Return the file bound to descriptor fd (null Ref_ptr if unused).
Ref_ptr<L4Re::Vfs::File>
Vfs::get_file(int fd) noexcept
{
  return fds.get(fd);
}
373
375Vfs::set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f) noexcept
376{
377 if (!fds.check_fd(fd))
378 return cxx::pair(Ref_ptr<L4Re::Vfs::File>(Ref_ptr<>::Nil), EBADF);
379
380 Ref_ptr<L4Re::Vfs::File> old = fds.get(fd);
381 fds.set(fd, f);
382 return cxx::pair(old, 0);
383}
384
385
// Fetch the open file for 'fd' into a local 'fi'; return -err from the
// enclosing function if the descriptor is unused. Braced variant kept
// separate so a debug statement can be dropped into the failure path.
#define GET_FILE_DBG(fd, err) \
  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
  if (!fi) \
    { \
      return -err; \
    }

// Same as GET_FILE_DBG, without the braces around the early return.
#define GET_FILE(fd, err) \
  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
  if (!fi) \
    return -err;
397
398void
399Vfs::align_mmap_start_and_length(void **start, size_t *length)
400{
401 l4_addr_t const s = reinterpret_cast<l4_addr_t>(*start);
402 size_t const o = s & (L4_PAGESIZE - 1);
403
404 *start = reinterpret_cast<void *>(l4_trunc_page(s));
405 *length = l4_round_page(*length + o);
406}
407
408int
409Vfs::munmap_regions(void *start, size_t len)
410{
411 using namespace L4;
412 using namespace L4Re;
413
414 int err;
416 Cap<Rm> r = Env::env()->rm();
417
418 if (l4_addr_t(start) & (L4_PAGESIZE - 1))
419 return -EINVAL;
420
421 align_mmap_start_and_length(&start, &len);
422
423 while (1)
424 {
425 DEBUG_LOG(debug_mmap, {
426 outstring("DETACH: start = 0x");
427 outhex32(l4_addr_t(start));
428 outstring(" len = 0x");
429 outhex32(len);
430 outstring("\n");
431 });
432 err = r->detach(l4_addr_t(start), len, &ds, This_task);
433 if (err < 0)
434 return err;
435
436 switch (err & Rm::Detach_result_mask)
437 {
438 case Rm::Split_ds:
439 if (ds.is_valid())
440 L4Re::virt_cap_alloc->take(ds);
441 return 0;
442 case Rm::Detached_ds:
443 if (ds.is_valid())
444 L4Re::virt_cap_alloc->release(ds);
445 break;
446 default:
447 break;
448 }
449
450 if (!(err & Rm::Detach_again))
451 return 0;
452 }
453}
454
455int
456Vfs::munmap(void *start, size_t len) L4_NOTHROW
457{
458 using namespace L4;
459 using namespace L4Re;
460
461 int err = 0;
462 Cap<Rm> r = Env::env()->rm();
463
464 // Fields for obtaining a list of areas for the calling process
465 long area_cnt = -1; // No. of areas in this process
466 Rm::Area const *area_array;
467 bool matches_area = false; // true if unmap parameters match an area
468
469 // First check if there are any areas matching the munmap request. Those
470 // might have been created by an mmap call using PROT_NONE as protection
471 // modifier.
472
473 area_cnt = r->get_areas((l4_addr_t) start, &area_array);
474
475 // It is enough to check for the very first entry, since get_areas will
476 // only return areas with a starting address equal or greater to <start>.
477 // However, we intend to unmap at most the area starting exactly at
478 // <start>.
479 if (area_cnt > 0)
480 {
481 size_t area_size = area_array[0].end - area_array[0].start + 1;
482
483 // Only free the area if the munmap parameters describe it exactly.
484 if (area_array[0].start == (l4_addr_t) start && area_size == len)
485 {
486 r->free_area((l4_addr_t) start);
487 matches_area = true;
488 }
489 }
490
491 // After clearing possible area reservations from PROT_NONE mappings, clear
492 // any regions in the address range specified. Note that errors shall be
493 // suppressed if an area was freed but no regions were found.
494 err = munmap_regions(start, len);
495 if (err == -ENOENT && matches_area)
496 return 0;
497
498 return err;
499}
500
501int
502Vfs::alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds)
503{
504 *ds = L4Re::make_shared_cap<L4Re::Dataspace>(L4Re::virt_cap_alloc);
505
506 if (!ds->is_valid())
507 return -ENOMEM;
508
509 int err;
510 if ((err = Vfs_config::allocator()->alloc(size, ds->get())) < 0)
511 return err;
512
513 DEBUG_LOG(debug_mmap, {
514 outstring("ANON DS ALLOCATED: size=");
515 outhex32(size);
516 outstring(" cap = 0x");
517 outhex32(ds->cap());
518 outstring("\n");
519 });
520
521 return 0;
522}
523
/**
 * Provide backing store for an anonymous mapping of \a size bytes.
 *
 * Large requests (>= ANON_MEM_MAX_SIZE) get a dedicated dataspace at
 * offset 0. Smaller requests are carved sequentially out of a pooled
 * dataspace (_anon_ds/_anon_offset); a new pool is allocated when the
 * current one is exhausted. With _early_oom set, backing memory is
 * committed eagerly via allocate() so OOM surfaces at mmap time rather
 * than on first touch.
 *
 * \param[in]  size    Requested size in bytes (page-aligned by callers).
 * \param[out] ds      Dataspace providing the memory.
 * \param[out] offset  Offset of the chunk within *ds.
 * \return 0 on success, negative errno-style value otherwise.
 */
int
Vfs::alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
                    l4_addr_t *offset)
{
#if !defined(CONFIG_MMU)
  // Small values for !MMU systems. These platforms do not have much memory
  // typically and the memory must be instantly allocated.
  enum
  {
    ANON_MEM_DS_POOL_SIZE = 256UL << 10, // size of a pool dataspace used for anon memory
    ANON_MEM_MAX_SIZE = 32UL << 10,      // chunk size from which a dedicated dataspace is used
  };
#elif defined(USE_BIG_ANON_DS)
  enum
  {
    ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
    ANON_MEM_MAX_SIZE = 32UL << 20,      // chunk size from which a dedicated dataspace is used
  };
#else
  enum
  {
    ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
    ANON_MEM_MAX_SIZE = 0UL << 20,       // 0: every request gets its own dataspace
  };
#endif

  // large request: dedicated dataspace, no pooling
  if (size >= ANON_MEM_MAX_SIZE)
    {
      int err;
      if ((err = alloc_ds(size, ds)) < 0)
        return err;

      *offset = 0;

      if (!_early_oom)
        return err;

      // commit the backing memory now to fail early on OOM
      return (*ds)->allocate(0, size);
    }

  // small request: take the next chunk from the pool, allocating a fresh
  // pool dataspace if there is none or the current one is exhausted
  if (!_anon_ds.is_valid() || _anon_offset + size >= ANON_MEM_DS_POOL_SIZE)
    {
      int err;
      if ((err = alloc_ds(ANON_MEM_DS_POOL_SIZE, ds)) < 0)
        return err;

      _anon_offset = 0;
      _anon_ds = *ds;
    }
  else
    *ds = _anon_ds;

  if (_early_oom)
    {
      // commit only this chunk of the pool
      if (int err = (*ds)->allocate(_anon_offset, size))
        return err;
    }

  *offset = _anon_offset;
  _anon_offset += size;
  return 0;
}
586
587int
588Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t page4k_offset,
589 void **resptr) L4_NOTHROW
590{
591 DEBUG_LOG(debug_mmap, {
592 outstring("MMAP params: ");
593 outstring("start = 0x");
594 outhex32(l4_addr_t(start));
595 outstring(", len = 0x");
596 outhex32(len);
597 outstring(", prot = 0x");
598 outhex32(prot);
599 outstring(", flags = 0x");
600 outhex32(flags);
601 outstring(", offset = 0x");
602 outhex32(page4k_offset);
603 outstring("\n");
604 });
605
606 using namespace L4Re;
607 off64_t offset = l4_trunc_page(page4k_offset << 12);
608
609 if (flags & MAP_FIXED)
610 if (l4_addr_t(start) & (L4_PAGESIZE - 1))
611 return -EINVAL;
612
613 align_mmap_start_and_length(&start, &len);
614
615 // special code to just reserve an area of the virtual address space
616 // Same behavior should be exposed when mapping with PROT_NONE. Mind that
617 // PROT_NONE can only be specified exclusively, since it is defined to 0x0.
618 if ((flags & 0x1000000) || (prot == PROT_NONE))
619 {
620 int err;
621 L4::Cap<Rm> r = Env::env()->rm();
622 l4_addr_t area = reinterpret_cast<l4_addr_t>(start);
623 err = r->reserve_area(&area, len, L4Re::Rm::F::Search_addr);
624 if (err < 0)
625 return err;
626
627 *resptr = reinterpret_cast<void*>(area);
628
629 DEBUG_LOG(debug_mmap, {
630 outstring(" MMAP reserved area: 0x");
631 outhex32(area);
632 outstring(" length= 0x");
633 outhex32(len);
634 outstring("\n");
635 });
636
637 return 0;
638 }
639
641 l4_addr_t anon_offset = 0;
642 L4Re::Rm::Flags rm_flags(0);
643
644 if (flags & (MAP_ANONYMOUS | MAP_PRIVATE))
645 {
646 rm_flags |= L4Re::Rm::F::Detach_free;
647
648 int err = alloc_anon_mem(len, &ds, &anon_offset);
649 if (err)
650 return err;
651
652 DEBUG_LOG(debug_mmap, {
653 outstring(" USE ANON MEM: 0x");
654 outhex32(ds.cap());
655 outstring(" offs = 0x");
656 outhex32(anon_offset);
657 outstring("\n");
658 });
659 }
660
661 if (!(flags & MAP_ANONYMOUS))
662 {
663 Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);
664 if (!fi)
665 return -EBADF;
666
667 L4::Cap<L4Re::Dataspace> fds = fi->data_space();
668
669 if (!fds.is_valid())
670 return -EINVAL;
671
672 if (len + offset > l4_round_page(fds->size()))
673 return -EINVAL;
674
675 if (flags & MAP_PRIVATE)
676 {
677 DEBUG_LOG(debug_mmap, outstring("COW\n"););
678 int err = ds->copy_in(anon_offset, fds, offset, len);
679 if (err == -L4_EINVAL)
680 {
681 L4::Cap<Rm> r = Env::env()->rm();
684 err = r->attach(&src, len,
686 fds, offset);
687 if (err < 0)
688 return err;
689
690 err = r->attach(&dst, len,
692 ds.get(), anon_offset);
693 if (err < 0)
694 return err;
695
696 memcpy(dst.get(), src.get(), len);
697 }
698 else if (err)
699 return err;
700
701 offset = anon_offset;
702 }
703 else
704 {
705 L4Re::virt_cap_alloc->take(fds);
706 ds = L4Re::Shared_cap<L4Re::Dataspace>(fds, L4Re::virt_cap_alloc);
707 }
708 }
709 else
710 offset = anon_offset;
711
712
713 if (!(flags & MAP_FIXED) && start == 0)
714 start = reinterpret_cast<void*>(L4_PAGESIZE);
715
716 char *data = static_cast<char *>(start);
717 L4::Cap<Rm> r = Env::env()->rm();
718 l4_addr_t overmap_area = L4_INVALID_ADDR;
719
720 int err;
721 if (flags & MAP_FIXED)
722 {
723 overmap_area = l4_addr_t(start);
724
725 err = r->reserve_area(&overmap_area, len);
726 if (err < 0)
727 overmap_area = L4_INVALID_ADDR;
728
729 rm_flags |= Rm::F::In_area;
730
731 // Make sure to remove old mappings residing at the respective address
732 // range. If none exists, we are fine as well, allowing us to ignore
733 // ENOENT here.
734 err = munmap_regions(start, len);
735 if (err && err != -ENOENT)
736 return err;
737 }
738
739 if (!(flags & MAP_FIXED))
740 rm_flags |= Rm::F::Search_addr;
741 if (prot & PROT_READ)
742 rm_flags |= Rm::F::R;
743 if (prot & PROT_WRITE)
744 rm_flags |= Rm::F::W;
745 if (prot & PROT_EXEC)
746 rm_flags |= Rm::F::X;
747
748 err = r->attach(&data, len, rm_flags,
749 L4::Ipc::make_cap(ds.get(), (prot & PROT_WRITE)
752 offset);
753
754 DEBUG_LOG(debug_mmap, {
755 outstring(" MAPPED: 0x");
756 outhex32(ds.cap());
757 outstring(" addr: 0x");
758 outhex32(l4_addr_t(data));
759 outstring(" bytes: 0x");
760 outhex32(len);
761 outstring(" offset: 0x");
762 outhex32(offset);
763 outstring(" err = ");
764 outdec(err);
765 outstring("\n");
766 });
767
768
769 if (overmap_area != L4_INVALID_ADDR)
770 r->free_area(overmap_area);
771
772 if (err < 0)
773 return err;
774
775 l4_assert (!(start && !data));
776
777 // release ownership of the attached DS
778 ds.release();
779 *resptr = data;
780
781 return 0;
782}
783
784namespace {
785 class Auto_area
786 {
787 public:
789 l4_addr_t a;
790
791 explicit Auto_area(L4::Cap<L4Re::Rm> r, l4_addr_t a = L4_INVALID_ADDR)
792 : r(r), a(a) {}
793
794 int reserve(l4_addr_t _a, l4_size_t sz, L4Re::Rm::Flags flags)
795 {
796 free();
797 a = _a;
798 int e = r->reserve_area(&a, sz, flags);
799 if (e)
800 a = L4_INVALID_ADDR;
801 return e;
802 }
803
804 void free()
805 {
806 if (is_valid())
807 {
808 r->free_area(a);
809 a = L4_INVALID_ADDR;
810 }
811 }
812
813 bool is_valid() const { return a != L4_INVALID_ADDR; }
814
815 ~Auto_area() { free(); }
816 };
817}
818
819int
820Vfs::mremap(void *old_addr, size_t old_size, size_t new_size, int flags,
821 void **new_addr) L4_NOTHROW
822{
823 using namespace L4Re;
824
825 DEBUG_LOG(debug_mmap, {
826 outstring("Mremap: addr = 0x");
827 outhex32((l4_umword_t)old_addr);
828 outstring(" old_size = 0x");
829 outhex32(old_size);
830 outstring(" new_size = 0x");
831 outhex32(new_size);
832 outstring("\n");
833 });
834
835 if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
836 return -EINVAL;
837
838 l4_addr_t oa = l4_trunc_page(reinterpret_cast<l4_addr_t>(old_addr));
839 if (oa != reinterpret_cast<l4_addr_t>(old_addr))
840 return -EINVAL;
841
842 bool const fixed = flags & MREMAP_FIXED;
843 bool const maymove = flags & MREMAP_MAYMOVE;
844
845 L4::Cap<Rm> r = Env::env()->rm();
846
847 // sanitize input parameters to multiples of pages
848 old_size = l4_round_page(old_size);
849 new_size = l4_round_page(new_size);
850
851 if (!fixed)
852 {
853 if (new_size < old_size)
854 {
855 *new_addr = old_addr;
856 return munmap(reinterpret_cast<void*>(oa + new_size),
857 old_size - new_size);
858 }
859
860 if (new_size == old_size)
861 {
862 *new_addr = old_addr;
863 return 0;
864 }
865 }
866
867 Auto_area old_area(r);
868 int err = old_area.reserve(oa, old_size, L4Re::Rm::Flags(0));
869 if (err < 0)
870 return -EINVAL;
871
872 l4_addr_t pad_addr;
873 Auto_area new_area(r);
874 if (fixed)
875 {
876 l4_addr_t na = l4_trunc_page(reinterpret_cast<l4_addr_t>(*new_addr));
877 if (na != reinterpret_cast<l4_addr_t>(*new_addr))
878 return -EINVAL;
879
880 // check if the current virtual memory area can be expanded
881 int err = new_area.reserve(na, new_size, L4Re::Rm::Flags(0));
882 if (err < 0)
883 return err;
884
885 pad_addr = na;
886 // unmap all stuff and remap ours ....
887 }
888 else
889 {
890 l4_addr_t ta = oa + old_size;
891 unsigned long ts = new_size - old_size;
892 // check if the current virtual memory area can be expanded
893 long err = new_area.reserve(ta, ts, L4Re::Rm::Flags(0));
894 if (!maymove && err)
895 return -ENOMEM;
896
897 L4Re::Rm::Offset toffs;
898 L4Re::Rm::Flags tflags;
900
901 err = r->find(&ta, &ts, &toffs, &tflags, &tds);
902
903 // there is enough space to expand the mapping in place
904 if (err == -ENOENT || (err == 0 && (tflags & Rm::F::In_area)))
905 {
906 old_area.free(); // pad at the original address
907 pad_addr = oa + old_size;
908 *new_addr = old_addr;
909 }
910 else if (!maymove)
911 return -ENOMEM;
912 else
913 {
914 // search for a new area to remap
915 err = new_area.reserve(0, new_size, Rm::F::Search_addr);
916 if (err < 0)
917 return -ENOMEM;
918
919 pad_addr = new_area.a + old_size;
920 *new_addr = reinterpret_cast<void *>(new_area.a);
921 }
922 }
923
924 if (old_area.is_valid())
925 {
926 unsigned long size = old_size;
927
928 l4_addr_t a = old_area.a;
929 unsigned long s = 1;
930 L4Re::Rm::Offset o;
931 L4Re::Rm::Flags f;
933
934 while (r->find(&a, &s, &o, &f, &ds) >= 0 && !(f & Rm::F::In_area))
935 {
936 if (a < old_area.a)
937 {
938 auto d = old_area.a - a;
939 a = old_area.a;
940 s -= d;
941 o += d;
942 }
943
944 if (a + s > old_area.a + old_size)
945 s = old_area.a + old_size - a;
946
947 l4_addr_t x = a - old_area.a + new_area.a;
948
949 int err = r->attach(&x, s, Rm::F::In_area | f,
950 L4::Ipc::make_cap(ds, f.cap_rights()), o);
951 if (err < 0)
952 return err;
953
954 // count the new attached ds reference
955 L4Re::virt_cap_alloc->take(ds);
956
957 err = r->detach(a, s, &ds, This_task,
958 Rm::Detach_exact | Rm::Detach_keep);
959 if (err < 0)
960 return err;
961
962 switch (err & Rm::Detach_result_mask)
963 {
964 case Rm::Split_ds:
965 // add a reference as we split up a mapping
966 if (ds.is_valid())
967 L4Re::virt_cap_alloc->take(ds);
968 break;
969 case Rm::Detached_ds:
970 if (ds.is_valid())
971 L4Re::virt_cap_alloc->release(ds);
972 break;
973 default:
974 break;
975 }
976
977 if (size <= s)
978 break;
979 a += s;
980 size -= s;
981 s = 1;
982 }
983
984 old_area.free();
985 }
986
987 if (old_size < new_size)
988 {
989 l4_addr_t const pad_sz = new_size - old_size;
990 l4_addr_t toffs;
992 int err = alloc_anon_mem(pad_sz, &tds, &toffs);
993 if (err)
994 return err;
995
996 // FIXME: must get the protection rights from the old
997 // mapping and use the same here, for now just use RWX
998 err = r->attach(&pad_addr, pad_sz,
999 Rm::F::In_area | Rm::F::Detach_free | Rm::F::RWX,
1000 L4::Ipc::make_cap_rw(tds.get()), toffs);
1001 if (err < 0)
1002 return err;
1003
1004 // release ownership of tds, the region map is now the new owner
1005 tds.release();
1006 }
1007
1008 return 0;
1009}
1010
// Stub: protections cannot be changed after the fact in this backend.
// Requests that would ADD write permission fail (-1); everything else is
// reported as success without doing anything.
int
Vfs::mprotect(const void * /* a */, size_t /* sz */, int prot) L4_NOTHROW
{
  return (prot & PROT_WRITE) ? -1 : 0;
}
1016
// Stub: nothing to synchronize; always reports success.
int
Vfs::msync(void *, size_t, int) L4_NOTHROW
{ return 0; }
1020
// Stub: advice is ignored; always reports success.
int
Vfs::madvise(void *, size_t, int) L4_NOTHROW
{ return 0; }
1024
1025}
1026
// Global pointer to the active VFS backend; the public symbol
// l4re_env_posix_vfs_ops is an alias so both the dynamic linker and
// applications resolve to the same object.
L4Re::Vfs::Ops *__rtld_l4re_env_posix_vfs_ops;
extern void *l4re_env_posix_vfs_ops __attribute__((alias("__rtld_l4re_env_posix_vfs_ops"), visibility("default")));
1029
namespace {
  // Heap-allocated mount-tree node for user-created mount points. Unlike
  // Root_mount_tree, these are allocated and freed through the VFS's own
  // malloc/free so they work before libc's allocator is usable.
  class Real_mount_tree : public L4Re::Vfs::Mount_tree
  {
  public:
    explicit Real_mount_tree(char *n) : Mount_tree(n) {}

    void *operator new (size_t size)
    { return __rtld_l4re_env_posix_vfs_ops->malloc(size); }

    void operator delete (void *mem)
    { __rtld_l4re_env_posix_vfs_ops->free(mem); }
  };
}
1043
1045int
1046Vfs::mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) noexcept
1047{
1048 using L4Re::Vfs::File;
1049 using L4Re::Vfs::Mount_tree;
1050 using L4Re::Vfs::Path;
1051
1052 cxx::Ref_ptr<Mount_tree> root = get_root()->mount_tree();
1053 if (!root)
1054 return -EINVAL;
1055
1057 Path p = root->lookup(Path(path), &base);
1058
1059 while (!p.empty())
1060 {
1061 Path f = p.strip_first();
1062
1063 if (f.empty())
1064 return -EEXIST;
1065
1066 char *name = __rtld_l4re_env_posix_vfs_ops->strndup(f.path(), f.length());
1067 if (!name)
1068 return -ENOMEM;
1069
1070 auto nt = cxx::make_ref_obj<Real_mount_tree>(name);
1071 if (!nt)
1072 {
1073 __rtld_l4re_env_posix_vfs_ops->free(name);
1074 return -ENOMEM;
1075 }
1076
1077 base->add_child_node(nt);
1078 base = nt;
1079
1080 if (p.empty())
1081 {
1082 nt->mount(dir);
1083 return 0;
1084 }
1085 }
1086
1087 return -EINVAL;
1088}
1089
1090#undef DEBUG_LOG
1091#undef GET_FILE_DBG
1092#undef GET_FILE
static Env const * env() noexcept
Returns the initial environment for the current task.
Definition env:95
L4::Cap< Log > log() const noexcept
Object-capability to the logging service.
Definition env:125
Unique region.
Definition rm:424
T get() const noexcept
Return the address.
Definition rm:497
Region map.
Definition rm:84
Basic interface for an L4Re::Vfs file system.
Definition vfs.h:847
The basic interface for an open POSIX file.
Definition vfs.h:460
Interface for the POSIX backends of an application.
Definition vfs.h:1099
bool is_valid() const noexcept
Test whether the capability is a valid capability index (i.e., not L4_INVALID_CAP).
Definition capability.h:57
C++ interface for capabilities.
Definition capability.h:219
Basic element type for a double-linked H_list.
Definition hlist:23
Helper type to distinguish the operator new version that does not throw exceptions.
Definition std_alloc:19
A reference-counting pointer with automatic cleanup.
Definition ref_ptr:71
Dataspace interface.
Environment interface.
unsigned int l4_size_t
Unsigned size type.
Definition l4int.h:24
unsigned long l4_umword_t
Unsigned machine word.
Definition l4int.h:40
unsigned long l4_addr_t
Address type.
Definition l4int.h:34
@ L4_EINVAL
Invalid argument.
Definition err.h:46
@ L4_CAP_FPAGE_RO
Read right for capability flexpages.
Definition __l4_fpage.h:176
@ L4_CAP_FPAGE_RW
Read and interface specific 'W' right for capability flexpages.
Definition __l4_fpage.h:192
l4_addr_t l4_trunc_page(l4_addr_t address) L4_NOTHROW
Round an address down to the next lower page boundary.
Definition consts.h:452
l4_addr_t l4_round_page(l4_addr_t address) L4_NOTHROW
Round address up to the next page.
Definition consts.h:477
#define L4_PAGESIZE
Minimal page size (in bytes).
Definition consts.h:395
@ L4_INVALID_ADDR
Invalid address.
Definition consts.h:509
#define L4_NOTHROW
Mark a function declaration and definition as never throwing an exception.
Definition compiler.h:159
Functionality for invoking the kernel debugger.
void outhex32(l4_uint32_t number)
Output a 32-bit unsigned hexadecimal number via the kernel debugger.
Definition kdebug.h:284
void outstring(char const *text)
Output a string via the kernel debugger.
Definition kdebug.h:237
void outdec(l4_mword_t number)
Output a decimal unsigned machine word via the kernel debugger.
Definition kdebug.h:334
L4Re C++ Interfaces.
Definition cmd_control:14
L4::Detail::Shared_cap_impl< T, Smart_count_cap< L4_FP_ALL_SPACES > > Shared_cap
Shared capability that implements automatic free and unmap of the capability selector.
Definition shared_cap:33
Cap< T > make_cap(L4::Cap< T > cap, unsigned rights) noexcept
Make an L4::Ipc::Cap<T> for the given capability and rights.
Definition ipc_types:785
Cap< T > make_cap_rw(L4::Cap< T > cap) noexcept
Make an L4::Ipc::Cap<T> for the given capability with L4_CAP_FPAGE_RW rights.
Definition ipc_types:795
L4 low-level kernel interface.
Our C++ library.
Definition arith:11
Pair implementation.
Region mapper interface.
Shared_cap / Shared_del_cap.
@ RW
Readable and writable region.
Definition rm:139
@ R
Readable region.
Definition rm:133
@ Detach_free
Free the portion of the data space after detach.
Definition rm:146
@ Search_addr
Search for a suitable address range.
Definition rm:114
A range of virtual addresses.
Definition rm:670
l4_addr_t start
First address of the range.
Definition rm:672
l4_addr_t end
Last address of the range.
Definition rm:674
Double-linked list of typed H_list_item_t elements.
Definition hlist:249
Pair of two values.
Definition pair:28
Low-level assert implementation.
#define l4_assert(expr)
Low-level assert.
Definition assert.h:32