L4Re Operating System Framework – Interface and Usage Documentation
vfs_impl.h
/*
 * (c) 2008-2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
 *               Alexander Warg <warg@os.inf.tu-dresden.de>,
 *               Björn Döbel <doebel@os.inf.tu-dresden.de>
 *     economic rights: Technische Universität Dresden (Germany)
 *
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU General Public License 2.
 * Please see the COPYING-GPL-2 file for details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License. This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 */

#include "fd_store.h"
#include "vcon_stream.h"
#include "ns_fs.h"

#include <l4/re/env>
#include <l4/re/rm>
#include <l4/re/dataspace>
#include <l4/cxx/hlist>
#include <l4/cxx/pair>
#include <l4/cxx/std_alloc>

#include <l4/l4re_vfs/backend>
#include <l4/re/shared_cap>

#include <unistd.h>
#include <cstdarg>
#include <errno.h>
#include <sys/uio.h>

#if 0
#include <l4/sys/kdebug.h>
static int debug_mmap = 1;
#define DEBUG_LOG(level, dbg...) do { if (level) dbg } while (0)
#else
#define DEBUG_LOG(level, dbg...) do { } while (0)
#endif

/*
 * If USE_BIG_ANON_DS is defined, small anonymous mappings are carved out of a
 * large pool dataspace (see alloc_anon_mem() below); otherwise every
 * anonymous mapping gets its own dataspace.
 */
#define USE_BIG_ANON_DS

using L4Re::Rm;

namespace {

using cxx::Ref_ptr;

class Fd_store : public L4Re::Core::Fd_store
{
public:
  Fd_store() noexcept;
};

// for internal Vcon_streams we want to have a placement new operator, so
// inherit and add one
class Std_stream : public L4Re::Core::Vcon_stream
{
public:
  Std_stream(L4::Cap<L4::Vcon> c) : L4Re::Core::Vcon_stream(c) {}
};

Fd_store::Fd_store() noexcept
{
  // use this strange way to prevent deletion of the stdio object;
  // this depends on Fd_store being a singleton!
  static char m[sizeof(Std_stream)] __attribute__((aligned(sizeof(long))));
  Std_stream *s = new (m) Std_stream(L4Re::Env::env()->log());
  // make sure that we never delete the static io stream object
  s->add_ref();
  set(0, cxx::ref_ptr(s)); // stdin
  set(1, cxx::ref_ptr(s)); // stdout
  set(2, cxx::ref_ptr(s)); // stderr
}
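
// Illustrative sketch of the pattern used above: placement new into static
// storage plus one extra add_ref() keeps the reference counter from ever
// reaching zero, so the object is never deleted. "My_file" is a hypothetical
// stand-in for Std_stream.
#if 0
static char buf[sizeof(My_file)] __attribute__((aligned(sizeof(long))));
My_file *f = new (buf) My_file();   // placement new into static storage
f->add_ref();                       // pin the object: the count never drops to zero
set(0, cxx::ref_ptr(f));            // counted references can be handed out freely
#endif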

class Root_mount_tree : public L4Re::Vfs::Mount_tree
{
public:
  Root_mount_tree() : L4Re::Vfs::Mount_tree(0) {}
  void operator delete (void *) {}
};

class Vfs : public L4Re::Vfs::Ops
{
private:
  bool _early_oom;

public:
  Vfs()
  : _early_oom(true), _root_mount(), _root(L4Re::Env::env())
  {
    _root_mount.add_ref();
    _root.add_ref();
    _root_mount.mount(cxx::ref_ptr(&_root));
    _cwd = cxx::ref_ptr(&_root);

#if 0
    Ref_ptr<L4Re::Vfs::File> rom;
    _root.openat("rom", 0, 0, &rom);

    _root_mount.create_tree("lib/foo", rom);

    _root.openat("lib", 0, 0, &_cwd);

#endif
  }

  int alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) noexcept override;
  Ref_ptr<L4Re::Vfs::File> free_fd(int fd) noexcept override;
  Ref_ptr<L4Re::Vfs::File> get_root() noexcept override;
  Ref_ptr<L4Re::Vfs::File> get_cwd() noexcept override;
  void set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) noexcept override;
  Ref_ptr<L4Re::Vfs::File> get_file(int fd) noexcept override;
  cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
  set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f = Ref_ptr<>::Nil) noexcept
    override;

  int mmap2(void *start, size_t len, int prot, int flags, int fd,
            off_t offset, void **ptr) noexcept override;

  int munmap(void *start, size_t len) noexcept override;
  int mremap(void *old, size_t old_sz, size_t new_sz, int flags,
             void **new_addr) noexcept override;
  int mprotect(const void *a, size_t sz, int prot) noexcept override;
  int msync(void *addr, size_t len, int flags) noexcept override;
  int madvise(void *addr, size_t len, int advice) noexcept override;

  int register_file_system(L4Re::Vfs::File_system *f) noexcept override;
  int unregister_file_system(L4Re::Vfs::File_system *f) noexcept override;
  L4Re::Vfs::File_system *get_file_system(char const *fstype) noexcept override;

  int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
  int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) noexcept override;
  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) noexcept override;
  int mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) noexcept override;

  void operator delete (void *) {}

  void *malloc(size_t size) noexcept override { return Vfs_config::malloc(size); }
  void free(void *m) noexcept override { Vfs_config::free(m); }

private:
  Root_mount_tree _root_mount;
  L4Re::Core::Env_dir _root;
  Ref_ptr<L4Re::Vfs::File> _cwd;
  Fd_store fds;

  L4Re::Vfs::File_system *_fs_registry;

  struct File_factory_item : cxx::H_list_item_t<File_factory_item>
  {
    cxx::Ref_ptr<L4Re::Vfs::File_factory> f;
    explicit File_factory_item(cxx::Ref_ptr<L4Re::Vfs::File_factory> const &f)
    : f(f) {}

    File_factory_item() = default;
    File_factory_item(File_factory_item const &) = delete;
    File_factory_item &operator = (File_factory_item const &) = delete;
  };

  cxx::H_list_t<File_factory_item> _file_factories;

  l4_addr_t _anon_offset;
  L4Re::Shared_cap<L4Re::Dataspace> _anon_ds;

  int alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds);
  int alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
                     l4_addr_t *offset);

  void align_mmap_start_and_length(void **start, size_t *length);
};

static inline bool strequal(char const *a, char const *b)
{
  for (; *a && *a == *b; ++a, ++b)
    ;
  return *a == *b;
}

int
Vfs::register_file_system(L4Re::Vfs::File_system *f) noexcept
{
  using L4Re::Vfs::File_system;

  if (!f)
    return -EINVAL;

  for (File_system *c = _fs_registry; c; c = c->next())
    if (strequal(c->type(), f->type()))
      return -EEXIST;

  f->next(_fs_registry);
  _fs_registry = f;

  return 0;
}

int
Vfs::unregister_file_system(L4Re::Vfs::File_system *f) noexcept
{
  using L4Re::Vfs::File_system;

  if (!f)
    return -EINVAL;

  File_system **p = &_fs_registry;

  for (; *p; p = &(*p)->next())
    if (*p == f)
      {
        *p = f->next();
        f->next() = 0;
        return 0;
      }

  return -ENOENT;
}

L4Re::Vfs::File_system *
Vfs::get_file_system(char const *fstype) noexcept
{
  bool try_dynamic = true;
  for (;;)
    {
      using L4Re::Vfs::File_system;
      for (File_system *c = _fs_registry; c; c = c->next())
        if (strequal(c->type(), fstype))
          return c;

      if (!try_dynamic)
        return 0;

      // try to load a file system module dynamically
      int res = Vfs_config::load_module(fstype);

      if (res < 0)
        return 0;

      try_dynamic = false;
    }
}
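
// Illustrative sketch of how a file-system backend would use the registry
// above. "My_fs" and the type string "myfs" are hypothetical; 'vfs_ops'
// stands for the application's L4Re::Vfs::Ops instance (the Vfs object in
// this file, published as __rtld_l4re_env_posix_vfs_ops further below).
// get_file_system() first searches this registry and only then falls back to
// Vfs_config::load_module("myfs").
#if 0
class My_fs : public L4Re::Vfs::File_system
{
public:
  char const *type() const noexcept { return "myfs"; }
  // ... the remaining File_system interface ...
};

static My_fs _my_fs;

static void __attribute__((constructor))
init_my_fs()
{ vfs_ops->register_file_system(&_my_fs); }
#endif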

int
Vfs::register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
{
  if (!f)
    return -EINVAL;

  void *x = this->malloc(sizeof(File_factory_item));
  if (!x)
    return -ENOMEM;

  auto ff = new (x, cxx::Nothrow()) File_factory_item(f);
  _file_factories.push_front(ff);
  return 0;
}

int
Vfs::unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
{
  for (auto p: _file_factories)
    {
      if (p->f == f)
        {
          _file_factories.remove(p);
          p->~File_factory_item();
          this->free(p);
          return 0;
        }
    }
  return -ENOENT;
}

Ref_ptr<L4Re::Vfs::File_factory>
Vfs::get_file_factory(int proto) noexcept
{
  for (auto p: _file_factories)
    if (p->f->proto() == proto)
      return p->f;

  return Ref_ptr<L4Re::Vfs::File_factory>();
}

Ref_ptr<L4Re::Vfs::File_factory>
Vfs::get_file_factory(char const *proto_name) noexcept
{
  for (auto p: _file_factories)
    {
      auto n = p->f->proto_name();
      if (n)
        {
          char const *a = n;
          char const *b = proto_name;
          for (; *a && *b && *a == *b; ++a, ++b)
            ;

          if ((*a == 0) && (*b == 0))
            return p->f;
        }
    }

  return Ref_ptr<L4Re::Vfs::File_factory>();
}
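
// Illustrative sketch of a factory lookup. The protocol name "myproto" is
// hypothetical; 'vfs_ops' again stands for the L4Re::Vfs::Ops instance.
// Factories can be resolved either by numeric protocol ID or by name, as
// implemented by the two overloads above.
#if 0
auto factory = vfs_ops->get_file_factory("myproto");
if (factory)
  {
    // the factory can now be used to create a Ref_ptr<L4Re::Vfs::File>
    // for a capability that speaks this protocol
  }
#endif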

int
Vfs::alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) noexcept
{
  int fd = fds.alloc();
  if (fd < 0)
    return -EMFILE;

  if (f)
    fds.set(fd, f);

  return fd;
}

Ref_ptr<L4Re::Vfs::File>
Vfs::free_fd(int fd) noexcept
{
  Ref_ptr<L4Re::Vfs::File> f = fds.get(fd);

  if (!f)
    return Ref_ptr<>::Nil;

  fds.free(fd);
  return f;
}


Ref_ptr<L4Re::Vfs::File>
Vfs::get_root() noexcept
{
  return cxx::ref_ptr(&_root);
}

Ref_ptr<L4Re::Vfs::File>
Vfs::get_cwd() noexcept
{
  return _cwd;
}

void
Vfs::set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) noexcept
{
  // FIXME: check that dir is actually a directory
  if (dir)
    _cwd = dir;
}

Ref_ptr<L4Re::Vfs::File>
Vfs::get_file(int fd) noexcept
{
  return fds.get(fd);
}

cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
Vfs::set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f) noexcept
{
  if (!fds.check_fd(fd))
    return cxx::pair(Ref_ptr<L4Re::Vfs::File>(Ref_ptr<>::Nil), EBADF);

  Ref_ptr<L4Re::Vfs::File> old = fds.get(fd);
  fds.set(fd, f);
  return cxx::pair(old, 0);
}
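
// Illustrative sketch of how POSIX bindings are expected to use the file
// descriptor table managed by the operations above ('vfs_ops' stands for the
// L4Re::Vfs::Ops instance, 'file' for a Ref_ptr<L4Re::Vfs::File> obtained
// from a backend; names are hypothetical).
#if 0
int fd = vfs_ops->alloc_fd(file);          // open(): lowest free slot or -EMFILE
auto old = vfs_ops->set_fd(newfd, file);   // dup2()-style: returns the displaced
                                           // file, old.second is EBADF for a bad fd
auto closed = vfs_ops->free_fd(fd);        // close(): releases the slot and hands
                                           // the file back to the caller
#endif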


#define GET_FILE_DBG(fd, err) \
  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
  if (!fi) \
    { \
      return -err; \
    }

#define GET_FILE(fd, err) \
  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
  if (!fi) \
    return -err;


void
Vfs::align_mmap_start_and_length(void **start, size_t *length)
{
  l4_addr_t s = l4_addr_t(*start);

  *length += s & (L4_PAGESIZE - 1); // Add rounding down delta to length
  *start = (void *)l4_trunc_page(s); // Make start page aligned
  *length = l4_round_page(*length); // Round length up to page size
}
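
// Worked example of the rounding above (assuming 4 KiB pages): for
// *start = 0x2345 and *length = 0x1000,
//   delta   = 0x2345 & (L4_PAGESIZE - 1) = 0x345   -> *length becomes 0x1345
//   *start  = l4_trunc_page(0x2345)                = 0x2000
//   *length = l4_round_page(0x1345)                = 0x2000
// i.e. the range grows to cover every page it touches.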

int
Vfs::munmap(void *start, size_t len) L4_NOTHROW
{
  using namespace L4;
  using namespace L4Re;

  int err;
  Cap<Dataspace> ds;
  Cap<Rm> r = Env::env()->rm();

  if (l4_addr_t(start) & (L4_PAGESIZE - 1))
    return -EINVAL;

  align_mmap_start_and_length(&start, &len);

  while (1)
    {
      DEBUG_LOG(debug_mmap, {
                outstring("DETACH: ");
                outhex32(l4_addr_t(start));
                outstring(" ");
                outhex32(len);
                outstring("\n");
      });
      err = r->detach(l4_addr_t(start), len, &ds, This_task);
      if (err < 0)
        return err;

      switch (err & Rm::Detach_result_mask)
        {
        case Rm::Split_ds:
          // the detach split an existing region; account for the additional
          // region that now references the dataspace
          if (ds.is_valid())
            L4Re::virt_cap_alloc->take(ds);
          return 0;
        case Rm::Detached_ds:
          // the region is fully gone; drop its dataspace reference
          if (ds.is_valid())
            L4Re::virt_cap_alloc->release(ds);
          break;
        default:
          break;
        }

      if (!(err & Rm::Detach_again))
        return 0;
    }
}

int
Vfs::alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds)
{
  *ds = L4Re::make_shared_cap<L4Re::Dataspace>(L4Re::virt_cap_alloc);

  if (!ds->is_valid())
    return -ENOMEM;

  int err;
  if ((err = Vfs_config::allocator()->alloc(size, ds->get())) < 0)
    return err;

  DEBUG_LOG(debug_mmap, {
            outstring("ANON DS ALLOCATED: size=");
            outhex32(size);
            outstring(" cap=");
            outhex32(ds->cap());
            outstring("\n");
  });

  return 0;
}

int
Vfs::alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
                    l4_addr_t *offset)
{
#ifdef USE_BIG_ANON_DS
  enum
  {
    ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
    ANON_MEM_MAX_SIZE = 32UL << 20,      // requests of at least this size get their own dataspace
  };
#else
  enum
  {
    ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
    ANON_MEM_MAX_SIZE = 0UL << 20,       // requests of at least this size get their own dataspace
  };
#endif

  if (size >= ANON_MEM_MAX_SIZE)
    {
      int err;
      if ((err = alloc_ds(size, ds)) < 0)
        return err;

      *offset = 0;

      if (!_early_oom)
        return err;

      return (*ds)->allocate(0, size);
    }

  if (!_anon_ds.is_valid() || _anon_offset + size >= ANON_MEM_DS_POOL_SIZE)
    {
      int err;
      if ((err = alloc_ds(ANON_MEM_DS_POOL_SIZE, ds)) < 0)
        return err;

      _anon_offset = 0;
      _anon_ds = *ds;
    }
  else
    *ds = _anon_ds;

  if (_early_oom)
    {
      if (int err = (*ds)->allocate(_anon_offset, size))
        return err;
    }

  *offset = _anon_offset;
  _anon_offset += size;
  return 0;
}
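
// Worked example of the policy above (with USE_BIG_ANON_DS defined): a 4 KiB
// anonymous request is served from the shared 256 MiB pool dataspace
// (_anon_ds) at the current _anon_offset, which then advances by 4 KiB. A
// 64 MiB request is >= ANON_MEM_MAX_SIZE (32 MiB) and therefore gets its own
// dedicated dataspace at offset 0. Once the pool cannot fit a request any
// more, a fresh pool dataspace replaces _anon_ds. With _early_oom set,
// Dataspace::allocate() commits the backing memory right away, so
// out-of-memory conditions surface at mmap() time rather than at page-fault
// time.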

int
Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t page4k_offset,
           void **resptr) L4_NOTHROW
{
  using namespace L4Re;
  off64_t offset = l4_trunc_page(page4k_offset << 12);

  if (flags & MAP_FIXED)
    if (l4_addr_t(start) & (L4_PAGESIZE - 1))
      return -EINVAL;

  align_mmap_start_and_length(&start, &len);

  // special code to just reserve an area of the virtual address space
  if (flags & 0x1000000)
    {
      int err;
      L4::Cap<Rm> r = Env::env()->rm();
      l4_addr_t area = (l4_addr_t)start;
      err = r->reserve_area(&area, len, L4Re::Rm::F::Search_addr);
      if (err < 0)
        return err;
      *resptr = (void*)area;
      DEBUG_LOG(debug_mmap, {
                outstring("MMAP reserved area: ");
                outhex32(area);
                outstring(" length=");
                outhex32(len);
                outstring("\n");
      });
      return 0;
    }

  L4Re::Shared_cap<L4Re::Dataspace> ds;
  l4_addr_t anon_offset = 0;
  L4Re::Rm::Flags rm_flags(0);

  if (flags & (MAP_ANONYMOUS | MAP_PRIVATE))
    {
      rm_flags |= L4Re::Rm::F::Detach_free;

      int err = alloc_anon_mem(len, &ds, &anon_offset);
      if (err)
        return err;

      DEBUG_LOG(debug_mmap, {
                outstring("USE ANON MEM: ");
                outhex32(ds.cap());
                outstring(" offs=");
                outhex32(anon_offset);
                outstring("\n");
      });
    }

  if (!(flags & MAP_ANONYMOUS))
    {
      Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);
      if (!fi)
        {
          return -EBADF;
        }

      L4::Cap<L4Re::Dataspace> fds = fi->data_space();

      if (!fds.is_valid())
        {
          return -EINVAL;
        }

      if (len + offset > l4_round_page(fds->size()))
        {
          return -EINVAL;
        }

      if (flags & MAP_PRIVATE)
        {
          DEBUG_LOG(debug_mmap, outstring("COW\n"););
          int err = ds->copy_in(anon_offset, fds, offset, len);
          if (err < 0)
            return err;

          offset = anon_offset;
        }
      else
        {
          L4Re::virt_cap_alloc->take(fds);
          ds = L4Re::Shared_cap<L4Re::Dataspace>(fds, L4Re::virt_cap_alloc);
        }
    }
  else
    offset = anon_offset;


  if (!(flags & MAP_FIXED) && start == 0)
    start = (void*)L4_PAGESIZE;

  char *data = (char *)start;
  L4::Cap<Rm> r = Env::env()->rm();
  l4_addr_t overmap_area = L4_INVALID_ADDR;

  int err;
  if (flags & MAP_FIXED)
    {
      overmap_area = l4_addr_t(start);

      err = r->reserve_area(&overmap_area, len);
      if (err < 0)
        overmap_area = L4_INVALID_ADDR;

      rm_flags |= Rm::F::In_area;

      err = munmap(start, len);
      if (err && err != -ENOENT)
        return err;
    }

  if (!(flags & MAP_FIXED)) rm_flags |= Rm::F::Search_addr;
  if (prot & PROT_READ)     rm_flags |= Rm::F::R;
  if (prot & PROT_WRITE)    rm_flags |= Rm::F::W;
  if (prot & PROT_EXEC)     rm_flags |= Rm::F::X;

  err = r->attach(&data, len, rm_flags,
                  L4::Ipc::make_cap(ds.get(), (prot & PROT_WRITE)
                                              ? L4_CAP_FPAGE_RW
                                              : L4_CAP_FPAGE_RO),
                  offset);

  DEBUG_LOG(debug_mmap, {
            outstring(" MAPPED: ");
            outhex32(ds.cap());
            outstring(" addr: ");
            outhex32(l4_addr_t(data));
            outstring(" bytes: ");
            outhex32(len);
            outstring(" offset: ");
            outhex32(offset);
            outstring(" err=");
            outdec(err);
            outstring("\n");
  });


  if (overmap_area != L4_INVALID_ADDR)
    r->free_area(overmap_area);

  if (err < 0)
    return err;

  l4_assert (!(start && !data));

  // release ownership of the attached DS
  ds.release();
  *resptr = data;

  return 0;
}
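
// Illustrative sketch of an anonymous mapping request as it would arrive from
// a C library mmap() wrapper ('vfs_ops' stands for the L4Re::Vfs::Ops
// instance). Note that the offset parameter is given in 4 KiB units
// (page4k_offset), hence the "<< 12" conversion at the top of mmap2().
#if 0
void *res;
int err = vfs_ops->mmap2(0, L4_PAGESIZE,
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS,
                         -1,   // fd is ignored for anonymous mappings
                         0,    // offset in 4 KiB units
                         &res);
// On success 'res' points to a page of anonymous memory that was attached
// with Rm::F::Detach_free, so a later munmap() also releases the backing
// dataspace reference.
#endif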

namespace {
  class Auto_area
  {
  public:
    L4::Cap<L4Re::Rm> r;
    l4_addr_t a;

    explicit Auto_area(L4::Cap<L4Re::Rm> r, l4_addr_t a = L4_INVALID_ADDR)
    : r(r), a(a) {}

    int reserve(l4_addr_t _a, l4_size_t sz, L4Re::Rm::Flags flags)
    {
      free();
      a = _a;
      int e = r->reserve_area(&a, sz, flags);
      if (e)
        a = L4_INVALID_ADDR;
      return e;
    }

    void free()
    {
      if (is_valid())
        {
          r->free_area(a);
          a = L4_INVALID_ADDR;
        }
    }

    bool is_valid() const { return a != L4_INVALID_ADDR; }

    ~Auto_area() { free(); }
  };
}
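
// Auto_area is a small RAII guard around Rm::reserve_area(): the reservation
// is released automatically when the guard goes out of scope, unless free()
// already ran. Minimal usage sketch (error handling abbreviated):
#if 0
Auto_area guard(L4Re::Env::env()->rm());
if (guard.reserve(0, L4_PAGESIZE, L4Re::Rm::F::Search_addr) < 0)
  return -ENOMEM;
// ... attach mappings inside [guard.a, guard.a + L4_PAGESIZE) ...
// leaving the scope (or calling guard.free()) drops the reservation
#endif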

int
Vfs::mremap(void *old_addr, size_t old_size, size_t new_size, int flags,
            void **new_addr) L4_NOTHROW
{
  using namespace L4Re;

  DEBUG_LOG(debug_mmap, {
            outstring("Mremap: addr=");
            outhex32((l4_umword_t)old_addr);
            outstring(" old_size=");
            outhex32(old_size);
            outstring(" new_size=");
            outhex32(new_size);
            outstring("\n");
  });

  if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
    return -EINVAL;

  l4_addr_t oa = l4_trunc_page((l4_addr_t)old_addr);
  if (oa != (l4_addr_t)old_addr)
    return -EINVAL;

  bool const fixed = flags & MREMAP_FIXED;
  bool const maymove = flags & MREMAP_MAYMOVE;

  L4::Cap<Rm> r = Env::env()->rm();

  // sanitize input parameters to multiples of pages
  old_size = l4_round_page(old_size);
  new_size = l4_round_page(new_size);

  if (!fixed)
    {
      if (new_size < old_size)
        {
          *new_addr = old_addr;
          return munmap((void*)(oa + new_size), old_size - new_size);
        }

      if (new_size == old_size)
        {
          *new_addr = old_addr;
          return 0;
        }
    }

  Auto_area old_area(r);
  int err = old_area.reserve(oa, old_size, L4Re::Rm::Flags(0));
  if (err < 0)
    return -EINVAL;

  l4_addr_t pad_addr;
  Auto_area new_area(r);
  if (fixed)
    {
      l4_addr_t na = l4_trunc_page((l4_addr_t)*new_addr);
      if (na != (l4_addr_t)*new_addr)
        return -EINVAL;

      // check if the current virtual memory area can be expanded
      int err = new_area.reserve(na, new_size, L4Re::Rm::Flags(0));
      if (err < 0)
        return err;

      pad_addr = na;
      // unmap all stuff and remap ours ....
    }
  else
    {
      l4_addr_t ta = oa + old_size;
      unsigned long ts = new_size - old_size;
      // check if the current virtual memory area can be expanded
      long err = new_area.reserve(ta, ts, L4Re::Rm::Flags(0));
      if (!maymove && err)
        return -ENOMEM;

      L4Re::Rm::Offset toffs;
      L4Re::Rm::Flags tflags;
      L4::Cap<L4Re::Dataspace> tds;

      err = r->find(&ta, &ts, &toffs, &tflags, &tds);

      // there is enough space to expand the mapping in place
      if (err == -ENOENT || (err == 0 && (tflags & Rm::F::In_area)))
        {
          old_area.free(); // pad at the original address
          pad_addr = oa + old_size;
          *new_addr = old_addr;
        }
      else if (!maymove)
        return -ENOMEM;
      else
        {
          // search for a new area to remap
          err = new_area.reserve(0, new_size, Rm::F::Search_addr);
          if (err < 0)
            return -ENOMEM;

          pad_addr = new_area.a + old_size;
          *new_addr = (void *)new_area.a;
        }
    }

  if (old_area.is_valid())
    {
      l4_addr_t a = old_area.a;
      unsigned long s = old_size;
      L4Re::Rm::Offset o;
      L4Re::Rm::Flags f;
      L4::Cap<L4Re::Dataspace> ds;

      for (; r->find(&a, &s, &o, &f, &ds) >= 0 && (!(f & Rm::F::In_area));)
        {
          if (a < old_area.a)
            {
              auto d = old_area.a - a;
              a = old_area.a;
              s -= d;
              o += d;
            }

          if (a + s > old_area.a + old_size)
            s = old_area.a + old_size - a;

          l4_addr_t x = a - old_area.a + new_area.a;

          int err = r->attach(&x, s, Rm::F::In_area | f,
                              L4::Ipc::make_cap(ds, f.cap_rights()),
                              o);
          if (err < 0)
            return err;

          // count the newly attached ds reference
          L4Re::virt_cap_alloc->take(ds);

          err = r->detach(a, s, &ds, This_task,
                          Rm::Detach_exact | Rm::Detach_keep);
          if (err < 0)
            return err;

          switch (err & Rm::Detach_result_mask)
            {
            case Rm::Split_ds:
              // add a reference as we split up a mapping
              if (ds.is_valid())
                L4Re::virt_cap_alloc->take(ds);
              break;
            case Rm::Detached_ds:
              if (ds.is_valid())
                L4Re::virt_cap_alloc->release(ds);
              break;
            default:
              break;
            }
        }
      old_area.free();
    }

  if (old_size < new_size)
    {
      l4_addr_t const pad_sz = new_size - old_size;
      l4_addr_t toffs;
      L4Re::Shared_cap<L4Re::Dataspace> tds;
      int err = alloc_anon_mem(pad_sz, &tds, &toffs);
      if (err)
        return err;

      // FIXME: must get the protection rights from the old
      //        mapping and use the same here, for now just use RWX
      err = r->attach(&pad_addr, pad_sz,
                      Rm::F::In_area | Rm::F::Detach_free | Rm::F::RWX,
                      L4::Ipc::make_cap_rw(tds.get()), toffs);
      if (err < 0)
        return err;

      // release ownership of tds, the region map is now the new owner
      tds.release();
    }

  return 0;
}
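
// Summary of the cases handled above: shrinking or keeping the size (without
// MREMAP_FIXED) returns the old address, shrinking simply unmaps the tail;
// growing first tries to reserve the range directly behind the old mapping
// and pads it with fresh anonymous memory (grow in place); otherwise, with
// MREMAP_MAYMOVE, a new area is reserved elsewhere, every region found in the
// old range is re-attached at the corresponding offset in the new area, and
// the old range is detached with Detach_keep so the dataspace references
// survive the move.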

int
Vfs::mprotect(const void *a, size_t sz, int prot) L4_NOTHROW
{
  (void)a;
  (void)sz;
  return (prot & PROT_WRITE) ? -1 : 0;
}

int
Vfs::msync(void *, size_t, int) L4_NOTHROW
{ return 0; }

int
Vfs::madvise(void *, size_t, int) L4_NOTHROW
{ return 0; }

}

L4Re::Vfs::Ops *__rtld_l4re_env_posix_vfs_ops;
extern void *l4re_env_posix_vfs_ops __attribute__((alias("__rtld_l4re_env_posix_vfs_ops"), visibility("default")));

namespace {
  class Real_mount_tree : public L4Re::Vfs::Mount_tree
  {
  public:
    explicit Real_mount_tree(char *n) : Mount_tree(n) {}

    void *operator new (size_t size)
    { return __rtld_l4re_env_posix_vfs_ops->malloc(size); }

    void operator delete (void *mem)
    { __rtld_l4re_env_posix_vfs_ops->free(mem); }
  };
}

int
Vfs::mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) noexcept
{
  using L4Re::Vfs::File;
  using L4Re::Vfs::Mount_tree;
  using L4Re::Vfs::Path;

  cxx::Ref_ptr<Mount_tree> root = get_root()->mount_tree();
  if (!root)
    return -EINVAL;

  cxx::Ref_ptr<Mount_tree> base;
  Path p = root->lookup(Path(path), &base);

  while (!p.empty())
    {
      Path f = p.strip_first();

      if (f.empty())
        return -EEXIST;

      char *name = __rtld_l4re_env_posix_vfs_ops->strndup(f.path(), f.length());
      if (!name)
        return -ENOMEM;

      cxx::Ref_ptr<Mount_tree> nt(new Real_mount_tree(name));
      if (!nt)
        {
          __rtld_l4re_env_posix_vfs_ops->free(name);
          return -ENOMEM;
        }

      base->add_child_node(nt);
      base = nt;

      if (p.empty())
        {
          nt->mount(dir);
          return 0;
        }
    }

  return -EINVAL;
}
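
// Illustrative sketch of mounting a directory object ('vfs_ops' stands for
// the L4Re::Vfs::Ops instance; the path is made up). Intermediate
// Real_mount_tree nodes ("mnt", "data") are created on demand and the
// directory is attached to the final node.
#if 0
cxx::Ref_ptr<L4Re::Vfs::File> dir;
// ... obtain 'dir' from a file-system backend ...
int err = vfs_ops->mount("/mnt/data", dir);
if (err < 0)
  {
    // -ENOMEM, -EEXIST or -EINVAL depending on the failure
  }
#endif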


#undef DEBUG_LOG
#undef GET_FILE_DBG
#undef GET_FILE

Referenced API documentation (symbols used above):

L4Re::Env::env() - static Env const *env() noexcept; returns the initial environment for the current task. (definition: env:103)
L4Re::Env::log() - L4::Cap<Log> log() const noexcept; object-capability to the logging service. (definition: env:133)
L4Re::Rm - region map. (definition: rm:88)
L4Re::Vfs::File_system - basic interface for an L4Re::Vfs file system. (definition: vfs.h:829)
L4Re::Vfs::File - the basic interface for an open POSIX file. (definition: vfs.h:441)
L4Re::Vfs::Ops - interface for the POSIX backends of an application. (definition: vfs.h:1008)
L4::Cap::is_valid() - bool is_valid() const noexcept; tests whether the capability is a valid capability index (i.e., not L4_INVALID_CAP). (definition: capability.h:60)
L4::Cap - C++ interface for capabilities. (definition: capability.h:222)
cxx::H_list_item_t - basic element type for a double-linked H_list. (definition: hlist:34)
cxx::Nothrow - helper type to distinguish the operator new version that does not throw exceptions. (definition: std_alloc:30)
cxx::Ref_ptr - a reference-counting pointer with automatic cleanup. (definition: ref_ptr:82)
<l4/re/dataspace> - dataspace interface.
<l4/re/env> - environment interface.
l4_size_t - unsigned int; unsigned size type. (definition: l4int.h:35)
l4_umword_t - unsigned long; unsigned machine word. (definition: l4int.h:51)
l4_addr_t - unsigned long; address type. (definition: l4int.h:45)
L4_CAP_FPAGE_RO - read right for capability flex-pages. (definition: __l4_fpage.h:179)
L4_CAP_FPAGE_RW - read and interface-specific 'W' right for capability flex-pages. (definition: __l4_fpage.h:195)
l4_trunc_page(l4_addr_t address) - rounds an address down to the next lower page boundary. (definition: consts.h:437)
l4_round_page(l4_addr_t address) - rounds an address up to the next page. (definition: consts.h:462)
L4_PAGESIZE - minimal page size (in bytes). (definition: consts.h:380)
L4_INVALID_ADDR - invalid address. (definition: consts.h:494)
L4_NOTHROW - marks a function declaration and definition as never throwing an exception. (definition: compiler.h:188)
<l4/sys/kdebug.h> - functionality for invoking the kernel debugger.
outhex32(l4_uint32_t number) - outputs a 32-bit unsigned hexadecimal number via the kernel debugger. (definition: kdebug.h:282)
outstring(char const *text) - outputs a string via the kernel debugger. (definition: kdebug.h:235)
outdec(l4_mword_t number) - outputs a decimal unsigned machine word via the kernel debugger. (definition: kdebug.h:332)
L4Re - L4Re C++ interfaces. (definition: cmd_control:15)
L4Re::Shared_cap - alias for L4::Detail::Shared_cap_impl<T, Smart_count_cap<L4_FP_ALL_SPACES>>; shared capability that implements automatic free and unmap of the capability selector. (definition: shared_cap:44)
L4::Ipc::make_cap(L4::Cap<T> cap, unsigned rights) noexcept - makes an L4::Ipc::Cap<T> for the given capability and rights. (definition: ipc_types:628)
L4::Ipc::make_cap_rw(L4::Cap<T> cap) noexcept - makes an L4::Ipc::Cap<T> for the given capability with L4_CAP_FPAGE_RW rights. (definition: ipc_types:638)
L4 - L4 low-level kernel interface.
cxx - our C++ library. (definition: arith:22)
<l4/cxx/pair> - pair implementation.
<l4/re/rm> - region mapper interface.
<l4/re/shared_cap> - Shared_cap / Shared_del_cap.
L4Re::Rm::F::Detach_free - free the portion of the data space after detach. (definition: rm:147)
L4Re::Rm::F::Search_addr - search for a suitable address range. (definition: rm:118)
cxx::H_list_t - double-linked list of typed H_list_item_t elements. (definition: hlist:260)
cxx::Pair - pair of two values. (definition: pair:37)
l4_assert(expr) - low-level assert. (definition: assert.h:43)