L4Re – L4 Runtime Environment
vfs_impl.h
1 /*
2  * (c) 2008-2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
3  * Alexander Warg <warg@os.inf.tu-dresden.de>,
4  * Björn Döbel <doebel@os.inf.tu-dresden.de>
5  * economic rights: Technische Universität Dresden (Germany)
6  *
7  * This file is part of TUD:OS and distributed under the terms of the
8  * GNU General Public License 2.
9  * Please see the COPYING-GPL-2 file for details.
10  *
11  * As a special exception, you may use this file as part of a free software
12  * library without restriction. Specifically, if other files instantiate
13  * templates or use macros or inline functions from this file, or you compile
14  * this file and link it with other files to produce an executable, this
15  * file does not by itself cause the resulting executable to be covered by
16  * the GNU General Public License. This exception does not however
17  * invalidate any other reasons why the executable file might be covered by
18  * the GNU General Public License.
19  */
20 
21 #include "fd_store.h"
22 #include "vcon_stream.h"
23 #include "ns_fs.h"
24 
25 #include <l4/re/env>
26 #include <l4/re/rm>
27 #include <l4/re/dataspace>
28 #include <l4/cxx/hlist>
29 #include <l4/cxx/pair>
30 #include <l4/cxx/std_alloc>
31 
32 #include <l4/l4re_vfs/backend>
33 #include <l4/re/shared_cap>
34 
35 #include <unistd.h>
36 #include <cstdarg>
37 #include <errno.h>
38 #include <sys/uio.h>
39 
40 #if 0
41 #include <l4/sys/kdebug.h>
42 static int debug_mmap = 1;
43 #define DEBUG_LOG(level, dbg...) do { if (level) dbg } while (0)
44 #else
45 #define DEBUG_LOG(level, dbg...) do { } while (0)
46 #endif
47 
53 #define USE_BIG_ANON_DS
54 
55 using L4Re::Rm;
56 
57 namespace {
58 
59 using cxx::Ref_ptr;
60 
61 class Fd_store : public L4Re::Core::Fd_store
62 {
63 public:
64  Fd_store() throw();
65 };
66 
67 // for internal Vcon_streams we want to have a placement new operator, so
68 // inherit and add one
69 class Std_stream : public L4Re::Core::Vcon_stream
70 {
71 public:
72  Std_stream(L4::Cap<L4::Vcon> c) : L4Re::Core::Vcon_stream(c) {}
73 };
74 
75 Fd_store::Fd_store() throw()
76 {
77  // use this strange way to prevent deletion of the stdio object
78  // this depends on Fd_store being a singleton !!!
79  static char m[sizeof(Std_stream)] __attribute__((aligned(sizeof(long))));
80  Std_stream *s = new (m) Std_stream(L4Re::Env::env()->log());
81  // make sure that we never delete the static io stream thing
82  s->add_ref();
83  set(0, cxx::ref_ptr(s)); // stdin
84  set(1, cxx::ref_ptr(s)); // stdout
85  set(2, cxx::ref_ptr(s)); // stderr
86 }
87 
88 class Root_mount_tree : public L4Re::Vfs::Mount_tree
89 {
90 public:
91  Root_mount_tree() : L4Re::Vfs::Mount_tree(0) {}
92  void operator delete (void *) {}
93 };
94 
95 class Vfs : public L4Re::Vfs::Ops
96 {
97 private:
98  bool _early_oom;
99 
100 public:
101  Vfs()
102  : _early_oom(true), _root_mount(), _root(L4Re::Env::env())
103  {
104  _root_mount.add_ref();
105  _root.add_ref();
106  _root_mount.mount(cxx::ref_ptr(&_root));
107  _cwd = cxx::ref_ptr(&_root);
108 
109 #if 0
110  Ref_ptr<L4Re::Vfs::File> rom;
111  _root.openat("rom", 0, 0, &rom);
112 
113  _root_mount.create_tree("lib/foo", rom);
114 
115  _root.openat("lib", 0, 0, &_cwd);
116 
117 #endif
118  }
119 
120  int alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) throw();
121  Ref_ptr<L4Re::Vfs::File> free_fd(int fd) throw();
122  Ref_ptr<L4Re::Vfs::File> get_root() throw();
123  Ref_ptr<L4Re::Vfs::File> get_cwd() throw();
124  void set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) throw();
125  Ref_ptr<L4Re::Vfs::File> get_file(int fd) throw();
126  cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
127  set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f = Ref_ptr<>::Nil) throw();
128 
129  int mmap2(void *start, size_t len, int prot, int flags, int fd,
130  off_t offset, void **ptr) throw();
131 
132  int munmap(void *start, size_t len) throw();
133  int mremap(void *old, size_t old_sz, size_t new_sz, int flags,
134  void **new_addr) throw();
135  int mprotect(const void *a, size_t sz, int prot) throw();
136  int msync(void *addr, size_t len, int flags) throw();
137  int madvise(void *addr, size_t len, int advice) throw();
138 
139  int register_file_system(L4Re::Vfs::File_system *f) throw();
140  int unregister_file_system(L4Re::Vfs::File_system *f) throw();
141  L4Re::Vfs::File_system *get_file_system(char const *fstype) throw();
142 
143  int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw();
144  int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw();
145  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) throw();
146  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) throw();
147  int mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) throw();
148 
149  void operator delete (void *) {}
150 
151  void *malloc(size_t size) noexcept { return Vfs_config::malloc(size); }
152  void free(void *m) noexcept { Vfs_config::free(m); }
153 
154 private:
155  Root_mount_tree _root_mount;
156  L4Re::Core::Env_dir _root;
157  Ref_ptr<L4Re::Vfs::File> _cwd;
158  Fd_store fds;
159 
160  L4Re::Vfs::File_system *_fs_registry;
161 
162  struct File_factory_item : cxx::H_list_item_t<File_factory_item>
163  {
164  cxx::Ref_ptr<L4Re::Vfs::File_factory> f;
165  explicit File_factory_item(cxx::Ref_ptr<L4Re::Vfs::File_factory> const &f)
166  : f(f) {};
167 
168  File_factory_item() = default;
169  File_factory_item(File_factory_item const &) = delete;
170  File_factory_item &operator = (File_factory_item const &) = delete;
171  };
172 
173  cxx::H_list_t<File_factory_item> _file_factories;
174 
175  l4_addr_t _anon_offset;
176  L4Re::Shared_cap<L4Re::Dataspace> _anon_ds;
177 
178  int alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds);
179  int alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
180  l4_addr_t *offset);
181 };
182 
183 static inline bool strequal(char const *a, char const *b)
184 {
185  for (;*a && *a == *b; ++a, ++b)
186  ;
187  return *a == *b;
188 }
189 
190 int
191 Vfs::register_file_system(L4Re::Vfs::File_system *f) throw()
192 {
193  using L4Re::Vfs::File_system;
194 
195  if (!f)
196  return -EINVAL;
197 
198  for (File_system *c = _fs_registry; c; c = c->next())
199  if (strequal(c->type(), f->type()))
200  return -EEXIST;
201 
202  f->next(_fs_registry);
203  _fs_registry = f;
204 
205  return 0;
206 }
207 
208 int
209 Vfs::unregister_file_system(L4Re::Vfs::File_system *f) throw()
210 {
211  using L4Re::Vfs::File_system;
212 
213  if (!f)
214  return -EINVAL;
215 
216  File_system **p = &_fs_registry;
217 
218  for (; *p; p = &(*p)->next())
219  if (*p == f)
220  {
221  *p = f->next();
222  f->next() = 0;
223  return 0;
224  }
225 
226  return -ENOENT;
227 }
228 
229 L4Re::Vfs::File_system *
230 Vfs::get_file_system(char const *fstype) throw()
231 {
232  bool try_dynamic = true;
233  for (;;)
234  {
235  using L4Re::Vfs::File_system;
236  for (File_system *c = _fs_registry; c; c = c->next())
237  if (strequal(c->type(), fstype))
238  return c;
239 
240  if (!try_dynamic)
241  return 0;
242 
243  // try to load a file system module dynamically
244  int res = Vfs_config::load_module(fstype);
245 
246  if (res < 0)
247  return 0;
248 
249  try_dynamic = false;
250  }
251 }
252 
253 int
254 Vfs::register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw()
255 {
256  if (!f)
257  return -EINVAL;
258 
259  void *x = this->malloc(sizeof(File_factory_item));
260  if (!x)
261  return -ENOMEM;
262 
263  auto ff = new (x, cxx::Nothrow()) File_factory_item(f);
264  _file_factories.push_front(ff);
265  return 0;
266 }
267 
268 int
269 Vfs::unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw()
270 {
271  for (auto p: _file_factories)
272  {
273  if (p->f == f)
274  {
275  _file_factories.remove(p);
276  p->~File_factory_item();
277  this->free(p);
278  return 0;
279  }
280  }
281  return -ENOENT;
282 }
283 
284 Ref_ptr<L4Re::Vfs::File_factory>
285 Vfs::get_file_factory(int proto) throw()
286 {
287  for (auto p: _file_factories)
288  if (p->f->proto() == proto)
289  return p->f;
290 
291  return Ref_ptr<L4Re::Vfs::File_factory>();
292 }
293 
294 Ref_ptr<L4Re::Vfs::File_factory>
295 Vfs::get_file_factory(char const *proto_name) throw()
296 {
297  for (auto p: _file_factories)
298  {
299  auto n = p->f->proto_name();
300  if (n)
301  {
302  char const *a = n;
303  char const *b = proto_name;
304  for (; *a && *b && *a == *b; ++a, ++b)
305  ;
306 
307  if ((*a == 0) && (*b == 0))
308  return p->f;
309  }
310  }
311 
312  return Ref_ptr<L4Re::Vfs::File_factory>();
313 }
314 
315 int
316 Vfs::alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) throw()
317 {
318  int fd = fds.alloc();
319  if (fd < 0)
320  return -EMFILE;
321 
322  if (f)
323  fds.set(fd, f);
324 
325  return fd;
326 }
327 
328 Ref_ptr<L4Re::Vfs::File>
329 Vfs::free_fd(int fd) throw()
330 {
331  Ref_ptr<L4Re::Vfs::File> f = fds.get(fd);
332 
333  if (!f)
334  return Ref_ptr<>::Nil;
335 
336  fds.free(fd);
337  return f;
338 }
339 
340 
341 Ref_ptr<L4Re::Vfs::File>
342 Vfs::get_root() throw()
343 {
344  return cxx::ref_ptr(&_root);
345 }
346 
347 Ref_ptr<L4Re::Vfs::File>
348 Vfs::get_cwd() throw()
349 {
350  return _cwd;
351 }
352 
353 void
354 Vfs::set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) throw()
355 {
356  // FIXME: check for is dir
357  if (dir)
358  _cwd = dir;
359 }
360 
361 Ref_ptr<L4Re::Vfs::File>
362 Vfs::get_file(int fd) throw()
363 {
364  return fds.get(fd);
365 }
366 
367 cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
368 Vfs::set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f) throw()
369 {
370  if (!fds.check_fd(fd))
371  return cxx::pair(Ref_ptr<L4Re::Vfs::File>(Ref_ptr<>::Nil), EBADF);
372 
373  Ref_ptr<L4Re::Vfs::File> old = fds.get(fd);
374  fds.set(fd, f);
375  return cxx::pair(old, 0);
376 }
377 
378 
379 #define GET_FILE_DBG(fd, err) \
380  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
381  if (!fi) \
382  { \
383  return -err; \
384  }
385 
386 #define GET_FILE(fd, err) \
387  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
388  if (!fi) \
389  return -err;
390 
391 
392 int
393 Vfs::munmap(void *start, size_t len) L4_NOTHROW
394 {
395  using namespace L4;
396  using namespace L4Re;
397 
398  int err;
399  Cap<Dataspace> ds;
400  Cap<Rm> r = Env::env()->rm();
401 
402  while (1)
403  {
404  DEBUG_LOG(debug_mmap, {
405  outstring("DETACH: ");
406  outhex32(l4_addr_t(start));
407  outstring(" ");
408  outhex32(len);
409  outstring("\n");
410  });
411  err = r->detach(l4_addr_t(start), len, &ds, This_task);
412  if (err < 0)
413  return err;
414 
415  switch (err & Rm::Detach_result_mask)
416  {
417  case Rm::Split_ds:
418  if (ds.is_valid())
419  L4Re::virt_cap_alloc->take(ds);
420  return 0;
421  case Rm::Detached_ds:
422  if (ds.is_valid())
423  L4Re::virt_cap_alloc->release(ds);
424  break;
425  default:
426  break;
427  }
428 
429  if (!(err & Rm::Detach_again))
430  return 0;
431  }
432 }
433 
434 int
435 Vfs::alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds)
436 {
437  *ds = L4Re::make_shared_cap<L4Re::Dataspace>(L4Re::virt_cap_alloc);
438 
439  if (!ds->is_valid())
440  return -ENOMEM;
441 
442  int err;
443  if ((err = Vfs_config::allocator()->alloc(size, ds->get())) < 0)
444  return err;
445 
446  DEBUG_LOG(debug_mmap, {
447  outstring("ANON DS ALLOCATED: size=");
448  outhex32(size);
449  outstring(" cap=");
450  outhex32(ds->cap());
451  outstring("\n");
452  });
453 
454  return 0;
455 }
456 
457 int
458 Vfs::alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
459  l4_addr_t *offset)
460 {
461 #ifdef USE_BIG_ANON_DS
462  enum
463  {
464  ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
465  ANON_MEM_MAX_SIZE = 32UL << 20, // chunk size from which a dedicated dataspace is allocated
466  };
467 #else
468  enum
469  {
470  ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
471  ANON_MEM_MAX_SIZE = 0UL << 20, // chunk size from which a dedicated dataspace is allocated
472  };
473 #endif
474 
475  if (size >= ANON_MEM_MAX_SIZE)
476  {
477  int err;
478  if ((err = alloc_ds(size, ds)) < 0)
479  return err;
480 
481  *offset = 0;
482 
483  if (!_early_oom)
484  return err;
485 
486  return (*ds)->allocate(0, size);
487  }
488 
489  if (!_anon_ds.is_valid() || _anon_offset + size >= ANON_MEM_DS_POOL_SIZE)
490  {
491  int err;
492  if ((err = alloc_ds(ANON_MEM_DS_POOL_SIZE, ds)) < 0)
493  return err;
494 
495  _anon_offset = 0;
496  _anon_ds = *ds;
497  }
498  else
499  *ds = _anon_ds;
500 
501  if (_early_oom)
502  {
503  if (int err = (*ds)->allocate(_anon_offset, size))
504  return err;
505  }
506 
507  *offset = _anon_offset;
508  _anon_offset += size;
509  return 0;
510 }
511 
512 int
513 Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t _offset,
514  void **resptr) L4_NOTHROW
515 {
516  using namespace L4Re;
517  off64_t offset = l4_trunc_page(_offset << 12);
518 
519  start = (void*)l4_trunc_page(l4_addr_t(start));
520  len = l4_round_page(len);
521  l4_umword_t size = (len + L4_PAGESIZE-1) & ~(L4_PAGESIZE-1);
522 
523  // special code to just reserve an area of the virtual address space
524  if (flags & 0x1000000)
525  {
526  int err;
527  L4::Cap<Rm> r = Env::env()->rm();
528  l4_addr_t area = (l4_addr_t)start;
529  err = r->reserve_area(&area, size, L4Re::Rm::F::Search_addr);
530  if (err < 0)
531  return err;
532  *resptr = (void*)area;
533  DEBUG_LOG(debug_mmap, {
534  outstring("MMAP reserved area: ");
535  outhex32(area);
536  outstring(" size=");
537  outhex32(size);
538  outstring("\n");
539  });
540  return 0;
541  }
542 
543  L4Re::Shared_cap<L4Re::Dataspace> ds;
544  l4_addr_t anon_offset = 0;
545  L4Re::Rm::Flags rm_flags(0);
546 
547  if (flags & (MAP_ANONYMOUS | MAP_PRIVATE))
548  {
549  rm_flags |= L4Re::Rm::F::Detach_free;
550 
551  int err = alloc_anon_mem(size, &ds, &anon_offset);
552  if (err)
553  return err;
554 
555  DEBUG_LOG(debug_mmap, {
556  outstring("USE ANON MEM: ");
557  outhex32(ds.cap());
558  outstring(" offs=");
559  outhex32(anon_offset);
560  outstring("\n");
561  });
562  }
563 
564  if (!(flags & MAP_ANONYMOUS))
565  {
566  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);
567  if (!fi)
568  {
569  return -EBADF;
570  }
571 
572  L4::Cap<L4Re::Dataspace> fds = fi->data_space();
573 
574  if (!fds.is_valid())
575  {
576  return -EINVAL;
577  }
578 
579  if (size + offset > l4_round_page(fds->size()))
580  {
581  return -EINVAL;
582  }
583 
584  if (flags & MAP_PRIVATE)
585  {
586  DEBUG_LOG(debug_mmap, outstring("COW\n"););
587  int err = ds->copy_in(anon_offset, fds, l4_trunc_page(offset),
588  l4_round_page(size));
589  if (err < 0)
590  return err;
591 
592  offset = anon_offset;
593  }
594  else
595  {
596  L4Re::virt_cap_alloc->take(fds);
597  ds = L4Re::Shared_cap<L4Re::Dataspace>(fds, L4Re::virt_cap_alloc);
598  }
599  }
600  else
601  offset = anon_offset;
602 
603 
604  if (!(flags & MAP_FIXED) && start == 0)
605  start = (void*)L4_PAGESIZE;
606 
607  char *data = (char *)start;
608  L4::Cap<Rm> r = Env::env()->rm();
609  l4_addr_t overmap_area = L4_INVALID_ADDR;
610 
611  int err;
612  if (flags & MAP_FIXED)
613  {
614  overmap_area = l4_addr_t(start);
615 
616  err = r->reserve_area(&overmap_area, size);
617  if (err < 0)
618  overmap_area = L4_INVALID_ADDR;
619 
620  rm_flags |= Rm::F::In_area;
621 
622  err = munmap(start, len);
623  if (err && err != -ENOENT)
624  return err;
625  }
626 
627  if (!(flags & MAP_FIXED)) rm_flags |= Rm::F::Search_addr;
628  if (prot & PROT_READ) rm_flags |= Rm::F::R;
629  if (prot & PROT_WRITE) rm_flags |= Rm::F::W;
630  if (prot & PROT_EXEC) rm_flags |= Rm::F::X;
631 
632  err = r->attach(&data, size, rm_flags,
633  L4::Ipc::make_cap(ds.get(), (prot & PROT_WRITE)
634  ? L4_CAP_FPAGE_RW
635  : L4_CAP_FPAGE_RO),
636  offset);
637 
638  DEBUG_LOG(debug_mmap, {
639  outstring(" MAPPED: ");
640  outhex32(ds.cap());
641  outstring(" addr: ");
642  outhex32(l4_addr_t(data));
643  outstring(" bytes: ");
644  outhex32(size);
645  outstring(" offset: ");
646  outhex32(offset);
647  outstring(" err=");
648  outdec(err);
649  outstring("\n");
650  });
651 
652 
653  if (overmap_area != L4_INVALID_ADDR)
654  r->free_area(overmap_area);
655 
656  if (err < 0)
657  return err;
658 
659  l4_assert (!(start && !data));
660 
661  // release ownership of the attached DS
662  ds.release();
663  *resptr = data;
664 
665  return 0;
666 }
667 
668 namespace {
669  class Auto_area
670  {
671  public:
672  L4::Cap<L4Re::Rm> r;
673  l4_addr_t a;
674 
675  explicit Auto_area(L4::Cap<L4Re::Rm> r, l4_addr_t a = L4_INVALID_ADDR)
676  : r(r), a(a) {}
677 
678  int reserve(l4_addr_t _a, l4_size_t sz, L4Re::Rm::Flags flags)
679  {
680  free();
681  a = _a;
682  int e = r->reserve_area(&a, sz, flags);
683  if (e)
684  a = L4_INVALID_ADDR;
685  return e;
686  }
687 
688  void free()
689  {
690  if (is_valid())
691  {
692  r->free_area(a);
693  a = L4_INVALID_ADDR;
694  }
695  }
696 
697  bool is_valid() const { return a != L4_INVALID_ADDR; }
698 
699  ~Auto_area() { free(); }
700  };
701 }
702 
703 int
704 Vfs::mremap(void *old_addr, size_t old_size, size_t new_size, int flags,
705  void **new_addr) L4_NOTHROW
706 {
707  using namespace L4Re;
708 
709  DEBUG_LOG(debug_mmap, {
710  outstring("Mremap: addr=");
711  outhex32((l4_umword_t)old_addr);
712  outstring(" old_size=");
713  outhex32(old_size);
714  outstring(" new_size=");
715  outhex32(new_size);
716  outstring("\n");
717  });
718 
719  if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
720  return -EINVAL;
721 
722  l4_addr_t oa = l4_trunc_page((l4_addr_t)old_addr);
723  if (oa != (l4_addr_t)old_addr)
724  return -EINVAL;
725 
726  bool const fixed = flags & MREMAP_FIXED;
727  bool const maymove = flags & MREMAP_MAYMOVE;
728 
729  L4::Cap<Rm> r = Env::env()->rm();
730 
731  // sanitize input parameters to multiples of pages
732  old_size = l4_round_page(old_size);
733  new_size = l4_round_page(new_size);
734 
735  if (!fixed)
736  {
737  if (new_size < old_size)
738  {
739  *new_addr = old_addr;
740  return munmap((void*)(oa + new_size), old_size - new_size);
741  }
742 
743  if (new_size == old_size)
744  {
745  *new_addr = old_addr;
746  return 0;
747  }
748  }
749 
750  Auto_area old_area(r);
751  int err = old_area.reserve(oa, old_size, L4Re::Rm::Flags(0));
752  if (err < 0)
753  return -EINVAL;
754 
755  l4_addr_t pad_addr;
756  Auto_area new_area(r);
757  if (fixed)
758  {
759  l4_addr_t na = l4_trunc_page((l4_addr_t)*new_addr);
760  if (na != (l4_addr_t)*new_addr)
761  return -EINVAL;
762 
763  // check whether the requested fixed target area is available
764  int err = new_area.reserve(na, new_size, L4Re::Rm::Flags(0));
765  if (err < 0)
766  return err;
767 
768  pad_addr = na;
769  // unmap all stuff and remap ours ....
770  }
771  else
772  {
773  l4_addr_t ta = oa + old_size;
774  unsigned long ts = new_size - old_size;
775  // check if the current virtual memory area can be expanded
776  int err = new_area.reserve(ta, ts, L4Re::Rm::Flags(0));
777  if (!maymove && err)
778  return -ENOMEM;
779 
780  L4Re::Rm::Offset toffs;
781  L4Re::Rm::Flags tflags;
782  L4::Cap<L4Re::Dataspace> tds;
783 
784  err = r->find(&ta, &ts, &toffs, &tflags, &tds);
785 
786  // there is enough space to expand the mapping in place
787  if (err == -ENOENT || (err == 0 && (tflags & Rm::F::In_area)))
788  {
789  old_area.free(); // pad at the original address
790  pad_addr = oa + old_size;
791  *new_addr = old_addr;
792  }
793  else if (!maymove)
794  return -ENOMEM;
795  else
796  {
797  // search for a new area to remap
798  err = new_area.reserve(0, new_size, Rm::F::Search_addr);
799  if (err < 0)
800  return -ENOMEM;
801 
802  pad_addr = new_area.a + old_size;
803  *new_addr = (void *)new_area.a;
804  }
805  }
806 
807  if (old_area.is_valid())
808  {
809  l4_addr_t a = old_area.a;
810  unsigned long s = old_size;
811  L4Re::Rm::Offset o;
812  L4Re::Rm::Flags f;
813  L4::Cap<L4Re::Dataspace> ds;
814 
815  for (; r->find(&a, &s, &o, &f, &ds) >= 0 && (!(f & Rm::F::In_area));)
816  {
817  if (a < old_area.a)
818  {
819  auto d = old_area.a - a;
820  a = old_area.a;
821  s -= d;
822  o += d;
823  }
824 
825  if (a + s > old_area.a + old_size)
826  s = old_area.a + old_size - a;
827 
828  l4_addr_t x = a - old_area.a + new_area.a;
829 
830  int err = r->attach(&x, s, Rm::F::In_area | f,
831  L4::Ipc::make_cap(ds, f.cap_rights()),
832  o);
833  if (err < 0)
834  return err;
835 
836  // count the new attached ds reference
837  L4Re::virt_cap_alloc->take(ds);
838 
839  err = r->detach(a, s, &ds, This_task,
840  Rm::Detach_exact | Rm::Detach_keep);
841  if (err < 0)
842  return err;
843 
844  switch (err & Rm::Detach_result_mask)
845  {
846  case Rm::Split_ds:
847  // add a reference as we split up a mapping
848  if (ds.is_valid())
849  L4Re::virt_cap_alloc->take(ds);
850  break;
851  case Rm::Detached_ds:
852  if (ds.is_valid())
853  L4Re::virt_cap_alloc->release(ds);
854  break;
855  default:
856  break;
857  }
858  }
859  old_area.free();
860  }
861 
862  if (old_size < new_size)
863  {
864  l4_addr_t const pad_sz = new_size - old_size;
865  l4_addr_t toffs;
866  L4Re::Shared_cap<L4Re::Dataspace> tds;
867  int err = alloc_anon_mem(pad_sz, &tds, &toffs);
868  if (err)
869  return err;
870 
871  // FIXME: must get the protection rights from the old
872  // mapping and use the same here, for now just use RWX
873  err = r->attach(&pad_addr, pad_sz,
874  Rm::F::In_area | Rm::F::Detach_free | Rm::F::RWX,
875  L4::Ipc::make_cap_rw(tds.get()), toffs);
876  if (err < 0)
877  return err;
878 
879  // release ownership of tds, the region map is now the new owner
880  tds.release();
881  }
882 
883  return 0;
884 }
885 
886 int
887 Vfs::mprotect(const void *a, size_t sz, int prot) L4_NOTHROW
888 {
889  (void)a;
890  (void)sz;
891  return (prot & PROT_WRITE) ? -1 : 0;
892 }
893 
894 int
895 Vfs::msync(void *, size_t, int) L4_NOTHROW
896 { return 0; }
897 
898 int
899 Vfs::madvise(void *, size_t, int) L4_NOTHROW
900 { return 0; }
901 
902 }
903 
904 L4Re::Vfs::Ops *__rtld_l4re_env_posix_vfs_ops;
905 extern void *l4re_env_posix_vfs_ops __attribute__((alias("__rtld_l4re_env_posix_vfs_ops"), visibility("default")));
906 
907 namespace {
908  class Real_mount_tree : public L4Re::Vfs::Mount_tree
909  {
910  public:
911  explicit Real_mount_tree(char *n) : Mount_tree(n) {}
912 
913  void *operator new (size_t size)
914  { return __rtld_l4re_env_posix_vfs_ops->malloc(size); }
915 
916  void operator delete (void *mem)
917  { __rtld_l4re_env_posix_vfs_ops->free(mem); }
918  };
919 }
920 
921 int
922 Vfs::mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) throw()
923 {
924  using L4Re::Vfs::File;
925  using L4Re::Vfs::Mount_tree;
926  using L4Re::Vfs::Path;
927 
928  cxx::Ref_ptr<Mount_tree> root = get_root()->mount_tree();
929  if (!root)
930  return -EINVAL;
931 
932  cxx::Ref_ptr<Mount_tree> base;
933  Path p = root->lookup(Path(path), &base);
934 
935  while (!p.empty())
936  {
937  Path f = p.strip_first();
938 
939  if (f.empty())
940  return -EEXIST;
941 
942  char *name = __rtld_l4re_env_posix_vfs_ops->strndup(f.path(), f.length());
943  if (!name)
944  return -ENOMEM;
945 
946  cxx::Ref_ptr<Mount_tree> nt(new Real_mount_tree(name));
947  if (!nt)
948  {
949  __rtld_l4re_env_posix_vfs_ops->free(name);
950  return -ENOMEM;
951  }
952 
953  base->add_child_node(nt);
954  base = nt;
955 
956  if (p.empty())
957  {
958  nt->mount(dir);
959  return 0;
960  }
961  }
962 
963  return -EINVAL;
964 }
965 
966 
967 #undef DEBUG_LOG
968 #undef GET_FILE_DBG
969 #undef GET_FILE
970 