L4Re - L4 Runtime Environment
vfs_impl.h
1 /*
2  * (c) 2008-2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
3  * Alexander Warg <warg@os.inf.tu-dresden.de>,
4  * Björn Döbel <doebel@os.inf.tu-dresden.de>
5  * economic rights: Technische Universität Dresden (Germany)
6  *
7  * This file is part of TUD:OS and distributed under the terms of the
8  * GNU General Public License 2.
9  * Please see the COPYING-GPL-2 file for details.
10  *
11  * As a special exception, you may use this file as part of a free software
12  * library without restriction. Specifically, if other files instantiate
13  * templates or use macros or inline functions from this file, or you compile
14  * this file and link it with other files to produce an executable, this
15  * file does not by itself cause the resulting executable to be covered by
16  * the GNU General Public License. This exception does not however
17  * invalidate any other reasons why the executable file might be covered by
18  * the GNU General Public License.
19  */
20 
21 #include "fd_store.h"
22 #include "vcon_stream.h"
23 #include "ns_fs.h"
24 
25 #include <l4/re/env>
26 #include <l4/re/rm>
27 #include <l4/re/dataspace>
28 #include <l4/cxx/hlist>
29 #include <l4/cxx/std_alloc>
30 
31 #include <l4/l4re_vfs/backend>
32 #include <l4/re/shared_cap>
33 
34 #include <unistd.h>
35 #include <cstdarg>
36 #include <errno.h>
37 #include <sys/uio.h>
38 
39 #if 0
40 #include <l4/sys/kdebug.h>
41 static int debug_mmap = 1;
42 #define DEBUG_LOG(level, dbg...) do { if (level) dbg } while (0)
43 #else
44 #define DEBUG_LOG(level, dbg...) do { } while (0)
45 #endif
46 
47 /*
48  * If USE_BIG_ANON_DS is defined, anonymous memory is backed by a few large
49  * pool dataspaces that are sub-allocated; otherwise every anonymous mmap()
50  * call allocates a dataspace of its own.
51  */
52 #define USE_BIG_ANON_DS
53 
54 using L4Re::Rm;
55 
56 namespace {
57 
58 using cxx::Ref_ptr;
59 
60 class Fd_store : public L4Re::Core::Fd_store
61 {
62 public:
63  Fd_store() throw();
64 };
65 
66 // for internal Vcon_streams we want to have a placement new operator, so
67 // inherit and add one
68 class Std_stream : public L4Re::Core::Vcon_stream
69 {
70 public:
71  Std_stream(L4::Cap<L4::Vcon> c) : L4Re::Core::Vcon_stream(c) {}
72 };
73 
74 Fd_store::Fd_store() throw()
75 {
76  // use this strange way to prevent deletion of the stdio object
77  // this depends on Fd_store being a singleton !!!
78  static char m[sizeof(Std_stream)] __attribute__((aligned(sizeof(long))));
79  Std_stream *s = new (m) Std_stream(L4Re::Env::env()->log());
80  // make sure that we never delete the static io stream thing
81  s->add_ref();
82  set(0, cxx::ref_ptr(s)); // stdin
83  set(1, cxx::ref_ptr(s)); // stdout
84  set(2, cxx::ref_ptr(s)); // stderr
85 }
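
The constructor above relies on placement new into static storage plus an extra reference to make the shared stdin/stdout/stderr stream effectively immortal. A minimal stand-alone sketch of the same technique, using a hypothetical ref-counted type in place of the real Vcon_stream:

#include <new>      // placement new
#include <cstdio>

// Hypothetical ref-counted class standing in for L4Re::Core::Vcon_stream.
struct Stream
{
  int refs = 0;
  void add_ref() { ++refs; }
  int remove_ref() { return --refs; } // an owner would delete at zero
  void write(char const *s) { std::fputs(s, stdout); }
};

Stream *static_stream()
{
  // Placement new into static storage: operator delete can never reclaim
  // the object, and the extra reference taken here keeps a ref-counting
  // smart pointer from ever trying to destroy it.
  static alignas(Stream) char buf[sizeof(Stream)];
  Stream *s = new (buf) Stream;
  s->add_ref();
  return s;
}
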
86 
87 class Root_mount_tree : public L4Re::Vfs::Mount_tree
88 {
89 public:
90  Root_mount_tree() : L4Re::Vfs::Mount_tree(0) {}
91  void operator delete (void *) {}
92 };
93 
94 class Vfs : public L4Re::Vfs::Ops
95 {
96 private:
97  bool _early_oom;
98 
99 public:
100  Vfs()
101  : _early_oom(true), _root_mount(), _root(L4Re::Env::env())
102  {
103  _root_mount.add_ref();
104  _root.add_ref();
105  _root_mount.mount(cxx::ref_ptr(&_root));
106  _cwd = cxx::ref_ptr(&_root);
107 
108 #if 0
109  Ref_ptr<L4Re::Vfs::File> rom;
110  _root.openat("rom", 0, 0, &rom);
111 
112  _root_mount.create_tree("lib/foo", rom);
113 
114  _root.openat("lib", 0, 0, &_cwd);
115 
116 #endif
117  }
118 
119  int alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) throw();
120  Ref_ptr<L4Re::Vfs::File> free_fd(int fd) throw();
121  Ref_ptr<L4Re::Vfs::File> get_root() throw();
122  Ref_ptr<L4Re::Vfs::File> get_cwd() throw();
123  void set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) throw();
124  Ref_ptr<L4Re::Vfs::File> get_file(int fd) throw();
125  Ref_ptr<L4Re::Vfs::File> set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f = Ref_ptr<>::Nil) throw();
126 
127  int mmap2(void *start, size_t len, int prot, int flags, int fd,
128  off_t offset, void **ptr) throw();
129 
130  int munmap(void *start, size_t len) throw();
131  int mremap(void *old, size_t old_sz, size_t new_sz, int flags,
132  void **new_addr) throw();
133  int mprotect(const void *a, size_t sz, int prot) throw();
134  int msync(void *addr, size_t len, int flags) throw();
135  int madvise(void *addr, size_t len, int advice) throw();
136 
137  int register_file_system(L4Re::Vfs::File_system *f) throw();
138  int unregister_file_system(L4Re::Vfs::File_system *f) throw();
139  L4Re::Vfs::File_system *get_file_system(char const *fstype) throw();
140 
141  int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw();
142  int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw();
143  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) throw();
144  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) throw();
145  int mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) throw();
146 
147  void operator delete (void *) {}
148 
149  void *malloc(size_t size) noexcept { return Vfs_config::malloc(size); }
150  void free(void *m) noexcept { Vfs_config::free(m); }
151 
152 private:
153  Root_mount_tree _root_mount;
154  L4Re::Core::Env_dir _root;
155  Ref_ptr<L4Re::Vfs::File> _cwd;
156  Fd_store fds;
157 
158  L4Re::Vfs::File_system *_fs_registry;
159 
160  struct File_factory_item : cxx::H_list_item_t<File_factory_item>
161  {
162  cxx::Ref_ptr<L4Re::Vfs::File_factory> f;
163  explicit File_factory_item(cxx::Ref_ptr<L4Re::Vfs::File_factory> const &f)
164  : f(f) {};
165 
166  File_factory_item() = default;
167  File_factory_item(File_factory_item const &) = delete;
168  File_factory_item &operator = (File_factory_item const &) = delete;
169  };
170 
171  cxx::H_list_t<File_factory_item> _file_factories;
172 
173  l4_addr_t _anon_offset;
174  L4Re::Shared_cap<L4Re::Dataspace> _anon_ds;
175 
176  int alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds);
177  int alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
178  l4_addr_t *offset);
179 };
180 
181 static inline bool strequal(char const *a, char const *b)
182 {
183  for (;*a && *a == *b; ++a, ++b)
184  ;
185  return *a == *b;
186 }
187 
188 int
189 Vfs::register_file_system(L4Re::Vfs::File_system *f) throw()
190 {
191  using L4Re::Vfs::File_system;
192 
193  if (!f)
194  return -EINVAL;
195 
196  for (File_system *c = _fs_registry; c; c = c->next())
197  if (strequal(c->type(), f->type()))
198  return -EEXIST;
199 
200  f->next(_fs_registry);
201  _fs_registry = f;
202 
203  return 0;
204 }
205 
206 int
207 Vfs::unregister_file_system(L4Re::Vfs::File_system *f) throw()
208 {
209  using L4Re::Vfs::File_system;
210 
211  if (!f)
212  return -EINVAL;
213 
214  File_system **p = &_fs_registry;
215 
216  for (; *p; p = &(*p)->next())
217  if (*p == f)
218  {
219  *p = f->next();
220  f->next() = 0;
221  return 0;
222  }
223 
224  return -ENOENT;
225 }
226 
227 L4Re::Vfs::File_system *
228 Vfs::get_file_system(char const *fstype) throw()
229 {
230  bool try_dynamic = true;
231  for (;;)
232  {
233  using L4Re::Vfs::File_system;
234  for (File_system *c = _fs_registry; c; c = c->next())
235  if (strequal(c->type(), fstype))
236  return c;
237 
238  if (!try_dynamic)
239  return 0;
240 
241  // try to load a file system module dynamically
242  int res = Vfs_config::load_module(fstype);
243 
244  if (res < 0)
245  return 0;
246 
247  try_dynamic = false;
248  }
249 }
250 
251 int
252 Vfs::register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw()
253 {
254  if (!f)
255  return -EINVAL;
256 
257  void *x = this->malloc(sizeof(File_factory_item));
258  if (!x)
259  return -ENOMEM;
260 
261  auto ff = new (x, cxx::Nothrow()) File_factory_item(f);
262  _file_factories.push_front(ff);
263  return 0;
264 }
265 
266 int
267 Vfs::unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw()
268 {
269  for (auto p: _file_factories)
270  {
271  if (p->f == f)
272  {
273  _file_factories.remove(p);
274  p->~File_factory_item();
275  this->free(p);
276  return 0;
277  }
278  }
279  return -ENOENT;
280 }
281 
282 Ref_ptr<L4Re::Vfs::File_factory>
283 Vfs::get_file_factory(int proto) throw()
284 {
285  for (auto p: _file_factories)
286  if (p->f->proto() == proto)
287  return p->f;
288 
289  return Ref_ptr<L4Re::Vfs::File_factory>();
290 }
291 
292 Ref_ptr<L4Re::Vfs::File_factory>
293 Vfs::get_file_factory(char const *proto_name) throw()
294 {
295  for (auto p: _file_factories)
296  {
297  auto n = p->f->proto_name();
298  if (n)
299  {
300  char const *a = n;
301  char const *b = proto_name;
302  for (; *a && *b && *a == *b; ++a, ++b)
303  ;
304 
305  if ((*a == 0) && (*b == 0))
306  return p->f;
307  }
308  }
309 
310  return Ref_ptr<L4Re::Vfs::File_factory>();
311 }
312 
313 int
314 Vfs::alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) throw()
315 {
316  int fd = fds.alloc();
317  if (fd < 0)
318  return -EMFILE;
319 
320  if (f)
321  fds.set(fd, f);
322 
323  return fd;
324 }
325 
326 Ref_ptr<L4Re::Vfs::File>
327 Vfs::free_fd(int fd) throw()
328 {
329  Ref_ptr<L4Re::Vfs::File> f = fds.get(fd);
330 
331  if (!f)
332  return Ref_ptr<>::Nil;
333 
334  fds.free(fd);
335  return f;
336 }
337 
338 
339 Ref_ptr<L4Re::Vfs::File>
340 Vfs::get_root() throw()
341 {
342  return cxx::ref_ptr(&_root);
343 }
344 
345 Ref_ptr<L4Re::Vfs::File>
346 Vfs::get_cwd() throw()
347 {
348  return _cwd;
349 }
350 
351 void
352 Vfs::set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) throw()
353 {
354  // FIXME: check for is dir
355  if (dir)
356  _cwd = dir;
357 }
358 
359 Ref_ptr<L4Re::Vfs::File>
360 Vfs::get_file(int fd) throw()
361 {
362  return fds.get(fd);
363 }
364 
365 Ref_ptr<L4Re::Vfs::File>
366 Vfs::set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f) throw()
367 {
368  Ref_ptr<L4Re::Vfs::File> old = fds.get(fd);
369  fds.set(fd, f);
370  return old;
371 }
372 
373 
374 #define GET_FILE_DBG(fd, err) \
375  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
376  if (!fi) \
377  { \
378  return -err; \
379  }
380 
381 #define GET_FILE(fd, err) \
382  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
383  if (!fi) \
384  return -err;
385 
386 
387 int
388 Vfs::munmap(void *start, size_t len) L4_NOTHROW
389 {
390  using namespace L4;
391  using namespace L4Re;
392 
393  int err;
394  Cap<Dataspace> ds;
395  Cap<Rm> r = Env::env()->rm();
396 
397  while (1)
398  {
399  DEBUG_LOG(debug_mmap, {
400  outstring("DETACH: ");
401  outhex32(l4_addr_t(start));
402  outstring(" ");
403  outhex32(len);
404  outstring("\n");
405  });
406  err = r->detach(l4_addr_t(start), len, &ds, This_task);
407  if (err < 0)
408  return err;
409 
410  switch (err & Rm::Detach_result_mask)
411  {
412  case Rm::Split_ds:
413  if (ds.is_valid())
414  L4Re::virt_cap_alloc->take(ds);
415  return 0;
416  case Rm::Detached_ds:
417  if (ds.is_valid())
418  L4Re::virt_cap_alloc->release(ds);
419  break;
420  default:
421  break;
422  }
423 
424  if (!(err & Rm::Detach_again))
425  return 0;
426  }
427 }
428 
429 int
430 Vfs::alloc_ds(unsigned long size, L4Re::Shared_cap<L4Re::Dataspace> *ds)
431 {
432  *ds = L4Re::make_shared_cap<L4Re::Dataspace>(L4Re::virt_cap_alloc);
433 
434  if (!ds->is_valid())
435  return -ENOMEM;
436 
437  int err;
438  if ((err = Vfs_config::allocator()->alloc(size, ds->get())) < 0)
439  return err;
440 
441  DEBUG_LOG(debug_mmap, {
442  outstring("ANON DS ALLOCATED: size=");
443  outhex32(size);
444  outstring(" cap=");
445  outhex32(ds->cap());
446  outstring("\n");
447  });
448 
449  return 0;
450 }
451 
452 int
453 Vfs::alloc_anon_mem(l4_umword_t size, L4Re::Shared_cap<L4Re::Dataspace> *ds,
454  l4_addr_t *offset)
455 {
456 #ifdef USE_BIG_ANON_DS
457  enum
458  {
459  ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
460  ANON_MEM_MAX_SIZE = 32UL << 20, // requests of at least this size get a dataspace of their own
461  };
462 #else
463  enum
464  {
465  ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
466  ANON_MEM_MAX_SIZE = 0UL << 20, // requests of at least this size get a dataspace of their own
467  };
468 #endif
469 
470  if (size >= ANON_MEM_MAX_SIZE)
471  {
472  int err;
473  if ((err = alloc_ds(size, ds)) < 0)
474  return err;
475 
476  *offset = 0;
477 
478  if (!_early_oom)
479  return err;
480 
481  return (*ds)->allocate(0, size);
482  }
483 
484  if (!_anon_ds.is_valid() || _anon_offset + size >= ANON_MEM_DS_POOL_SIZE)
485  {
486  int err;
487  if ((err = alloc_ds(ANON_MEM_DS_POOL_SIZE, ds)) < 0)
488  return err;
489 
490  _anon_offset = 0;
491  _anon_ds = *ds;
492  }
493  else
494  *ds = _anon_ds;
495 
496  if (_early_oom)
497  {
498  if (int err = (*ds)->allocate(_anon_offset, size))
499  return err;
500  }
501 
502  *offset = _anon_offset;
503  _anon_offset += size;
504  return 0;
505 }
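
alloc_anon_mem() is essentially a bump allocator over a large pool dataspace, with oversized requests diverted to a dataspace of their own. The stand-alone sketch below mirrors only that placement policy; the names are hypothetical and no L4Re calls are involved:

#include <cstdio>

enum : unsigned long
{
  Pool_size = 256UL << 20, // like ANON_MEM_DS_POOL_SIZE
  Chunk_max = 32UL << 20,  // like ANON_MEM_MAX_SIZE with USE_BIG_ANON_DS
};

static unsigned long pool_offset; // plays the role of Vfs::_anon_offset
static bool pool_valid;           // plays the role of _anon_ds.is_valid()

// Returns the offset at which a request of 'size' bytes would be placed and
// reports via *own_ds whether it would get a dedicated dataspace instead.
static unsigned long place(unsigned long size, bool *own_ds)
{
  if (size >= Chunk_max)
    {
      *own_ds = true;            // big request: own dataspace, offset 0
      return 0;
    }

  if (!pool_valid || pool_offset + size >= Pool_size)
    {
      pool_valid = true;         // a fresh pool dataspace would be allocated
      pool_offset = 0;
    }

  *own_ds = false;
  unsigned long off = pool_offset;
  pool_offset += size;           // bump for the next small allocation
  return off;
}

int main()
{
  bool own;
  unsigned long off = place(64UL << 10, &own);
  std::printf("64 KiB -> offset 0x%lx, own dataspace: %d\n", off, own);
  off = place(64UL << 20, &own);
  std::printf("64 MiB -> offset 0x%lx, own dataspace: %d\n", off, own);
  return 0;
}
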
506 
507 int
508 Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t _offset,
509  void **resptr) L4_NOTHROW
510 {
511  using namespace L4Re;
512  off64_t offset = l4_trunc_page(_offset << 12);
513 
514  start = (void*)l4_trunc_page(l4_addr_t(start));
515  len = l4_round_page(len);
516  l4_umword_t size = (len + L4_PAGESIZE-1) & ~(L4_PAGESIZE-1);
517 
518  // special code to just reserve an area of the virtual address space
519  if (flags & 0x1000000)
520  {
521  int err;
522  L4::Cap<Rm> r = Env::env()->rm();
523  l4_addr_t area = (l4_addr_t)start;
524  err = r->reserve_area(&area, size, L4Re::Rm::Search_addr);
525  if (err < 0)
526  return err;
527  *resptr = (void*)area;
528  DEBUG_LOG(debug_mmap, {
529  outstring("MMAP reserved area: ");
530  outhex32(area);
531  outstring(" size=");
532  outhex32(size);
533  outstring("\n");
534  });
535  return 0;
536  }
537 
538  L4Re::Shared_cap<L4Re::Dataspace> ds;
539  l4_addr_t anon_offset = 0;
540  unsigned rm_flags = 0;
541 
542  if (flags & (MAP_ANONYMOUS | MAP_PRIVATE))
543  {
544  rm_flags |= L4Re::Rm::Detach_free;
545 
546  int err = alloc_anon_mem(size, &ds, &anon_offset);
547  if (err)
548  return err;
549 
550  DEBUG_LOG(debug_mmap, {
551  outstring("USE ANON MEM: ");
552  outhex32(ds.cap());
553  outstring(" offs=");
554  outhex32(anon_offset);
555  outstring("\n");
556  });
557  }
558 
559  if (!(flags & MAP_ANONYMOUS))
560  {
561  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);
562  if (!fi)
563  {
564  return -EBADF;
565  }
566 
567  L4::Cap<L4Re::Dataspace> fds = fi->data_space();
568 
569  if (!fds.is_valid())
570  {
571  return -EINVAL;
572  }
573 
574  if (size + offset > l4_round_page(fds->size()))
575  {
576  return -EINVAL;
577  }
578 
579  if (flags & MAP_PRIVATE)
580  {
581  DEBUG_LOG(debug_mmap, outstring("COW\n"););
582  ds->copy_in(anon_offset, fds, l4_trunc_page(offset), l4_round_page(size));
583  offset = anon_offset;
584  }
585  else
586  {
587  L4Re::virt_cap_alloc->take(fds);
588  ds = L4Re::Shared_cap<L4Re::Dataspace>(fds, L4Re::virt_cap_alloc);
589  }
590  }
591  else
592  offset = anon_offset;
593 
594 
595  if (!(flags & MAP_FIXED) && start == 0)
596  start = (void*)L4_PAGESIZE;
597 
598  int err;
599  char *data = (char *)start;
600  L4::Cap<Rm> r = Env::env()->rm();
601  l4_addr_t overmap_area = L4_INVALID_ADDR;
602 
603  if (flags & MAP_FIXED)
604  {
605  overmap_area = l4_addr_t(start);
606 
607  err = r->reserve_area(&overmap_area, size);
608  if (err < 0)
609  overmap_area = L4_INVALID_ADDR;
610 
611  rm_flags |= Rm::In_area;
612 
613  err = munmap(start, len);
614  if (err && err != -ENOENT)
615  return err;
616  }
617 
618  if (!(flags & MAP_FIXED)) rm_flags |= Rm::Search_addr;
619  if (!(prot & PROT_WRITE)) rm_flags |= Rm::Read_only;
620 
621  err = r->attach(&data, size, rm_flags,
622  L4::Ipc::make_cap(ds.get(), (prot & PROT_WRITE)
623  ? L4_CAP_FPAGE_RW
624  : L4_CAP_FPAGE_RO),
625  offset);
626 
627  DEBUG_LOG(debug_mmap, {
628  outstring(" MAPPED: ");
629  outhex32(ds.cap());
630  outstring(" addr: ");
631  outhex32(l4_addr_t(data));
632  outstring(" bytes: ");
633  outhex32(size);
634  outstring(" offset: ");
635  outhex32(offset);
636  outstring(" err=");
637  outdec(err);
638  outstring("\n");
639  });
640 
641 
642  if (overmap_area != L4_INVALID_ADDR)
643  r->free_area(overmap_area);
644 
645  if (err < 0)
646  return err;
647 
648  l4_assert (!(start && !data));
649 
650  // release ownership of the attached DS
651  ds.release();
652  *resptr = data;
653 
654  return 0;
655 }
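
Note that mmap2() takes the file offset in units of 4 KiB pages (the _offset << 12 above converts it back to a byte offset). A hypothetical libc-style wrapper, written against a plain L4Re::Vfs::Ops pointer rather than any particular global symbol, might look roughly like this:

#include <cerrno>
#include <sys/mman.h>
#include <l4/l4re_vfs/vfs.h>

// Hypothetical wrapper: 'ops' is whatever L4Re::Vfs::Ops instance the C
// library has been handed, for example the Vfs object defined above.
void *mmap_via_vfs(L4Re::Vfs::Ops *ops, void *addr, size_t len, int prot,
                   int flags, int fd, off_t byte_offset)
{
  void *res;
  // mmap2 semantics: the offset argument is passed in 4 KiB units.
  int err = ops->mmap2(addr, len, prot, flags, fd, byte_offset >> 12, &res);
  if (err < 0)
    {
      errno = -err;
      return MAP_FAILED;
    }
  return res;
}
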
656 
657 namespace {
658  class Auto_area
659  {
660  public:
661  L4::Cap<L4Re::Rm> r;
662  l4_addr_t a;
663 
664  explicit Auto_area(L4::Cap<L4Re::Rm> r, l4_addr_t a = L4_INVALID_ADDR)
665  : r(r), a(a) {}
666 
667  int reserve(l4_addr_t _a, l4_size_t sz, unsigned flags)
668  {
669  free();
670  a = _a;
671  int e = r->reserve_area(&a, sz, flags);
672  if (e)
673  a = L4_INVALID_ADDR;
674  return e;
675  }
676 
677  void free()
678  {
679  if (is_valid())
680  {
681  r->free_area(a);
682  a = L4_INVALID_ADDR;
683  }
684  }
685 
686  bool is_valid() const { return a != L4_INVALID_ADDR; }
687 
688  ~Auto_area() { free(); }
689  };
690 }
691 
692 int
693 Vfs::mremap(void *old_addr, size_t old_size, size_t new_size, int flags,
694  void **new_addr) L4_NOTHROW
695 {
696  using namespace L4Re;
697 
698  DEBUG_LOG(debug_mmap, {
699  outstring("Mremap: addr=");
700  outhex32((l4_umword_t)old_addr);
701  outstring(" old_size=");
702  outhex32(old_size);
703  outstring(" new_size=");
704  outhex32(new_size);
705  outstring("\n");
706  });
707 
708  if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
709  return -EINVAL;
710 
711  l4_addr_t oa = l4_trunc_page((l4_addr_t)old_addr);
712  if (oa != (l4_addr_t)old_addr)
713  return -EINVAL;
714 
715  bool const fixed = flags & MREMAP_FIXED;
716  bool const maymove = flags & MREMAP_MAYMOVE;
717 
718  L4::Cap<Rm> r = Env::env()->rm();
719 
720  // sanitize input parameters to multiples of pages
721  old_size = l4_round_page(old_size);
722  new_size = l4_round_page(new_size);
723 
724  if (!fixed)
725  {
726  if (new_size < old_size)
727  {
728  *new_addr = old_addr;
729  return munmap((void*)(oa + new_size), old_size - new_size);
730  }
731 
732  if (new_size == old_size)
733  {
734  *new_addr = old_addr;
735  return 0;
736  }
737  }
738 
739  Auto_area old_area(r);
740  int err = old_area.reserve(oa, old_size, 0);
741  if (err < 0)
742  return -EINVAL;
743 
744  l4_addr_t pad_addr;
745  Auto_area new_area(r);
746  if (fixed)
747  {
748  l4_addr_t na = l4_trunc_page((l4_addr_t)*new_addr);
749  if (na != (l4_addr_t)*new_addr)
750  return -EINVAL;
751 
752  // check if the current virtual memory area can be expanded
753  int err = new_area.reserve(na, new_size, 0);
754  if (err < 0)
755  return err;
756 
757  pad_addr = na;
758  // unmap all stuff and remap ours ....
759  }
760  else
761  {
762  l4_addr_t ta = oa + old_size;
763  unsigned long ts = new_size - old_size;
764  // check if the current virtual memory area can be expanded
765  int err = new_area.reserve(ta, ts, 0);
766  if (!maymove && err)
767  return -ENOMEM;
768 
769  l4_addr_t toffs;
770  unsigned tflags;
771  L4::Cap<L4Re::Dataspace> tds;
772 
773  err = r->find(&ta, &ts, &toffs, &tflags, &tds);
774 
775  // there is enough space to expand the mapping in place
776  if (err == -ENOENT || (err == 0 && (tflags & Rm::In_area)))
777  {
778  old_area.free(); // pad at the original address
779  pad_addr = oa + old_size;
780  *new_addr = old_addr;
781  }
782  else if (!maymove)
783  return -ENOMEM;
784  else
785  {
786  // search for a new area to remap
787  err = new_area.reserve(0, new_size, Rm::Search_addr);
788  if (err < 0)
789  return -ENOMEM;
790 
791  pad_addr = new_area.a + old_size;
792  *new_addr = (void *)new_area.a;
793  }
794  }
795 
796  if (old_area.is_valid())
797  {
798  l4_addr_t a = old_area.a;
799  unsigned long s = old_size;
800  l4_addr_t o;
801  unsigned f;
802  L4::Cap<L4Re::Dataspace> ds;
803 
804  for (; r->find(&a, &s, &o, &f, &ds) >= 0 && (!(f & Rm::In_area));)
805  {
806  if (a < old_area.a)
807  {
808  auto d = old_area.a - a;
809  a = old_area.a;
810  s -= d;
811  o += d;
812  }
813 
814  if (a + s > old_area.a + old_size)
815  s = old_area.a + old_size - a;
816 
817  l4_addr_t x = a - old_area.a + new_area.a;
818 
819  int err = r->attach(&x, s, Rm::In_area | f,
820  L4::Ipc::make_cap(ds, (f & Rm::Read_only)
821  ? L4_CAP_FPAGE_RO
822  : L4_CAP_FPAGE_RW),
823  o);
824  if (err < 0)
825  return err;
826 
827  // count the new attached ds reference
828  L4Re::virt_cap_alloc->take(ds);
829 
830  err = r->detach(a, s, &ds, This_task,
831  Rm::Detach_exact | Rm::Detach_keep);
832  if (err < 0)
833  return err;
834 
835  switch (err & Rm::Detach_result_mask)
836  {
837  case Rm::Split_ds:
838  // add a reference as we split up a mapping
839  if (ds.is_valid())
840  L4Re::virt_cap_alloc->take(ds);
841  break;
842  case Rm::Detached_ds:
843  if (ds.is_valid())
844  L4Re::virt_cap_alloc->release(ds);
845  break;
846  default:
847  break;
848  }
849  }
850  old_area.free();
851  }
852 
853  if (old_size < new_size)
854  {
855  l4_addr_t const pad_sz = new_size - old_size;
856  l4_addr_t toffs;
857  L4Re::Shared_cap<L4Re::Dataspace> tds;
858  int err = alloc_anon_mem(pad_sz, &tds, &toffs);
859  if (err)
860  return err;
861 
862  err = r->attach(&pad_addr, pad_sz, Rm::In_area | Rm::Detach_free,
863  L4::Ipc::make_cap_rw(tds.get()), toffs);
864  if (err < 0)
865  return err;
866 
867  // release ownership of tds, the region map is now the new owner
868  tds.release();
869  }
870 
871  return 0;
872 }
873 
874 int
875 Vfs::mprotect(const void *a, size_t sz, int prot) L4_NOTHROW
876 {
877  (void)a;
878  (void)sz;
879  return (prot & PROT_WRITE) ? -1 : 0;
880 }
881 
882 int
883 Vfs::msync(void *, size_t, int) L4_NOTHROW
884 { return 0; }
885 
886 int
887 Vfs::madvise(void *, size_t, int) L4_NOTHROW
888 { return 0; }
889 
890 }
891 
892 L4Re::Vfs::Ops *__rtld_l4re_env_posix_vfs_ops;
893 extern void *l4re_env_posix_vfs_ops __attribute__((alias("__rtld_l4re_env_posix_vfs_ops"), visibility("default")));
894 
895 namespace {
896  class Real_mount_tree : public L4Re::Vfs::Mount_tree
897  {
898  public:
899  explicit Real_mount_tree(char *n) : Mount_tree(n) {}
900 
901  void *operator new (size_t size)
902  { return __rtld_l4re_env_posix_vfs_ops->malloc(size); }
903 
904  void operator delete (void *mem)
905  { __rtld_l4re_env_posix_vfs_ops->free(mem); }
906  };
907 }
908 
909 int
910 Vfs::mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) throw()
911 {
912  using L4Re::Vfs::File;
913  using L4Re::Vfs::Mount_tree;
914  using L4Re::Vfs::Path;
915 
916  cxx::Ref_ptr<Mount_tree> root = get_root()->mount_tree();
917  if (!root)
918  return -EINVAL;
919 
920  cxx::Ref_ptr<Mount_tree> base;
921  Path p = root->lookup(Path(path), &base);
922 
923  while (!p.empty())
924  {
925  Path f = p.strip_first();
926 
927  if (f.empty())
928  return -EEXIST;
929 
930  char *name = __rtld_l4re_env_posix_vfs_ops->strndup(f.path(), f.length());
931  if (!name)
932  return -ENOMEM;
933 
934  cxx::Ref_ptr<Mount_tree> nt(new Real_mount_tree(name));
935  if (!nt)
936  {
937  __rtld_l4re_env_posix_vfs_ops->free(name);
938  return -ENOMEM;
939  }
940 
941  base->add_child_node(nt);
942  base = nt;
943 
944  if (p.empty())
945  {
946  nt->mount(dir);
947  return 0;
948  }
949  }
950 
951  return -EINVAL;
952 }
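
mount() walks the existing mount tree, creates any missing intermediate nodes as Real_mount_tree objects, and attaches the given directory at the final path component. A hypothetical caller, assuming vfs points at the Vfs instance defined above and dir is an already opened directory object:

#include <l4/l4re_vfs/vfs.h>
#include <l4/cxx/ref_ptr>

// Hypothetical usage sketch of L4Re::Vfs::Ops::mount().
int mount_example(L4Re::Vfs::Ops *vfs, cxx::Ref_ptr<L4Re::Vfs::File> const &dir)
{
  // 0 on success; -EEXIST if the path already exists in the mount tree,
  // -ENOMEM if a tree node cannot be allocated, -EINVAL otherwise.
  return vfs->mount("fs/data", dir);
}
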
953 
954 
955 #undef DEBUG_LOG
956 #undef GET_FILE_DBG
957 #undef GET_FILE
958 