L4Re - L4 Runtime Environment
vfs_impl.h
1 /*
2  * (c) 2008-2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
3  * Alexander Warg <warg@os.inf.tu-dresden.de>,
4  * Björn Döbel <doebel@os.inf.tu-dresden.de>
5  * economic rights: Technische Universität Dresden (Germany)
6  *
7  * This file is part of TUD:OS and distributed under the terms of the
8  * GNU General Public License 2.
9  * Please see the COPYING-GPL-2 file for details.
10  *
11  * As a special exception, you may use this file as part of a free software
12  * library without restriction. Specifically, if other files instantiate
13  * templates or use macros or inline functions from this file, or you compile
14  * this file and link it with other files to produce an executable, this
15  * file does not by itself cause the resulting executable to be covered by
16  * the GNU General Public License. This exception does not however
17  * invalidate any other reasons why the executable file might be covered by
18  * the GNU General Public License.
19  */
20 
21 #include "ds_util.h"
22 #include "fd_store.h"
23 #include "vcon_stream.h"
24 #include "ns_fs.h"
25 #include "vfs_api.h"
26 
27 #include <l4/re/env>
28 #include <l4/re/rm>
29 #include <l4/re/dataspace>
30 #include <l4/cxx/hlist>
31 #include <l4/cxx/std_alloc>
32 
33 #include <l4/l4re_vfs/backend>
34 
35 #include <unistd.h>
36 #include <cstdarg>
37 #include <errno.h>
38 #include <sys/uio.h>
39 
40 #if 0
41 #include <l4/sys/kdebug.h>
42 static int debug_mmap = 1;
43 #define DEBUG_LOG(level, dbg...) do { if (level) dbg } while (0)
44 #else
45 #define DEBUG_LOG(level, dbg...) do { } while (0)
46 #endif
47 
53 #define USE_BIG_ANON_DS
54 
55 using L4Re::Rm;
56 
57 namespace {
58 
59 using cxx::Ref_ptr;
60 
61 class Fd_store : public L4Re::Core::Fd_store
62 {
63 public:
64  Fd_store() throw();
65 };
66 
67 // for internal Vcon_streams we want to have a placement new operator, so
68 // inherit and add one
69 class Std_stream : public L4Re::Core::Vcon_stream
70 {
71 public:
72  Std_stream(L4::Cap<L4::Vcon> c) : L4Re::Core::Vcon_stream(c) {}
73 };
74 
75 Fd_store::Fd_store() throw()
76 {
77  // use this strange way to prevent deletion of the stdio object
78  // this depends on Fd_store being a singleton !!!
79  static char m[sizeof(Std_stream)] __attribute__((aligned(sizeof(long))));
80  Std_stream *s = new (m) Std_stream(L4Re::Env::env()->log());
81  // make sure that we never delete the static io stream thing
82  s->add_ref();
83  set(0, cxx::ref_ptr(s)); // stdin
84  set(1, cxx::ref_ptr(s)); // stdout
85  set(2, cxx::ref_ptr(s)); // stderr
86 }
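The constructor above builds the shared stdin/stdout/stderr stream with placement new into static storage and pins it with an extra reference, so dropping the last file-descriptor reference can never destroy it. A minimal, self-contained sketch of the same idiom in plain C++ (Obj and static_instance are hypothetical names, not L4Re API):

    #include <new>

    struct Obj
    {
      int refs = 0;
      void add_ref() { ++refs; }
    };

    // Returns a process-lifetime instance that is constructed once and never deleted.
    Obj &static_instance()
    {
      alignas(Obj) static unsigned char buf[sizeof(Obj)]; // static storage, never freed
      static Obj *obj = new (buf) Obj();                  // placement new: construct in place
      return *obj;
    }

    int main()
    {
      static_instance().add_ref(); // the extra reference plays the role of s->add_ref() above
    }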
87 
88 class Root_mount_tree : public L4Re::Vfs::Mount_tree
89 {
90 public:
91  Root_mount_tree() : L4Re::Vfs::Mount_tree(0) {}
92  void operator delete (void *) {}
93 };
94 
95 class Vfs : public L4Re::Vfs::Ops
96 {
97 private:
98  bool _early_oom;
99 
100 public:
101  Vfs()
102  : _early_oom(true), _root_mount(), _root(L4Re::Env::env())
103  {
104  _root_mount.add_ref();
105  _root.add_ref();
106  _root_mount.mount(cxx::ref_ptr(&_root));
107  _cwd = cxx::ref_ptr(&_root);
108 
109 #if 0
110  Ref_ptr<L4Re::Vfs::File> rom;
111  _root.openat("rom", 0, 0, &rom);
112 
113  _root_mount.create_tree("lib/foo", rom);
114 
115  _root.openat("lib", 0, 0, &_cwd);
116 
117 #endif
118  }
119 
120  int alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) throw();
121  Ref_ptr<L4Re::Vfs::File> free_fd(int fd) throw();
122  Ref_ptr<L4Re::Vfs::File> get_root() throw();
123  Ref_ptr<L4Re::Vfs::File> get_cwd() throw();
124  void set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) throw();
125  Ref_ptr<L4Re::Vfs::File> get_file(int fd) throw();
126  Ref_ptr<L4Re::Vfs::File> set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f = Ref_ptr<>::Nil) throw();
127  L4Re::Cap_alloc *cap_alloc() throw();
128 
129  int mmap2(void *start, size_t len, int prot, int flags, int fd,
130  off_t offset, void **ptr) throw();
131 
132  int munmap(void *start, size_t len) throw();
133  int mremap(void *old, size_t old_sz, size_t new_sz, int flags,
134  void **new_addr) throw();
135  int mprotect(const void *a, size_t sz, int prot) throw();
136  int msync(void *addr, size_t len, int flags) throw();
137  int madvise(void *addr, size_t len, int advice) throw();
138 
139  int register_file_system(L4Re::Vfs::File_system *f) throw();
140  int unregister_file_system(L4Re::Vfs::File_system *f) throw();
141  L4Re::Vfs::File_system *get_file_system(char const *fstype) throw();
142 
143  int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw();
144  int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw();
145  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) throw();
146  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) throw();
147 
148  void operator delete (void *) {}
149 
150  void *malloc(size_t size) noexcept { return Vfs_config::malloc(size); }
151  void free(void *m) noexcept { Vfs_config::free(m); }
152 
153 private:
154  Root_mount_tree _root_mount;
155  L4Re::Core::Env_dir _root;
156  Ref_ptr<L4Re::Vfs::File> _cwd;
157  Fd_store fds;
158 
159  L4Re::Vfs::File_system *_fs_registry;
160 
161  struct File_factory_item : cxx::H_list_item_t<File_factory_item>
162  {
163  cxx::Ref_ptr<L4Re::Vfs::File_factory> f;
164  explicit File_factory_item(cxx::Ref_ptr<L4Re::Vfs::File_factory> const &f)
165  : f(f) {};
166 
167  File_factory_item() = default;
168  File_factory_item(File_factory_item const &) = delete;
169  File_factory_item &operator = (File_factory_item const &) = delete;
170  };
171 
172  cxx::H_list_t<File_factory_item> _file_factories;
173 
174  l4_addr_t _anon_offset;
175  L4::Cap<L4Re::Dataspace> _anon_ds;
176 
177  int alloc_ds(unsigned long size, L4::Cap<L4Re::Dataspace> *ds);
178  int alloc_anon_mem(l4_umword_t size, L4::Cap<L4Re::Dataspace> *ds,
179  l4_addr_t *offset);
180 };
181 
182 static inline bool strequal(char const *a, char const *b)
183 {
184  for (;*a && *a == *b; ++a, ++b)
185  ;
186  return *a == *b;
187 }
188 
189 int
190 Vfs::register_file_system(L4Re::Vfs::File_system *f) throw()
191 {
192  using L4Re::Vfs::File_system;
193 
194  if (!f)
195  return -EINVAL;
196 
197  for (File_system *c = _fs_registry; c; c = c->next())
198  if (strequal(c->type(), f->type()))
199  return -EEXIST;
200 
201  f->next(_fs_registry);
202  _fs_registry = f;
203 
204  return 0;
205 }
206 
207 int
208 Vfs::unregister_file_system(L4Re::Vfs::File_system *f) throw()
209 {
210  using L4Re::Vfs::File_system;
211 
212  if (!f)
213  return -EINVAL;
214 
215  File_system **p = &_fs_registry;
216 
217  for (; *p; p = &(*p)->next())
218  if (*p == f)
219  {
220  *p = f->next();
221  f->next() = 0;
222  return 0;
223  }
224 
225  return -ENOENT;
226 }
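register_file_system() and unregister_file_system() keep the known file-system types on an intrusive singly linked list and reject duplicate type names. A stand-alone sketch of the same list handling in plain C++ (Fs, register_fs and unregister_fs are hypothetical stand-ins, not the L4Re interface):

    #include <cerrno>
    #include <cstring>

    struct Fs
    {
      char const *type;
      Fs *next = nullptr;
    };

    static Fs *registry;

    int register_fs(Fs *f)
    {
      if (!f)
        return -EINVAL;
      for (Fs *c = registry; c; c = c->next)      // reject duplicate type names
        if (std::strcmp(c->type, f->type) == 0)
          return -EEXIST;
      f->next = registry;                         // push onto the singly linked list
      registry = f;
      return 0;
    }

    int unregister_fs(Fs *f)
    {
      for (Fs **p = &registry; *p; p = &(*p)->next)
        if (*p == f)
          {
            *p = f->next;                         // unlink via pointer-to-pointer
            f->next = nullptr;
            return 0;
          }
      return -ENOENT;
    }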
227 
228 L4Re::Vfs::File_system *
229 Vfs::get_file_system(char const *fstype) throw()
230 {
231  bool try_dynamic = true;
232  for (;;)
233  {
234  using L4Re::Vfs::File_system;
235  for (File_system *c = _fs_registry; c; c = c->next())
236  if (strequal(c->type(), fstype))
237  return c;
238 
239  if (!try_dynamic)
240  return 0;
241 
242  // try to load a file system module dynamically
243  int res = Vfs_config::load_module(fstype);
244 
245  if (res < 0)
246  return 0;
247 
248  try_dynamic = false;
249  }
250 }
251 
252 int
253 Vfs::register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw()
254 {
255  if (!f)
256  return -EINVAL;
257 
258  void *x = this->malloc(sizeof(File_factory_item));
259  if (!x)
260  return -ENOMEM;
261 
262  auto ff = new (x, cxx::Nothrow()) File_factory_item(f);
263  _file_factories.push_front(ff);
264  return 0;
265 }
266 
267 int
268 Vfs::unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw()
269 {
270  for (auto p: _file_factories)
271  {
272  if (p->f == f)
273  {
274  _file_factories.remove(p);
275  p->~File_factory_item();
276  this->free(p);
277  return 0;
278  }
279  }
280  return -ENOENT;
281 }
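register_file_factory() and unregister_file_factory() pair the Ops-provided malloc()/free() with placement new and an explicit destructor call instead of plain new/delete. A small sketch of that pairing with a hypothetical type T:

    #include <cstdlib>
    #include <new>

    struct T { int v; explicit T(int v) : v(v) {} };

    T *create(int v)
    {
      void *mem = std::malloc(sizeof(T));
      if (!mem)
        return nullptr;
      return new (mem) T(v);   // construct in caller-provided storage
    }

    void destroy(T *t)
    {
      t->~T();                 // run the destructor explicitly ...
      std::free(t);            // ... then release the raw storage
    }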
282 
283 Ref_ptr<L4Re::Vfs::File_factory>
284 Vfs::get_file_factory(int proto) throw()
285 {
286  for (auto p: _file_factories)
287  if (p->f->proto() == proto)
288  return p->f;
289 
290  return Ref_ptr<L4Re::Vfs::File_factory>();
291 }
292 
293 Ref_ptr<L4Re::Vfs::File_factory>
294 Vfs::get_file_factory(char const *proto_name) throw()
295 {
296  for (auto p: _file_factories)
297  {
298  auto n = p->f->proto_name();
299  if (n)
300  {
301  char const *a = n;
302  char const *b = proto_name;
303  for (; *a && *b && *a == *b; ++a, ++b)
304  ;
305 
306  if ((*a == 0) && (*b == 0))
307  return p->f;
308  }
309  }
310 
311  return Ref_ptr<L4Re::Vfs::File_factory>();
312 }
313 
314 int
315 Vfs::alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) throw()
316 {
317  int fd = fds.alloc();
318  if (fd < 0)
319  return -EMFILE;
320 
321  if (f)
322  fds.set(fd, f);
323 
324  return fd;
325 }
326 
327 Ref_ptr<L4Re::Vfs::File>
328 Vfs::free_fd(int fd) throw()
329 {
330  Ref_ptr<L4Re::Vfs::File> f = fds.get(fd);
331 
332  if (!f)
333  return Ref_ptr<>::Nil;
334 
335  fds.free(fd);
336  return f;
337 }
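alloc_fd(), free_fd(), get_file() and set_fd() are the hooks a POSIX layer uses to manage the descriptor table. As an illustration, a dup()-like helper could be written purely in terms of these hooks (vfs_dup is a hypothetical name and the include path is an assumption):

    #include <errno.h>
    #include <l4/l4re_vfs/vfs.h>

    int vfs_dup(L4Re::Vfs::Ops *ops, int oldfd)
    {
      cxx::Ref_ptr<L4Re::Vfs::File> f = ops->get_file(oldfd);
      if (!f)
        return -EBADF;
      return ops->alloc_fd(f); // returns the new fd, or -EMFILE if the table is full
    }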
338 
339 
340 Ref_ptr<L4Re::Vfs::File>
341 Vfs::get_root() throw()
342 {
343  return cxx::ref_ptr(&_root);
344 }
345 
346 Ref_ptr<L4Re::Vfs::File>
347 Vfs::get_cwd() throw()
348 {
349  return _cwd;
350 }
351 
352 void
353 Vfs::set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) throw()
354 {
355  // FIXME: check for is dir
356  if (dir)
357  _cwd = dir;
358 }
359 
360 Ref_ptr<L4Re::Vfs::File>
361 Vfs::get_file(int fd) throw()
362 {
363  return fds.get(fd);
364 }
365 
366 Ref_ptr<L4Re::Vfs::File>
367 Vfs::set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f) throw()
368 {
369  Ref_ptr<L4Re::Vfs::File> old = fds.get(fd);
370  fds.set(fd, f);
371  return old;
372 }
373 
374 L4Re::Cap_alloc *
375 Vfs::cap_alloc() throw()
376 {
377  return L4Re::Core::cap_alloc();
378 }
379 
380 
381 
382 #define GET_FILE_DBG(fd, err) \
383  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
384  if (!fi) \
385  { \
386  return -err; \
387  }
388 
389 #define GET_FILE(fd, err) \
390  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
391  if (!fi) \
392  return -err;
393 
394 
395 int
396 Vfs::munmap(void *start, size_t len) L4_NOTHROW
397 {
398  using namespace L4;
399  using namespace L4Re;
400 
401  int err;
402  Cap<Dataspace> ds;
403  Cap<Rm> r = Env::env()->rm();
404 
405  while (1)
406  {
407  DEBUG_LOG(debug_mmap, {
408  outstring("DETACH: ");
409  outhex32(l4_addr_t(start));
410  outstring(" ");
411  outhex32(len);
412  outstring("\n");
413  });
414  err = r->detach(l4_addr_t(start), len, &ds, This_task);
415  if (err < 0)
416  return err;
417 
418  switch (err & Rm::Detach_result_mask)
419  {
420  case Rm::Split_ds:
421  return 0;
422  case Rm::Detached_ds:
423  if (ds.is_valid())
424  L4Re::Core::release_ds(ds);
425  break;
426  default:
427  break;
428  }
429 
430  if (!(err & Rm::Detach_again))
431  return 0;
432  }
433 }
434 
435 int
436 Vfs::alloc_ds(unsigned long size, L4::Cap<L4Re::Dataspace> *ds)
437 {
437 {
438  *ds = Vfs_config::cap_alloc.alloc<L4Re::Dataspace>();
439 
440  if (!ds->is_valid())
441  return -ENOMEM;
442 
443  int err;
444  if ((err = Vfs_config::allocator()->alloc(size, *ds)) < 0)
445  return err;
446 
447  DEBUG_LOG(debug_mmap, {
448  outstring("ANON DS ALLOCATED: size=");
449  outhex32(size);
450  outstring(" cap=");
451  outhex32(ds->cap());
452  outstring("\n");
453  });
454 
455  return 0;
456 }
457 
458 int
459 Vfs::alloc_anon_mem(l4_umword_t size, L4::Cap<L4Re::Dataspace> *ds,
460  l4_addr_t *offset)
461 {
462 #ifdef USE_BIG_ANON_DS
463  enum
464  {
465  ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
466  ANON_MEM_MAX_SIZE = 32UL << 20, // requests of at least this size get a dedicated dataspace
467  };
468 #else
469  enum
470  {
471  ANON_MEM_DS_POOL_SIZE = 256UL << 20, // size of a pool dataspace used for anon memory
472  ANON_MEM_MAX_SIZE = 0UL << 20, // requests of at least this size get a dedicated dataspace
473  };
474 #endif
475 
476  if (size >= ANON_MEM_MAX_SIZE)
477  {
478  int err;
479  if ((err = alloc_ds(size, ds)) < 0)
480  return err;
481 
482  *offset = 0;
483 
484  if (!_early_oom)
485  return err;
486 
487  return (*ds)->allocate(0, size);
488  }
489 
490  if (!_anon_ds.is_valid() || _anon_offset + size >= ANON_MEM_DS_POOL_SIZE)
491  {
492  if (_anon_ds.is_valid())
493  L4Re::Core::release_ds(_anon_ds);
494 
495  int err;
496  if ((err = alloc_ds(ANON_MEM_DS_POOL_SIZE, ds)) < 0)
497  return err;
498 
499  _anon_offset = 0;
500  _anon_ds = *ds;
501  }
502  else
503  *ds = _anon_ds;
504 
505  if (_early_oom)
506  {
507  if (int err = (*ds)->allocate(_anon_offset, size))
508  return err;
509  }
510 
511  *offset = _anon_offset;
512  _anon_offset += size;
513  return 0;
514 }
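With USE_BIG_ANON_DS defined, anonymous memory below ANON_MEM_MAX_SIZE (32 MiB) is carved bump-pointer style out of a shared 256 MiB pool dataspace, while larger requests get a dedicated dataspace; two successive 1 MiB anonymous mmaps, for example, land at pool offsets 0 and 1 MiB. A stand-alone sketch of that policy (Pool and alloc_anon are hypothetical names, capability handling left out):

    enum : unsigned long
    {
      POOL_SIZE   = 256UL << 20,  // one big backing dataspace
      BIG_REQUEST = 32UL << 20,   // requests >= this get a dedicated dataspace
    };

    struct Pool
    {
      unsigned long offset = POOL_SIZE; // forces allocation of the first pool

      // Places a small request in the shared pool and reports its offset;
      // big requests are left to the caller (dedicated dataspace).
      bool alloc_anon(unsigned long size, unsigned long *at)
      {
        if (size >= BIG_REQUEST)
          return false;
        if (offset + size >= POOL_SIZE)
          offset = 0;                   // (the real code also replaces the pool dataspace)
        *at = offset;
        offset += size;                 // bump the pool offset
        return true;
      }
    };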
515 
516 int
517 Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t _offset,
518  void **resptr) L4_NOTHROW
519 {
520  using namespace L4Re;
521  off64_t offset = l4_trunc_page(_offset << 12);
522 
523  start = (void*)l4_trunc_page(l4_addr_t(start));
524  len = l4_round_page(len);
525  l4_umword_t size = (len + L4_PAGESIZE-1) & ~(L4_PAGESIZE-1);
526 
527  // special code to just reserve an area of the virtual address space
528  if (flags & 0x1000000)
529  {
530  int err;
531  L4::Cap<Rm> r = Env::env()->rm();
532  l4_addr_t area = (l4_addr_t)start;
533  err = r->reserve_area(&area, size, L4Re::Rm::Search_addr);
534  if (err < 0)
535  return err;
536  *resptr = (void*)area;
537  DEBUG_LOG(debug_mmap, {
538  outstring("MMAP reserved area: ");
539  outhex32(area);
540  outstring(" size=");
541  outhex32(size);
542  outstring("\n");
543  });
544  return 0;
545  }
546 
547  L4::Cap<L4Re::Dataspace> ds;
548  l4_addr_t anon_offset = 0;
549  unsigned rm_flags = 0;
550 
551  if (flags & (MAP_ANONYMOUS | MAP_PRIVATE))
552  {
553  rm_flags |= L4Re::Rm::Detach_free;
554 
555  int err = alloc_anon_mem(size, &ds, &anon_offset);
556  if (err)
557  return err;
558 
559  DEBUG_LOG(debug_mmap, {
560  outstring("USE ANON MEM: ");
561  outhex32(ds.cap());
562  outstring(" offs=");
563  outhex32(anon_offset);
564  outstring("\n");
565  });
566  }
567 
568  if (!(flags & MAP_ANONYMOUS))
569  {
570  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);
571  if (!fi)
572  {
573  return -EBADF;
574  }
575 
576  L4::Cap<L4Re::Dataspace> fds = fi->data_space();
577 
578  if (!fds.is_valid())
579  {
580  return -EINVAL;
581  }
582 
583  if (size + offset > l4_round_page(fds->size()))
584  {
585  return -EINVAL;
586  }
587 
588  if (flags & MAP_PRIVATE)
589  {
590  DEBUG_LOG(debug_mmap, outstring("COW\n"););
591  ds->copy_in(anon_offset, fds, l4_trunc_page(offset), l4_round_page(size));
592  offset = anon_offset;
593  }
594  else
595  {
596  ds = fds;
597  }
598  }
599  else
600  offset = anon_offset;
601 
602 
603  if (!(flags & MAP_FIXED) && start == 0)
604  start = (void*)L4_PAGESIZE;
605 
606  int err;
607  char *data = (char *)start;
608  L4::Cap<Rm> r = Env::env()->rm();
609  l4_addr_t overmap_area = L4_INVALID_ADDR;
610 
611  if (flags & MAP_FIXED)
612  {
613  overmap_area = l4_addr_t(start);
614 
615  err = r->reserve_area(&overmap_area, size);
616  if (err < 0)
617  overmap_area = L4_INVALID_ADDR;
618 
619  rm_flags |= Rm::In_area;
620 
621  err = munmap(start, len);
622  if (err && err != -ENOENT)
623  return err;
624  }
625 
626  if (!(flags & MAP_FIXED)) rm_flags |= Rm::Search_addr;
627  if (!(prot & PROT_WRITE)) rm_flags |= Rm::Read_only;
628 
629  err = r->attach(&data, size, rm_flags,
630  L4::Ipc::make_cap(ds, (prot & PROT_WRITE)
631  ? L4_CAP_FPAGE_RW
632  : L4_CAP_FPAGE_RO),
633  offset);
634 
635  DEBUG_LOG(debug_mmap, {
636  outstring(" MAPPED: ");
637  outhex32(ds.cap());
638  outstring(" addr: ");
639  outhex32(l4_addr_t(data));
640  outstring(" bytes: ");
641  outhex32(size);
642  outstring(" offset: ");
643  outhex32(offset);
644  outstring(" err=");
645  outdec(err);
646  outstring("\n");
647  });
648 
649 
650  if (overmap_area != L4_INVALID_ADDR)
651  r->free_area(overmap_area);
652 
653  if (err < 0)
654  return err;
655 
656 
657  if (start && !data)
658  return -EINVAL;
659 
660  *resptr = data;
661 
662  return 0;
663 }
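From the application side this backend is reached through the ordinary POSIX calls: libc's mmap()/munmap() end up in Vfs::mmap2() and Vfs::munmap() above. A minimal usage sketch (the file name is only an example):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int map_example()
    {
      int fd = open("rom/hello", O_RDONLY);     // hypothetical path
      if (fd < 0)
        return -1;

      // Maps one page of the file's dataspace read-only into this task.
      void *p = mmap(nullptr, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
      close(fd);
      if (p == MAP_FAILED)
        return -1;

      // ... use the mapping ...
      munmap(p, 4096);
      return 0;
    }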
664 
665 namespace {
666  class Auto_area
667  {
668  public:
669  L4::Cap<L4Re::Rm> r;
670  l4_addr_t a;
671 
672  explicit Auto_area(L4::Cap<L4Re::Rm> r, l4_addr_t a = L4_INVALID_ADDR)
673  : r(r), a(a) {}
674 
675  int reserve(l4_addr_t _a, l4_size_t sz, unsigned flags)
676  {
677  a = _a;
678  int e = r->reserve_area(&a, sz, flags);
679  if (e)
680  a = L4_INVALID_ADDR;
681  return e;
682  }
683 
684  void free()
685  {
686  if (a != L4_INVALID_ADDR)
687  {
688  r->free_area(a);
689  a = L4_INVALID_ADDR;
690  }
691  }
692 
693  ~Auto_area() { free(); }
694  };
695 }
696 
697 int
698 Vfs::mremap(void *old_addr, size_t old_size, size_t new_size, int flags,
699  void **new_addr) L4_NOTHROW
700 {
701  using namespace L4Re;
702 
703  DEBUG_LOG(debug_mmap, {
704  outstring("Mremap: addr=");
705  outhex32((l4_umword_t)old_addr);
706  outstring(" old_size=");
707  outhex32(old_size);
708  outstring(" new_size=");
709  outhex32(new_size);
710  outstring("\n");
711  });
712 
713  if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
714  return -EINVAL;
715 
716  L4::Cap<Rm> r = Env::env()->rm();
717 
718  // sanitize input parameters to multiples of pages
719  l4_addr_t oa = l4_trunc_page((l4_addr_t)old_addr);
720  old_size = l4_round_page(old_size);
721  new_size = l4_round_page(new_size);
722 
723  l4_addr_t na;
724 
725  if (new_size < old_size)
726  {
727  *new_addr = old_addr;
728  return munmap((void*)(oa + new_size), old_size - new_size);
729  }
730 
731  if (new_size == old_size)
732  {
733  *new_addr = old_addr;
734  return 0;
735  }
736 
737  Auto_area area(r);
738 
739  if (!(flags & MREMAP_FIXED))
740  na = oa;
741  else
742  na = l4_trunc_page((l4_addr_t)new_addr);
743 
744  int err;
745 
746  // check if the current virtual memory area can be expanded
747  err = area.reserve(oa, new_size, 0);
748  if (err)
749  return err;
750 
751  l4_addr_t ta = oa + old_size;
752  unsigned long ts = new_size - old_size;
753  l4_addr_t toffs;
754  unsigned tflags;
755  L4::Cap<L4Re::Dataspace> tds;
756 
757  err = r->find(&ta, &ts, &toffs, &tflags, &tds);
758 
759  // if the area right behind the old mapping is occupied, the mapping has to be moved
760  if (!(err == -ENOENT || (err == 0 && (tflags & Rm::In_area))))
761  {
762  if ((flags & (MREMAP_FIXED | MREMAP_MAYMOVE)) != MREMAP_MAYMOVE)
763  return -EINVAL;
764 
765  // free our old reserved area, used for blocking the old memory region
766  area.free();
767 
768  // move
769  err = area.reserve(0, new_size, Rm::Search_addr);
770  if (err)
771  return err;
772 
773  na = area.a;
774 
775  // move all the old regions to the new place ...
776  Auto_area block_area(r);
777  err = block_area.reserve(oa, old_size, 0);
778  if (err)
779  return err;
780 
781  while (1)
782  {
783  ta = oa;
784  ts = old_size;
785 
786  err = r->find(&ta, &ts, &toffs, &tflags, &tds);
787  if (err == -ENOENT || (err == 0 && (tflags & Rm::In_area)))
788  break;
789 
790  if (err)
791  return err;
792 
793  if (ta < oa)
794  {
795  toffs += oa - ta;
796  ts -= oa - ta;
797  ta = oa;
798  }
799 
800  l4_addr_t n = na + (ta - oa);
801  unsigned long max_s = old_size - (ta - oa);
802 
803  if (ts > max_s)
804  ts = max_s;
805 
806  err = r->attach(&n, ts, tflags | Rm::In_area,
807  L4::Ipc::make_cap(tds, (tflags & Rm::Read_only)
808  ? L4_CAP_FPAGE_RO
809  : L4_CAP_FPAGE_RW),
810  toffs);
811  if (err)
812  return err;
813 
814  err = r->detach(ta, ts, &tds, This_task, Rm::Detach_exact | Rm::Detach_keep);
815  if (err < 0)
816  return err;
817 
818  switch (err & Rm::Detach_result_mask)
819  {
820  case Rm::Split_ds:
821  break;
822  case Rm::Detached_ds:
823  if (tds.is_valid())
824  L4Re::Core::release_ds(tds);
825  break;
826  default:
827  break;
828  }
829  }
830  }
831 
832  err = alloc_anon_mem(new_size - old_size, &tds, &toffs);
833  if (err)
834  return err;
835 
836  *new_addr = (void *)na;
837  na = na + old_size;
838  err = r->attach(&na, new_size - old_size, Rm::In_area | Rm::Detach_free,
839  L4::Ipc::make_cap(tds, (tflags & Rm::Read_only)
840  ? L4_CAP_FPAGE_RO
841  : L4_CAP_FPAGE_RW),
842  toffs);
843 
844  return err;
845 }
846 
847 int
848 Vfs::mprotect(const void *a, size_t sz, int prot) L4_NOTHROW
849 {
850  (void)a;
851  (void)sz;
852  return (prot & PROT_WRITE) ? -1 : 0;
853 }
854 
855 int
856 Vfs::msync(void *, size_t, int) L4_NOTHROW
857 { return 0; }
858 
859 int
860 Vfs::madvise(void *, size_t, int) L4_NOTHROW
861 { return 0; }
862 
863 }
864 
865 void *__rtld_l4re_env_posix_vfs_ops;
866 extern void *l4re_env_posix_vfs_ops __attribute__((alias("__rtld_l4re_env_posix_vfs_ops"), visibility("default")));
867 
868 
869 #undef DEBUG_LOG
870 #undef GET_FILE_DBG
871 #undef GET_FILE
872 
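The __rtld_l4re_env_posix_vfs_ops symbol (aliased as l4re_env_posix_vfs_ops) is how the C library and the dynamic linker find this Vfs instance. A hedged sketch of how a consumer could pick up the exported pointer (the cast and the include path are assumptions, not the documented interface):

    #include <l4/l4re_vfs/vfs.h>

    extern void *l4re_env_posix_vfs_ops; // exported by the runtime, see the alias above

    static inline L4Re::Vfs::Ops *posix_vfs_ops()
    {
      return static_cast<L4Re::Vfs::Ops *>(l4re_env_posix_vfs_ops);
    }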