#include "vcon_stream.h"
#include <l4/cxx/hlist>
#include <l4/cxx/std_alloc>
#include <l4/l4re_vfs/backend>

#include <l4/sys/kdebug.h>
static int debug_mmap = 1;
#define DEBUG_LOG(level, dbg...) do { if (level) dbg } while (0)

#define DEBUG_LOG(level, dbg...) do { } while (0)
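// Note: the two DEBUG_LOG definitions above are the debug and the no-op
// variant; in the full source they are presumably selected by a preprocessor
// conditional that is not part of this excerpt. The macro is used later in
// this file roughly like this (illustrative values only):
//
//   DEBUG_LOG(debug_mmap, {
//     outstring("DETACH: ");
//     outhex32(l4_addr_t(start));
//     outstring("\n");
//   });
//
// outstring() and outhex32() are the kernel-debugger output helpers from
// <l4/sys/kdebug.h> included above.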
#define USE_BIG_ANON_DS

class Fd_store : public L4Re::Core::Fd_store
// ...

class Std_stream : public L4Re::Core::Vcon_stream
// ...

Fd_store::Fd_store() throw()
  static char m[sizeof(Std_stream)] __attribute__((aligned(sizeof(long))));
  // ...
  set(0, cxx::ref_ptr(s));
  set(1, cxx::ref_ptr(s));
  set(2, cxx::ref_ptr(s));
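// Note (inferred from the fragments above): a single Std_stream instance,
// presumably constructed with placement new into the static, aligned buffer
// 'm' (the construction line is not part of this excerpt), is installed as
// file descriptors 0, 1 and 2, so stdin, stdout and stderr all share one
// Vcon-backed stream object.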
class Root_mount_tree : public L4Re::Vfs::Mount_tree
  Root_mount_tree() : L4Re::Vfs::Mount_tree(0) {}
  void operator delete (void *) {}

// ...
  : _early_oom(true), _root_mount(), _root(L4Re::Env::env())
  _root_mount.add_ref();
  _root_mount.mount(cxx::ref_ptr(&_root));
  _cwd = cxx::ref_ptr(&_root);

  Ref_ptr<L4Re::Vfs::File> rom;
  _root.openat("rom", 0, 0, &rom);
  _root_mount.create_tree("lib/foo", rom);
  _root.openat("lib", 0, 0, &_cwd);
int alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) throw();
Ref_ptr<L4Re::Vfs::File> free_fd(int fd) throw();
Ref_ptr<L4Re::Vfs::File> get_root() throw();
Ref_ptr<L4Re::Vfs::File> get_cwd() throw();
void set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) throw();
Ref_ptr<L4Re::Vfs::File> get_file(int fd) throw();
cxx::Pair<Ref_ptr<L4Re::Vfs::File>, int>
set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f = Ref_ptr<>::Nil) throw();

int mmap2(void *start, size_t len, int prot, int flags, int fd,
          off_t offset, void **ptr) throw();
int munmap(void *start, size_t len) throw();
int mremap(void *old, size_t old_sz, size_t new_sz, int flags,
           void **new_addr) throw();
int mprotect(const void *a, size_t sz, int prot) throw();
int msync(void *addr, size_t len, int flags) throw();
int madvise(void *addr, size_t len, int advice) throw();

int register_file_system(L4Re::Vfs::File_system *f) throw();
int unregister_file_system(L4Re::Vfs::File_system *f) throw();
L4Re::Vfs::File_system *get_file_system(char const *fstype) throw();

int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw();
int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw();
Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) throw();
Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) throw();

int mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) throw();

void operator delete (void *) {}

void *malloc(size_t size) noexcept { return Vfs_config::malloc(size); }
void free(void *m) noexcept { Vfs_config::free(m); }

// ...
Root_mount_tree _root_mount;
L4Re::Core::Env_dir _root;
Ref_ptr<L4Re::Vfs::File> _cwd;
File_factory_item() = default;
File_factory_item(File_factory_item const &) = delete;
File_factory_item &operator = (File_factory_item const &) = delete;
static inline bool strequal(char const *a, char const *b)
  for (;*a && *a == *b; ++a, ++b)

// ...
for (File_system *c = _fs_registry; c; c = c->next())
  if (strequal(c->type(), f->type()))
// ...
f->next(_fs_registry);

// ...
File_system **p = &_fs_registry;
for (; *p; p = &(*p)->next())
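// Note: as the fragments above show, registered file systems form an
// intrusive singly linked list rooted at _fs_registry. register_file_system()
// apparently scans the list for an entry with the same type() string (via
// strequal) before linking the new entry at the front with
// f->next(_fs_registry); unregister_file_system() walks the next() pointers
// to find and unlink the entry again.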
Vfs::get_file_system(char const *fstype) throw()
  bool try_dynamic = true;
  for (File_system *c = _fs_registry; c; c = c->next())
    if (strequal(c->type(), fstype))
  // ...
  int res = Vfs_config::load_module(fstype);

// ...
void *x = this->malloc(sizeof(File_factory_item));
// ...
_file_factories.push_front(ff);

// ...
for (auto p: _file_factories)
  // ...
  _file_factories.remove(p);
  p->~File_factory_item();
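// Note: file factories are kept in the _file_factories list as
// File_factory_item nodes. register_file_factory() obtains the node's storage
// through the VFS-private malloc() shown earlier and pushes it to the front;
// unregister_file_factory() removes the node and destroys it explicitly with
// p->~File_factory_item(), matching that manual allocation. File_factory_item
// is deliberately non-copyable (copy constructor and assignment deleted).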
Ref_ptr<L4Re::Vfs::File_factory>
Vfs::get_file_factory(int proto) throw()
  for (auto p: _file_factories)
    if (p->f->proto() == proto)
  // ...
  return Ref_ptr<L4Re::Vfs::File_factory>();

Ref_ptr<L4Re::Vfs::File_factory>
Vfs::get_file_factory(char const *proto_name) throw()
  for (auto p: _file_factories)
    auto n = p->f->proto_name();
    // ...
    char const *b = proto_name;
    for (; *a && *b && *a == *b; ++a, ++b)
    if ((*a == 0) && (*b == 0))
  // ...
  return Ref_ptr<L4Re::Vfs::File_factory>();
Vfs::alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) throw()
  int fd = fds.alloc();

Ref_ptr<L4Re::Vfs::File>
Vfs::free_fd(int fd) throw()
  Ref_ptr<L4Re::Vfs::File> f = fds.get(fd);
  // ...
  return Ref_ptr<>::Nil;

Ref_ptr<L4Re::Vfs::File>
Vfs::get_root() throw()
  return cxx::ref_ptr(&_root);

Ref_ptr<L4Re::Vfs::File>
Vfs::get_cwd() throw()

Vfs::set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) throw()

Ref_ptr<L4Re::Vfs::File>
Vfs::get_file(int fd) throw()

Vfs::set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f) throw()
  if (!fds.check_fd(fd))
    return cxx::pair(Ref_ptr<L4Re::Vfs::File>(Ref_ptr<>::Nil), EBADF);

  Ref_ptr<L4Re::Vfs::File> old = fds.get(fd);
  // ...
  return cxx::pair(old, 0);
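// Note: set_fd() reports EBADF for a descriptor that fails fds.check_fd();
// otherwise it presumably installs f in the table (the assignment is not part
// of this excerpt) and returns the previously stored file paired with 0.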
#define GET_FILE_DBG(fd, err) \
  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \

#define GET_FILE(fd, err) \
  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
Vfs::munmap(void *start, size_t len) L4_NOTHROW
  using namespace L4Re;

  DEBUG_LOG(debug_mmap, {
    outstring("DETACH: ");

  err = r->detach(l4_addr_t(start), len, &ds, This_task);

  switch (err & Rm::Detach_result_mask)
      L4Re::virt_cap_alloc->take(ds);
    case Rm::Detached_ds:
      L4Re::virt_cap_alloc->release(ds);

  if (!(err & Rm::Detach_again))

// ...
*ds = L4Re::make_shared_cap<L4Re::Dataspace>(L4Re::virt_cap_alloc);
if ((err = Vfs_config::allocator()->alloc(size, ds->get())) < 0)
DEBUG_LOG(debug_mmap, {
  outstring("ANON DS ALLOCATED: size=");
#ifdef USE_BIG_ANON_DS
  ANON_MEM_DS_POOL_SIZE = 256UL << 20,
  ANON_MEM_MAX_SIZE     = 32UL << 20,
// ...
  ANON_MEM_DS_POOL_SIZE = 256UL << 20,
  ANON_MEM_MAX_SIZE     = 0UL << 20,

// ...
if (size >= ANON_MEM_MAX_SIZE)
    if ((err = alloc_ds(size, ds)) < 0)

    return (*ds)->allocate(0, size);

if (!_anon_ds.is_valid() || _anon_offset + size >= ANON_MEM_DS_POOL_SIZE)
    if ((err = alloc_ds(ANON_MEM_DS_POOL_SIZE, ds)) < 0)

if (int err = (*ds)->allocate(_anon_offset, size))

*offset = _anon_offset;
_anon_offset += size;
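// Note on the anonymous-memory policy above: with USE_BIG_ANON_DS defined,
// requests of ANON_MEM_MAX_SIZE (32 MiB) or more get a dedicated dataspace,
// allocated and committed via (*ds)->allocate(0, size). Smaller requests are
// carved sequentially out of a shared pool dataspace of ANON_MEM_DS_POOL_SIZE
// (256 MiB): _anon_offset is the bump pointer into the pool, and a fresh pool
// dataspace is allocated once the current one is invalid or exhausted.
// Without USE_BIG_ANON_DS the limit is 0, so every request takes the
// dedicated-dataspace path.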
Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t _offset,
  using namespace L4Re;

  if (flags & 0x1000000)
      *resptr = (void*)area;
      DEBUG_LOG(debug_mmap, {
        outstring("MMAP reserved area: ");
  L4Re::Rm::Flags rm_flags(0);

  if (flags & (MAP_ANONYMOUS | MAP_PRIVATE))
      int err = alloc_anon_mem(size, &ds, &anon_offset);

      DEBUG_LOG(debug_mmap, {
        outstring("USE ANON MEM: ");
        outhex32(anon_offset);

  if (!(flags & MAP_ANONYMOUS))
      Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);

      if (flags & MAP_PRIVATE)
          DEBUG_LOG(debug_mmap, outstring("COW\n"););
          int err = ds->copy_in(anon_offset, fds, l4_trunc_page(offset),
          offset = anon_offset;

      L4Re::virt_cap_alloc->take(fds);
      offset = anon_offset;

  if (!(flags & MAP_FIXED) && start == 0)

  char *data = (char *)start;

  if (flags & MAP_FIXED)
      err = r->reserve_area(&overmap_area, size);
      rm_flags |= Rm::F::In_area;

      err = munmap(start, len);
      if (err && err != -ENOENT)

  if (!(flags & MAP_FIXED)) rm_flags |= Rm::F::Search_addr;
  if (prot & PROT_READ)     rm_flags |= Rm::F::R;
  if (prot & PROT_WRITE)    rm_flags |= Rm::F::W;
  if (prot & PROT_EXEC)     rm_flags |= Rm::F::X;

  err = r->attach(&data, size, rm_flags,

  DEBUG_LOG(debug_mmap, {
    outstring(" MAPPED: ");
    outstring(" addr: ");
    outstring(" bytes: ");
    outstring(" offset: ");

  r->free_area(overmap_area);
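// Note: for MAP_FIXED the code above first reserves the target range as an
// area (reserve_area) and marks the attach with Rm::F::In_area, unmaps
// whatever was mapped there before via munmap() (tolerating -ENOENT),
// attaches the new mapping inside the reservation, and finally releases the
// temporary reservation with free_area(). Without MAP_FIXED the region map
// picks an address via Rm::F::Search_addr, and the POSIX PROT_READ/WRITE/EXEC
// bits translate directly into Rm::F::R/W/X.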
~Auto_area() { free(); }

// ...
Vfs::mremap(void *old_addr, size_t old_size, size_t new_size, int flags,
  using namespace L4Re;

  DEBUG_LOG(debug_mmap, {
    outstring("Mremap: addr=");
    outstring(" old_size=");
    outstring(" new_size=");

  if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))

  bool const fixed = flags & MREMAP_FIXED;
  bool const maymove = flags & MREMAP_MAYMOVE;

  if (new_size < old_size)
      *new_addr = old_addr;
      return munmap((void*)(oa + new_size), old_size - new_size);

  if (new_size == old_size)
      *new_addr = old_addr;
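// Note: the two shortcuts above handle the simple mremap() cases: shrinking
// just unmaps the tail and keeps the old address, and an unchanged size
// returns the old address as-is. The remaining cases are handled below by
// reserving areas for the old and the new range.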
  Auto_area old_area(r);
  int err = old_area.reserve(oa, old_size, L4Re::Rm::Flags(0));

  Auto_area new_area(r);
  int err = new_area.reserve(na, new_size, L4Re::Rm::Flags(0));

  unsigned long ts = new_size - old_size;
  int err = new_area.reserve(ta, ts, L4Re::Rm::Flags(0));

  L4Re::Rm::Offset toffs;
  L4Re::Rm::Flags tflags;
  err = r->find(&ta, &ts, &toffs, &tflags, &tds);

  if (err == -ENOENT || (err == 0 && (tflags & Rm::F::In_area)))
      pad_addr = oa + old_size;
      *new_addr = old_addr;

  err = new_area.reserve(0, new_size, Rm::F::Search_addr);
  pad_addr = new_area.a + old_size;
  *new_addr = (void *)new_area.a;

  if (old_area.is_valid())
      unsigned long s = old_size;

      for (; r->find(&a, &s, &o, &f, &ds) >= 0 && (!(f & Rm::F::In_area));)
          auto d = old_area.a - a;

          if (a + s > old_area.a + old_size)
            s = old_area.a + old_size - a;

          l4_addr_t x = a - old_area.a + new_area.a;

          int err = r->attach(&x, s, Rm::F::In_area | f,
          L4Re::virt_cap_alloc->take(ds);

          err = r->detach(a, s, &ds, This_task,
                          Rm::Detach_exact | Rm::Detach_keep);

          switch (err & Rm::Detach_result_mask)
              L4Re::virt_cap_alloc->take(ds);
            case Rm::Detached_ds:
              L4Re::virt_cap_alloc->release(ds);

  if (old_size < new_size)
      l4_addr_t const pad_sz = new_size - old_size;
      int err = alloc_anon_mem(pad_sz, &tds, &toffs);

      err = r->attach(&pad_addr, pad_sz,
                      Rm::F::In_area | Rm::F::Detach_free | Rm::F::RWX,
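// Note: when the mapping grows, the extra pad_sz = new_size - old_size bytes
// are backed by anonymous memory from alloc_anon_mem() and attached at
// pad_addr inside the reserved area with Rm::F::Detach_free, so that portion
// of the dataspace is freed again when it is eventually detached.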
Vfs::mprotect(const void *a, size_t sz, int prot) L4_NOTHROW
  return (prot & PROT_WRITE) ? -1 : 0;
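// Note: this backend does not implement real protection changes: as the
// one-liner above shows, any mprotect() request that includes PROT_WRITE
// fails with -1, and every other request is reported as success without
// touching the region map.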
extern void *l4re_env_posix_vfs_ops
  __attribute__((alias("__rtld_l4re_env_posix_vfs_ops"), visibility("default")));

class Real_mount_tree : public L4Re::Vfs::Mount_tree
  explicit Real_mount_tree(char *n) : Mount_tree(n) {}

  void *operator new (size_t size)
  { return __rtld_l4re_env_posix_vfs_ops->malloc(size); }

  void operator delete (void *mem)
  { __rtld_l4re_env_posix_vfs_ops->free(mem); }

// ...
using L4Re::Vfs::Mount_tree;
using L4Re::Vfs::Path;

Path p = root->lookup(Path(path), &base);
Path f = p.strip_first();

char *name = __rtld_l4re_env_posix_vfs_ops->strndup(f.path(), f.length());
// ...
__rtld_l4re_env_posix_vfs_ops->free(name);
// ...
base->add_child_node(nt);
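// Note on the mount() fragments above: the path is resolved with
// root->lookup(), which yields the deepest already existing node of the mount
// tree in 'base' plus the remaining path. The remaining components are then
// turned into Real_mount_tree nodes, whose names are duplicated with the VFS
// ops' strndup() and whose storage comes from
// __rtld_l4re_env_posix_vfs_ops->malloc() via the class's operator new; each
// node is linked in with base->add_child_node(), and the duplicated name is
// apparently freed again on an error path. A minimal usage sketch, assuming
// the L4Re::Vfs::vfs_ops accessor and an already opened directory handle
// 'dir' (both not shown in this excerpt):
//
//   cxx::Ref_ptr<L4Re::Vfs::File> dir = /* open a directory elsewhere */;
//   int err = L4Re::Vfs::vfs_ops->mount("/mnt/data", dir);  // hypothetical path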