#include "vcon_stream.h"
// ...
#include <l4/cxx/hlist>
#include <l4/cxx/std_alloc>
// ...
#include <l4/l4re_vfs/backend>
// ...
#include <l4/sys/kdebug.h>
static int debug_mmap = 1;
#define DEBUG_LOG(level, dbg...) do { if (level) dbg } while (0)
// ... (the alternative, empty DEBUG_LOG variant below is selected by an
//      elided preprocessor conditional in non-debug builds)
#define DEBUG_LOG(level, dbg...) do { } while (0)
// ...
#define USE_BIG_ANON_DS

class Fd_store : public L4Re::Core::Fd_store
{
  // ...
};

class Std_stream : public L4Re::Core::Vcon_stream
{
  // ...
};
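// The constructor below sets up the standard I/O descriptors: a Std_stream
// (a Vcon-backed stream) is apparently constructed in the statically
// allocated, suitably aligned buffer 'm' (presumably via placement new, in
// lines elided from this excerpt) and the resulting object 's' is installed
// as file descriptors 0, 1 and 2.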
Fd_store::Fd_store() throw()
{
  // ...
  static char m[sizeof(Std_stream)] __attribute__((aligned(sizeof(long))));
  // ...
  set(0, cxx::ref_ptr(s));
  set(1, cxx::ref_ptr(s));
  set(2, cxx::ref_ptr(s));
}
class Root_mount_tree : public L4Re::Vfs::Mount_tree
{
public:
  Root_mount_tree() : L4Re::Vfs::Mount_tree(0) {}
  void operator delete (void *) {}
};
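// Constructor fragment of the VFS backend object (the class head is elided in
// this excerpt). It mounts the environment directory _root as the root of the
// mount tree and makes it the initial working directory; the openat() calls
// that follow additionally resolve "rom", hang it into the tree at "lib/foo"
// and switch the cwd to "lib".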
// ...
  : _early_oom(true), _root_mount(), _root(L4Re::Env::env())
  {
    _root_mount.add_ref();
    // ...
    _root_mount.mount(cxx::ref_ptr(&_root));
    _cwd = cxx::ref_ptr(&_root);
    // ...
    Ref_ptr<L4Re::Vfs::File> rom;
    _root.openat("rom", 0, 0, &rom);
    // ...
    _root_mount.create_tree("lib/foo", rom);
    // ...
    _root.openat("lib", 0, 0, &_cwd);
    // ...
  }
  int alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) throw();
  Ref_ptr<L4Re::Vfs::File> free_fd(int fd) throw();
  Ref_ptr<L4Re::Vfs::File> get_root() throw();
  Ref_ptr<L4Re::Vfs::File> get_cwd() throw();
  void set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) throw();
  Ref_ptr<L4Re::Vfs::File> get_file(int fd) throw();
  Ref_ptr<L4Re::Vfs::File> set_fd(int fd,
                                  Ref_ptr<L4Re::Vfs::File> const &f = Ref_ptr<>::Nil) throw();

  int mmap2(void *start, size_t len, int prot, int flags, int fd,
            off_t offset, void **ptr) throw();
  int munmap(void *start, size_t len) throw();
  int mremap(void *old, size_t old_sz, size_t new_sz, int flags,
             void **new_addr) throw();
  int mprotect(const void *a, size_t sz, int prot) throw();
  int msync(void *addr, size_t len, int flags) throw();
  int madvise(void *addr, size_t len, int advice) throw();

  int register_file_system(L4Re::Vfs::File_system *f) throw();
  int unregister_file_system(L4Re::Vfs::File_system *f) throw();
  L4Re::Vfs::File_system *get_file_system(char const *fstype) throw();

  int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw();
  int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) throw();
  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) throw();
  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) throw();
  int mount(char const *path, cxx::Ref_ptr<L4Re::Vfs::File> const &dir) throw();

  void operator delete (void *) {}

  void *malloc(size_t size) noexcept { return Vfs_config::malloc(size); }
  void free(void *m) noexcept { Vfs_config::free(m); }

  // ...
  Root_mount_tree _root_mount;
  L4Re::Core::Env_dir _root;
  Ref_ptr<L4Re::Vfs::File> _cwd;
  // ...
  // File_factory_item members (the enclosing struct definition is elided):
  File_factory_item() = default;
  File_factory_item(File_factory_item const &) = delete;
  File_factory_item &operator = (File_factory_item const &) = delete;
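// Judging by its use further down, File_factory_item is the registry node
// that keeps a registered file factory in an intrusive list: it carries the
// factory reference in a member 'f', and copying is deleted so a node cannot
// be duplicated behind the list's back.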
static inline bool strequal(char const *a, char const *b)
{
  for (; *a && *a == *b; ++a, ++b)
    ;
  // ...
}

  // Vfs::register_file_system (fragment): a duplicate type is checked for
  // first, then the new file system is pushed onto the registry list.
  for (File_system *c = _fs_registry; c; c = c->next())
    if (strequal(c->type(), f->type()))
      { /* ... */ }

  f->next(_fs_registry);
  // ...

  // Vfs::unregister_file_system (fragment): walks the singly linked registry
  // via a pointer-to-pointer to unlink the matching entry.
  File_system **p = &_fs_registry;
  // ...
  for (; *p; p = &(*p)->next())
    { /* ... */ }
L4Re::Vfs::File_system *
Vfs::get_file_system(char const *fstype) throw()
{
  bool try_dynamic = true;
  // ...
  for (File_system *c = _fs_registry; c; c = c->next())
    if (strequal(c->type(), fstype))
      { /* ... */ }
  // ...
  int res = Vfs_config::load_module(fstype);
  // ...
}
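// Lookup strategy visible in the fragment above: the registered file systems
// are scanned first; if no matching type is found, Vfs_config::load_module()
// is used to try to pull in a matching file-system module dynamically (the
// retry logic around try_dynamic is elided here).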
  // Vfs::register_file_factory (fragment)
  void *x = this->malloc(sizeof(File_factory_item));
  // ...
  _file_factories.push_front(ff);

  // Vfs::unregister_file_factory (fragment)
  for (auto p: _file_factories)
    {
      // ...
      _file_factories.remove(p);
      p->~File_factory_item();
      // ...
    }
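// Registry nodes are managed by hand in the fragments above:
// register_file_factory() obtains raw storage through the backend's own
// malloc() (which forwards to Vfs_config::malloc), and
// unregister_file_factory() unlinks the node and runs its destructor
// explicitly; the matching storage release is elided in this excerpt.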
Ref_ptr<L4Re::Vfs::File_factory>
Vfs::get_file_factory(int proto) throw()
{
  for (auto p: _file_factories)
    if (p->f->proto() == proto)
      { /* ... */ }

  return Ref_ptr<L4Re::Vfs::File_factory>();
}

Ref_ptr<L4Re::Vfs::File_factory>
Vfs::get_file_factory(char const *proto_name) throw()
{
  for (auto p: _file_factories)
    {
      auto n = p->f->proto_name();
      // ...
      char const *b = proto_name;
      for (; *a && *b && *a == *b; ++a, ++b)
        ;
      // ...
      if ((*a == 0) && (*b == 0))
        { /* ... */ }
    }

  return Ref_ptr<L4Re::Vfs::File_factory>();
}
int
Vfs::alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) throw()
{
  int fd = fds.alloc();
  // ...
}

Ref_ptr<L4Re::Vfs::File>
Vfs::free_fd(int fd) throw()
{
  Ref_ptr<L4Re::Vfs::File> f = fds.get(fd);
  // ...
  return Ref_ptr<>::Nil;
  // ...
}

Ref_ptr<L4Re::Vfs::File>
Vfs::get_root() throw()
{
  return cxx::ref_ptr(&_root);
}

Ref_ptr<L4Re::Vfs::File>
Vfs::get_cwd() throw()
{
  // ...
}

void
Vfs::set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) throw()
{
  // ...
}

Ref_ptr<L4Re::Vfs::File>
Vfs::get_file(int fd) throw()
{
  // ...
}

Ref_ptr<L4Re::Vfs::File>
Vfs::set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f) throw()
{
  Ref_ptr<L4Re::Vfs::File> old = fds.get(fd);
  // ...
}
#define GET_FILE_DBG(fd, err)                        \
  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);         \
  /* ... */

#define GET_FILE(fd, err)                            \
  Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);         \
  /* ... */

int
Vfs::munmap(void *start, size_t len) L4_NOTHROW
{
  using namespace L4Re;
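  // As far as the fragments below show, munmap() loops around Rm detach
  // operations: after each detach, the result bits in Rm::Detach_result_mask
  // decide whether the affected dataspace capability gets an additional local
  // reference (virt_cap_alloc->take) or is dropped (virt_cap_alloc->release
  // once Rm::Detached_ds is reported), and the loop continues while
  // Rm::Detach_again is set in the result.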
  // ...
  DEBUG_LOG(debug_mmap, {
    outstring("DETACH: ");
    // ...
  });
  // ...
  switch (err & Rm::Detach_result_mask)
    {
    // ...
      L4Re::virt_cap_alloc->take(ds);
      // ...
    case Rm::Detached_ds:
      // ...
      L4Re::virt_cap_alloc->release(ds);
      // ...
    }
  // ...
  if (!(err & Rm::Detach_again))
    { /* ... */ }
  // ...
}
// Fragment of the helper that allocates a fresh anonymous dataspace
// (alloc_ds, judging by the calls further down):
  *ds = L4Re::make_shared_cap<L4Re::Dataspace>(L4Re::virt_cap_alloc);
  // ...
  if ((err = Vfs_config::allocator()->alloc(size, ds->get())) < 0)
    { /* ... */ }
  // ...
  DEBUG_LOG(debug_mmap, {
    outstring("ANON DS ALLOCATED: size=");
    // ...
  });

#ifdef USE_BIG_ANON_DS
  // ...
  ANON_MEM_DS_POOL_SIZE = 256UL << 20,
  ANON_MEM_MAX_SIZE     = 32UL << 20,
  // ...
#else
  // ...
  ANON_MEM_DS_POOL_SIZE = 256UL << 20,
  ANON_MEM_MAX_SIZE     = 0UL << 20,
  // ...
#endif
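// Anonymous memory policy of the following fragment (apparently the body of
// alloc_anon_mem(), which the mmap2 path calls below): requests of at least
// ANON_MEM_MAX_SIZE get a dataspace of their own, while smaller requests are
// carved out of a shared pool dataspace of ANON_MEM_DS_POOL_SIZE bytes at the
// running offset _anon_offset; a new pool is allocated when the current one
// is invalid or would overflow.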
  if (size >= ANON_MEM_MAX_SIZE)
    {
      // ...
      if ((err = alloc_ds(size, ds)) < 0)
        { /* ... */ }
      // ...
      return (*ds)->allocate(0, size);
    }
  // ...
  if (!_anon_ds.is_valid() || _anon_offset + size >= ANON_MEM_DS_POOL_SIZE)
    {
      // ...
      if ((err = alloc_ds(ANON_MEM_DS_POOL_SIZE, ds)) < 0)
        { /* ... */ }
      // ...
    }
  // ...
  if (int err = (*ds)->allocate(_anon_offset, size))
    { /* ... */ }

  *offset = _anon_offset;
  _anon_offset += size;
int
Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t _offset,
           void **resptr) L4_NOTHROW
{
  using namespace L4Re;
  // ...
  if (flags & 0x1000000)
    {
      // special request, apparently "reserve an area only" (cf. debug output)
      // ...
      *resptr = (void*)area;
      DEBUG_LOG(debug_mmap, {
        outstring("MMAP reserved area: ");
        // ...
      });
      // ...
    }
  // ...
  unsigned rm_flags = 0;

  if (flags & (MAP_ANONYMOUS | MAP_PRIVATE))
    {
      // ...
      int err = alloc_anon_mem(size, &ds, &anon_offset);
      // ...
      DEBUG_LOG(debug_mmap, {
        outstring("USE ANON MEM: ");
        // ...
        outhex32(anon_offset);
        // ...
      });
      // ...
    }
  if (!(flags & MAP_ANONYMOUS))
    {
      Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);
      // ...
      if (flags & MAP_PRIVATE)
        {
          DEBUG_LOG(debug_mmap, outstring("COW\n"););
          // ...
          offset = anon_offset;
        }
      // ...
      // note: 'fds' below apparently names a local dataspace capability
      // declared in an elided line; it shadows the fd-store member.
      L4Re::virt_cap_alloc->take(fds);
      // ...
    }
  // ...
  offset = anon_offset;

  if (!(flags & MAP_FIXED) && start == 0)
    { /* ... */ }

  char *data = (char *)start;
  // ...
  if (flags & MAP_FIXED)
    {
      // ...
      rm_flags |= Rm::In_area;
      // ...
      err = munmap(start, len);
      if (err && err != -ENOENT)
        { /* ... */ }
      // ...
    }

  if (!(flags & MAP_FIXED))  rm_flags |= Rm::Search_addr;
  if (!(prot & PROT_WRITE))  rm_flags |= Rm::Read_only;
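  // Mapping of the POSIX request onto region-map flags, as established above:
  // MAP_FIXED pre-reserves the target range (Rm::In_area) and unmaps whatever
  // was there via munmap(); without MAP_FIXED the region map is asked to pick
  // an address (Rm::Search_addr); a mapping without PROT_WRITE is attached
  // read-only (Rm::Read_only). Anonymous or private mappings are backed by
  // the dataspace obtained from alloc_anon_mem() further up.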
  err = r->attach(&data, size, rm_flags,
                  /* ... */);
  // ...
  DEBUG_LOG(debug_mmap, {
    outstring(" MAPPED: ");
    // ...
    outstring(" addr: ");
    // ...
    outstring(" bytes: ");
    // ...
    outstring(" offset: ");
    // ...
  });
  // ...
}
// Auto_area helper (only its destructor is visible in this excerpt):
  // ...
  ~Auto_area() { free(); }
  // ...

int
Vfs::mremap(void *old_addr, size_t old_size, size_t new_size, int flags,
            void **new_addr) L4_NOTHROW
{
  using namespace L4Re;
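  // As far as the fragments below show, mremap() works as follows: shrinking
  // returns the old address and simply munmap()s the tail; an unchanged size
  // is a no-op; growing reserves the old range and a new range as Rm areas,
  // re-attaches every region found in the old range at the matching offset
  // inside the new area (detaching it from the old one with Rm::Detach_exact
  // | Rm::Detach_keep), and finally backs the added tail with freshly
  // allocated anonymous memory. MREMAP_FIXED without MREMAP_MAYMOVE is
  // apparently rejected.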
  DEBUG_LOG(debug_mmap, {
    outstring("Mremap: addr=");
    // ...
    outstring(" old_size=");
    // ...
    outstring(" new_size=");
    // ...
  });
  // ...
  if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
    { /* ... */ }
  // ...
  bool const fixed = flags & MREMAP_FIXED;
  bool const maymove = flags & MREMAP_MAYMOVE;
  // ...
  if (new_size < old_size)
    {
      // ...
      *new_addr = old_addr;
      return munmap((void*)(oa + new_size), old_size - new_size);
    }

  if (new_size == old_size)
    {
      *new_addr = old_addr;
      // ...
    }
  // ...
  Auto_area old_area(r);
  int err = old_area.reserve(oa, old_size, 0);
  // ...
  Auto_area new_area(r);
  // ...
  int err = new_area.reserve(na, new_size, 0);
  // ...
  unsigned long ts = new_size - old_size;
  // ...
  int err = new_area.reserve(ta, ts, 0);
  // ...
  err = r->find(&ta, &ts, &toffs, &tflags, &tds);
  // ...
  if (err == -ENOENT || (err == 0 && (tflags & Rm::In_area)))
    {
      // ...
      pad_addr = oa + old_size;
      *new_addr = old_addr;
      // ...
    }
  // ...
  err = new_area.reserve(0, new_size, Rm::Search_addr);
  // ...
  pad_addr = new_area.a + old_size;
  *new_addr = (void *)new_area.a;
  // ...
  if (old_area.is_valid())
    {
      // ...
      unsigned long s = old_size;
      // ...
      for (; r->find(&a, &s, &o, &f, &ds) >= 0 && (!(f & Rm::In_area));)
        {
          // ...
          auto d = old_area.a - a;
          // ...
          if (a + s > old_area.a + old_size)
            s = old_area.a + old_size - a;
          // ...
          l4_addr_t x = a - old_area.a + new_area.a;
          // ...
          int err = r->attach(&x, s, Rm::In_area | f,
                              /* ... */);
          // ...
          L4Re::virt_cap_alloc->take(ds);
          // ...
          err = r->detach(a, s, &ds, This_task,
                          Rm::Detach_exact | Rm::Detach_keep);
          // ...
          switch (err & Rm::Detach_result_mask)
            {
            // ...
              L4Re::virt_cap_alloc->take(ds);
              // ...
            case Rm::Detached_ds:
              // ...
              L4Re::virt_cap_alloc->release(ds);
              // ...
            }
          // ...
        }
    }

  if (old_size < new_size)
    {
      // ...
      l4_addr_t const pad_sz = new_size - old_size;
      // ...
      int err = alloc_anon_mem(pad_sz, &tds, &toffs);
      // ...
      err = r->attach(&pad_addr, pad_sz, Rm::In_area | Rm::Detach_free,
                      /* ... */);
      // ...
    }
  // ...
}
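// The mprotect() stub below appears to implement only a coarse policy: it
// reports success as long as no write permission is requested, and fails
// (returns -1) whenever PROT_WRITE would have to be granted.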
int
Vfs::mprotect(const void *a, size_t sz, int prot) L4_NOTHROW
{
  // ...
  return (prot & PROT_WRITE) ? -1 : 0;
}
extern void *l4re_env_posix_vfs_ops
  __attribute__((alias("__rtld_l4re_env_posix_vfs_ops"), visibility("default")));
// ...

class Real_mount_tree : public L4Re::Vfs::Mount_tree
{
public:
  explicit Real_mount_tree(char *n) : Mount_tree(n) {}

  void *operator new (size_t size)
  { return __rtld_l4re_env_posix_vfs_ops->malloc(size); }

  void operator delete (void *mem)
  { __rtld_l4re_env_posix_vfs_ops->free(mem); }
};
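// Real_mount_tree routes its operator new/delete through the exported VFS
// operations table, so mount nodes are allocated and freed with the backend's
// own malloc()/free(). The fragment below belongs to the mount()
// implementation: the path is resolved against the root mount tree, the
// unresolved remainder is duplicated with strndup() and apparently used to
// name the new mount node 'nt', which is then hooked into the tree with
// add_child_node(); the error handling in between is elided.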
  using L4Re::Vfs::Mount_tree;
  using L4Re::Vfs::Path;
  // ...
  Path p = root->lookup(Path(path), &base);
  // ...
  Path f = p.strip_first();
  // ...
  char *name = __rtld_l4re_env_posix_vfs_ops->strndup(f.path(), f.length());
  // ...
  __rtld_l4re_env_posix_vfs_ops->free(name);
  // ...
  base->add_child_node(nt);
  // ...