L4Re Operating System Framework
Interface and Usage Documentation
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
region_mapping
Go to the documentation of this file.
1// -*- Mode: C++ -*-
2// vim:ft=cpp
7/*
8 * (c) 2008-2009 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
9 * Alexander Warg <warg@os.inf.tu-dresden.de>,
10 * Björn Döbel <doebel@os.inf.tu-dresden.de>
11 * economic rights: Technische Universität Dresden (Germany)
12 *
13 * License: see LICENSE.spdx (in this directory or the directories above)
14 */
15
16#pragma once
17
18#include <l4/cxx/avl_map>
19#include <l4/sys/types.h>
20#include <l4/re/rm>
21
22
23namespace L4Re { namespace Util {
24class Region
25{
26private:
27 l4_addr_t _start, _end;
28
29public:
30 Region() noexcept : _start(~0UL), _end(~0UL) {}
31 Region(l4_addr_t addr) noexcept : _start(addr), _end(addr) {}
32 Region(l4_addr_t start, l4_addr_t end) noexcept
33 : _start(start), _end(end) {}
34 l4_addr_t start() const noexcept { return _start; }
35 l4_addr_t end() const noexcept { return _end; }
36 unsigned long size() const noexcept { return end() - start() + 1; }
37 bool invalid() const noexcept { return _start == ~0UL && _end == ~0UL; }
38 bool operator < (Region const &o) const noexcept
39 { return end() < o.start(); }
40 bool contains(Region const &o) const noexcept
41 { return o.start() >= start() && o.end() <= end(); }
42 bool operator == (Region const &o) const noexcept
43 { return o.start() == start() && o.end() == end(); }
44 ~Region() noexcept {}
45};
46
47template< typename DS, typename OPS >
48class Region_handler
49{
50private:
51 L4Re::Rm::Offset _offs;
52 DS _mem;
53 l4_cap_idx_t _client_cap = L4_INVALID_CAP;
55
56public:
57 typedef DS Dataspace;
58 typedef OPS Ops;
59 typedef typename OPS::Map_result Map_result;
60
61 Region_handler() noexcept : _offs(0), _mem(), _flags() {}
62 Region_handler(Dataspace const &mem, l4_cap_idx_t client_cap,
63 L4Re::Rm::Offset offset = 0,
65 : _offs(offset), _mem(mem), _client_cap(client_cap), _flags(flags)
66 {}
67
68 Dataspace const &memory() const noexcept
69 {
70 return _mem;
71 }
72
73 l4_cap_idx_t client_cap_idx() const noexcept
74 {
75 return _client_cap;
76 }
77
78 L4Re::Rm::Offset offset() const noexcept
79 {
80 return _offs;
81 }
82
83 constexpr bool is_ro() const noexcept
84 {
85 return !(_flags & L4Re::Rm::F::W);
86 }
87
88 L4Re::Rm::Region_flags caching() const noexcept
89 {
90 return _flags & L4Re::Rm::F::Caching_mask;
91 }
92
93 L4Re::Rm::Region_flags flags() const noexcept
94 {
95 return _flags;
96 }
97
98 Region_handler operator + (l4_int64_t offset) const noexcept
99 {
100 Region_handler n = *this; n._offs += offset; return n;
101 }
102
103 void free(l4_addr_t start, unsigned long size) const noexcept
104 {
105 Ops::free(this, start, size);
106 }
107
108 int map(l4_addr_t addr, Region const &r, bool writable,
109 Map_result *result) const
110 {
111 return Ops::map(this, addr, r, writable, result);
112 }
113
114 int map_info(l4_addr_t *start_addr, l4_addr_t *end_addr) const
115 {
116 return Ops::map_info(this, start_addr, end_addr);
117 }
118
119};
120
121
122template< typename Hdlr, template<typename T> class Alloc >
123class Region_map
124{
125protected:
127 Tree _rm;
128 Tree _am;
129
130private:
131 l4_addr_t _start;
132 l4_addr_t _end;
133
134protected:
135 void set_limits(l4_addr_t start, l4_addr_t end) noexcept
136 {
137 _start = start;
138 _end = end;
139 }
140
141public:
142 typedef typename Tree::Item_type Item;
143 typedef typename Tree::Node Node;
144 typedef typename Tree::Key_type Key_type;
145 typedef Hdlr Region_handler;
146
147 typedef typename Tree::Iterator Iterator;
148 typedef typename Tree::Const_iterator Const_iterator;
149 typedef typename Tree::Rev_iterator Rev_iterator;
150 typedef typename Tree::Const_rev_iterator Const_rev_iterator;
151
152 Iterator begin() noexcept { return _rm.begin(); }
153 Const_iterator begin() const noexcept { return _rm.begin(); }
154 Iterator end() noexcept { return _rm.end(); }
155 Const_iterator end() const noexcept { return _rm.end(); }
156
157 Iterator area_begin() noexcept { return _am.begin(); }
158 Const_iterator area_begin() const noexcept { return _am.begin(); }
159 Iterator area_end() noexcept { return _am.end(); }
160 Const_iterator area_end() const noexcept { return _am.end(); }
161 Node area_find(Key_type const &c) const noexcept { return _am.find_node(c); }
162
163 l4_addr_t min_addr() const noexcept { return _start; }
164 l4_addr_t max_addr() const noexcept { return _end; }
165
166
167 Region_map(l4_addr_t start, l4_addr_t end) noexcept : _start(start), _end(end) {}
168
169 Node find(Key_type const &key) const noexcept
170 {
171 Node n = _rm.find_node(key);
172 if (!n)
173 return Node();
174
175 // 'find' should find any region overlapping with the searched one, the
176 // caller should check for further requirements
177 if (0)
178 if (!n->first.contains(key))
179 return Node();
180
181 return n;
182 }
183
184 Node lower_bound(Key_type const &key) const noexcept
185 {
186 Node n = _rm.lower_bound_node(key);
187 return n;
188 }
189
190 Node lower_bound_area(Key_type const &key) const noexcept
191 {
192 Node n = _am.lower_bound_node(key);
193 return n;
194 }
195
196 l4_addr_t attach_area(l4_addr_t addr, unsigned long size,
197 L4Re::Rm::Flags flags = L4Re::Rm::Flags(0),
198 unsigned char align = L4_PAGESHIFT) noexcept
199 {
200 if (size < 2)
201 return L4_INVALID_ADDR;
202
203
204 Region c;
205
206 if (!(flags & L4Re::Rm::F::Search_addr))
207 {
208 c = Region(addr, addr + size - 1);
209 Node r = _am.find_node(c);
210 if (r)
211 return L4_INVALID_ADDR;
212 }
213
214 while (flags & L4Re::Rm::F::Search_addr)
215 {
216 if (addr < min_addr() || (addr + size - 1) > max_addr())
217 addr = min_addr();
218 addr = find_free(addr, max_addr(), size, align, flags);
219 if (addr == L4_INVALID_ADDR)
220 return L4_INVALID_ADDR;
221
222 c = Region(addr, addr + size - 1);
223 Node r = _am.find_node(c);
224 if (!r)
225 break;
226
227 if (r->first.end() >= max_addr())
228 return L4_INVALID_ADDR;
229
230 addr = r->first.end() + 1;
231 }
232
233 if (_am.insert(c, Hdlr(typename Hdlr::Dataspace(), 0, 0, flags.region_flags())).second == 0)
234 return addr;
235
236 return L4_INVALID_ADDR;
237 }
238
239 bool detach_area(l4_addr_t addr) noexcept
240 {
241 if (_am.remove(addr))
242 return false;
243
244 return true;
245 }
246
247 void *attach(void *addr, unsigned long size, Hdlr const &hdlr,
248 L4Re::Rm::Flags flags = L4Re::Rm::Flags(0),
249 unsigned char align = L4_PAGESHIFT) noexcept
250 {
251 if (size < 2)
252 return L4_INVALID_PTR;
253
254 l4_addr_t beg, end;
255 int err = hdlr.map_info(&beg, &end);
256 if (err > 0)
257 {
258 // Mapping address determined by underlying dataspace. Make sure we
259 // prevent any additional alignment. We already know the place!
260 beg += hdlr.offset();
261 end = beg + size - 1U;
262 align = L4_PAGESHIFT;
263
264 // In case of exact mappings, the supplied address must match because
265 // we cannot remap.
266 if (!(flags & L4Re::Rm::F::Search_addr)
267 && reinterpret_cast<l4_addr_t>(addr) != beg)
268 return L4_INVALID_PTR;
269
270 // When searching for a suitable address, the start must cover the
271 // dataspace beginning to "find" the right spot.
272 if ((flags & L4Re::Rm::F::Search_addr)
273 && reinterpret_cast<l4_addr_t>(addr) > beg)
274 return L4_INVALID_PTR;
275 }
276 else if (err == 0)
277 {
278 beg = reinterpret_cast<l4_addr_t>(addr);
279 end = max_addr();
280 }
281 else if (err < 0)
282 return L4_INVALID_PTR;
283
284 if (flags & L4Re::Rm::F::In_area)
285 {
286 Node r = _am.find_node(Region(beg, beg + size - 1));
287 if (!r || (r->second.flags() & L4Re::Rm::F::Reserved))
288 return L4_INVALID_PTR;
289
290 end = r->first.end();
291 }
292
293 if (flags & L4Re::Rm::F::Search_addr)
294 {
295 beg = find_free(beg, end, size, align, flags);
296 if (beg == L4_INVALID_ADDR)
297 return L4_INVALID_PTR;
298 }
299
301 && _am.find_node(Region(beg, beg + size - 1)))
302 return L4_INVALID_PTR;
303
304 if (beg < min_addr() || beg + size - 1 > end)
305 return L4_INVALID_PTR;
306
307 if (_rm.insert(Region(beg, beg + size - 1), hdlr).second == 0)
308 return reinterpret_cast<void*>(beg);
309
310 return L4_INVALID_PTR;
311 }
312
313 int detach(void *addr, unsigned long sz, unsigned flags,
314 Region *reg, Hdlr *hdlr) noexcept
315 {
316 l4_addr_t a = reinterpret_cast<l4_addr_t>(addr);
317 Region dr(a, a + sz - 1);
318 Region res(~0UL, 0);
319
320 Node r = find(dr);
321 if (!r)
322 return -L4_ENOENT;
323
324 Region g = r->first;
325 Hdlr const &h = r->second;
326
327 if (flags & L4Re::Rm::Detach_overlap || dr.contains(g))
328 {
329 // successful removal of the AVL tree item also frees the node
330 Hdlr h_copy = h;
331
332 if (_rm.remove(g))
333 return -L4_ENOENT;
334
335 if (!(flags & L4Re::Rm::Detach_keep) && (h_copy.flags() & L4Re::Rm::F::Detach_free))
336 h_copy.free(0, g.size());
337
338 if (hdlr)
339 *hdlr = h_copy;
340 if (reg)
341 *reg = g;
342
343 if (find(dr))
345 else
346 return Rm::Detached_ds;
347 }
348 else if (dr.start() <= g.start())
349 {
350 // move the start of a region
351
352 if (!(flags & L4Re::Rm::Detach_keep) && (h.flags() & L4Re::Rm::F::Detach_free))
353 h.free(0, dr.end() + 1 - g.start());
354
355 unsigned long sz = dr.end() + 1 - g.start();
356 Item &cn = const_cast<Item &>(*r);
357 cn.first = Region(dr.end() + 1, g.end());
358 cn.second = cn.second + sz;
359 if (hdlr)
360 *hdlr = Hdlr();
361 if (reg)
362 *reg = Region(g.start(), dr.end());
363 if (find(dr))
365 else
366 return Rm::Kept_ds;
367 }
368 else if (dr.end() >= g.end())
369 {
370 // move the end of a region
371
372 if (!(flags & L4Re::Rm::Detach_keep)
373 && (h.flags() & L4Re::Rm::F::Detach_free))
374 h.free(dr.start() - g.start(), g.end() + 1 - dr.start());
375
376 Item &cn = const_cast<Item &>(*r);
377 cn.first = Region(g.start(), dr.start() - 1);
378 if (hdlr)
379 *hdlr = Hdlr();
380 if (reg)
381 *reg = Region(dr.start(), g.end());
382
383 if (find(dr))
385 else
386 return Rm::Kept_ds;
387 }
388 else if (g.contains(dr))
389 {
390 // split a single region that contains the new region
391
392 if (!(flags & L4Re::Rm::Detach_keep) && (h.flags() & L4Re::Rm::F::Detach_free))
393 h.free(dr.start() - g.start(), dr.size());
394
395 // first move the end off the existing region before the new one
396 Item &cn = const_cast<Item &>(*r);
397 cn.first = Region(g.start(), dr.start()-1);
398
399 int err;
400
401 // insert a second region for the remaining tail of
402 // the old existing region
403 err = _rm.insert(Region(dr.end() + 1, g.end()),
404 h + (dr.end() + 1 - g.start())).second;
405
406 if (err)
407 return err;
408
409 if (hdlr)
410 *hdlr = h;
411 if (reg)
412 *reg = dr;
413 return Rm::Split_ds;
414 }
415 return -L4_ENOENT;
416 }
417
418 l4_addr_t find_free(l4_addr_t start, l4_addr_t end, l4_addr_t size,
419 unsigned char align, L4Re::Rm::Flags flags) const noexcept;
420
421};
422
423
424template< typename Hdlr, template<typename T> class Alloc >
426Region_map<Hdlr, Alloc>::find_free(l4_addr_t start, l4_addr_t end,
427 unsigned long size, unsigned char align, L4Re::Rm::Flags flags) const noexcept
428{
429 l4_addr_t addr = start;
430
431 if (addr == ~0UL || addr < min_addr() || addr >= end)
432 addr = min_addr();
433
434 addr = l4_round_size(addr, align);
435 Node r;
436
437 for(;;)
438 {
439 if (addr > 0 && addr - 1 > end - size)
440 return L4_INVALID_ADDR;
441
442 Region c(addr, addr + size - 1);
443 r = _rm.find_node(c);
444
445 if (!r)
446 {
447 if (!(flags & L4Re::Rm::F::In_area) && (r = _am.find_node(c)))
448 {
449 if (r->first.end() > end - size)
450 return L4_INVALID_ADDR;
451
452 addr = l4_round_size(r->first.end() + 1, align);
453 continue;
454 }
455 break;
456 }
457 else if (r->first.end() > end - size)
458 return L4_INVALID_ADDR;
459
460 addr = l4_round_size(r->first.end() + 1, align);
461 }
462
463 if (!r)
464 return addr;
465
466 return L4_INVALID_ADDR;
467}
468
469}}
AVL map.
@ Detached_ds
Detached data space.
Definition rm:91
@ Detach_again
Detached data space, more to do.
Definition rm:96
@ Split_ds
Split data space, and done.
Definition rm:93
@ Kept_ds
Kept data space.
Definition rm:92
@ Detach_overlap
Do an unmap of all overlapping regions.
Definition rm:239
@ Detach_keep
Do not free the detached data space, ignore the F::Detach_free.
Definition rm:248
Region Key_type
Type of the key values.
Definition avl_map:59
Base_type::Node Node
Return type for find.
Definition avl_map:63
ITEM_TYPE Item_type
Type for the items store in the set.
Definition avl_set:141
unsigned long l4_addr_t
Address type.
Definition l4int.h:34
signed long long l4_int64_t
Signed 64bit value.
Definition l4int.h:30
unsigned long l4_cap_idx_t
Capability selector type.
Definition types.h:335
@ L4_INVALID_CAP
Invalid capability selector.
Definition consts.h:157
@ L4_ENOENT
No such entity.
Definition err.h:34
#define L4_INVALID_PTR
Invalid address as pointer type.
Definition consts.h:516
#define L4_PAGESHIFT
Size of a page, log2-based.
Definition consts.h:26
l4_addr_t l4_round_size(l4_addr_t value, unsigned char bits) L4_NOTHROW
Round value up to the next alignment with bits size.
Definition consts.h:488
@ L4_INVALID_ADDR
Invalid address.
Definition consts.h:509
Common L4 ABI Data Types.
L4Re C++ Interfaces.
Definition cmd_control:14
Region mapper interface.
Region_flags
Region flags (permissions, cacheability, special).
Definition rm:129
@ Reserved
Region is reserved (blocked)
Definition rm:150
@ Detach_free
Free the portion of the data space after detach.
Definition rm:146
@ W
Writable region.
Definition rm:135
@ Caching_mask
Mask of all Rm cache bits.
Definition rm:154
@ Search_addr
Search for a suitable address range.
Definition rm:114
@ In_area
Search only in area, or map into area.
Definition rm:116