L4Re – L4 Runtime Environment
list_alloc
1 // vim:set ft=cpp: -*- Mode: C++ -*-
2 /*
3  * (c) 2008-2009 Alexander Warg <warg@os.inf.tu-dresden.de>,
4  * Torsten Frenzel <frenzel@os.inf.tu-dresden.de>
5  * economic rights: Technische Universität Dresden (Germany)
6  *
7  * This file is part of TUD:OS and distributed under the terms of the
8  * GNU General Public License 2.
9  * Please see the COPYING-GPL-2 file for details.
10  *
11  * As a special exception, you may use this file as part of a free software
12  * library without restriction. Specifically, if other files instantiate
13  * templates or use macros or inline functions from this file, or you compile
14  * this file and link it with other files to produce an executable, this
15  * file does not by itself cause the resulting executable to be covered by
16  * the GNU General Public License. This exception does not however
17  * invalidate any other reasons why the executable file might be covered by
18  * the GNU General Public License.
19  */
20 
21 #pragma once
22 
23 #include <l4/cxx/arith>
24 #include <l4/cxx/minmax>
25 
26 namespace cxx {
27 
/**
 * Standard list-based allocator.
 *
 * Free memory is managed as a singly linked list of `Mem_block` headers,
 * kept sorted by ascending address. Each header is stored in-place at the
 * start of the free region it describes, so every managed region must be
 * at least `sizeof(Mem_block)` bytes large and suitably aligned.
 */
class List_alloc
{
private:
  friend class List_alloc_sanity_guard;

  // Header of a free region, stored inside the region itself.
  struct Mem_block
  {
    Mem_block *next;     // next free region in ascending address order (0 = end)
    unsigned long size;  // size of this free region in bytes
  };

  Mem_block *_first;     // head of the address-sorted free list

  inline void check_overlap(void *, unsigned long );
  inline void sanity_check_list(char const *, char const *);
  inline void merge();

public:

  /**
   * Initializes an empty list allocator.
   */
  List_alloc() : _first(0) {}

  /**
   * Return a free memory block to the allocator.
   *
   * \param block         Start address of the block to free.
   * \param size          Size of the block in bytes.
   * \param initial_free  If true, the block is trimmed (rather than padded)
   *                      to `Mem_block` alignment; use this when handing
   *                      fresh memory to the allocator for the first time.
   */
  inline void free(void *block, unsigned long size, bool initial_free = false);

  /**
   * Allocate a memory block.
   *
   * \param size   Requested size in bytes.
   * \param align  Requested alignment in bytes (power of two); the
   *               effective alignment is at least that of `Mem_block`.
   * \return Pointer to the allocated block, or 0 if no fit was found.
   */
  inline void *alloc(unsigned long size, unsigned long align);

  /**
   * Allocate a memory block of min <= size <= *max.
   *
   * \param      min          Minimum acceptable size in bytes.
   * \param[in,out] max       On input, the maximum desired size; on output,
   *                          the size actually allocated.
   * \param      align        Requested alignment in bytes (power of two).
   * \param      granularity  Granularity (power of two) the resulting size
   *                          is rounded down to.
   * \return Pointer to the allocated block, or 0 if no fit was found.
   */
  inline void *alloc_max(unsigned long min, unsigned long *max,
                         unsigned long align, unsigned granularity);

  /**
   * Get the amount of available memory.
   *
   * \return Sum of the sizes of all free regions, in bytes.
   */
  inline unsigned long avail();

  /**
   * Dump the free list through `out.printf`.
   */
  template <typename DBG>
  void dump_free_list(DBG &out);
};
112 
113 #if !defined (CXX_LIST_ALLOC_SANITY)
114 class List_alloc_sanity_guard
115 {
116 public:
117  List_alloc_sanity_guard(List_alloc *, char const *)
118  {}
119 
120 };
121 
122 
123 void
124 List_alloc::check_overlap(void *, unsigned long )
125 {}
126 
127 void
128 List_alloc::sanity_check_list(char const *, char const *)
129 {}
130 
131 #else
132 
133 class List_alloc_sanity_guard
134 {
135 private:
136  List_alloc *a;
137  char const *func;
138 
139 public:
140  List_alloc_sanity_guard(List_alloc *a, char const *func)
141  : a(a), func(func)
142  { a->sanity_check_list(func, "entry"); }
143 
144  ~List_alloc_sanity_guard()
145  { a->sanity_check_list(func, "exit"); }
146 };
147 
148 void
149 List_alloc::check_overlap(void *b, unsigned long s)
150 {
151  unsigned long const mb_align = (1UL << arith::Ld<sizeof(Mem_block)>::value) - 1;
152  if ((unsigned long)b & mb_align)
153  {
154  L4::cerr << "List_alloc(FATAL): trying to free unaligned memory: "
155  << b << " align=" << arith::Ld<sizeof(Mem_block)>::value << "\n";
156  }
157 
158  Mem_block *c = _first;
159  for (;c ; c = c->next)
160  {
161  unsigned long x_s = (unsigned long)b;
162  unsigned long x_e = x_s + s;
163  unsigned long b_s = (unsigned long)c;
164  unsigned long b_e = b_s + c->size;
165 
166  if ((x_s >= b_s && x_s < b_e)
167  || (x_e > b_s && x_e <= b_e)
168  || (b_s >= x_s && b_s < x_e)
169  || (b_e > x_s && b_e <= x_e))
170  {
171  L4::cerr << "List_alloc(FATAL): trying to free memory that "
172  "is already free: \n ["
173  << (void*)x_s << '-' << (void*)x_e << ") overlaps ["
174  << (void*)b_s << '-' << (void*)b_e << ")\n";
175  }
176  }
177 }
178 
179 void
180 List_alloc::sanity_check_list(char const *func, char const *info)
181 {
182  Mem_block *c = _first;
183  for (;c ; c = c->next)
184  {
185  if (c->next)
186  {
187  if (c >= c->next)
188  {
189  L4::cerr << "List_alloc(FATAL): " << func << '(' << info
190  << "): list order violation\n";
191  }
192 
193  if (((unsigned long)c) + c->size > (unsigned long)c->next)
194  {
195  L4::cerr << "List_alloc(FATAL): " << func << '(' << info
196  << "): list order violation\n";
197  }
198  }
199  }
200 }
201 
202 #endif
203 
204 void
205 List_alloc::merge()
206 {
207  List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);
208  Mem_block *c = _first;
209  while (c && c->next)
210  {
211  unsigned long f_start = (unsigned long)c;
212  unsigned long f_end = f_start + c->size;
213  unsigned long n_start = (unsigned long)c->next;
214 
215  if (f_end == n_start)
216  {
217  c->size += c->next->size;
218  c->next = c->next->next;
219  continue;
220  }
221 
222  c = c->next;
223  }
224 }
225 
226 void
227 List_alloc::free(void *block, unsigned long size, bool initial_free)
228 {
229  List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);
230 
231  unsigned long const mb_align = (1UL << arith::Ld<sizeof(Mem_block)>::value) - 1;
232 
233  if (initial_free)
234  {
235  // enforce alignment constraint on initial memory
236  unsigned long nblock = ((unsigned long)block + mb_align) & ~mb_align;
237  size = (size - (nblock - (unsigned long)block)) & ~mb_align;
238  block = (void*)nblock;
239  }
240  else
241  // blow up size to the minimum aligned size
242  size = (size + mb_align) & ~mb_align;
243 
244  check_overlap(block, size);
245 
246  Mem_block **c = &_first;
247  Mem_block *next = 0;
248 
249  if (*c)
250  {
251  while (*c && *c < block)
252  c = &(*c)->next;
253 
254  next = *c;
255  }
256 
257  *c = (Mem_block*)block;
258 
259  (*c)->next = next;
260  (*c)->size = size;
261 
262  merge();
263 }
264 
265 void *
266 List_alloc::alloc_max(unsigned long min, unsigned long *max, unsigned long align,
267  unsigned granularity)
268 {
269  List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);
270 
271  unsigned char const mb_bits = arith::Ld<sizeof(Mem_block)>::value;
272  unsigned long const mb_align = (1UL << mb_bits) - 1;
273 
274  // blow minimum up to at least the minimum aligned size of a Mem_block
275  min = l4_round_size(min, mb_bits);
276  // truncate maximum to at least the size of a Mem_block
277  *max = l4_trunc_size(*max, mb_bits);
278  // truncate maximum size according to granularity
279  *max = *max & ~(granularity - 1UL);
280 
281  if (min > *max)
282  return 0;
283 
284  unsigned long almask = align ? (align - 1UL) : 0;
285 
286  // minimum alignment is given by the size of a Mem_block
287  if (almask < mb_align)
288  almask = mb_align;
289 
290  Mem_block **c = &_first;
291  Mem_block **fit = 0;
292  unsigned long max_fit = 0;
293 
294  for (; *c; c = &(*c)->next)
295  {
296  // address of free memory block
297  unsigned long n_start = (unsigned long)(*c);
298 
299  // block too small, next
300  // XXX: maybe we can skip this and just do the test below
301  if ((*c)->size < min)
302  continue;
303 
304  // aligned start address within the free block
305  unsigned long a_start = (n_start + almask) & ~almask;
306 
307  // check if aligned start address is behind the block, next
308  if (a_start - n_start >= (*c)->size)
309  continue;
310 
311  // remaining size after subtracting the padding for the alignment
312  unsigned long r_size = (*c)->size - a_start + n_start;
313  // round down according to granularity
314  r_size &= ~(granularity - 1UL);
315 
316  // block too small
317  if (r_size < min)
318  continue;
319 
320  if (r_size >= *max)
321  {
322  fit = c;
323  max_fit = *max;
324  break;
325  }
326 
327  if (r_size > max_fit)
328  {
329  max_fit = r_size;
330  fit = c;
331  }
332  }
333 
334  if (fit)
335  {
336  unsigned long n_start = (unsigned long)(*fit);
337  unsigned long a_start = (n_start + almask) & ~almask;
338  unsigned long r_size = (*fit)->size - a_start + n_start;
339 
340  if (a_start > n_start)
341  {
342  (*fit)->size -= r_size;
343  fit = &(*fit)->next;
344  }
345  else
346  *fit = (*fit)->next;
347 
348  *max = max_fit;
349  if (r_size == max_fit)
350  return (void *)a_start;
351 
352  Mem_block *m = (Mem_block*)(a_start + max_fit);
353  m->next = *fit;
354  m->size = r_size - max_fit;
355  *fit = m;
356  return (void *)a_start;
357  }
358 
359  return 0;
360 }
361 
362 void *
363 List_alloc::alloc(unsigned long size, unsigned long align)
364 {
365  List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);
366 
367  unsigned long const mb_align = (1UL << arith::Ld<sizeof(Mem_block)>::value) - 1;
368 
369  // blow up size to the minimum aligned size
370  size = (size + mb_align) & ~mb_align;
371 
372  unsigned long almask = align ? (align - 1UL) : 0;
373 
374  // minimum alignment is given by the size of a Mem_block
375  if (almask < mb_align)
376  almask = mb_align;
377 
378  Mem_block **c = &_first;
379 
380  for (; *c; c=&(*c)->next)
381  {
382  // address of free memory block
383  unsigned long n_start = (unsigned long)(*c);
384 
385  // block too small, next
386  // XXX: maybe we can skip this and just do the test below
387  if ((*c)->size < size)
388  continue;
389 
390  // aligned start address within the free block
391  unsigned long a_start = (n_start + almask) & ~almask;
392 
393  // block too small after alignment, next
394  if (a_start - n_start >= (*c)->size)
395  continue;
396 
397  // remaining size after subtracting the padding
398  // for the alignment
399  unsigned long r_size = (*c)->size - a_start + n_start;
400 
401  // block too small
402  if (r_size < size)
403  continue;
404 
405  if (a_start > n_start)
406  {
407  // have free space before the allocated block
408  // shrink the block and set c to the next pointer of that
409  // block
410  (*c)->size -= r_size;
411  c = &(*c)->next;
412  }
413  else
414  // drop the block, c remains the next pointer of the
415  // previous block
416  *c = (*c)->next;
417 
418  // allocated the whole remaining space
419  if (r_size == size)
420  return (void*)a_start;
421 
422  // add a new free block behind the allocated block
423  Mem_block *m = (Mem_block*)(a_start + size);
424  m->next = *c;
425  m->size = r_size - size;
426  *c = m;
427  return (void *)a_start;
428  }
429 
430  return 0;
431 }
432 
433 unsigned long
435 {
436  List_alloc_sanity_guard __attribute__((unused)) guard(this, __FUNCTION__);
437  Mem_block *c = _first;
438  unsigned long a = 0;
439  while (c)
440  {
441  a += c->size;
442  c = c->next;
443  }
444 
445  return a;
446 }
447 
448 template <typename DBG>
449 void
450 List_alloc::dump_free_list(DBG &out)
451 {
452  Mem_block *c = _first;
453  while (c)
454  {
455  unsigned sz;
456  const char *unit;
457 
458  if (c->size < 1024)
459  {
460  sz = c->size;
461  unit = "Byte";
462  }
463  else if (c->size < 1 << 20)
464  {
465  sz = c->size >> 10;
466  unit = "kB";
467  }
468  else
469  {
470  sz = c->size >> 20;
471  unit = "MB";
472  }
473 
474  out.printf("%12p - %12p (%u %s)\n", c, (char *) c + c->size - 1, sz, unit);
475 
476  c = c->next;
477  }
478 }
479 
480 }
Standard list-based allocator.
Definition: list_alloc:32
unsigned long avail()
Get the amount of available memory.
Definition: list_alloc:434
void * alloc(unsigned long size, unsigned long align)
Allocate a memory block.
Definition: list_alloc:363
List_alloc()
Initializes an empty list allocator.
Definition: list_alloc:56
void * alloc_max(unsigned long min, unsigned long *max, unsigned long align, unsigned granularity)
Allocate a memory block of min <= size <= max.
Definition: list_alloc:266
void free(void *block, unsigned long size, bool initial_free=false)
Return a free memory block to the allocator.
Definition: list_alloc:227
T1 min(T1 a, T1 b)
Get the minimum of a and b.
Definition: minmax:35
T1 max(T1 a, T1 b)
Get the maximum of a and b.
Definition: minmax:46
l4_addr_t l4_trunc_size(l4_addr_t address, unsigned char bits) L4_NOTHROW
Round an address down to the next lower flex page with size bits.
Definition: consts.h:375
l4_addr_t l4_round_size(l4_addr_t value, unsigned char bits) L4_NOTHROW
Round value up to the next alignment with bits size.
Definition: consts.h:400
BasicOStream cerr
Standard error stream.
Our C++ library.
Definition: arith:22