// vim:set ft=cpp: -*- Mode: C++ -*-
/*
 * (c) 2008-2009 Alexander Warg <warg@os.inf.tu-dresden.de>,
 *               Torsten Frenzel <frenzel@os.inf.tu-dresden.de>
 * economic rights: Technische Universität Dresden (Germany)
 *
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU General Public License 2.
 * Please see the COPYING-GPL-2 file for details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License. This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 */

#pragma once

#include <l4/cxx/arith>
#include <l4/cxx/minmax>
#include <l4/sys/consts.h>  // l4_round_size(), l4_trunc_size()

namespace cxx {

/**
 * \brief Standard list-based allocator.
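 *
 * The allocator is initialized by donating memory to it via free() with
 * `initial_free = true`. A minimal usage sketch (the static buffer and
 * the sizes are assumptions for illustration):
 * \code
 * static char buf[4096];
 *
 * cxx::List_alloc a;
 * a.free(buf, sizeof(buf), true); // donate fresh memory, enforces alignment
 *
 * void *p = a.alloc(64, 16);      // 64 bytes, aligned to 16 bytes
 * if (p)
 *   a.free(p, 64);                // return the block when done
 * \endcode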
 */
class List_alloc
{
private:
  friend class List_alloc_sanity_guard;

  struct Mem_block
  {
    Mem_block *next;
    unsigned long size;
  };

  Mem_block *_first;

  inline void check_overlap(void *, unsigned long);
  inline void sanity_check_list(char const *, char const *);
  inline void merge();

public:

  /**
   * \brief Initializes an empty list allocator.
   *
   * \note To initialize the allocator with available memory,
   *       use the #free() function.
   */
  List_alloc() : _first(0) {}

  /**
   * \brief Return a free memory block to the allocator.
   *
   * \param block        Pointer to the memory block.
   * \param size         Size of the memory block in bytes.
   * \param initial_free Set to true when handing fresh memory to the
   *                     allocator; this enforces alignment on that
   *                     memory.
   */
  inline void free(void *block, unsigned long size, bool initial_free = false);

  /**
   * \brief Allocate a memory block.
   *
   * \param size  Size of the memory block in bytes.
   * \param align Alignment constraint; must be a power of two.
   *
   * \return Pointer to the memory block, or 0 if no matching block is
   *         available.
   */
  inline void *alloc(unsigned long size, unsigned align);

  /**
   * \brief Allocate a memory block of `min` <= size <= `max`.
   *
   * \param         min         Minimal size to allocate, in bytes.
   * \param[in,out] max         Maximum size to allocate, in bytes. The
   *                            actually allocated size is returned here.
   * \param         align       Alignment constraint; must be a power of two.
   * \param         granularity Granularity to use for the allocation;
   *                            must be a power of two.
   *
   * \return Pointer to the memory block, or 0 if no matching block is
   *         available.
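   *
   * A sketch of a typical call, given a `List_alloc` instance `a` (the
   * concrete sizes are assumptions for illustration):
   * \code
   * unsigned long max = 1UL << 20;  // willing to take up to 1 MiB
   * // at least one 4K page, 4K aligned, in multiples of 4K
   * void *p = a.alloc_max(0x1000, &max, 0x1000, 0x1000);
   * // on success, max holds the size that was actually allocated
   * \endcode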
   */
  inline void *alloc_max(unsigned long min, unsigned long *max,
                         unsigned align, unsigned granularity);

  /**
   * \brief Get the amount of available memory.
   *
   * \return Available memory in bytes.
   */
  inline unsigned long avail();

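  /**
   * \brief Dump the free list to `out`, one line per free block.
   *
   * \param out Output object; must provide a printf-style
   *            `printf()` member, as used by the implementation.
   */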
  template <typename DBG>
  void dump_free_list(DBG &out);
};

#if !defined(CXX_LIST_ALLOC_SANITY)

class List_alloc_sanity_guard
{
public:
  List_alloc_sanity_guard(List_alloc *, char const *)
  {}
};

void
List_alloc::check_overlap(void *, unsigned long)
{}

void
List_alloc::sanity_check_list(char const *, char const *)
{}

#else

#include <l4/cxx/iostream>  // provides L4::cerr, used by the sanity checks

class List_alloc_sanity_guard
{
private:
  List_alloc *a;
  char const *func;

public:
  List_alloc_sanity_guard(List_alloc *a, char const *func)
  : a(a), func(func)
  { a->sanity_check_list(func, "entry"); }

  ~List_alloc_sanity_guard()
  { a->sanity_check_list(func, "exit"); }
};

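// Verify that a block being freed does not overlap any block that is
// already on the free list (a double free or a bogus size triggers this).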
void
List_alloc::check_overlap(void *b, unsigned long s)
{
  unsigned long const mb_align = (1UL << arith::Ld<sizeof(Mem_block)>::value) - 1;
  if ((unsigned long)b & mb_align)
    {
      L4::cerr << "List_alloc(FATAL): trying to free unaligned memory: "
               << b << " align=" << arith::Ld<sizeof(Mem_block)>::value << "\n";
    }

  Mem_block *c = _first;
  for (; c; c = c->next)
    {
      unsigned long x_s = (unsigned long)b;
      unsigned long x_e = x_s + s;
      unsigned long b_s = (unsigned long)c;
      unsigned long b_e = b_s + c->size;

      if (   (x_s >= b_s && x_s < b_e)
          || (x_e > b_s && x_e <= b_e)
          || (b_s >= x_s && b_s < x_e)
          || (b_e > x_s && b_e <= x_e))
        {
          L4::cerr << "List_alloc(FATAL): trying to free memory that "
                      "is already free:\n  ["
                   << (void *)x_s << '-' << (void *)x_e << ") overlaps ["
                   << (void *)b_s << '-' << (void *)b_e << ")\n";
        }
    }
}

void
List_alloc::sanity_check_list(char const *func, char const *info)
{
  Mem_block *c = _first;
  for (; c; c = c->next)
    {
      if (c->next)
        {
          if (c >= c->next)
            {
              L4::cerr << "List_alloc(FATAL): " << func << '(' << info
                       << "): list order violation\n";
            }

          if (((unsigned long)c) + c->size > (unsigned long)c->next)
            {
              L4::cerr << "List_alloc(FATAL): " << func << '(' << info
                       << "): overlapping blocks in list\n";
            }
        }
    }
}

#endif

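// Merge adjacent free blocks. The free list is kept sorted by address,
// so a single linear pass suffices.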
void
List_alloc::merge()
{
  List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);
  Mem_block *c = _first;
  while (c && c->next)
    {
      unsigned long f_start = (unsigned long)c;
      unsigned long f_end   = f_start + c->size;
      unsigned long n_start = (unsigned long)c->next;

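      // this block ends exactly where the next one starts: coalesce them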
      if (f_end == n_start)
        {
          c->size += c->next->size;
          c->next = c->next->next;
          continue;
        }

      c = c->next;
    }
}

void
List_alloc::free(void *block, unsigned long size, bool initial_free)
{
  List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);

  unsigned long const mb_align = (1UL << arith::Ld<sizeof(Mem_block)>::value) - 1;

  if (initial_free)
    {
      // enforce the alignment constraint on initial memory
      unsigned long nblock = ((unsigned long)block + mb_align) & ~mb_align;
      size = (size - (nblock - (unsigned long)block)) & ~mb_align;
      block = (void *)nblock;
    }
  else
    // blow up size to the minimum aligned size
    size = (size + mb_align) & ~mb_align;

  check_overlap(block, size);

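  // find the insertion point that keeps the free list sorted by address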
  Mem_block **c = &_first;
  Mem_block *next = 0;

  if (*c)
    {
      while (*c && *c < block)
        c = &(*c)->next;

      next = *c;
    }

  *c = (Mem_block *)block;

  (*c)->next = next;
  (*c)->size = size;

  merge();
}

void *
List_alloc::alloc_max(unsigned long min, unsigned long *max, unsigned align,
                      unsigned granularity)
{
  List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);

  unsigned char const mb_bits = arith::Ld<sizeof(Mem_block)>::value;
  unsigned long const mb_align = (1UL << mb_bits) - 1;

  // round the minimum up to at least the aligned size of a Mem_block
  min = l4_round_size(min, mb_bits);
  // truncate the maximum to a multiple of the Mem_block size
  *max = l4_trunc_size(*max, mb_bits);
  // truncate the maximum size according to the granularity
  *max = *max & ~(granularity - 1UL);

  if (min > *max)
    return 0;

  unsigned long almask = align ? (align - 1) : 0;

  // the minimum alignment is given by the size of a Mem_block
  if (almask < mb_align)
    almask = mb_align;

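  // best-fit search: remember the candidate whose usable size comes
  // closest to *max, stopping early once a block can provide *max bytes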
  Mem_block **c = &_first;
  Mem_block **fit = 0;
  unsigned long max_fit = 0;

  for (; *c; c = &(*c)->next)
    {
      // address of the free memory block
      unsigned long n_start = (unsigned long)(*c);

      // block too small, next
      // XXX: maybe we can skip this and just do the test below
      if ((*c)->size < min)
        continue;

      // aligned start address within the free block
      unsigned long a_start = (n_start + almask) & ~almask;

      // the aligned start address lies outside the block, next
      if (a_start - n_start >= (*c)->size)
        continue;

      // remaining size after subtracting the padding for the alignment
      unsigned long r_size = (*c)->size - a_start + n_start;
      // round down according to the granularity
      r_size &= ~(granularity - 1UL);

      // block too small
      if (r_size < min)
        continue;

      if (r_size > *max)
        {
          fit = c;
          max_fit = *max;
          break;
        }

      if (r_size > max_fit)
        {
          max_fit = r_size;
          fit = c;
        }
    }

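  // carve the allocation out of the chosen block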
  if (fit)
    {
      unsigned long n_start = (unsigned long)(*fit);
      unsigned long a_start = (n_start + almask) & ~almask;
      unsigned long r_size = (*fit)->size - a_start + n_start;

      if (a_start > n_start)
        {
          // keep the alignment padding in front of the allocation as a
          // free block
          (*fit)->size -= r_size;
          fit = &(*fit)->next;
        }
      else
        // the whole block is consumed from its start, unlink it
        *fit = (*fit)->next;

      *max = max_fit;
      if (r_size == max_fit)
        return (void *)a_start;

      // put the unused tail back on the free list
      Mem_block *m = (Mem_block *)(a_start + max_fit);
      m->next = *fit;
      m->size = r_size - max_fit;
      *fit = m;
      return (void *)a_start;
    }

  return 0;
}

void *
List_alloc::alloc(unsigned long size, unsigned align)
{
  List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);

  unsigned long const mb_align = (1UL << arith::Ld<sizeof(Mem_block)>::value) - 1;

  // blow up size to the minimum aligned size
  size = (size + mb_align) & ~mb_align;

  unsigned long almask = align ? (align - 1) : 0;

  // the minimum alignment is given by the size of a Mem_block
  if (almask < mb_align)
    almask = mb_align;

  Mem_block **c = &_first;

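  // first-fit search over the address-sorted free list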
  for (; *c; c = &(*c)->next)
    {
      // address of the free memory block
      unsigned long n_start = (unsigned long)(*c);

      // block too small, next
      // XXX: maybe we can skip this and just do the test below
      if ((*c)->size < size)
        continue;

      // aligned start address within the free block
      unsigned long a_start = (n_start + almask) & ~almask;

      // block too small after alignment, next
      if (a_start - n_start >= (*c)->size)
        continue;

      // remaining size after subtracting the padding for the alignment
      unsigned long r_size = (*c)->size - a_start + n_start;

      // block too small
      if (r_size < size)
        continue;

      if (a_start > n_start)
        {
          // free space remains in front of the allocated block: shrink
          // that block and let c point to its next pointer
          (*c)->size -= r_size;
          c = &(*c)->next;
        }
      else
        // drop the block; c remains the next pointer of the
        // previous block
        *c = (*c)->next;

      // the allocation consumes the whole remaining space
      if (r_size == size)
        return (void *)a_start;

      // add a new free block behind the allocated block
      Mem_block *m = (Mem_block *)(a_start + size);
      m->next = *c;
      m->size = r_size - size;
      *c = m;
      return (void *)a_start;
    }

  return 0;
}


unsigned long
List_alloc::avail()
{
  List_alloc_sanity_guard __attribute__((unused)) guard(this, __func__);
  Mem_block *c = _first;
  unsigned long a = 0;
  while (c)
    {
      a += c->size;
      c = c->next;
    }

  return a;
}

template <typename DBG>
void
List_alloc::dump_free_list(DBG &out)
{
  Mem_block *c = _first;
  while (c)
    {
      unsigned long sz;
      char const *unit;

      if (c->size < 1024)
        {
          sz = c->size;
          unit = "Byte";
        }
      else if (c->size < (1UL << 20))
        {
          sz = c->size >> 10;
          unit = "kB";
        }
      else
        {
          sz = c->size >> 20;
          unit = "MB";
        }

      out.printf("%10p - %10p (%lu %s)\n", c, (char *)c + c->size - 1,
                 sz, unit);

      c = c->next;
    }
}

} // namespace cxx