L4Re - L4 Runtime Environment
slab_alloc
// vi:set ft=cpp: -*- Mode: C++ -*-
/*
 * (c) 2008-2009 Alexander Warg <warg@os.inf.tu-dresden.de>
 *     economic rights: Technische Universität Dresden (Germany)
 *
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU General Public License 2.
 * Please see the COPYING-GPL-2 file for details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License. This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 */

#pragma once

#include <l4/cxx/std_alloc>
#include <l4/cxx/hlist>
#include <l4/sys/consts.h>

namespace cxx {

/**
 * \ingroup cxx_api
 * \brief Basic slab allocator.
 * \param Obj_size  The size of the objects managed by the allocator (in bytes).
 * \param Slab_size The size of a slab cache (in bytes).
 * \param Max_free  The maximum number of free slab caches. When this limit is
 *                  exceeded, empty slab caches are released.
 * \param Alloc     The allocator that is used to allocate the slab caches.
 */
template< int Obj_size, int Slab_size = L4_PAGESIZE,
          int Max_free = 2, template<typename A> class Alloc = New_allocator >
class Base_slab
{
private:
  struct Free_o
  {
    Free_o *next;
  };

protected:
  struct Slab_i;

private:
  struct Slab_head : public H_list_item
  {
    unsigned num_free;
    Free_o *free;
    Base_slab<Obj_size, Slab_size, Max_free, Alloc> *cache;

    inline Slab_head() throw() : num_free(0), free(0), cache(0)
    {}
  };

public:
  enum
  {
    object_size = Obj_size,   ///< size of an object.
    slab_size   = Slab_size,  ///< size of a slab cache.
    /// objects per slab cache.
    objects_per_slab = (Slab_size - sizeof(Slab_head)) / object_size,
    /// maximum number of free slab caches.
    max_free_slabs = Max_free,
  };

protected:
  struct Slab_store
  {
    char _o[slab_size - sizeof(Slab_head)];
    Free_o *object(unsigned obj) throw()
    { return reinterpret_cast<Free_o*>(_o + object_size * obj); }
  };

  struct Slab_i : public Slab_store, public Slab_head
  {};

public:
  /// Type of the allocator for the slab caches.
  typedef Alloc<Slab_i> Slab_alloc;

  typedef void Obj_type;

private:
  Slab_alloc _alloc;
  unsigned _num_free;
  unsigned _num_slabs;
  H_list<Slab_i> _full_slabs;
  H_list<Slab_i> _partial_slabs;
  H_list<Slab_i> _empty_slabs;

  /// Add a new slab cache.
  void add_slab(Slab_i *s) throw()
  {
    s->num_free = objects_per_slab;
    s->cache = this;

    //L4::cerr << "Slab: " << this << "->add_slab(" << s << ", size="
    //         << slab_size << "):" << " f=" << s->object(0) << '\n';

    // initialize free list
    Free_o *f = s->free = s->object(0);
    for (unsigned i = 0; i < objects_per_slab; ++i)
      {
        f->next = s->object(i);
        f = f->next;
      }
    f->next = 0;

    // insert slab into cache's list
    _empty_slabs.push_front(s);
    ++_num_slabs;
    ++_num_free;
  }

  /// Grow the allocator by adding a new slab cache.
  bool grow() throw()
  {
    Slab_i *s = _alloc.alloc();
    if (!s)
      return false;

    new (s, cxx::Nothrow()) Slab_i();

    add_slab(s);
    return true;
  }

  /// Shrink the allocator by releasing empty slab caches.
  void shrink() throw()
  {
    if (!_alloc.can_free)
      return;

    while (!_empty_slabs.empty() && _num_free > max_free_slabs)
      {
        Slab_i *s = _empty_slabs.front();
        _empty_slabs.remove(s);
        --_num_free;
        --_num_slabs;
        _alloc.free(s);
      }
  }

public:
  Base_slab(Slab_alloc const &alloc = Slab_alloc()) throw()
    : _alloc(alloc), _num_free(0), _num_slabs(0), _full_slabs(0),
      _partial_slabs(0), _empty_slabs(0)
  {}

  ~Base_slab() throw()
  {
    while (!_empty_slabs.empty())
      {
        Slab_i *o = _empty_slabs.front();
        _empty_slabs.remove(o);
        _alloc.free(o);
      }
    while (!_partial_slabs.empty())
      {
        Slab_i *o = _partial_slabs.front();
        _partial_slabs.remove(o);
        _alloc.free(o);
      }
    while (!_full_slabs.empty())
      {
        Slab_i *o = _full_slabs.front();
        _full_slabs.remove(o);
        _alloc.free(o);
      }
  }

  void *alloc() throw()
  {
    H_list<Slab_i> *free = &_partial_slabs;
    if (free->empty())
      free = &_empty_slabs;

    if (free->empty() && !grow())
      return 0;

    Slab_i *s = free->front();
    Free_o *o = s->free;
    s->free = o->next;

    if (free == &_empty_slabs)
      {
        _empty_slabs.remove(s);
        --_num_free;
      }

    --(s->num_free);

    if (!s->free)
      {
        _partial_slabs.remove(s);
        _full_slabs.push_front(s);
      }
    else if (free == &_empty_slabs)
      _partial_slabs.push_front(s);

    //L4::cerr << this << "->alloc(): " << o << ", of " << s << '\n';

    return o;
  }

  void free(void *_o) throw()
  {
    if (!_o)
      return;

    unsigned long addr = (unsigned long)_o;
    addr = (addr / slab_size) * slab_size;
    Slab_i *s = (Slab_i*)addr;

    if (s->cache != this)
      return;

    Free_o *o = reinterpret_cast<Free_o*>(_o);

    o->next = s->free;
    s->free = o;

    bool was_full = false;

    if (!s->num_free)
      {
        _full_slabs.remove(s);
        was_full = true;
      }

    ++(s->num_free);

    if (s->num_free == objects_per_slab)
      {
        if (!was_full)
          _partial_slabs.remove(s);

        _empty_slabs.push_front(s);
        ++_num_free;
        if (_num_free > max_free_slabs)
          shrink();

        was_full = false;
      }
    else if (was_full)
      _partial_slabs.push_front(s);

    //L4::cerr << this << "->free(" << _o << "): of " << s << '\n';
  }

  /**
   * \brief Get the total number of objects managed by the slab allocator.
   * \return The number of objects managed by the allocator (including the
   *         free objects).
   */
  unsigned total_objects() const throw()
  { return _num_slabs * objects_per_slab; }

  /**
   * \brief Get the number of free objects in the slab allocator.
   * \return The number of free objects in all empty and partially used
   *         slab caches managed by this allocator.
   */
  unsigned free_objects() const throw()
  {
    unsigned count = 0;

    /* count partial slabs first */
    for (typename H_list<Slab_i>::Const_iterator s = _partial_slabs.begin();
         s != _partial_slabs.end(); ++s)
      count += s->num_free;

    /* add empty slabs */
    count += _num_free * objects_per_slab;

    return count;
  }
};
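
/*
 * Usage sketch (illustrative only, not part of the interface): a Base_slab
 * can be used directly when only the raw object size is known. The name
 * `msg_slab` and the object size 64 below are hypothetical. Note that
 * free() recovers the owning slab by rounding the object address down to a
 * Slab_size boundary, so the backing allocator is expected to hand out
 * suitably aligned slab caches.
 *
 *   static cxx::Base_slab<64> msg_slab;  // 64-byte objects, page-sized slabs
 *
 *   void *m = msg_slab.alloc();          // raw, uninitialized storage or 0
 *   if (m)
 *     {
 *       // ... use the 64 bytes ...
 *       msg_slab.free(m);                // pointer must come from msg_slab
 *     }
 */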

/**
 * \ingroup cxx_api
 * \brief Slab allocator for objects of type \a Type.
 * \param Type      The type of the objects to manage.
 * \param Slab_size The size of a slab cache.
 * \param Max_free  The maximum number of free slab caches.
 * \param Alloc     The allocator for the slab caches.
 */
template<typename Type, int Slab_size = L4_PAGESIZE,
         int Max_free = 2, template<typename A> class Alloc = New_allocator >
class Slab : public Base_slab<sizeof(Type), Slab_size, Max_free, Alloc>
{
private:
  typedef Base_slab<sizeof(Type), Slab_size, Max_free, Alloc> Base_type;

public:
  typedef Type Obj_type;

  Slab(typename Base_type::Slab_alloc const &alloc
         = typename Base_type::Slab_alloc()) throw()
    : Base_slab<sizeof(Type), Slab_size, Max_free, Alloc>(alloc) {}


  /**
   * \brief Allocate an object of type \a Type.
   * \return A pointer to the object just allocated, or 0 on failure.
   */
  Type *alloc() throw()
  {
    return (Type*)Base_slab<sizeof(Type), Slab_size,
                            Max_free, Alloc>::alloc();
  }

  /**
   * \brief Free the object addressed by \a o.
   * \param o The pointer to the object to free.
   * \pre The object must have been allocated with this allocator.
   */
  void free(Type *o) throw()
  { Base_slab<sizeof(Type), Slab_size, Max_free, Alloc>::free(o); }
};
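
/*
 * Usage sketch (illustrative only; `Session` and `session_slab` are
 * hypothetical names). Slab::alloc() returns raw storage of sizeof(Type)
 * bytes and does not run Type's constructor, and free() does not run the
 * destructor; constructing and destroying the object is the caller's job
 * (assuming the usual placement operator new is available).
 *
 *   struct Session { int id; };
 *   static cxx::Slab<Session> session_slab;
 *
 *   Session *s = session_slab.alloc();   // raw storage or 0
 *   if (s)
 *     {
 *       s = new (s) Session();           // construct in place
 *       // ... use *s ...
 *       s->~Session();                   // destroy before returning storage
 *       session_slab.free(s);
 *     }
 */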


/**
 * \ingroup cxx_api
 * \brief Merged slab allocator (allocators for objects of the same size
 *        are merged together).
 *
 * \param Obj_size  The size of an object managed by the slab allocator.
 * \param Slab_size The size of a slab cache.
 * \param Max_free  The maximum number of free slab caches.
 * \param Alloc     The allocator for the slab caches.
 *
 * This slab allocator class is useful for merging slab allocators with the
 * same parameters (equal \a Obj_size, \a Slab_size, \a Max_free, and
 * \a Alloc parameters) and sharing the overhead of the slab caches
 * among all equal-sized objects.
 */
template< int Obj_size, int Slab_size = L4_PAGESIZE,
          int Max_free = 2, template<typename A> class Alloc = New_allocator >
class Base_slab_static
{
private:
  typedef Base_slab<Obj_size, Slab_size, Max_free, Alloc> _A;
  static _A _a;

public:
  typedef void Obj_type;
  enum
  {
    object_size = Obj_size,   ///< size of an object.
    slab_size   = Slab_size,  ///< size of a slab cache.
    /// number of objects per slab cache.
    objects_per_slab = _A::objects_per_slab,
    max_free_slabs = Max_free, ///< maximum number of free slab caches.
  };

  /** \brief Allocate an object. */
  void *alloc() throw() { return _a.alloc(); }
  /**
   * \brief Free the given object (\a p).
   * \param p The pointer to the object to free.
   * \pre \a p must be a pointer to an object allocated by this allocator.
   */
  void free(void *p) throw() { _a.free(p); }

  /**
   * \brief Get the total number of objects managed by the slab allocator.
   * \return The number of objects managed by the allocator (including the
   *         free objects).
   * \note The value is the merged value for all equally parameterized
   *       Base_slab_static instances.
   */
  unsigned total_objects() const throw() { return _a.total_objects(); }

  /**
   * \brief Get the number of free objects in the slab allocator.
   * \return The number of free objects in all empty and partially used
   *         slab caches managed by this allocator.
   * \note The value is the merged value for all equally parameterized
   *       Base_slab_static instances.
   */
  unsigned free_objects() const throw() { return _a.free_objects(); }
};
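
/*
 * Usage sketch (illustrative only): because the actual allocator `_a` is a
 * static member, every Base_slab_static object with identical template
 * parameters operates on the same underlying slab caches, so objects may be
 * freed through any instance of the same instantiation.
 *
 *   cxx::Base_slab_static<32> a;
 *   cxx::Base_slab_static<32> b;   // shares all slab caches with `a`
 *
 *   void *p = a.alloc();
 *   if (p)
 *     b.free(p);                   // fine: both delegate to the same static _A
 */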


template< int _O, int _S, int _M, template<typename A> class Alloc >
typename Base_slab_static<_O,_S,_M,Alloc>::_A
  Base_slab_static<_O,_S,_M,Alloc>::_a;

/**
 * \ingroup cxx_api
 * \brief Merged slab allocator (allocators for objects of the same size
 *        are merged together).
 *
 * \param Type      The type of the objects to manage.
 * \param Slab_size The size of a slab cache.
 * \param Max_free  The maximum number of free slab caches.
 * \param Alloc     The allocator for the slab caches.
 *
 * This slab allocator class is useful for merging slab allocators with the
 * same parameters (equal \a sizeof(Type), \a Slab_size, \a Max_free, and
 * \a Alloc parameters) and sharing the overhead of the slab caches
 * among all equal-sized objects.
 */
template<typename Type, int Slab_size = L4_PAGESIZE,
         int Max_free = 2, template<typename A> class Alloc = New_allocator >
class Slab_static
: public Base_slab_static<sizeof(Type), Slab_size, Max_free, Alloc>
{
public:
  typedef Type Obj_type;

  /**
   * \brief Allocate an object of type \a Type.
   * \return A pointer to the just allocated object, or 0 on failure.
   */
  Type *alloc() throw()
  {
    return (Type*)Base_slab_static<sizeof(Type), Slab_size,
                                   Max_free, Alloc>::alloc();
  }
};
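
/*
 * Usage sketch (illustrative only; `Node` is a hypothetical type). As with
 * Slab, alloc() returns uninitialized storage. All Slab_static instances
 * whose element types have the same size (and whose remaining parameters
 * are equal) share one set of slab caches.
 *
 *   struct Node { Node *next; long value; };
 *
 *   cxx::Slab_static<Node> node_slab;
 *   Node *n = node_slab.alloc();   // raw storage or 0
 *   if (n)
 *     {
 *       n = new (n) Node();
 *       // ... use *n ...
 *       n->~Node();
 *       node_slab.free(n);
 *     }
 */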

}