// AUTOMATICALLY GENERATED -- DO NOT EDIT!  -*- c++ -*-

#ifndef slab_cache_anon_i_h
#define slab_cache_anon_i_h

#include <stddef.h>
#include <stdlib.h>
#include <assert.h>

class slab_cache_anon;	// forward declaration; the inline code below
			// additionally needs its full definition

//
// class slab
//

class slab			// a slab of the cache
{
  slab();			// declared private and left undefined:
  slab(const slab&);		// default construction and copying are forbidden

  struct slab_entry
  {
    slab_entry *_next_free;	// link in this slab's free list
    char _entry[0];		// payload follows the link word
				// (GNU zero-length-array idiom)
  };

  struct slab_data
  {
    slab_cache_anon *_cache;	// cache this slab belongs to
    slab_entry *_first_free;	// head of the free list
    slab *_next, *_prev;	// links in the cache's slab list
    unsigned short _in_use, _free;	// counts of allocated and free entries
  };

  // every slab should contain at least min_cache_items cache entries
  static const unsigned min_cache_items = 4;

  //
  // data declaration follows
  //
  slab_data _data;

public:
  slab(slab_cache_anon *cache);

  inline ~slab();

  void *alloc();

  void free(void *entry);

  inline bool is_empty();

  inline bool is_full();

  void enqueue(slab *prev);

  void dequeue();

  inline slab *prev();

  inline slab *next();

  inline void *operator new(size_t, slab_cache_anon *cache);

private:
  // the default deallocator must not be called -- slabs are destroyed
  // explicitly (see the destructor)
  inline void operator delete(void* /*block*/);
}; // end declaration of class slab

//
// IMPLEMENTATION of inline functions follows
//

inline slab::~slab()
{
  assert(_data._in_use == 0);

  // with no entry in use, every entry sits on the free list; run the
  // element destructor on each one before the slab goes away
  slab_entry *e = _data._first_free;

  while (e)
    {
      _data._cache->elem_dtor(& e->_entry[0]);
      e = e->_next_free;
    }
}

inline bool
slab::is_empty()
{
  return _data._in_use == 0;
}

inline bool
slab::is_full()
{
  return _data._free == 0;
}

inline slab *
slab::prev()
{
  return _data._prev;
}

inline slab *
slab::next()
{
  return _data._next;
}

inline void *
slab::operator new(size_t,
		   slab_cache_anon *cache)
{
  // slabs must be size-aligned so that we can compute their addresses
  // from element addresses
  return cache->block_alloc(cache->_slab_size, cache->_slab_size);
}

#endif // slab_cache_anon_i_h
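
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the generated header).  alloc() and
// free() are defined out of line and their bodies are not shown here; given
// the slab_entry layout, allocation plausibly pops the head of the singly
// linked free list and returns the payload behind the link word, while
// free() pushes the entry back.  free_link, pop_free_entry and
// push_free_entry are hypothetical stand-ins mirroring slab_entry, not the
// real member definitions.

struct free_link
{
  free_link *next;	// mirrors slab_entry::_next_free
  char payload[1];	// mirrors slab_entry::_entry
};

static inline void *
pop_free_entry(free_link *&head)
{
  free_link *e = head;
  if (! e)
    return 0;		// no free entry left -- this slab is full

  head = e->next;	// pop the free-list head
  return e->payload;	// hand out the payload behind the link word
}

static inline void
push_free_entry(free_link *&head, void *entry)
{
  // recover the link header that sits immediately before the payload
  free_link *e = reinterpret_cast<free_link *>(
    static_cast<char *>(entry) - offsetof(free_link, payload));
  e->next = head;
  head = e;
}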
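
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the generated header).  The comment in
// slab::operator new says slabs are size-aligned so that a slab's address
// can be computed from an element address.  Assuming the slab size is a
// power of two, masking off the low bits of any pointer into the slab yields
// the slab's start address.  entry_to_slab and slab_size are names invented
// for this demonstration only.

#include <stdint.h>

static inline slab *
entry_to_slab(void *entry, uintptr_t slab_size)
{
  // Every byte of a size-aligned slab shares the high-order address bits of
  // the slab's start, so clearing the low log2(slab_size) bits recovers it.
  return reinterpret_cast<slab *>(
           reinterpret_cast<uintptr_t>(entry) & ~(slab_size - 1));
}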
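
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the generated header).  Since operator
// delete is private, a slab is never destroyed with `delete`; the intended
// protocol appears to be placement construction through the cache-taking
// operator new, an explicit destructor call, and returning the block to the
// cache.  block_free is an assumed counterpart to block_alloc and may not
// match the real slab_cache_anon interface.

static void
slab_lifecycle_sketch(slab_cache_anon *cache)
{
  slab *s = new (cache) slab(cache);	// size-aligned block from the cache

  // ... s->alloc() and s->free() cycles; _in_use/_free track occupancy ...

  assert(s->is_empty());	// the destructor asserts no entry is in use
  s->~slab();			// explicit destruction runs elem_dtor on
				// the free entries
  // hypothetical: hand the size-aligned block back to the cache
  // cache->block_free(s, cache->_slab_size);
}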