#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/pagevec.h>
#include <linux/mm.h>
#include <asm/page.h>

#include <l4/dde/ddekit/memory.h>
#include <l4/dde/ddekit/assert.h>
#include <l4/dde/ddekit/panic.h>

#include "local.h"

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

#define DEBUG_PAGE_ALLOC 0
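
/*
 * DDE page cache
 *
 * Each page handed out by this allocator is described by a kmalloc'ed
 * 'struct page'.  To map a kernel virtual address back to its struct page,
 * the pages are kept in a small hash table indexed by the virtual address.
 */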
#define DDE_PAGE_CACHE_SHIFT 10
#define DDE_PAGE_CACHE_SIZE  (1 << DDE_PAGE_CACHE_SHIFT)
#define DDE_PAGE_CACHE_MASK  (DDE_PAGE_CACHE_SIZE - 1)

typedef struct
{
    struct hlist_node  list;
    struct page       *page;
} page_cache_entry;

static struct hlist_head dde_page_cache[DDE_PAGE_CACHE_SIZE];

/* Hash a virtual address to a bucket index in dde_page_cache[]. */
#define VIRT_TO_PAGEHASH(a) ((((unsigned long)(a)) >> PAGE_SHIFT) & DDE_PAGE_CACHE_MASK)
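
/*
 * Register a freshly allocated page in the DDE page cache so that
 * dde_page_lookup() can later map its virtual address back to 'p'.
 */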
void dde_page_cache_add(struct page *p)
{
    unsigned int hashval = VIRT_TO_PAGEHASH(p->virtual);
    page_cache_entry *e  = kmalloc(sizeof(page_cache_entry), GFP_KERNEL);

    /* Without a cache entry we simply leave the page untracked. */
    if (!e)
        return;

#if DEBUG_PAGE_ALLOC
    DEBUG_MSG("virt %p, hash: %x", p->virtual, hashval);
#endif

    e->page = p;
    INIT_HLIST_NODE(&e->list);

    hlist_add_head(&e->list, &dde_page_cache[hashval]);
}

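/*
 * Remove the page cache entry for 'p', if present.  Entries are matched
 * by the page-aligned virtual address of the page.
 */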
void dde_page_cache_remove(struct page *p)
{
    unsigned int hashval = VIRT_TO_PAGEHASH(p->virtual);
    struct hlist_node *hn = NULL;
    struct hlist_head *h  = &dde_page_cache[hashval];
    page_cache_entry  *e  = NULL;
    struct hlist_node *v  = NULL;

    hlist_for_each_entry(e, hn, h, list) {
        if ((unsigned long)e->page->virtual == ((unsigned long)p->virtual & PAGE_MASK)) {
            v = hn;
            break;
        }
    }

    if (v) {
#if DEBUG_PAGE_ALLOC
        DEBUG_MSG("deleting node %p which contained page %p", v, p);
#endif
        hlist_del(v);
        /* The bookkeeping entry was kmalloc'ed in dde_page_cache_add(). */
        kfree(hlist_entry(v, page_cache_entry, list));
    }
}

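/*
 * Return the struct page backing the virtual address 'va', or NULL if the
 * address is unknown to the page cache.  This can back a virt_to_page()
 * style translation, e.g. (illustrative only):
 *
 *   struct page *pg = dde_page_lookup((unsigned long)buffer);
 */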
struct page* dde_page_lookup(unsigned long va)
{
    unsigned int hashval = VIRT_TO_PAGEHASH(va);

    struct hlist_node *hn = NULL;
    struct hlist_head *h  = &dde_page_cache[hashval];
    page_cache_entry  *e  = NULL;

    hlist_for_each_entry(e, hn, h, list) {
        if ((unsigned long)e->page->virtual == (va & PAGE_MASK))
            return e->page;
    }

    return NULL;
}

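/*
 * Allocate 2^order pages plus a kmalloc'ed struct page describing them.
 * There is no global page array in this environment; the struct page is
 * created on demand and registered in the DDE page cache.
 */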
struct page * __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
                                     struct zonelist *zonelist, nodemask_t *nm)
{
    struct page *ret = kmalloc(sizeof(*ret), GFP_KERNEL);

    if (!ret)
        return NULL;

    ret->virtual = (void *)__get_free_pages(gfp_mask, order);
    if (!ret->virtual) {
        kfree(ret);
        return NULL;
    }

    dde_page_cache_add(ret);

    return ret;
}

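/*
 * Back 2^order pages with memory from the ddekit large-block allocator and
 * return the virtual start address (0 on failure).  DMA memory is not
 * supported by this backend.
 */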
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
    void *p;

    ddekit_log(DEBUG_PAGE_ALLOC, "gfp_mask=%x order=%d (%ld bytes)",
               gfp_mask, order, PAGE_SIZE << order);

    /* The ddekit backend cannot provide memory from the DMA zone. */
    Assert(!(gfp_mask & GFP_DMA));

    p = ddekit_large_malloc(PAGE_SIZE << order);

    return (unsigned long)p;
}

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
    unsigned long p = __get_free_pages(gfp_mask, 0);

    if (p)
        memset((void *)p, 0, PAGE_SIZE);

    return p;
}

void free_hot_page(struct page *page)
{
    WARN_UNIMPL;
}

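/*
 * Release the memory backing 'page' and drop the page from the DDE page
 * cache.  The struct page itself is not freed here.
 */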
void __free_pages(struct page *page, unsigned int order)
{
    free_pages((unsigned long)page->virtual, order);
    dde_page_cache_remove(page);
}

void __pagevec_free(struct pagevec *pvec)
{
    WARN_UNIMPL;
}

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                   unsigned long start, int len, int write, int force,
                   struct page **pages, struct vm_area_struct **vmas)
{
    WARN_UNIMPL;
    return 0;
}

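/*
 * Hand memory obtained via __get_free_pages() back to the ddekit
 * large-block allocator.  The order argument is not needed, since
 * ddekit_large_free() only takes the start address.
 */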
void free_pages(unsigned long addr, unsigned int order)
{
    ddekit_log(DEBUG_PAGE_ALLOC, "addr=%p order=%d", (void *)addr, order);

    ddekit_large_free((void *)addr);
}

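/* Virtual <-> physical address translation via the ddekit page tables. */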
unsigned long __pa(volatile void *addr)
{
    return ddekit_pgtab_get_physaddr((void *)addr);
}

void *__va(unsigned long addr)
{
    return (void *)ddekit_pgtab_get_virtaddr((ddekit_addr_t)addr);
}

int set_page_dirty_lock(struct page *page)
{
    WARN_UNIMPL;
    return 0;
}

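/*
 * Simplified stand-in for the Linux mm helper of the same name: allocate a
 * hash table of 'numentries' buckets (default 1024) of 'bucketsize' bytes
 * each, halving the number of entries while the allocation fails.
 */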
void *__init alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
                                     int scale,
                                     int flags,
                                     unsigned int *_hash_shift,
                                     unsigned int *_hash_mask,
                                     unsigned long limit)
{
    void *table = NULL;
    unsigned long log2qty;
    unsigned long size;

    if (numentries == 0)
        numentries = 1024;

    log2qty = ilog2(numentries);

    do {
        unsigned long order;

        /* Recompute the table size so that retries actually shrink it. */
        size = bucketsize << log2qty;
        for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
            ;
        table = (void *)__get_free_pages(GFP_ATOMIC, order);
    } while (!table && size > PAGE_SIZE && --log2qty);

    if (!table)
        panic("Failed to allocate %s hash table\n", tablename);

    printk("%s hash table entries: %lu (order: %d, %lu bytes)\n",
           tablename,
           (1UL << log2qty),
           ilog2(size) - PAGE_SHIFT,
           size);

    if (_hash_shift)
        *_hash_shift = log2qty;
    if (_hash_mask)
        *_hash_mask = (1 << log2qty) - 1;

    return table;
}
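
/* Initialize the hash buckets of the DDE page cache at core_initcall time. */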
static void __init dde_page_cache_init(void)
{
    int i;

    printk("Initializing DDE page cache\n");

    for (i = 0; i < DDE_PAGE_CACHE_SIZE; ++i)
        INIT_HLIST_HEAD(&dde_page_cache[i]);
}

core_initcall(dde_page_cache_init);