/*
 *  linux/mm/kmalloc.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds & Roger Wolff.
 *
 *  Written by R.E. Wolff Sept/Oct '93.
 *
 */

/*
 * Modified by Alex Bligh (alex@cconcepts.co.uk) 4 Apr 1994 to use multiple
 * pages. So for 'page' throughout, read 'area'.
 *
 * Largely rewritten.. Linus
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/dma.h>

/* Define this if you want slow routines that try to trip errors */
#undef SADISTIC_KMALLOC

/* Private flags. */

#define MF_USED 0xffaa0055
#define MF_DMA  0xff00aa55
#define MF_FREE 0x0055ffaa


/*
 * Much care has gone into making these routines in this file reentrant.
 *
 * The fancy bookkeeping of nbytesmalloced and the like is only used to
 * report statistics to the user (oooohhhhh, aaaaahhhhh....) and is not
 * protected by cli(). (If that goes wrong, so what?)
 *
 * These routines restore the interrupt status to allow calling with ints
 * off.
 */
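
/*
 * The pattern used throughout (a sketch, not a routine from this file):
 *
 *        unsigned long flags;
 *
 *        save_flags(flags);
 *        cli();
 *        ... manipulate a freelist ...
 *        restore_flags(flags);
 *
 * restore_flags() puts back the caller's interrupt state instead of
 * unconditionally re-enabling interrupts, which is what makes these
 * routines safe to call with interrupts already disabled.
 */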

/*
 * A block header. This is in front of every malloc-block, whether free or not.
 */
struct block_header {
        unsigned long bh_flags;
        union {
                unsigned long ubh_length;
                struct block_header *fbh_next;
        } vp;
};


#define bh_length vp.ubh_length
#define bh_next   vp.fbh_next
#define BH(p) ((struct block_header *)(p))
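
/*
 * The union keeps the header at two words: a block is either in use
 * (vp then holds the length handed to kmalloc()) or on its page's
 * freelist (vp then holds the next free block), never both at once.
 * On a 32-bit machine that makes sizeof(struct block_header) == 8.
 */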


/*
 * The page descriptor is at the front of every page that malloc has in use.
 */
struct page_descriptor {
        struct page_descriptor *next;
        struct block_header *firstfree;
        int order;
        int nfree;
};


#define PAGE_DESC(p) ((struct page_descriptor *)(((unsigned long)(p)) & PAGE_MASK))
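
/*
 * Example, assuming PAGE_SIZE == 4096 (PAGE_MASK == 0xfffff000 on a
 * 32-bit machine): a block at 0x0234567c belongs to the descriptor at
 * 0x02345000.  This works for multi-page areas too, since buckets with
 * gfporder > 0 hold a single block that starts right after the
 * descriptor at the beginning of the area's first page.
 */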


/*
 * A size descriptor describes a specific class of malloc sizes.
 * Each class of sizes has its own freelist.
 */
struct size_descriptor {
        struct page_descriptor *firstfree;
        struct page_descriptor *dmafree;        /* DMA-able memory */
        int nblocks;

        int nmallocs;
        int nfrees;
        int nbytesmalloced;
        int npages;
        unsigned long gfporder; /* get_free_pages() order: the area is PAGE_SIZE << gfporder bytes */
};

/*
 * For now it is unsafe to allocate bucket sizes between n and
 * n - sizeof(struct page_descriptor), where n is PAGE_SIZE * any power of two.
 *
 * The blocksize and sizes arrays _must_ match!
 */
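
/*
 * This is why many entries in the tables below stop just short of a
 * power of two (252, 508, ..., 4096 - 16): every area must also hold
 * its struct page_descriptor, and every block its struct block_header
 * (16 and 8 bytes respectively, assuming 32-bit longs and pointers).
 */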
#if PAGE_SIZE == 4096
static const unsigned int blocksize[] = {
        32,
        64,
        128,
        252,
        508,
        1020,
        2040,
        4096 - 16,
        8192 - 16,
        16384 - 16,
        32768 - 16,
        65536 - 16,
        131072 - 16,
        262144 - 16,
#ifdef BIGALLOCS
        524288 - 16,
        1048576 - 16,
/* SIMON: change of source file - to get mw working */
        2097152 - 16,
#endif
        0
};

static struct size_descriptor sizes[] =
{
        {NULL, NULL, 127, 0, 0, 0, 0, 0},
        {NULL, NULL, 63, 0, 0, 0, 0, 0},
        {NULL, NULL, 31, 0, 0, 0, 0, 0},
        {NULL, NULL, 16, 0, 0, 0, 0, 0},
        {NULL, NULL, 8, 0, 0, 0, 0, 0},
        {NULL, NULL, 4, 0, 0, 0, 0, 0},
        {NULL, NULL, 2, 0, 0, 0, 0, 0},
        {NULL, NULL, 1, 0, 0, 0, 0, 0},
        {NULL, NULL, 1, 0, 0, 0, 0, 1},
        {NULL, NULL, 1, 0, 0, 0, 0, 2},
        {NULL, NULL, 1, 0, 0, 0, 0, 3},
        {NULL, NULL, 1, 0, 0, 0, 0, 4},
        {NULL, NULL, 1, 0, 0, 0, 0, 5},
        {NULL, NULL, 1, 0, 0, 0, 0, 6},
#ifdef BIGALLOCS
        {NULL, NULL, 1, 0, 0, 0, 0, 7},
        {NULL, NULL, 1, 0, 0, 0, 0, 8},
        {NULL, NULL, 1, 0, 0, 0, 0, 9},
#endif
        {NULL, NULL, 0, 0, 0, 0, 0, 0},
};
#elif PAGE_SIZE == 8192
static const unsigned int blocksize[] = {
        64,
        128,
        248,
        504,
        1016,
        2040,
        4080,
        8192 - 32,
        16384 - 32,
        32768 - 32,
        65536 - 32,
        131072 - 32,
        262144 - 32,
        0
};

static struct size_descriptor sizes[] =
{
        {NULL, NULL, 127, 0, 0, 0, 0, 0},
        {NULL, NULL, 63, 0, 0, 0, 0, 0},
        {NULL, NULL, 31, 0, 0, 0, 0, 0},
        {NULL, NULL, 16, 0, 0, 0, 0, 0},
        {NULL, NULL, 8, 0, 0, 0, 0, 0},
        {NULL, NULL, 4, 0, 0, 0, 0, 0},
        {NULL, NULL, 2, 0, 0, 0, 0, 0},
        {NULL, NULL, 1, 0, 0, 0, 0, 0},
        {NULL, NULL, 1, 0, 0, 0, 0, 1},
        {NULL, NULL, 1, 0, 0, 0, 0, 2},
        {NULL, NULL, 1, 0, 0, 0, 0, 3},
        {NULL, NULL, 1, 0, 0, 0, 0, 4},
        {NULL, NULL, 1, 0, 0, 0, 0, 5},
        {NULL, NULL, 0, 0, 0, 0, 0, 0}
};
#else
#error you need to make a version for your pagesize
#endif

#define NBLOCKS(order)          (sizes[order].nblocks)
#define BLOCKSIZE(order)        (blocksize[order])
#define AREASIZE(order)         (PAGE_SIZE<<(sizes[order].gfporder))
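
/*
 * Worked example, assuming PAGE_SIZE == 4096 and 32-bit longs and
 * pointers (sizeof(struct block_header) == 8,
 * sizeof(struct page_descriptor) == 16): kmalloc(100, ...) needs
 * 108 bytes including its header, which falls in the 128-byte bucket,
 * order 2.  That bucket packs NBLOCKS(2) == 31 blocks into an order-0
 * area:
 *
 *        31 * 128 + 16 = 3984 <= AREASIZE(2) == 4096
 *
 * which is exactly the invariant kmalloc_init() verifies below.
 */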


/*
 * Create a small cache of page allocations: this helps a bit with
 * those pesky 8kB+ allocations for NFS when we're temporarily
 * out of memory..
 *
 * This is a _truly_ small cache: we just cache one single page
 * per order (for orders 0, 1 and 2, that is 4, 8 and 16kB on x86).
 */
#define MAX_CACHE_ORDER 3
struct page_descriptor * kmalloc_cache[MAX_CACHE_ORDER];

static inline struct page_descriptor * get_kmalloc_pages(unsigned long priority,
        unsigned long order, int dma)
{
        return (struct page_descriptor *) __get_free_pages(priority, order, dma);
}

static inline void free_kmalloc_pages(struct page_descriptor * page,
        unsigned long order, int dma)
{
        if (!dma && order < MAX_CACHE_ORDER) {
                page = xchg(kmalloc_cache+order, page);
                if (!page)
                        return;
        }
        free_pages((unsigned long) page, order);
}
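
/*
 * The xchg() in free_kmalloc_pages() is what keeps this cache
 * lock-free: the page being freed is swapped into the per-order cache
 * slot atomically, and whatever previously occupied the slot (possibly
 * NULL) is what really goes to free_pages().  kmalloc() below performs
 * the mirror-image swap when it runs out of free pages.
 */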

long kmalloc_init(long start_mem, long end_mem)
{
        int order;

/*
 * Check the static info array. Things will blow up terribly if it's
 * incorrect. This is a late "compile time" check.....
 */
        for (order = 0; BLOCKSIZE(order); order++) {
                if ((NBLOCKS(order) * BLOCKSIZE(order) + sizeof(struct page_descriptor)) >
                    AREASIZE(order)) {
                        printk("Cannot use %d bytes out of %d in order = %d block mallocs\n",
                               (int) (NBLOCKS(order) * BLOCKSIZE(order) +
                                      sizeof(struct page_descriptor)),
                               (int) AREASIZE(order),
                               order);
                        panic("This only happens if someone messes with kmalloc");
                }
        }
        return start_mem;
}


/*
 * Ugh, this is ugly, but we want the default case to run
 * straight through, which is why we have the ugly goto's
 */
void *kmalloc(size_t size, int priority)
{
        unsigned long flags;
        unsigned long type;
        int order, dma;
        struct block_header *p;
        struct page_descriptor *page, **pg;
        struct size_descriptor *bucket = sizes;

        /* Get order */
        order = 0;
        {
                unsigned int realsize = size + sizeof(struct block_header);
                for (;;) {
                        int ordersize = BLOCKSIZE(order);
                        if (realsize <= ordersize)
                                break;
                        order++;
                        bucket++;
                        if (ordersize)
                                continue;
                        printk("kmalloc of too large a block (%d bytes).\n", (int) size);
                        return NULL;
                }
        }

        dma = 0;
        type = MF_USED;
        pg = &bucket->firstfree;
        if (priority & GFP_DMA) {
                dma = 1;
                type = MF_DMA;
                pg = &bucket->dmafree;
        }

        priority &= GFP_LEVEL_MASK;

/* Sanity check... */
        if (intr_count && priority != GFP_ATOMIC) {
                static int count = 0;
                if (++count < 5) {
                        printk("kmalloc called nonatomically from interrupt %p\n",
                               __builtin_return_address(0));
                        priority = GFP_ATOMIC;
                }
        }

        save_flags(flags);
        cli();
        page = *pg;
        if (!page)
                goto no_bucket_page;

        p = page->firstfree;
        if (p->bh_flags != MF_FREE)
                goto not_free_on_freelist;

found_it:
        page->firstfree = p->bh_next;
        page->nfree--;
        if (!page->nfree)
                *pg = page->next;
        restore_flags(flags);
        bucket->nmallocs++;
        bucket->nbytesmalloced += size;
        p->bh_flags = type;     /* As of now this block is officially in use */
        p->bh_length = size;
#ifdef SADISTIC_KMALLOC
        memset(p+1, 0xf0, size);
#endif
        return p + 1;           /* Pointer arithmetic: increments past header */


no_bucket_page:
        /*
         * If we didn't find a page already allocated for this
         * bucket size, we need to get one..
         *
         * This can be done with ints on: it is private to this invocation
         */
        restore_flags(flags);

        {
                int i, sz;

                /* sz is the size of the blocks we're dealing with */
                sz = BLOCKSIZE(order);

                page = get_kmalloc_pages(priority, bucket->gfporder, dma);
                if (!page)
                        goto no_free_page;
found_cached_page:

                bucket->npages++;

                page->order = order;
                /* Loop for all but last block: */
                i = (page->nfree = bucket->nblocks) - 1;
                p = BH(page + 1);
                while (i > 0) {
                        i--;
                        p->bh_flags = MF_FREE;
                        p->bh_next = BH(((long) p) + sz);
                        p = p->bh_next;
                }
                /* Last block: */
                p->bh_flags = MF_FREE;
                p->bh_next = NULL;

                p = BH(page+1);
        }

        /*
         * Now we're going to muck with the "global" freelist
         * for this size: this should be uninterruptible
         */
        cli();
        page->next = *pg;
        *pg = page;
        goto found_it;


no_free_page:
        /*
         * No free pages, check the kmalloc cache of
         * pages to see if maybe we have something available
         */
        if (!dma && order < MAX_CACHE_ORDER) {
                page = xchg(kmalloc_cache+order, page);
                if (page)
                        goto found_cached_page;
        }
        {
                static unsigned long last = 0;
                if (priority != GFP_BUFFER && priority != GFP_IO &&
                    (last + 10 * HZ < jiffies)) {
                        last = jiffies;
                        printk("Couldn't get a free page.....\n");
                }
                return NULL;
        }

not_free_on_freelist:
        restore_flags(flags);
        printk("Problem: block on freelist at %08lx isn't free.\n", (long) p);
        return NULL;
}
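
/*
 * Typical use, as a sketch (struct foo is hypothetical; GFP_KERNEL
 * callers may sleep, while interrupt handlers must use GFP_ATOMIC):
 *
 *        struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *        if (!f)
 *                return -ENOMEM;
 *        ...
 *        kfree(f);
 */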

void kfree(void *__ptr)
{
        int dma;
        unsigned long flags;
        unsigned int order;
        struct page_descriptor *page, **pg;
        struct size_descriptor *bucket;

        if (!__ptr)
                goto null_kfree;
#define ptr ((struct block_header *) __ptr)
        page = PAGE_DESC(ptr);
        __ptr = ptr - 1;
        if (~PAGE_MASK & (unsigned long)page->next)
                goto bad_order;
        order = page->order;
        if (order >= sizeof(sizes) / sizeof(sizes[0]))
                goto bad_order;
        bucket = sizes + order;
        dma = 0;
        pg = &bucket->firstfree;
        if (ptr->bh_flags == MF_DMA) {
                dma = 1;
                ptr->bh_flags = MF_USED;
                pg = &bucket->dmafree;
        }
        if (ptr->bh_flags != MF_USED)
                goto bad_order;
        ptr->bh_flags = MF_FREE;        /* As of now this block is officially free */
#ifdef SADISTIC_KMALLOC
        memset(ptr+1, 0xe0, ptr->bh_length);
#endif
        save_flags(flags);
        cli();

        bucket->nfrees++;
        bucket->nbytesmalloced -= ptr->bh_length;

        ptr->bh_next = page->firstfree;
        page->firstfree = ptr;
        if (!page->nfree++) {
/* Page went from full to one free block: put it on the freelist. */
                if (bucket->nblocks == 1)
                        goto free_page;
                page->next = *pg;
                *pg = page;
        }
/* If page is completely free, free it */
        if (page->nfree == bucket->nblocks) {
                for (;;) {
                        struct page_descriptor *tmp = *pg;
                        if (!tmp)
                                goto not_on_freelist;
                        if (tmp == page)
                                break;
                        pg = &tmp->next;
                }
                *pg = page->next;
free_page:
                bucket->npages--;
                free_kmalloc_pages(page, bucket->gfporder, dma);
        }
        restore_flags(flags);
null_kfree:
        return;

bad_order:
        printk("kfree of non-kmalloced memory: %p, next= %p, order=%d\n",
               ptr+1, page->next, page->order);
        return;

not_on_freelist:
        restore_flags(flags);
        printk("Oops. page %p doesn't show on freelist.\n", page);
}
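
/*
 * Note the cheap sanity chain at the top of kfree(): a real page
 * descriptor has a page-aligned ->next, an ->order within the sizes[]
 * table, and a block whose header still carries MF_USED or MF_DMA.
 * Anything else takes the bad_order path instead of corrupting a
 * freelist.
 */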

unsigned int ksize(void *__ptr)
{
        unsigned int order;
        struct page_descriptor *page;

        if (!__ptr)
                return 0;
#define ptr ((struct block_header *) __ptr)
        page = PAGE_DESC(ptr);
        __ptr = ptr - 1;

        if (~PAGE_MASK & (unsigned long)page->next)
                return 0;

        order = page->order;
        if (order >= sizeof(sizes) / sizeof(sizes[0]))
                return 0;

        return BLOCKSIZE(order);
}
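
/*
 * Note that ksize() reports the raw bucket size, which still includes
 * the block header: with the 4kB-page numbers used in the examples
 * above, a kmalloc(100, ...) block lands in the 128-byte bucket, so
 * ksize() returns 128 even though only 120 bytes of it are usable
 * payload.
 */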
