OpenCores
URL https://opencores.org/ocsvn/or1k_old/or1k_old/trunk

Subversion Repositories or1k_old

[/] [or1k_old/] [trunk/] [rc203soc/] [sw/] [uClinux/] [arch/] [armnommu/] [mm/] [kmalloc-arm.c] - Blame information for rev 1622

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1622 jcastillo
/*
2
 *  linux/mm/kmalloc.c
3
 *
4
 *  Copyright (C) 1994, 1995, 1996 R.M.King, ???, Linus Torvalds
5
 *
6
 * Changelog:
7
 *  4/1/96      RMK     Changed allocation sizes to suit requested
8
 *                      sizes.  On i386, and other 4k page machines,
9
 *                      it should use around 2/3 of the memory
10
 *                      that it used to.
11
 *                      Allowed recursive mallocing for machines with
12
 *                      ridiculously large page sizes to reduce
13
 *                      memory wastage.
14
 */
15
 
16
#include <linux/mm.h>
17
#include <linux/delay.h>
18
#include <linux/interrupt.h>
19
#include <asm/system.h>
20
#include <asm/dma.h>
21
 
22
/*
23
 *
24
 * A study of the sizes requested of kmalloc in v1.3.35 shows the following pattern:
25
 *
26
 *                      Req.            ------- Allocd -------
27
 * Size   Times req.    Total(B)        size     Total   Pages          Wastage
28
 *   16          7        112             32       224     1              16
29
 *   36          3        108             64       192     1              28
30
 *   56         22       1232             64      1408     1               8
31
 *   68          3        204            128       384     1              60
32
 *  128         26       3328            256      6656     2               0
33
 *  512          5       2560           1020      5100     2             508
34
 *  516          4       2064           1020      4080     1             504
35
 *  752          4       3008           1020      4080     1             268
36
 * 1024          1       1024           2040      1024     1               0
37
 * 1060          4       4240           2040      8160     2             980
38
 * 2048          2       4096           4080      8160     2            2032
39
 * 2956          1       2956           4080      4080     1            1124
40
 * 4096          8      32768           8176     65408    16            4080
41
 * ---------------------------------------------------------------------------
42
 *      TOTALS          57700                   108956    32
43
 *                                                      (128k)
44
 *
45
 * On average, the old kmalloc uses twice the amount of memory required with a
46
 * page size of 4k.  On 32k (which is what I'm dealing with):
47
 *
48
 *                      Req.            ------- Allocd -------
49
 * Size   Times req.    Total(B)        size     Total   Pages          Wastage
50
 *   16          7        112             32       224     1               8
51
 *   36          3        108             64       192     1              20
52
 *   56         22       1232             64      1408     1               0
53
 *   68          3        204            128       384     1              52
54
 *  128         26       3328            256      6656     1             120
55
 *  512          5       2560           1020      5100     2             500
56
 *  516          4       2064           1020      4080     1             496
57
 *  752          4       3008           1020      4080     1             260
58
 * 1024          1       1024           2040      1024     1               0
59
 * 1060          4       4240           2040      8160     2             972
60
 * 2048          2       4096           4080      8160     2            2026
61
 * 2956          1       2956           4080      4080     1            1116
62
 * 4096          8      32768           8176     65408     2            4072
63
 * ---------------------------------------------------------------------------
64
 *      TOTALS          57700                   108956    17
65
 *                                                      (544k)
66
 *
67
 * Using the old kmalloc system, this ate up a lot of memory in the rounding
68
 * up of the sizes.
69
 * We now use a similar strategy to the old kmalloc, except we allocate in
70
 * sizes determined by looking at the use of kmalloc.  There is an overhead of
71
 * 2 words on each malloc for mallocs private data, and 4 words at the beginning
72
 * of the page/4k block.
73
 *
74
 *                      Req.            ------ Allocd --(pages)
75
 * Size   Times req.    Total(B)        size    total   4k  32k         wastage
76
 *   16          7        112             24      168    1                 0
77
 *   36          3        108             48      144    1                 0
78
 *   56         22       1232             64     1408    1                 0
79
 *   68          3        204             80      240    1                 4
80
 *  128         26       3328            136     3536    1                 0
81
 *  512          5       2560            580     2900    1   2            60
82
 *  516          4       2064            580     2320    1                56
83
 *  752          4       3008            816     3264    1                56
84
 * 1024          1       1024           1360     1360    1               328
85
 * 1060          4       4240           1360     5440    2              2048
86
 * 2048          2       4096           4096     8192    2              1140
87
 * 2956          1       2956           4096     4096    1                20
88
 * 4096          8      32768           4676    37408   10   2           582
89
 * --------------------------------------------------------------------------
90
 *      TOTALS          57700                   70476   24   4
91
 *                                                    (96k) (128k)
92
 *
93
 */
94
 
95
/* Magic values stored in mblk_header.mb_flags / page_header.ph_flags so
 * that kmalloc/kfree can detect corruption and frees of foreign memory. */
#define MF_USED  0xffaa0055
#define MF_DMA   0xff00aa55
#define MF_FREE  0x0055ffaa
#define MF_INUSE 0x00ff55aa

/* Small buckets are carved out of kmalloced blocks of this size instead
 * of whole machine pages (see sd_gfporder == -1 below). */
#define PAGE_MALLOC 4096

#undef DEBUG

/* Private header placed immediately in front of every allocation: a
 * magic flag word, plus either the requested size (while the block is
 * in use) or the next free block (while it sits on a free list). */
struct mblk_header {
        unsigned long mb_flags;
        union {
                unsigned long vmb_size;
                struct mblk_header *vmb_next;
        } v;
};
#define mb_size v.vmb_size
#define mb_next v.vmb_next

/* Header at the start of each page (or PAGE_MALLOC block): links the
 * page into its bucket's lists and tracks the page's free blocks. */
struct page_header {
        unsigned long       ph_flags;
        struct page_header *ph_next;
        struct mblk_header *ph_free;
        unsigned char       ph_order;
        unsigned char       ph_dma;
        unsigned short      ph_nfree;
};

/* One size bucket: partially-free list, DMA partially-free list, full
 * list, block size, blocks per page, and the get_free_pages order
 * (-1 = allocate the backing store with kmalloc, not get_free_pages). */
struct size_descriptor {
        struct page_header *sd_firstfree;
        struct page_header *sd_dmafree;
        struct page_header *sd_used;
        int sd_size;
        int sd_blocks;

        int sd_gfporder;
};

/* Statistics reported by kmalloc_stats(). */
static unsigned long bytes_wanted, bytes_malloced, pages_malloced;
/*
 * These are more suited to the sizes that the kernel will claim.
 * Note the two main ones for 56 and 128 bytes (64 and 136), which
 * get the most use.  gfporder has special values:
 *  -1 = don't get a free page, but malloc a small page.
 */
static struct size_descriptor sizes[32] = {
        { NULL, NULL, NULL,     24, 170, -1 }, /* was 32 - shrink so that we have more */
#if 1
        { NULL, NULL, NULL,     48,  85, -1 }, /* was 64 - shrink so that we have more, however... */
#endif
        { NULL, NULL, NULL,     64,  63, -1 }, /* contains 56 byte mallocs */
#if 1
        { NULL, NULL, NULL,     80,  51, -1 }, /* contains 68 byte mallocs + room for expansion */
#endif
        { NULL, NULL, NULL,    136,  30, -1 }, /* was 128 - swallow up 128B */
        { NULL, NULL, NULL,    580,   7, -1 }, /* was 512 - swallow up 516B and allow for expansion */
        { NULL, NULL, NULL,    816,   5, -1 }, /* was 1024 - we get an extra block */
        { NULL, NULL, NULL,   1360,   3, -1 }, /* was 2048 - we get an extra block */
        { NULL, NULL, NULL,   4096,   7, 0 }, /* we need this one for mallocing 4k */
        { NULL, NULL, NULL,   4676,   7, 0 },
        { NULL, NULL, NULL,   8188,   4, 0 },
        { NULL, NULL, NULL,  16368,   2, 0 },
        { NULL, NULL, NULL,  32752,   1, 0 },
        { NULL, NULL, NULL,  65520,   1, 1 },
        { NULL, NULL, NULL, 131056,   1, 2 },
        { NULL, NULL, NULL, 262128,   1, 3 },
        { NULL, NULL, NULL,      0,   0, 0 },
};

static int num_sizes = 32;
165
 
166
static inline int get_order (size_t size)
167
{
168
        int order;
169
 
170
        for (order = 0; sizes[order].sd_size; order ++)
171
                if (size <= sizes[order].sd_size && sizes[order].sd_gfporder != -3)
172
                        return order;
173
        return -1;
174
}
175
 
176
/*
 * One-time boot initialisation of the bucket table.  Clears the page
 * lists and statistics, computes each bucket's gfp order and blocks-
 * per-page, then cross-checks the table.  Claims no memory itself and
 * returns start_mem unchanged; end_mem is unused.
 */
long kmalloc_init (long start_mem, long end_mem)
{
    int i, gfporder, errors = 0;
    /*
     * kmalloc_init is now a bit more intelligent.
     *
     * It now sets up the gfp orders.
     */
    /* With PAGE_MALLOC defined, start at order -1: buckets smaller than
     * PAGE_MALLOC are carved out of a kmalloced block, not whole pages. */
#ifndef PAGE_MALLOC
    gfporder = 0;
#else
    gfporder = -1;
#endif
    pages_malloced = bytes_wanted = bytes_malloced = 0;

    /* Pass 1: reset the lists and assign each bucket its gfp order and
     * block count.  Orders grow monotonically as sizes grow. */
    for (i = 0; i < num_sizes && sizes[i].sd_size; i++) {
        sizes[i].sd_firstfree = NULL;
        sizes[i].sd_dmafree = NULL;
        sizes[i].sd_used = NULL;
        /* Bump the order once the bucket no longer fits the current
         * page allocation unit. */
        if (gfporder >= 0 && sizes[i].sd_size > (PAGE_SIZE << gfporder))
            gfporder += 1;
#ifdef PAGE_MALLOC
        /* Leave order -1 (malloc-backed) until the bucket reaches
         * PAGE_MALLOC size, then switch to real page allocations. */
        if (gfporder < 0 && sizes[i].sd_size >= PAGE_MALLOC)
            gfporder += 1;
        if (gfporder < 0)
            sizes[i].sd_blocks = (PAGE_MALLOC - sizeof(struct page_header)) / sizes[i].sd_size;
        else
#endif
            sizes[i].sd_blocks = ((PAGE_SIZE << gfporder) - sizeof(struct page_header)) /
                                        sizes[i].sd_size;
        sizes[i].sd_gfporder = gfporder;
    }

    /* Pass 2: verify that sd_blocks blocks plus the page header really
     * fit in the chosen allocation unit for every bucket. */
    for (i = 0; i < num_sizes && sizes[i].sd_size; i++) {
#ifdef PAGE_MALLOC
        if (sizes[i].sd_gfporder < 0) {
            if ((sizes[i].sd_size * sizes[i].sd_blocks + sizeof (struct page_header))
                <= PAGE_MALLOC)
                continue;
        } else
#endif
        {
            if ((sizes[i].sd_size * sizes[i].sd_blocks + sizeof (struct page_header))
                <= (PAGE_SIZE << sizes[i].sd_gfporder))
                continue;
        }
        printk ("Cannot use order %d (size %d, blocks %d)\n", i, sizes[i].sd_size, sizes[i].sd_blocks);
        errors ++;
    }
    if (errors)
        panic ("This only happens when someone messes with kmalloc");

    return start_mem;
}
230
 
231
/*
232
 * kmalloc of any size.
233
 *
234
 * if size < PAGE_MALLOC, then when we get a PAGE_MALLOC size, we malloc a
235
 * PAGE_MALLOC block and override the malloc header with our own.
236
 */
237
/*
 * Allocate 'size' bytes.  'priority' is the GFP mask (GFP_KERNEL,
 * GFP_ATOMIC, GFP_BUFFER, ...), optionally ORed with GFP_DMA.  Returns
 * a pointer to the data area (just past the private mblk_header), or
 * NULL on failure / oversize requests.  Pair with kfree().
 */
void *kmalloc (size_t size, int priority)
{
    int dma_flag, order, i;
    unsigned long flags;
    struct page_header *ph, **php;
    struct mblk_header *mb;
    struct size_descriptor *sz;

#ifdef DEBUG
    printk (KERN_DEBUG "km: s %4d ", size);
#endif

    /* Find the first bucket large enough for the request plus its
     * private mblk_header. */
    {
        unsigned int realsize = size + sizeof (struct mblk_header);

        order = 0;
        sz = sizes;
        do {
            if (realsize <= sz->sd_size)
                break;
            order ++;
            sz ++;
            if (!sz->sd_size) {
                /* Ran off the end of the table: request too large. */
                printk ("\n" KERN_ERR "kmalloc of too large a block (%d bytes).\n", (int) size);
                return NULL;
            }
        } while (1);
    }
#ifdef DEBUG
    printk ("o %2d ", order);
#endif

    /* Split the DMA request bit from the allocation priority. */
    dma_flag = priority & GFP_DMA;
    priority &= GFP_LEVEL_MASK;

    /* Sanity check... */
    if (intr_count && priority != GFP_ATOMIC) {
        static int count = 0;
        if (++count < 5)
            printk ("\n" KERN_ERR "kmalloc called non-atomically from interrupt %p\n",
                __builtin_return_address(0));
        priority = GFP_ATOMIC;
    }

    /* Interrupts off while the bucket lists are manipulated. */
    save_flags_cli (flags);

    /* Select the DMA or normal partially-free page list. */
    php = dma_flag ? &sz->sd_dmafree : &sz->sd_firstfree;
again:
    ph = *php;
    if (!ph)
        goto no_free_page;
#ifdef DEBUG
    printk ("ph %p n %p f %p ", ph, ph->ph_next, ph->ph_free);
#endif

    if (ph->ph_flags != MF_INUSE)
        goto major_problem;

    /* Pop the first free block off this page. */
    if ((mb = ph->ph_free) != NULL) {
        if (mb->mb_flags != MF_FREE)
            goto major_problem_2;
        ph->ph_free = mb->mb_next;
        if (--ph->ph_nfree == 0) {
            /* Page is now full: move it onto the used list. */
#ifdef DEBUG
            printk ("nxp %p n %p f %p\n"KERN_DEBUG"    ", ph, ph->ph_next, ph->ph_free);
#endif
            *php = ph->ph_next;
            ph->ph_next = sz->sd_used;
            sz->sd_used = ph;
        }
        /* Mark the block used, remember the requested size for kfree,
         * and update the statistics. */
        mb->mb_flags = MF_USED;
        mb->mb_size = size;
        bytes_wanted += size;
        bytes_malloced += sz->sd_size;
        restore_flags (flags);
#ifdef DEBUG
        printk (" -> %p malloced\n", mb);
#endif
        return mb + 1; /* increments past header */
    } else {
        /* A page on the free list claims no free blocks: corrupt.
         * Unlink it and try the next page. */
        printk ("\n" KERN_CRIT
                "kmalloc: problem: page %p has null free pointer - "
                "discarding page (pc=%p)\n", ph,
                __builtin_return_address(0));
        if (ph != ph->ph_next)
            *php = ph->ph_next;
        else
            *php = NULL;
        goto again;
    }
no_free_page:
    restore_flags (flags);

    /* We need to get a new 4k page.  Whether we get a new page or malloc 4k depends on
     * the page size
     */
#ifdef PAGE_MALLOC
    if (sz->sd_gfporder < 0) { /* malloc it */
#ifdef DEBUG
        printk ("nsp:\n" KERN_DEBUG "  ");
#endif
        /* Recursive kmalloc of a PAGE_MALLOC-sized block to carve the
         * small bucket's blocks from. */
        mb = kmalloc (PAGE_MALLOC - sizeof (struct mblk_header), priority | dma_flag);
        /*
         * override malloc header with our own.  This means that we
         * destroy the size entry.  However, we change the flags entry
         * so that a free won't free it without the blocks inside it
         * are freed, and the data put back as it was.
         */
        if (mb)
            ph = (struct page_header *) (mb - 1);
        else
            ph = NULL;
#ifdef DEBUG
        printk (KERN_DEBUG);
#endif
    } else
#endif
    {
        unsigned long max_addr;

        /* DMA pages must lie below MAX_DMA_ADDRESS. */
        max_addr = dma_flag ? MAX_DMA_ADDRESS : ~0UL;
#ifdef DEBUG
        printk ("nlp:\n" KERN_DEBUG "  ");
#endif
        ph = (struct page_header *) __get_free_pages (priority, sz->sd_gfporder, max_addr);
        if (ph)
            pages_malloced += PAGE_SIZE;
    }

    if (!ph) {
        /* Out of memory: complain at most once every 10*HZ ticks. */
        static unsigned long last = 0;
        if (priority != GFP_BUFFER && (last + 10*HZ) < jiffies) {
            last = jiffies;
            printk ("\n" KERN_CRIT "kmalloc: couldn't get a free page.....\n");
        }
        return NULL;
    }
    /* Initialise the new page header... */
    ph->ph_flags = MF_INUSE;
    ph->ph_order = order;
    ph->ph_nfree = sz->sd_blocks;
    ph->ph_dma   = dma_flag;

    /* ...and chain every block in the page onto its free list. */
    for (i = sz->sd_blocks, mb = (struct mblk_header *)(ph + 1); i  > 1;
                                i --, mb = mb->mb_next) {
        mb->mb_flags = MF_FREE;
        mb->mb_next = (struct mblk_header *) (((unsigned long)mb) + sz->sd_size);
    }
    ph->ph_free = (struct mblk_header *)(ph + 1);
    mb->mb_flags = MF_FREE;
    mb->mb_next = NULL; /* last block terminates the chain */

    /* Interrupts were re-enabled while getting the page: mask again
     * before touching the lists (restored via restore_flags above once
     * the retry succeeds). */
    cli();
#ifdef DEBUG
    printk ("New page %p, next %p\n" KERN_DEBUG "                ", ph, *php);
#endif
    /* Link the new page in and retry the allocation from the top. */
    ph->ph_next = *php;
    *php = ph;
    goto again;

major_problem_2:
#ifdef DEBUG
    printk ("\n\nmb->flags = %08lX\n", mb->mb_flags);
#endif
    panic ("kmalloc: problem: block %p on freelist %p isn't free (pc=%p)\n",
        ph->ph_free, ph, __builtin_return_address(0));
major_problem:
    panic ("kmalloc: problem: page %p in freelist isn't real (pc=%p)\n",
        ph, __builtin_return_address(0));
}
406
 
407
void kfree (void *ptr)
408
{
409
    int order, size;
410
    unsigned long flags;
411
    struct page_header *ph;
412
    struct mblk_header *mb;
413
 
414
    mb = ((struct mblk_header *)ptr) - 1;
415
 
416
    if (mb->mb_flags != MF_USED) {
417
        printk (KERN_ERR "kfree of non-kmalloc'd memory: %p\n", ptr);
418
        return;
419
    }
420
    size = mb->mb_size;
421
    order = get_order (size + sizeof(struct mblk_header));
422
 
423
    if (order < 0) {
424
        printk (KERN_ERR "kfree of non-kmalloc'd memory: %p,"
425
                " size %d, order %d (pc=%p)\n", ptr, size, order, __builtin_return_address(0));
426
        return;
427
    }
428
 
429
#ifdef PAGE_MALLOC
430
    if (sizes[order].sd_gfporder < 0)
431
        ph = (struct page_header *) ((((unsigned long) mb) & ~(PAGE_MALLOC - 1)) +
432
                sizeof (struct page_header));
433
    else
434
#endif
435
        ph = (struct page_header *) (((unsigned long) mb) & PAGE_MASK);
436
#ifdef DEBUG
437
    printk (KERN_DEBUG "kfree: page starts at %p\n", ph);
438
#endif
439
 
440
    if (ph->ph_flags != MF_INUSE && ph->ph_order != order) {
441
        printk (KERN_ERR "kfree of non-kmalloc'd memory: %p,"
442
                " size %d, order %d (pc=%p)\n", ptr, size, order, __builtin_return_address(0));
443
        return;
444
    }
445
 
446
    mb->mb_flags = MF_FREE;
447
    save_flags (flags);
448
    cli ();
449
    bytes_wanted -= size;
450
    bytes_malloced -= sizes[order].sd_size;
451
    mb->mb_next = ph->ph_free;
452
    ph->ph_free = mb;
453
 
454
    if (++ph->ph_nfree == 1) {
455
        /*
456
         * Page went from full to one free block: put it on the free list.
457
         */
458
        struct page_header *pp;
459
 
460
        if (sizes[order].sd_used == ph)
461
            sizes[order].sd_used = ph->ph_next;
462
        else {
463
            for (pp = sizes[order].sd_used; pp != NULL && pp->ph_next != ph; pp = pp->ph_next);
464
 
465
            if (pp->ph_next == ph)
466
                pp->ph_next = ph->ph_next;
467
            else {
468
                printk (KERN_ERR "kfree: page %p not found on used list (pc=%p)\n", ph,
469
                        __builtin_return_address(0));
470
                restore_flags (flags);
471
                return;
472
            }
473
        }
474
 
475
        ph->ph_next = sizes[order].sd_firstfree;
476
        sizes[order].sd_firstfree = ph;
477
    }
478
 
479
    if (ph->ph_nfree == sizes[order].sd_blocks) {
480
        if (sizes[order].sd_firstfree == ph)
481
            sizes[order].sd_firstfree = ph->ph_next;
482
        else if (sizes[order].sd_dmafree == ph)
483
            sizes[order].sd_dmafree = ph->ph_next;
484
        else {
485
            struct page_header *pp;
486
 
487
            for (pp = sizes[order].sd_firstfree; pp != NULL && pp->ph_next != ph; pp = pp->ph_next);
488
 
489
            if (pp == NULL)
490
                for (pp = sizes[order].sd_dmafree; pp != NULL && pp->ph_next != ph; pp = pp->ph_next);
491
 
492
            if (pp)
493
                pp->ph_next = ph->ph_next;
494
            else
495
                printk (KERN_ERR "Oops.  Page %p not found on free list\n", ph);
496
        }
497
        restore_flags (flags);
498
 
499
#ifdef PAGE_MALLOC
500
        if (sizes[order].sd_gfporder < 0) {
501
            mb = (struct mblk_header *)ph;
502
            mb->mb_flags = MF_USED;
503
            mb->mb_size = PAGE_MALLOC - sizeof (struct mblk_header);
504
            kfree (mb + 1);
505
        } else
506
#endif
507
        {
508
            pages_malloced -= PAGE_SIZE;
509
            free_pages ((unsigned long)ph, sizes[order].sd_gfporder);
510
        }
511
    }
512
}
513
 
514
 
515
void kmalloc_stats (void)
516
{
517
    printk ("kmalloc usage: %ld bytes requested of malloc, %ld actually malloced, %ld bytes claimed\n", bytes_wanted,
518
                bytes_malloced, pages_malloced);
519
}
520
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.