OpenCores Subversion repository: or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk
File: or1k/trunk/rc203soc/sw/uClinux/mmnommu/page_alloc.c (rev 1765, last changed in rev 1634 by jcastillo)
/*
 *  linux/mm/page_alloc.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

/*
 * uClinux revisions for NO_MM
 * Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>,
 *                     The Silver Hammer Group, Ltd.
 * Copyright (C) 1999  D. Jeff Dionne <jeff@uclinux.org>,
 *                     Rt-Control, Inc.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/swapctl.h>
#include <linux/interrupt.h>

#include <asm/dma.h>
#include <asm/system.h> /* for cli()/sti() */
#include <asm/segment.h> /* for memcpy_to/fromfs */
#include <asm/bitops.h>
#include <asm/pgtable.h>

int nr_swap_pages = 0;
int nr_free_pages = 0;

extern struct wait_queue *buffer_wait;

/*
 * Free area management
 *
 * The free_area_list arrays point to the queue heads of the free areas
 * of different sizes
 */

#ifdef BIGALLOCS
/* SIMON: change of source file - to get mw working */
#define NR_MEM_LISTS 10
#else
#define NR_MEM_LISTS 7
#endif
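
/*
 * Illustrative note, not part of the original file: NR_MEM_LISTS bounds the
 * largest buddy block.  Assuming 4 KB pages, 7 lists allow allocations up to
 * 2^6 pages = 256 KB; the BIGALLOCS setting of 10 lists raises that to
 * 2^9 pages = 2 MB, which matters on MMU-less systems where a large
 * physically contiguous buffer cannot be assembled from scattered pages
 * by a virtual mapping.
 */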

/* The start of this MUST match the start of "struct page" */
struct free_area_struct {
        struct page *next;
        struct page *prev;
        unsigned int * map;
};

#define memory_head(x) ((struct page *)(x))
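
/*
 * Illustrative note, not part of the original file: memory_head() casts a
 * free_area_struct to a struct page so the list head can be linked and
 * unlinked exactly like a real page.  This is only safe because next and
 * prev are the first two members of both structures, which is what the
 * "MUST match" comment above is guarding.
 */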

static struct free_area_struct free_area[NR_MEM_LISTS];

static inline void init_mem_queue(struct free_area_struct * head)
{
        head->next = memory_head(head);
        head->prev = memory_head(head);
}

static inline void add_mem_queue(struct free_area_struct * head, struct page * entry)
{
        struct page * next = head->next;

        entry->prev = memory_head(head);
        entry->next = next;
        next->prev = entry;
        head->next = entry;
}

static inline void remove_mem_queue(struct page * entry)
{
        struct page * next = entry->next;
        struct page * prev = entry->prev;
        next->prev = prev;
        prev->next = next;
}

/*
 * Free_page() adds the page to the free lists. This is optimized for
 * fast normal cases (no error jumps taken normally).
 *
 * The way to optimize jumps for gcc-2.2.2 is to:
 *  - select the "normal" case and put it inside the if () { XXX }
 *  - no else-statements if you can avoid them
 *
 * With the above two rules, you get a straight-line execution path
 * for the normal case, giving better asm-code.
 *
 * free_page() may sleep since the page being freed may be a buffer
 * page or present in the swap cache. It will not sleep, however,
 * for a freshly allocated page (get_free_page()).
 */

/*
 * Buddy system. Hairy. You really aren't expected to understand this
 *
 * Hint: -mask = 1+~mask
 */
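
/*
 * Illustrative worked example of the hint above, not part of the original
 * file, assuming order = 2 on a 32-bit machine:
 *
 *   mask  = ~0UL << 2   = 0xfffffffc   (i.e. -(1 << order))
 *   -mask = 1 + ~mask   = 0x00000004   = 1 << order
 *
 * So "map_nr ^ -mask" in free_pages_ok() flips bit `order' of the page
 * number, which is exactly the buddy of a naturally aligned 2^order block.
 * Likewise "nr_free_pages -= mask" adds 1 << order, and the loop condition
 * "mask + (1 << (NR_MEM_LISTS-1))" becomes zero once mask has been shifted
 * up to the largest order, ending the coalescing walk.
 */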
static inline void free_pages_ok(unsigned long map_nr, unsigned long order)
{
        struct free_area_struct *area = free_area + order;
        unsigned long index = map_nr >> (1 + order);
        unsigned long mask = (~0UL) << order;
        unsigned long flags;

        save_flags(flags);
        cli();

#define list(x) (mem_map+(x))

        map_nr &= mask;
        nr_free_pages -= mask;
        while (mask + (1 << (NR_MEM_LISTS-1))) {
                if (!change_bit(index, area->map))
                        break;
                remove_mem_queue(list(map_nr ^ -mask));
                mask <<= 1;
                area++;
                index >>= 1;
                map_nr &= mask;
        }
        add_mem_queue(area, list(map_nr));

#undef list

        restore_flags(flags);
        if (!waitqueue_active(&buffer_wait))
                return;
        wake_up(&buffer_wait);
}

#ifdef DEBUG_FREE_PAGES

#undef __free_page
void __free_page_flf(struct page *page, char*file, int line, char*function)
{
        printk("Freeing page %p from %s @%s:%d\n", page, function, file, line);
        __free_page(page);
}

#undef free_pages
void free_pages_flf(unsigned long addr, unsigned long order, char*file, int line, char*function)
{
        printk("Freeing %lu byte page %lx from %s @%s:%d\n", 4096 << order, addr, function, file, line);
        free_pages(addr, order);
}

#undef __get_free_pages
unsigned long __get_free_pages_flf(int priority, unsigned long order, int dma, char * file, int line, char * function)
{
        printk("Allocating %d byte page from %s @%s:%d\n", 4096 << order, function, file, line);
        return __get_free_pages(priority, order, dma);
}

#undef get_free_page
unsigned long get_free_page_flf(int priority, char * file, int line, char * function)
{
        void * result = (void*)__get_free_pages_flf(priority, 0, 0, file, line, function);
        if (result)
                memset(result, 0, PAGE_SIZE);
        return (unsigned long) result;
}

#endif /* DEBUG_FREE_PAGES */
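
/*
 * Illustrative note, not part of the original file: the *_flf wrappers take
 * the caller's file, line and function so every allocation and free can be
 * logged.  Presumably a header elsewhere in this tree redefines the normal
 * entry points along the lines of
 *
 *   #define free_pages(addr, order) \
 *           free_pages_flf((addr), (order), __FILE__, __LINE__, __FUNCTION__)
 *
 * when DEBUG_FREE_PAGES is set; the exact macro definitions are not shown
 * in this file, so treat the form above as an illustration only.
 */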

void __free_page(struct page *page)
{
        if (!PageReserved(page) && atomic_dec_and_test(&page->count)) {
                unsigned long map_nr = page->map_nr;

                free_pages_ok(map_nr, 0);
        }
}

void free_pages(unsigned long addr, unsigned long order)
{
        unsigned long map_nr = MAP_NR(addr);

        if (map_nr < MAP_NR(high_memory)) {
                mem_map_t * map = mem_map + map_nr;
                if (PageReserved(map))
                        return;
                if (atomic_dec_and_test(&map->count)) {

                        free_pages_ok(map_nr, order);
                        return;
                }
        }
}

/*
 * Some ugly macros to speed up __get_free_pages()..
 */
#define MARK_USED(index, order, area) \
        change_bit((index) >> (1+(order)), (area)->map)
#define CAN_DMA(x) (PageDMA(x))
#define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
#define RMQUEUE(order, dma) \
do { struct free_area_struct * area = free_area+order; \
     unsigned long new_order = order; \
        do { struct page *prev = memory_head(area), *ret; \
                while (memory_head(area) != (ret = prev->next)) { \
                        if (!dma || CAN_DMA(ret)) { \
                                unsigned long map_nr = ret->map_nr; \
                                (prev->next = ret->next)->prev = prev; \
                                MARK_USED(map_nr, new_order, area); \
                                nr_free_pages -= 1 << order; \
                                EXPAND(ret, map_nr, order, new_order, area); \
                                restore_flags(flags); \
                                return ADDRESS(map_nr); \
                        } \
                        prev = ret; \
                } \
                new_order++; area++; \
        } while (new_order < NR_MEM_LISTS); \
} while (0)

#define EXPAND(map,index,low,high,area) \
do { unsigned long size = 1 << high; \
        while (high > low) { \
                area--; high--; size >>= 1; \
                add_mem_queue(area, map); \
                MARK_USED(index, high, area); \
                index += size; \
                map += size; \
        } \
        map->count = 1; \
        map->age = PAGE_INITIAL_AGE; \
} while (0)
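
/*
 * Illustrative worked example of the two macros above, not part of the
 * original file: suppose an order-1 (2-page) request finds the lowest
 * non-empty list at order 3.  RMQUEUE unlinks that 8-page block and calls
 * EXPAND with low = 1, high = 3.  EXPAND then gives pieces back from the
 * front of the block, one per loop pass:
 *
 *   pass 1: the lower 4 pages (order 2) go back on free_area[2]
 *   pass 2: the next 2 pages (order 1) go back on free_area[1]
 *
 * leaving the last 2 pages for the caller, with count = 1 and a fresh age.
 * Because EXPAND is a macro, its "index += size" advances RMQUEUE's map_nr,
 * so "return ADDRESS(map_nr)" returns exactly that final piece; this is
 * also why __get_free_pages() only reaches the code after RMQUEUE when the
 * allocation has failed.
 */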

unsigned long __get_free_pages(int priority, unsigned long order, int dma)
{
        unsigned long flags;
        int reserved_pages;

        if (order >= NR_MEM_LISTS)
                return 0;
        if (intr_count && priority != GFP_ATOMIC) {
                static int count = 0;
                if (++count < 5) {
                        printk("gfp called nonatomically from interrupt %p\n",
                                __builtin_return_address(0));
                        priority = GFP_ATOMIC;
                }
        }
        reserved_pages = 5;
#ifndef CONFIG_REDUCED_MEMORY
        if (priority != GFP_NFS)
                reserved_pages = min_free_pages;
        if ((priority == GFP_BUFFER || priority == GFP_IO) && reserved_pages >= 48)
                reserved_pages -= (12 + (reserved_pages>>3));
#endif /* !CONFIG_REDUCED_MEMORY */
        save_flags(flags);
repeat:
        cli();
        if ((priority==GFP_ATOMIC) || nr_free_pages > reserved_pages) {
                RMQUEUE(order, dma);
                restore_flags(flags);
#ifdef DEBUG
                printk("fragmentation preventing allocation, re-attempting to free\n");
#endif
        }
        restore_flags(flags);
        if (priority != GFP_BUFFER && try_to_free_page(priority, dma, 1))
                goto repeat;
        return 0;
}
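
/*
 * Illustrative usage sketch, not part of the original file and kept under
 * #if 0 so it is never compiled: how a caller would obtain and release a
 * two-page (order-1) buffer through this allocator.
 */
#if 0
static int example_alloc(void)
{
        /* Request 2^1 = 2 contiguous pages, no DMA constraint. */
        unsigned long buf = __get_free_pages(GFP_KERNEL, 1, 0);

        if (!buf)
                return -1;      /* failed even after try_to_free_page() */

        memset((void *) buf, 0, 2 * PAGE_SIZE);
        /* ... use the buffer ... */

        free_pages(buf, 1);     /* the order must match the allocation */
        return 0;
}
#endif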

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */

/*
 * That's as may be, but I added an explicit fragmentation percentage, just
 * to make it obvious. -kja
 */

/* totals held by do_mmap to compute memory wastage */
unsigned long realalloc, askedalloc;

void show_free_areas(void)
{
        unsigned long order, flags;
        unsigned long total = 0;
        unsigned long fragmented = 0;
        unsigned long slack;

        printk("Free pages:      %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10));
        save_flags(flags);
        cli();
        for (order=0 ; order < NR_MEM_LISTS; order++) {
                struct page * tmp;
                unsigned long nr = 0;
                for (tmp = free_area[order].next ; tmp != memory_head(free_area+order) ; tmp = tmp->next) {
                        nr ++;
                }
                total += nr * ((PAGE_SIZE>>10) << order);
                if ((nr > 1) && (order < (NR_MEM_LISTS-1)))
                        fragmented += (nr-1) * (1 << order);
                printk("%lu*%lukB ", nr, (PAGE_SIZE>>10) << order);
        }
        restore_flags(flags);
        fragmented *= 100;
        fragmented /= nr_free_pages;

        if (realalloc)
                slack = (realalloc-askedalloc) * 100 / realalloc;
        else
                slack = 0;

        printk("= %lukB, %%%lu frag, %%%lu slack)\n", total, fragmented, slack);

#ifdef SWAP_CACHE_INFO
        show_swap_cache_info();
#endif
}
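
/*
 * Illustrative worked example of the fragmentation figure above, not part
 * of the original file, assuming 4 KB pages and free lists holding 5
 * order-0 pages, 3 order-1 blocks and 1 order-2 block, so
 * nr_free_pages = 5 + 3*2 + 1*4 = 15.  Every block beyond the first on each
 * list below the top order counts as fragmented:
 *
 *   fragmented = (5-1)*1 + (3-1)*2 = 8  ->  8 * 100 / 15 = 53% frag
 *
 * The "slack" figure is unrelated to the buddy lists: it is the percentage
 * of memory handed out by do_mmap() beyond what callers actually asked for,
 * taken from the realalloc/askedalloc totals kept above.
 */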

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/*
 * set up the free-area data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
unsigned long free_area_init(unsigned long start_mem, unsigned long end_mem)
{
        mem_map_t * p;
        unsigned long mask = PAGE_MASK;
        int i;

        /*
         * select nr of pages we try to keep free for important stuff
         * with a minimum of 48 pages. This is totally arbitrary
         */
        i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
        if (i < 24)
                i = 24;
        i += 24;   /* The limit for buffer pages in __get_free_pages is
                    * decreased by 12+(i>>3) */
        min_free_pages = i;
        free_pages_low = i + (i>>1);
        free_pages_high = i + i;

        mem_map = (mem_map_t *) start_mem;
        p = mem_map + MAP_NR(end_mem);
        start_mem = LONG_ALIGN((unsigned long) p);
        memset(mem_map, 0, start_mem - (unsigned long) mem_map);
        do {
                --p;
                p->flags = (1 << PG_DMA) | (1 << PG_reserved);
                p->map_nr = p - mem_map;
        } while (p > mem_map);

        for (i = 0 ; i < NR_MEM_LISTS ; i++) {
                unsigned long bitmap_size;
                init_mem_queue(free_area+i);
                mask += mask;
                end_mem = (end_mem + ~mask) & mask;
                bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
                bitmap_size = (bitmap_size + 7) >> 3;
                bitmap_size = LONG_ALIGN(bitmap_size);
                free_area[i].map = (unsigned int *) start_mem;
                memset((void *) start_mem, 0, bitmap_size);
                start_mem += bitmap_size;
        }
        return start_mem;
}
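
/*
 * Illustrative arithmetic for the sizing above, not part of the original
 * file, assuming 16 MB of RAM, 4 KB pages (PAGE_SHIFT = 12) and
 * PAGE_OFFSET = 0:
 *
 *   i = 16 MB >> 19 = 32, raised by 24  ->  min_free_pages = 56,
 *   free_pages_low = 84, free_pages_high = 112
 *
 *   order-0 bitmap: 4096 pages -> 4096 bits -> 512 bytes; each higher
 *   order needs half as many bits, so the seven bitmaps together cost
 *   roughly 1 KB, carved out of start_mem right after mem_map.
 */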
