/*
 *  linux/mm/page_alloc.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/swapctl.h>
#include <linux/interrupt.h>

#include <asm/dma.h>
#include <asm/system.h> /* for cli()/sti() */
#include <asm/segment.h> /* for memcpy_to/fromfs */
#include <asm/bitops.h>
#include <asm/pgtable.h>

int nr_swap_pages = 0;
int nr_free_pages = 0;

extern struct wait_queue *buffer_wait;

/*
 * Free area management
 *
 * The free_area_list arrays point to the queue heads of the free areas
 * of different sizes
 */

#define NR_MEM_LISTS 6
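
/*
 * With NR_MEM_LISTS == 6 the buddy lists cover block orders 0..5, i.e.
 * 1, 2, 4, 8, 16 and 32 contiguous pages (4 kB up to 128 kB on an
 * architecture with 4 kB pages).
 */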

/* The start of this MUST match the start of "struct page" */
struct free_area_struct {
        struct page *next;
        struct page *prev;
        unsigned int * map;
};

#define memory_head(x) ((struct page *)(x))

static struct free_area_struct free_area[NR_MEM_LISTS];

static inline void init_mem_queue(struct free_area_struct * head)
{
        head->next = memory_head(head);
        head->prev = memory_head(head);
}

static inline void add_mem_queue(struct free_area_struct * head, struct page * entry)
{
        struct page * next = head->next;

        entry->prev = memory_head(head);
        entry->next = next;
        next->prev = entry;
        head->next = entry;
}

static inline void remove_mem_queue(struct page * entry)
{
        struct page * next = entry->next;
        struct page * prev = entry->prev;
        next->prev = prev;
        prev->next = next;
}

/*
 * Free_page() adds the page to the free lists. This is optimized for
 * fast normal cases (no error jumps taken normally).
 *
 * The way to optimize jumps for gcc-2.2.2 is to:
 *  - select the "normal" case and put it inside the if () { XXX }
 *  - no else-statements if you can avoid them
 *
 * With the above two rules, you get a straight-line execution path
 * for the normal case, giving better asm-code.
 *
 * free_page() may sleep since the page being freed may be a buffer
 * page or present in the swap cache. It will not sleep, however,
 * for a freshly allocated page (get_free_page()).
 */

/*
 * Buddy system. Hairy. You really aren't expected to understand this
 *
 * Hint: -mask = 1+~mask
 */
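
/*
 * Worked example of the hint (two's complement: -mask == 1 + ~mask).
 * Freeing an order-2 block: mask = (~0UL) << 2 = ...11111100, so
 * -mask == 1UL << 2.  In free_pages_ok() below,
 *   map_nr &= mask         rounds map_nr down to an order-2 boundary,
 *   map_nr ^ -mask         flips bit 2 and yields the buddy's map_nr,
 *   nr_free_pages -= mask  adds 1 << 2 to the free page count.
 * While the buddy is free too, the blocks merge: mask <<= 1 moves up one
 * order, and the loop ends when mask + (1 << (NR_MEM_LISTS-1)) == 0,
 * i.e. when the top order has been reached.
 */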
static inline void free_pages_ok(unsigned long map_nr, unsigned long order)
{
        struct free_area_struct *area = free_area + order;
        unsigned long index = map_nr >> (1 + order);
        unsigned long mask = (~0UL) << order;
        unsigned long flags;

        save_flags(flags);
        cli();

#define list(x) (mem_map+(x))

        map_nr &= mask;
        nr_free_pages -= mask;
        while (mask + (1 << (NR_MEM_LISTS-1))) {
                if (!change_bit(index, area->map))
                        break;
                remove_mem_queue(list(map_nr ^ -mask));
                mask <<= 1;
                area++;
                index >>= 1;
                map_nr &= mask;
        }
        add_mem_queue(area, list(map_nr));

#undef list

        restore_flags(flags);
        if (!waitqueue_active(&buffer_wait))
                return;
        wake_up(&buffer_wait);
}
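
/*
 * Coalescing example for free_pages_ok() above: freeing page 5 at order 0
 * toggles the bit for the (4,5) pair with change_bit(). If the bit was
 * set, exactly one page of the pair was free - here page 4 - so page 4 is
 * unlinked from the order-0 list and the two pages merge into an order-1
 * block at page 4. Merging continues upward until a buddy is found busy
 * or the top order is reached, and the resulting block is queued on that
 * order's free list. The final wake_up() lets anyone sleeping on
 * buffer_wait retry now that memory has been returned.
 */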

void __free_page(struct page *page)
{
        if (!PageReserved(page) && atomic_dec_and_test(&page->count)) {
                unsigned long map_nr = page->map_nr;
                delete_from_swap_cache(map_nr);
                free_pages_ok(map_nr, 0);
        }
}

void free_pages(unsigned long addr, unsigned long order)
{
        unsigned long map_nr = MAP_NR(addr);

        if (map_nr < MAP_NR(high_memory)) {
                mem_map_t * map = mem_map + map_nr;
                if (PageReserved(map))
                        return;
                if (atomic_dec_and_test(&map->count)) {
                        delete_from_swap_cache(map_nr);
                        free_pages_ok(map_nr, order);
                        return;
                }
        }
}

/*
 * Some ugly macros to speed up __get_free_pages()..
 */
#define MARK_USED(index, order, area) \
        change_bit((index) >> (1+(order)), (area)->map)
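
/*
 * MARK_USED keeps one bit per buddy pair at each order, toggled on every
 * allocation and every free. A set bit therefore means exactly one block
 * of the pair is free, which is what free_pages_ok() tests with
 * change_bit() when deciding whether two buddies can be merged.
 */
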
#define CAN_DMA(x) (PageDMA(x))
#define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
#define RMQUEUE(order, dma) \
do { struct free_area_struct * area = free_area+order; \
     unsigned long new_order = order; \
        do { struct page *prev = memory_head(area), *ret; \
                while (memory_head(area) != (ret = prev->next)) { \
                        if (!dma || CAN_DMA(ret)) { \
                                unsigned long map_nr = ret->map_nr; \
                                (prev->next = ret->next)->prev = prev; \
                                MARK_USED(map_nr, new_order, area); \
                                nr_free_pages -= 1 << order; \
                                EXPAND(ret, map_nr, order, new_order, area); \
                                restore_flags(flags); \
                                return ADDRESS(map_nr); \
                        } \
                        prev = ret; \
                } \
                new_order++; area++; \
        } while (new_order < NR_MEM_LISTS); \
} while (0)

#define EXPAND(map,index,low,high,area) \
do { unsigned long size = 1 << high; \
        while (high > low) { \
                area--; high--; size >>= 1; \
                add_mem_queue(area, map); \
                MARK_USED(index, high, area); \
                index += size; \
                map += size; \
        } \
        map->count = 1; \
        map->age = PAGE_INITIAL_AGE; \
} while (0)
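
/*
 * Allocation example: a request for an order-1 block (2 pages) when the
 * smallest block available is order 3 (8 pages starting at map_nr M).
 * RMQUEUE scans the free lists upward from order 1, finds the order-3
 * block, unlinks it and invokes EXPAND with low = 1, high = 3. EXPAND
 * gives the lower half back at each step: the order-2 block at M goes on
 * the order-2 free list, the order-1 block at M+4 goes on the order-1
 * list, and the final two pages at M+6 are what the caller gets (the
 * macro advances map_nr, so ADDRESS(map_nr) in RMQUEUE points at this
 * last piece).
 */

/*
 * Notes on __get_free_pages() below: GFP_ATOMIC requests skip the
 * free-page watermark test and never sleep; GFP_NFS keeps only a small
 * fixed reserve of 5 pages; GFP_BUFFER and GFP_IO have their reserve
 * reduced once it is at least 48 pages, and GFP_BUFFER never calls
 * try_to_free_page(). On success RMQUEUE() returns the block's address
 * directly from __get_free_pages(), so the "return 0" following it is
 * only reached when no suitable block was found.
 */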
 
unsigned long __get_free_pages(int priority, unsigned long order, int dma)
{
        unsigned long flags;
        int reserved_pages;

        if (order >= NR_MEM_LISTS)
                return 0;
        if (intr_count && priority != GFP_ATOMIC) {
                static int count = 0;
                if (++count < 5) {
                        printk("gfp called nonatomically from interrupt %p\n",
                                __builtin_return_address(0));
                        priority = GFP_ATOMIC;
                }
        }
        reserved_pages = 5;
        if (priority != GFP_NFS)
                reserved_pages = min_free_pages;
        if ((priority == GFP_BUFFER || priority == GFP_IO) && reserved_pages >= 48)
                reserved_pages -= (12 + (reserved_pages>>3));
        save_flags(flags);
repeat:
        cli();
        if ((priority==GFP_ATOMIC) || nr_free_pages > reserved_pages) {
                RMQUEUE(order, dma);
                restore_flags(flags);
                return 0;
        }
        restore_flags(flags);
        if (priority != GFP_BUFFER && try_to_free_page(priority, dma, 1))
                goto repeat;
        return 0;
}

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
void show_free_areas(void)
{
        unsigned long order, flags;
        unsigned long total = 0;

        printk("Free pages:      %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10));
        save_flags(flags);
        cli();
        for (order=0 ; order < NR_MEM_LISTS; order++) {
                struct page * tmp;
                unsigned long nr = 0;
                for (tmp = free_area[order].next ; tmp != memory_head(free_area+order) ; tmp = tmp->next) {
                        nr ++;
                }
                total += nr * ((PAGE_SIZE>>10) << order);
                printk("%lu*%lukB ", nr, (PAGE_SIZE>>10) << order);
        }
        restore_flags(flags);
        printk("= %lukB)\n", total);
#ifdef SWAP_CACHE_INFO
        show_swap_cache_info();
#endif
}

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/*
 * set up the free-area data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
unsigned long free_area_init(unsigned long start_mem, unsigned long end_mem)
{
        mem_map_t * p;
        unsigned long mask = PAGE_MASK;
        int i;

        /*
         * select nr of pages we try to keep free for important stuff
         * with a minimum of 48 pages. This is totally arbitrary
         */
        i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
        if (i < 24)
                i = 24;
        i += 24;   /* The limit for buffer pages in __get_free_pages is
                    * decreased by 12+(i>>3) */
        min_free_pages = i;
        free_pages_low = i + (i>>1);
        free_pages_high = i + i;
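
        /*
         * Example, assuming 4 kB pages: with 16 MB of memory the shift by
         * (PAGE_SHIFT+7) gives i = 32 (one page kept free per 128 pages);
         * 32 is already above the floor of 24, so adding the extra 24
         * gives min_free_pages = 56, free_pages_low = 84 and
         * free_pages_high = 112.
         */
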
        start_mem = init_swap_cache(start_mem, end_mem);
        mem_map = (mem_map_t *) start_mem;
        p = mem_map + MAP_NR(end_mem);
        start_mem = LONG_ALIGN((unsigned long) p);
        memset(mem_map, 0, start_mem - (unsigned long) mem_map);
        do {
                --p;
                p->flags = (1 << PG_DMA) | (1 << PG_reserved);
                p->map_nr = p - mem_map;
        } while (p > mem_map);

        for (i = 0 ; i < NR_MEM_LISTS ; i++) {
                unsigned long bitmap_size;
                init_mem_queue(free_area+i);
                mask += mask;
                end_mem = (end_mem + ~mask) & mask;
                bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
                bitmap_size = (bitmap_size + 7) >> 3;
                bitmap_size = LONG_ALIGN(bitmap_size);
                free_area[i].map = (unsigned int *) start_mem;
                memset((void *) start_mem, 0, bitmap_size);
                start_mem += bitmap_size;
        }
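
        /*
         * Bitmap sizing example, again with 4 kB pages and 16 MB of memory
         * (4096 pages): the order-0 map needs 4096 bits, rounded up to 512
         * bytes, while the order-5 map covers 128 blocks of 32 pages and
         * needs only 16 bytes. All the maps are carved out of start_mem
         * directly after the mem_map array.
         */
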
        return start_mem;
}

/*
 * The tests may look silly, but they essentially make sure that
 * no other process did a swap-in on us just as we were waiting.
 *
 * Also, don't bother to add to the swap cache if this page-in
 * was due to a write access.
 */
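
/*
 * Both __get_free_page(GFP_KERNEL) and read_swap_page() can block, which
 * is why the page table entry is compared with the original swap entry
 * twice: once after the allocation and again after the page has been read
 * in. If it no longer matches, another process has already serviced the
 * fault and the freshly allocated page is simply released.
 */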
void swap_in(struct task_struct * tsk, struct vm_area_struct * vma,
        pte_t * page_table, unsigned long entry, int write_access)
{
        unsigned long page = __get_free_page(GFP_KERNEL);

        if (pte_val(*page_table) != entry) {
                if (page)
                        free_page(page);
                return;
        }
        if (!page) {
                printk("swap_in:");
                set_pte(page_table, BAD_PAGE);
                swap_free(entry);
                oom(tsk);
                return;
        }
        read_swap_page(entry, (char *) page);
        if (pte_val(*page_table) != entry) {
                free_page(page);
                return;
        }
        vma->vm_mm->rss++;
        tsk->maj_flt++;

        /* Give the physical reallocated page a bigger start */
        if (vma->vm_mm->rss < (MAP_NR(high_memory) >> 2))
                mem_map[MAP_NR(page)].age = (PAGE_INITIAL_AGE + PAGE_ADVANCE);

        if (!write_access && add_to_swap_cache(MAP_NR(page), entry)) {
                /* keep swap page allocated for the moment (swap cache) */
                set_pte(page_table, mk_pte(page, vma->vm_page_prot));
                return;
        }
        set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
        swap_free(entry);
        return;
}