or1k_soc_on_altera_embedded_dev_kit/trunk/linux-2.6/linux-2.6.24/arch/x86/mm/pgtable_32.c (rev 17)
/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

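/*
 * Dump a summary of system memory to the kernel log: free areas, free
 * swap, and per-type page counts gathered by walking every page of
 * every online node.
 */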
void show_mem(void)
{
        int total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;
        unsigned long flags;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
                                touch_nmi_watchdog();
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);

        printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
        printk(KERN_INFO "%lu pages writeback\n",
                                        global_page_state(NR_WRITEBACK));
        printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
        printk(KERN_INFO "%lu pages slab\n",
                global_page_state(NR_SLAB_RECLAIMABLE) +
                global_page_state(NR_SLAB_UNRECLAIMABLE));
        printk(KERN_INFO "%lu pages pagetables\n",
                                        global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        if (pgprot_val(flags))
                set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
        else
                pte_clear(&init_mm, vaddr, pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));
        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

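/*
 * fixmaps counts how many fixmap entries have been installed so far;
 * __FIXADDR_TOP marks the top of the fixmap area and defaults to the
 * last page of the 32-bit address space. reserve_top_address() may
 * lower it, but only before the first fixmap entry is set up.
 */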
static int fixmaps;
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

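/*
 * Install the fixmap entry @idx: map its fixed virtual address to the
 * physical address @phys with protection @flags.
 */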
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
        fixmaps++;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
        BUG_ON(fixmaps > 0);
        printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
               (int)-reserve);
        __FIXADDR_TOP = -reserve - PAGE_SIZE;
        __VMALLOC_RESERVE += reserve;
}

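/* Allocate a zeroed pte page and return its kernel virtual address. */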
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

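/*
 * Allocate a zeroed pte page as a struct page; with CONFIG_HIGHPTE the
 * page may come from highmem.
 */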
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
        return pte;
}

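/* Slab constructor for pmd pages: every new pmd starts out cleared. */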
void pmd_ctor(struct kmem_cache *cache, void *pmd)
{
        memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

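/*
 * Link a pgd's struct page into pgd_list, using page->index as the next
 * pointer and page_private() as the pprev link. Caller holds pgd_lock.
 */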
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        page->index = (unsigned long)pgd_list;
        if (pgd_list)
                set_page_private(pgd_list, (unsigned long)&page->index);
        pgd_list = page;
        set_page_private(page, (unsigned long)&pgd_list);
}

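/* Unlink a pgd's struct page from pgd_list. Caller holds pgd_lock. */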
static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);
        next = (struct page *)page->index;
        pprev = (struct page **)page_private(page);
        *pprev = next;
        if (next)
                set_page_private(next, (unsigned long)pprev);
}

#if (PTRS_PER_PMD == 1)
/* Non-PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
        unsigned long flags;

        /* !PAE, no pagetable sharing */
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

        spin_lock_irqsave(&pgd_lock, flags);

        /* must happen under lock */
        clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                        swapper_pg_dir + USER_PTRS_PER_PGD,
                        KERNEL_PGD_PTRS);
        paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
                                __pa(swapper_pg_dir) >> PAGE_SHIFT,
                                USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}
#else  /* PTRS_PER_PMD > 1 */
/* PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
        /* PAE, kernel PMD may be shared */

        if (SHARED_KERNEL_PMD) {
                clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                                swapper_pg_dir + USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
        } else {
                unsigned long flags;

                memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
                spin_lock_irqsave(&pgd_lock, flags);
                pgd_list_add(pgd);
                spin_unlock_irqrestore(&pgd_lock, flags);
        }
}
#endif  /* PTRS_PER_PMD */

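/*
 * Destructor run when a pgd page is released from the quicklist: with an
 * unshared kernel pmd the pgd is still on pgd_list, so unlink it and
 * tell paravirt the page is no longer a pgd.
 */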
static void pgd_dtor(void *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        if (SHARED_KERNEL_PMD)
                return;

        paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

#define UNSHARED_PTRS_PER_PGD                           \
        (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

/* If we allocate a pmd for part of the kernel address space, then
   make sure it's initialized with the appropriate kernel mappings.
   Otherwise use a cached zeroed pmd.  */
static pmd_t *pmd_cache_alloc(int idx)
{
        pmd_t *pmd;

        if (idx >= USER_PTRS_PER_PGD) {
                pmd = (pmd_t *)__get_free_page(GFP_KERNEL);

                if (pmd)
                        memcpy(pmd,
                               (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
                               sizeof(pmd_t) * PTRS_PER_PMD);
        } else
                pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);

        return pmd;
}

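/* Free a pmd obtained from pmd_cache_alloc(), matching its allocation policy. */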
static void pmd_cache_free(pmd_t *pmd, int idx)
{
        if (idx >= USER_PTRS_PER_PGD)
                free_page((unsigned long)pmd);
        else
                kmem_cache_free(pmd_cache, pmd);
}

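/*
 * Allocate a pgd from quicklist 0 (pre-initialised by pgd_ctor). On PAE,
 * also populate each unshared pgd slot with its own pmd page; if any pmd
 * allocation fails, unwind and return NULL.
 */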
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        int i;
        pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

        if (PTRS_PER_PMD == 1 || !pgd)
                return pgd;

        for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
                pmd_t *pmd = pmd_cache_alloc(i);

                if (!pmd)
                        goto out_oom;

                paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
                set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }
        return pgd;

out_oom:
        for (i--; i >= 0; i--) {
                pgd_t pgdent = pgd[i];
                void* pmd = (void *)__va(pgd_val(pgdent)-1);
                paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
                pmd_cache_free(pmd, i);
        }
        quicklist_free(0, pgd_dtor, pgd);
        return NULL;
}

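/*
 * Free a pgd from pgd_alloc(): on PAE release each unshared pmd first,
 * then return the pgd page to the quicklist.
 */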
void pgd_free(pgd_t *pgd)
{
        int i;

        /* in the PAE case user pgd entries are overwritten before usage */
        if (PTRS_PER_PMD > 1)
                for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
                        pgd_t pgdent = pgd[i];
                        void* pmd = (void *)__va(pgd_val(pgdent)-1);
                        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
                        pmd_cache_free(pmd, i);
                }
        /* in the non-PAE case, free_pgtables() clears user pgd entries */
        quicklist_free(0, pgd_dtor, pgd);
}

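/*
 * Trim the pgd quicklist back toward 25 cached pages, freeing at most
 * 16 per call via pgd_dtor.
 */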
void check_pgt_cache(void)
{
        quicklist_trim(0, pgd_dtor, 25, 16);
}
