OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [arch/] [or32/] [mm/] [init.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
/*
 *  linux/arch/or32/mm/init.c
 *
 *  or32 version
 *    author(s): Matjaz Breskvar (phoenix@opencores.org)
 *
 *  derived from cris, i386, m68k, ppc, sh ports.
 *
 *  changes:
 *  18. 11. 2003: Matjaz Breskvar (phoenix@opencores.org)
 *    initial port to or32 architecture
 *
 *  22. 11. 2003: Matjaz Breskvar (phoenix@opencores.org)
 *    cleanups, identical mapping for serial console
 */
17
#include <linux/config.h>
18
#include <linux/signal.h>
19
#include <linux/sched.h>
20
#include <linux/kernel.h>
21
#include <linux/errno.h>
22
#include <linux/string.h>
23
#include <linux/types.h>
24
#include <linux/ptrace.h>
25
#include <linux/mman.h>
26
#include <linux/mm.h>
27
#include <linux/swap.h>
28
#include <linux/smp.h>
29
#include <linux/bootmem.h>
30
#include <linux/init.h>
31
#include <linux/delay.h>
32
#ifdef CONFIG_BLK_DEV_INITRD
33
#include <linux/blk.h>          /* for initrd_* */
34
#endif
35
 
36
#include <asm/system.h>
37
#include <asm/segment.h>
38
#include <asm/pgalloc.h>
39
#include <asm/pgtable.h>
40
#include <asm/dma.h>
41
#include <asm/io.h>
42
#include <asm/mmu_context.h>
43
 
44
/* MMU stuff */
45
static unsigned long totalram_pages;
46
struct pgtable_cache_struct quicklists;  /* see asm/pgalloc.h */
47
 
48
int do_check_pgt_cache(int low, int high)
49
{
50
        int freed = 0;
51
        if(pgtable_cache_size > high) {
52
                do {
53
                        if (pgd_quicklist) {
54
                                free_pgd_slow(get_pgd_fast());
55
                                freed++;
56
                        }
57
#if 0
58
                        if (pmd_quicklist) {
59
                                pmd_free_slow(pmd_alloc_one_fast(NULL, 0));
60
                                freed++;
61
                        }
62
#endif
63
                        if (pte_quicklist) {
64
                                pte_free_slow(pte_alloc_one_fast(NULL, 0));
65
                                freed++;
66
                        }
67
                } while(pgtable_cache_size > low);
68
        }
69
        return freed;
70
}
71
 
72
 
73
 
74
 
75
void show_mem(void)
76
{
77
 
78
        int i,free = 0,total = 0,cached = 0, reserved = 0, nonshared = 0;
79
        int shared = 0;
80
 
81
        printk("\nMem-info:\n");
82
        show_free_areas();
83
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
84
        i = max_mapnr;
85
        while (i-- > 0)
86
        {
87
                total++;
88
                if (PageReserved(mem_map+i))
89
                        reserved++;
90
                else if (PageSwapCache(mem_map+i))
91
                        cached++;
92
                else if (!page_count(mem_map+i))
93
                        free++;
94
                else if (page_count(mem_map+i) == 1)
95
                        nonshared++;
96
                else
97
                        shared += page_count(mem_map+i) - 1;
98
        }
99
 
100
        printk("%d pages of RAM\n",total);
101
        printk("%d free pages\n",free);
102
        printk("%d reserved pages\n",reserved);
103
        printk("%d pages nonshared\n",nonshared);
104
        printk("%d pages shared\n",shared);
105
        printk("%d pages swap cached\n",cached);
106
        printk("%ld pages in page table cache\n",pgtable_cache_size);
107
        show_buffers();
108
}
109
 
110
 
111
/*
 * Build the page-table structure (pmd/pte pages) covering the virtual
 * range [start, end) under pgd_base.  Only the tables are allocated;
 * the actual mappings are installed later (e.g. by set_fixmap()).
 */
static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr = start;
        int i = __pgd_offset(vaddr);
        int j = __pmd_offset(vaddr);

        for (pgd = pgd_base + i; i < PTRS_PER_PGD && vaddr != end; pgd++, i++) {
                /* two-level on or32: the pgd entry doubles as the pmd */
                pmd = (pmd_t *)pgd;

                for (; j < PTRS_PER_PMD && vaddr != end; pmd++, j++) {
                        if (pmd_none(*pmd)) {
                                /* allocate a zeroed pte page from bootmem */
                                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
                                if (pte != pte_offset(pmd, 0))
                                        BUG();
                        }
                        vaddr += PMD_SIZE;
                }
                /* after the first pgd, pmd scan restarts at index 0 */
                j = 0;
        }
}
139
 
140
 
141
/*
 * Compute the per-zone page counts (DMA vs NORMAL split at
 * MAX_DMA_ADDRESS) and hand them to free_area_init().
 *
 * Fix: max_dma/low were 'unsigned int' while max_low_pfn is
 * 'unsigned long'; use unsigned long throughout to avoid truncating
 * page-frame numbers.
 */
static void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
        unsigned long max_dma, low;

        /* first pfn above the DMA-capable window */
        max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        low = max_low_pfn;

        if (low < max_dma) {
                /* all memory fits inside the DMA zone */
                zones_size[ZONE_DMA] = low;
        } else {
                zones_size[ZONE_DMA] = max_dma;
                zones_size[ZONE_NORMAL] = low - max_dma;
        }
        free_area_init(zones_size);
}
157
 
158
/*
 * Install an identity (virt == phys) mapping for [start, start+size)
 * into swapper_pg_dir, used e.g. for memory-mapped device windows
 * (serial console, ethernet).  'page_attrs' supplies extra PTE flags
 * (cache-inhibit, user access); kernel read/write/exec attributes are
 * OR-ed in unconditionally below.
 *
 * Fix: printk used %x for 'unsigned long' arguments (format-specifier
 * mismatch); changed to %lx.
 */
static void __init identical_mapping(unsigned long start, unsigned long size,
                                     unsigned long page_attrs)
{
        unsigned long vaddr, end;
        pgd_t *pgd, *pgd_base;
        int i, j, k;
        pmd_t *pmd;
        pte_t *pte, *pte_base;

        printk("Setting up identical mapping (0x%lx - 0x%lx)\n",
               start, start + size);

        page_attrs |= _PAGE_ALL | _PAGE_SRE | _PAGE_SWE |
                _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC;
        /*
         * This can be zero as well - no problem, in that case we exit
         * the loops anyway due to the PTRS_PER_* conditions.
         */
        end = start + size;

        pgd_base = swapper_pg_dir;
        i = __pgd_offset(start);
        pgd = pgd_base + i;

        for (; i < PTRS_PER_PGD; pgd++, i++) {
                vaddr = i*PGDIR_SIZE;
                if (end && (vaddr >= end))
                        break;
                /* two-level on or32: the pgd entry doubles as the pmd */
                pmd = (pmd_t *)pgd;

                if (pmd != pmd_offset(pgd, 0))
                        BUG();
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
                        vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
                        if (end && (vaddr >= end))
                                break;

                        pte_base = pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

                        for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
                                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
                                if (end && (vaddr >= end))
                                        break;
                                /* identity map: physical address == vaddr */
                                *pte = mk_pte_phys(vaddr, __pgprot(page_attrs));
                        }
                        set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));

                        if (pte_base != pte_offset(pmd, 0))
                                BUG();
                }
        }
}
211
 
212
/*
 * Main paging setup for or32: clears swapper_pg_dir, initialises the
 * TLB, builds the kernel linear mapping for all low memory, installs
 * identity mappings for device windows, sets up the memory zones, and
 * patches the DTLB/ITLB miss vectors to jump to the handlers.
 *
 * Fix: the protected-core printk used %x for 'unsigned long' arguments
 * (format-specifier mismatch); changed to %lx.
 */
void __init paging_init(void)
{
        extern void tlb_init(void);

        unsigned long vaddr, end;
        pgd_t *pgd, *pgd_base;
        int i, j, k;
        pmd_t *pmd;
        pte_t *pte, *pte_base;

        printk("Setting up paging and PTEs.\n");

        /* clear out the init_mm.pgd that will contain the kernel's mappings */
        for(i = 0; i < PTRS_PER_PGD; i++)
                swapper_pg_dir[i] = __pgd(0);

        /* make sure the current pgd table points to something sane
         * (even if it is most probably not used until the next
         *  switch_mm)
         */
         current_pgd = init_mm.pgd;

         /* initialise the TLB (tlb.c) */
         tlb_init();

        /*
         * This can be zero as well - no problem, in that case we exit
         * the loops anyway due to the PTRS_PER_* conditions.
         */
        end = (unsigned long)__va(max_low_pfn*PAGE_SIZE);

        pgd_base = swapper_pg_dir;
        i = __pgd_offset(PAGE_OFFSET);
        pgd = pgd_base + i;

        for (; i < PTRS_PER_PGD; pgd++, i++) {
                vaddr = i*PGDIR_SIZE;
                if (end && (vaddr >= end))
                        break;
                /* two-level on or32: the pgd entry doubles as the pmd */
                pmd = (pmd_t *)pgd;

                if (pmd != pmd_offset(pgd, 0))
                        BUG();
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
                        vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
                        if (end && (vaddr >= end))
                                break;

                        pte_base = pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

                        for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
                                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
                                if (end && (vaddr >= end))
                                        break;
#ifdef CONFIG_OR32_GUARD_PROTECTED_CORE
                                {
                                        extern char _e_protected_core;
                                        unsigned long page_attrs, offset;

                                        offset = i*PGDIR_SIZE + j*PMD_SIZE;
                                        /* kernel attrs without _PAGE_SWE: read-only for the kernel */
                                        page_attrs = _PAGE_ALL | _PAGE_SRE |
                                                _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC;

                                        /* make all but first and last page of .text and .rodata
                                         * sections write protected.
                                         */
                                        if ((vaddr > (PAGE_SIZE + offset)) &&
                                            ((vaddr + PAGE_SIZE) < ((unsigned long)&(_e_protected_core))))
                                                *pte = mk_pte_phys(__pa(vaddr), __pgprot(page_attrs));
                                        else
                                                *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
                                }
#else
                                *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
#endif /* CONFIG_OR32_GUARD_PROTECTED_CORE */
                        }
                        set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));

                        if (pte_base != pte_offset(pmd, 0))
                          BUG();
                }
        }

#ifdef CONFIG_OR32_GUARD_PROTECTED_CORE
        {
                extern char _e_protected_core;

                /* %lx: both arguments are unsigned long */
                printk("write protecting ro sections (0x%lx - 0x%lx)\n",
                       PAGE_OFFSET + PAGE_SIZE, PAGE_MASK&((unsigned long)&(_e_protected_core)));
        }
#endif /* CONFIG_OR32_GUARD_PROTECTED_CORE */

        /* __PHX__: fixme,
         * - detect units via UPR,
         * - set up only apropriate mappings
         * - make oeth_probe not poke around if ethernet is not present in upr
         *   or make sure that it doesn't kill of the kernel when no oeth
         *   present
         */

        /* map the UART address space */
        identical_mapping(0x90000000, 0x2000, _PAGE_CI);
        identical_mapping(0x92000000, 0x2000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);

        zone_sizes_init();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        /*
          __PHX__: clean it up, remove unneded function
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, 0, pgd_base);
        */


        /*
         * enable EA translations via PT mechanism
         */

        /* self modifing code ;) */
        {
          extern unsigned long dtlb_miss_handler;
          extern unsigned long itlb_miss_handler;

          unsigned long *dtlb_vector = __va(0x900);
          unsigned long *itlb_vector = __va(0xa00);

          /* patch each vector with a pc-relative jump to its handler;
           * the >> 2 converts the byte offset to an instruction offset
           */
          printk("dtlb_miss_handler %p\n", &dtlb_miss_handler);
          *dtlb_vector = ((unsigned long)&dtlb_miss_handler -
                          (unsigned long)dtlb_vector) >> 2;

          printk("itlb_miss_handler %p\n", &itlb_miss_handler);
          *itlb_vector = ((unsigned long)&itlb_miss_handler -
                          (unsigned long)itlb_vector) >> 2;
        }
}
352
 
353
 
354
/* References to section boundaries */
355
 
356
extern char _stext, _etext, _edata, __bss_start, _end;
357
extern char __init_begin, __init_end;
358
 
359
unsigned long loops_per_usec;
360
 
361
/*
 * Hand all bootmem pages over to the buddy allocator (accounting them
 * in totalram_pages) and return the number of low-memory pages that
 * remain reserved.
 */
static int __init free_pages_init(void)
{
        int reservedpages = 0;
        int pfn;

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        /* count only the RAM pages still marked reserved */
        for (pfn = 0; pfn < max_low_pfn; pfn++) {
                if (PageReserved(mem_map + pfn))
                        reservedpages++;
        }

        return reservedpages;
}
379
 
380
/* Initialise the global page-count variables from max_low_pfn
 * (no highmem on or32, so all three are the same value).
 */
static void __init set_max_mapnr_init(void)
{
        num_physpages = max_low_pfn;
        num_mappedpages = num_physpages;
        max_mapnr = num_physpages;
}
384
 
385
/*
 * Final memory initialisation: set page counters, establish
 * high_memory, clear the zero page, release bootmem to the buddy
 * allocator, and print the memory summary banner.
 */
void __init mem_init(void)
{
        int codesize, datasize, initsize;
        int reservedpages;

        phx_printk("mem_map %p", mem_map);
        if (!mem_map)
                BUG();

        set_max_mapnr_init();

        /* first virtual address past the directly-mapped low memory */
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* clear the zero-page */
        phx_printk("empty_zero_page %p", empty_zero_page);
        memset((void*)empty_zero_page, 0, PAGE_SIZE);

        reservedpages = free_pages_init();

        /* section sizes from the linker-provided boundary symbols */
        codesize = (unsigned long) &_etext - (unsigned long) &_stext;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                max_mapnr << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (0 << (PAGE_SHIFT-10))
               );
}
419
 
420
#ifdef CONFIG_BLK_DEV_INITRD
421
void free_initrd_mem(unsigned long start, unsigned long end)
422
{
423
        printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
424
 
425
        for (; start < end; start += PAGE_SIZE) {
426
                ClearPageReserved(virt_to_page(start));
427
                set_page_count(virt_to_page(start), 1);
428
                free_page(start);
429
                totalram_pages++;
430
        }
431
}
432
#endif
433
 
434
void free_initmem(void)
435
{
436
        unsigned long addr;
437
 
438
        addr = (unsigned long)(&__init_begin);
439
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
440
                ClearPageReserved(virt_to_page(addr));
441
                set_page_count(virt_to_page(addr), 1);
442
                free_page(addr);
443
                totalram_pages++;
444
        }
445
        printk (KERN_INFO "Freeing unused kernel memory: %luk freed\n",
446
                ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
447
}
448
 
449
 
450
void si_meminfo(struct sysinfo *val)
451
{
452
        val->totalram = totalram_pages;
453
        val->sharedram = 0;
454
        val->freeram = nr_free_pages();
455
        val->bufferram = atomic_read(&buffermem_pages);
456
        val->totalhigh = 0;
457
        val->freehigh = 0;
458
        val->mem_unit = PAGE_SIZE;
459
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.