OpenCores
URL https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

Subversion Repositories or1k_soc_on_altera_embedded_dev_kit

[/] [or1k_soc_on_altera_embedded_dev_kit/] [trunk/] [linux-2.6/] [linux-2.6.24/] [arch/] [or32/] [mm/] [init.c] - Blame information for rev 9

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 7 xianfeng
/*
2
 *  linux/arch/or32/mm/init.c
3
 *
4
 *  or32 version
5
 *    author(s): Matjaz Breskvar (phoenix@bsemi.com)
6
 *
7
 *  derived from cris, i386, m68k, ppc, sh ports.
8
 *
9
 *  changes:
10
 *  18. 11. 2003: Matjaz Breskvar (phoenix@bsemi.com)
11
 *    initial port to or32 architecture
12
 *
13
 *  22. 11. 2003: Matjaz Breskvar (phoenix@bsemi.com)
14
 *    cleanups, identical mapping for serial console
15
 */
16
 
17
#include <linux/signal.h>
18
#include <linux/sched.h>
19
#include <linux/kernel.h>
20
#include <linux/errno.h>
21
#include <linux/string.h>
22
#include <linux/types.h>
23
#include <linux/ptrace.h>
24
#include <linux/mman.h>
25
#include <linux/mm.h>
26
#include <linux/swap.h>
27
#include <linux/smp.h>
28
#include <linux/bootmem.h>
29
#include <linux/init.h>
30
#include <linux/delay.h>
31
#ifdef CONFIG_BLK_DEV_INITRD
32
#include <linux/blkdev.h>          /* for initrd_* */
33
#endif
34
 
35
#include <asm/system.h>
36
#include <asm/segment.h>
37
#include <asm/pgalloc.h>
38
#include <asm/pgtable.h>
39
#include <asm/dma.h>
40
#include <asm/io.h>
41
#include <asm/tlb.h>
42
#include <asm/mmu_context.h>
43
#include <asm/or32-hf.h>
44
#include <asm/kmap_types.h>
45
#include <asm/fixmap.h>
46
 
47
/* Set to 1 at the end of mem_init() below, i.e. once all low memory
 * has been handed to the page allocator.  NOTE(review): presumably
 * consulted by early-boot code elsewhere to decide between bootmem
 * and the buddy allocator — confirm against callers. */
int mem_init_done = 0;

/* Per-CPU state for the generic TLB-shootdown (mmu_gather) machinery. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
50
 
51
void show_mem(void)
52
{
53
 
54
        int i,free = 0,total = 0,cached = 0, reserved = 0, nonshared = 0;
55
        int shared = 0;
56
 
57
        printk("\nMem-info:\n");
58
        show_free_areas();
59
        printk("Free swap:       %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
60
        i = max_mapnr;
61
        while (i-- > 0)
62
        {
63
                total++;
64
                if (PageReserved(mem_map+i))
65
                        reserved++;
66
                else if (PageSwapCache(mem_map+i))
67
                        cached++;
68
                else if (!page_count(mem_map+i))
69
                        free++;
70
                else if (page_count(mem_map+i) == 1)
71
                        nonshared++;
72
                else
73
                        shared += page_count(mem_map+i) - 1;
74
        }
75
 
76
        printk("%d pages of RAM\n",total);
77
        printk("%d free pages\n",free);
78
        printk("%d reserved pages\n",reserved);
79
        printk("%d pages nonshared\n",nonshared);
80
        printk("%d pages shared\n",shared);
81
        printk("%d pages swap cached\n",cached);
82
}
83
 
84
/* Cached pte pointer and protection bits for the kmap fixmap area
 * (filled in by kmap_init() below). */
pte_t *kmap_pte;
pgprot_t kmap_prot;

/* Walk the kernel page tables and return the pte that maps 'vaddr'.
 * Note: two-level layout — pmd_offset() is applied directly to the
 * pgd entry. */
#define kmap_get_fixmap_pte(vaddr)                                      \
        pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

/*
 * Cache the pte of the first kmap fixmap slot and the default kmap
 * protection, so kmap users need not re-walk the page tables.
 */
static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
100
 
101
/*
 * Populate the page-table *structure* (pmd entries backed by freshly
 * allocated bootmem pte pages) for the kernel virtual range
 * [start, end) in pgd_base, without installing any final pte
 * mappings — those are set later (e.g. via set_fixmap()).
 *
 * 'end' may be 0: the loops then terminate on the PTRS_PER_*
 * bounds instead of the vaddr comparison.
 */
static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                /* two-level paging: the pgd entry doubles as the pmd */
                pmd = (pmd_t *)pgd;

                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        if (pmd_none(*pmd)) {
                                /* back this pmd slot with a zeroed pte page */
                                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
                                /* sanity: the walk must find the page we installed */
                                if (pte != pte_offset_kernel(pmd, 0))
                                        BUG();
                        }
                        vaddr += PMD_SIZE;
                }
                j = 0;  /* restart pmd index for subsequent pgd entries */
        }
}
129
 
130
/*
 * Size the memory zones and hand them to the core allocator via
 * free_area_init().  Page frames below MAX_DMA_ADDRESS go into
 * ZONE_DMA; any remaining low memory (up to max_low_pfn) goes into
 * ZONE_NORMAL.  All other zones stay empty.
 */
static void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES];
        unsigned long max_dma, low;

        memset(zones_size, 0, sizeof(zones_size));

        /* MAX_DMA_ADDRESS = PAGE_OFFSET + 0x1000000 */
        max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        low = max_low_pfn;

        /* %lu, not %ld: both values are unsigned long pfn counts */
        printk("max_low_pfn=%lu, max_dma=%lu\n", max_low_pfn, max_dma);

        if (low < max_dma)
                zones_size[ZONE_DMA] = low;
        else {
                zones_size[ZONE_DMA] = max_dma;
                zones_size[ZONE_NORMAL] = low - max_dma;
        }
        free_area_init(zones_size);
}
151
 
152
/*
 * Create an identity ("identical") mapping in swapper_pg_dir for the
 * physical range [start, start + size): each virtual address maps to
 * the equal physical address.  Used below to reach device/IO regions
 * (e.g. the UART) through their bus addresses.
 *
 * page_attrs carries caller-specific pte bits (e.g. _PAGE_CI, user
 * read/write); supervisor read/write, shared, dirty and exec bits
 * are always OR-ed in below.
 *
 * Note: the pte pages are fresh bootmem pages, so any previous
 * mapping covered by a pmd slot touched here is discarded.
 */
static void __init identical_mapping(unsigned long start, unsigned long size,
                                     unsigned long page_attrs)
{

        unsigned long vaddr, end;
        pgd_t *pgd, *pgd_base;
        int i, j, k;
        pmd_t *pmd;
        pte_t *pte, *pte_base;

        printk("Setting up identical mapping (0x%lx - 0x%lx)\n",
               start, start + size);

        page_attrs |= _PAGE_ALL | _PAGE_SRE | _PAGE_SWE |
                _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC;
        /*
         * This can be zero as well - no problem, in that case we exit
         * the loops anyway due to the PTRS_PER_* conditions.
         */
        end = start + size;

        pgd_base = swapper_pg_dir;
        i = __pgd_offset(start);
        pgd = pgd_base + i;

        for (; i < PTRS_PER_PGD; pgd++, i++) {
                vaddr = i*PGDIR_SIZE;
                if (end && (vaddr >= end))
                        break;
                /* two-level paging: the pgd entry doubles as the pmd */
                pmd = (pmd_t *)pgd;

                if (pmd != pmd_offset(pgd, 0))
                        BUG();
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
                        vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
                        if (end && (vaddr >= end))
                                break;

                        /* fresh zeroed pte page for this pmd slot */
                        pte_base = pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

                        for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
                                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
                                if (end && (vaddr >= end))
                                        break;
                                /* identity map: physical address == vaddr,
                                 * hence no __pa() here */
                                *pte = mk_pte_phys(vaddr, __pgprot(page_attrs));
                        }
                        set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));

                        /* sanity: the walk must find the page just installed */
                        if (pte_base != pte_offset_kernel(pmd, 0))
                          BUG();
                }
        }
}
205
 
206
/*
 * Set up the kernel page tables and switch TLB-miss handling to them.
 *
 * Steps:
 *  - clear swapper_pg_dir and point current_pgd at init_mm's pgd,
 *  - initialise the TLB (tlb_init() from tlb.c),
 *  - linearly map all low memory at PAGE_OFFSET with PAGE_KERNEL
 *    protections (optionally write-protecting most of the protected
 *    core when CONFIG_OR32_GUARD_PROTECTED_CORE is set),
 *  - identity-map device regions (UART and one other region),
 *  - size the memory zones and pre-build the fixmap page-table
 *    structure,
 *  - patch the DTLB/ITLB exception vectors to branch to the
 *    page-table based miss handlers, and initialise kmap.
 */
void __init paging_init(void)
{
        extern void tlb_init(void);

        unsigned long vaddr, end;
        pgd_t *pgd, *pgd_base;
        int i, j, k;
        pmd_t *pmd;
        pte_t *pte, *pte_base;

        printk("Setting up paging and PTEs.\n");

        /* clear out the init_mm.pgd that will contain the kernel's mappings */

        for(i = 0; i < PTRS_PER_PGD; i++)
                swapper_pg_dir[i] = __pgd(0);

        /* make sure the current pgd table points to something sane
         * (even if it is most probably not used until the next
         *  switch_mm)
         */
         current_pgd = init_mm.pgd;

         /* initialise the TLB (tlb.c) */
         tlb_init();

        /*
         * This can be zero as well - no problem, in that case we exit
         * the loops anyway due to the PTRS_PER_* conditions.
         */
        end = (unsigned long)__va(max_low_pfn*PAGE_SIZE);

        pgd_base = swapper_pg_dir;
        i = __pgd_offset(PAGE_OFFSET);
        pgd = pgd_base + i;

        for (; i < PTRS_PER_PGD; pgd++, i++) {
                vaddr = i*PGDIR_SIZE;
                if (end && (vaddr >= end))
                        break;
                /* two-level paging: the pgd entry doubles as the pmd */
                pmd = (pmd_t *)pgd;

                if (pmd != pmd_offset(pgd, 0))
                        BUG();
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
                        vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
                        if (end && (vaddr >= end))
                                break;

                        /* fresh zeroed pte page for this pmd slot */
                        pte_base = pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

                        for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
                                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
                                if (end && (vaddr >= end))
                                        break;
#ifdef CONFIG_OR32_GUARD_PROTECTED_CORE
                                {
                                        extern char _e_protected_core;
                                        unsigned long page_attrs, offset;

                                        offset = i*PGDIR_SIZE + j*PMD_SIZE;
                                        /* like PAGE_KERNEL but with the
                                         * supervisor-write bit (_PAGE_SWE)
                                         * deliberately omitted */
                                        page_attrs = _PAGE_ALL | _PAGE_SRE |
                                                _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC;

                                        /* make all but first and last page of .text and .rodata
                                         * sections write protected.
                                         */
                                        if ((vaddr > (PAGE_SIZE + offset)) &&
                                            ((vaddr + PAGE_SIZE) < ((unsigned long)&(_e_protected_core))))
                                                *pte = mk_pte_phys(__pa(vaddr), __pgprot(page_attrs));
                                        else
                                                *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
                                }
#else
                                *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
#endif /* CONFIG_OR32_GUARD_PROTECTED_CORE */
                        }
                        set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));

                        /* sanity: the walk must find the page just installed */
                        if (pte_base != pte_offset_kernel(pmd, 0))
                          BUG();
                }
        }

#ifdef CONFIG_OR32_GUARD_PROTECTED_CORE
        {
                extern char _e_protected_core;

                printk("write protecting ro sections (0x%lx - 0x%lx)\n",
                       PAGE_OFFSET + PAGE_SIZE, PAGE_MASK&((unsigned long)&(_e_protected_core)));
        }
#endif /* CONFIG_OR32_GUARD_PROTECTED_CORE */

        /* __PHX__: fixme,
         * - detect units via UPR,
         * - set up only apropriate mappings
         * - make oeth_probe not poke around if ethernet is not present in upr
         *   or make sure that it doesn't kill of the kernel when no oeth
         *   present
         */

        /* map the UART address space */
        identical_mapping(0x20000000, 0x2000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0x30000000, 0x2000, _PAGE_CI);
/*
        identical_mapping(0x80000000, 0x10000000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0x92000000, 0x2000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0xb8070000, 0x2000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0x97000000, 0x2000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0x99000000, 0x1000000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0x93000000, 0x2000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0xa6000000, 0x100000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0x1e50000, 0x150000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
*/
        zone_sizes_init();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, 0, pgd_base);


        /*
         * enable EA translations via PT mechanism
         */

        /* self modifing code ;) */
        {
          extern unsigned long dtlb_miss_handler;
          extern unsigned long itlb_miss_handler;

          unsigned long *dtlb_vector = __va(0x900);
          unsigned long *itlb_vector = __va(0xa00);

          /* store the handler displacement in words (>> 2) into each
           * exception vector slot.  NOTE(review): this assumes the
           * vector word is interpreted as a jump offset — confirm
           * against the low-level exception entry code. */
          printk("dtlb_miss_handler %p\n", &dtlb_miss_handler);
          *dtlb_vector = ((unsigned long)&dtlb_miss_handler -
                          (unsigned long)dtlb_vector) >> 2;

          printk("itlb_miss_handler %p\n", &itlb_miss_handler);
          *itlb_vector = ((unsigned long)&itlb_miss_handler -
                          (unsigned long)itlb_vector) >> 2;
        }
        kmap_init();
}
361
 
362
 
363
/* References to section boundaries */

/* Linker-provided symbols marking the kernel's section boundaries;
 * only their addresses are meaningful, never their char values. */
extern char _stext, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

/*unsigned long loops_per_usec;  Removed by JPB*/
370
/*
 * Release all bootmem-managed low memory to the buddy allocator
 * (recording the total in totalram_pages) and return how many
 * low-memory pages remain marked reserved.
 */
static int __init free_pages_init(void)
{
        int nr_reserved = 0;
        int pfn = 0;

        /* hand every low-memory page over to the page allocator */
        totalram_pages = free_all_bootmem();

        while (pfn < max_low_pfn) {
                /* count only pages still reserved for the kernel */
                if (PageReserved(mem_map + pfn))
                        nr_reserved++;
                pfn++;
        }

        return nr_reserved;
}
388
 
389
/* Derive max_mapnr and num_physpages from the highest low-memory pfn. */
static void __init set_max_mapnr_init(void)
{
        num_physpages = max_low_pfn;
        max_mapnr = num_physpages;
}
393
 
394
/*
 * Final memory-management bring-up: establish max_mapnr/high_memory,
 * clear the zero page, return bootmem to the page allocator and log
 * the resulting memory layout.  Sets mem_init_done = 1 on completion.
 */
void __init mem_init(void)
{
        int code_bytes, data_bytes, init_bytes;
        int nr_reserved;

        phx_warn("mem_map %p", mem_map);
        if (!mem_map)
                BUG();

        set_max_mapnr_init();

        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* clear the zero-page */
        phx_printk("empty_zero_page %p", empty_zero_page);
        memset((void*)empty_zero_page, 0, PAGE_SIZE);

        nr_reserved = free_pages_init();

        /* section sizes in bytes, from the linker symbols */
        code_bytes = (unsigned long) &_etext - (unsigned long) &_stext;
        data_bytes = (unsigned long) &_edata - (unsigned long) &_etext;
        init_bytes = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                max_mapnr << (PAGE_SHIFT-10),
                code_bytes >> 10,
                nr_reserved << (PAGE_SHIFT-10),
                data_bytes >> 10,
                init_bytes >> 10,
                (unsigned long) (0 << (PAGE_SHIFT-10))  /* no highmem */
               );

        mem_init_done = 1;
}
429
 
430
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the initrd image's pages, [start, end), to the page
 * allocator once the ramdisk contents are no longer needed.
 * Both bounds are page-aligned kernel virtual addresses.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
        /* %lu, not %ld: (end - start) is unsigned long */
        printk (KERN_INFO "Freeing initrd memory: %luk freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                struct page *page = virt_to_page(start);

                /* un-reserve, give the page a fresh refcount, then
                 * hand it back to the buddy allocator */
                ClearPageReserved(page);
                init_page_count(page);
                free_page(start);
                totalram_pages++;
        }
}
#endif
443
 
444
void free_initmem(void)
445
{
446
        unsigned long addr;
447
 
448
        addr = (unsigned long)(&__init_begin);
449
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
450
                ClearPageReserved(virt_to_page(addr));
451
                init_page_count(virt_to_page(addr));
452
                free_page(addr);
453
                totalram_pages++;
454
        }
455
        printk (KERN_INFO "Freeing unused kernel memory: %luk freed\n",
456
                ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
457
}
458
 
459
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 *
 * Walks swapper_pg_dir down to the pte for 'vaddr' and stores
 * pfn_pte(pfn, flags) there.  Every intermediate level must already
 * be populated (paging_init()/fixrange_init() did that); an empty
 * level is a kernel bug.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <pfn,flags> stored as-is, to permit clearing entries */
        set_pte(pte, pfn_pte(pfn, flags));
}
489
 
490
/*
 * Map one fixmap slot: install physical address 'phys' with the given
 * protection at the fixed virtual address for 'idx', then flush the
 * TLB so the new translation takes effect.
 */
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        unsigned long address;

        /* validate the index before deriving an address from it */
        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        address = __fix_to_virt(idx);

        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
        flush_tlb_all();
}
501
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.