OpenCores
URL https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

Subversion Repositories or1k_soc_on_altera_embedded_dev_kit

[/] [or1k_soc_on_altera_embedded_dev_kit/] [trunk/] [linux-2.6/] [linux-2.6.24/] [arch/] [or32/] [mm/] [init.c] - Blame information for rev 7

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 7 xianfeng
/*
2
 *  linux/arch/or32/mm/init.c
3
 *
4
 *  or32 version
5
 *    author(s): Matjaz Breskvar (phoenix@bsemi.com)
6
 *
7
 *  derived from cris, i386, m68k, ppc, sh ports.
8
 *
9
 *  changes:
10
 *  18. 11. 2003: Matjaz Breskvar (phoenix@bsemi.com)
11
 *    initial port to or32 architecture
12
 *
13
 *  22. 11. 2003: Matjaz Breskvar (phoenix@bsemi.com)
14
 *    cleanups, identical mapping for serial console
15
 */
16
 
17
#include <linux/signal.h>
18
#include <linux/sched.h>
19
#include <linux/kernel.h>
20
#include <linux/errno.h>
21
#include <linux/string.h>
22
#include <linux/types.h>
23
#include <linux/ptrace.h>
24
#include <linux/mman.h>
25
#include <linux/mm.h>
26
#include <linux/swap.h>
27
#include <linux/smp.h>
28
#include <linux/bootmem.h>
29
#include <linux/init.h>
30
#include <linux/delay.h>
31
#ifdef CONFIG_BLK_DEV_INITRD
32
#include <linux/blkdev.h>          /* for initrd_* */
33
#endif
34
 
35
#include <asm/system.h>
36
#include <asm/segment.h>
37
#include <asm/pgalloc.h>
38
#include <asm/pgtable.h>
39
#include <asm/dma.h>
40
#include <asm/io.h>
41
#include <asm/tlb.h>
42
#include <asm/mmu_context.h>
43
#include <asm/or32-hf.h>
44
#include <asm/kmap_types.h>
45
#include <asm/fixmap.h>
46
 
47
/* Non-zero once mem_init() has finished; later code can then rely on
 * the page allocator being usable. */
int mem_init_done = 0;

/* Per-CPU mmu_gather state used by the TLB unmap/shootdown code (asm/tlb.h). */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
50
 
51
void show_mem(void)
52
{
53
 
54
        int i,free = 0,total = 0,cached = 0, reserved = 0, nonshared = 0;
55
        int shared = 0;
56
 
57
        printk("\nMem-info:\n");
58
        show_free_areas();
59
        printk("Free swap:       %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
60
        i = max_mapnr;
61
        while (i-- > 0)
62
        {
63
                total++;
64
                if (PageReserved(mem_map+i))
65
                        reserved++;
66
                else if (PageSwapCache(mem_map+i))
67
                        cached++;
68
                else if (!page_count(mem_map+i))
69
                        free++;
70
                else if (page_count(mem_map+i) == 1)
71
                        nonshared++;
72
                else
73
                        shared += page_count(mem_map+i) - 1;
74
        }
75
 
76
        printk("%d pages of RAM\n",total);
77
        printk("%d free pages\n",free);
78
        printk("%d reserved pages\n",reserved);
79
        printk("%d pages nonshared\n",nonshared);
80
        printk("%d pages shared\n",shared);
81
        printk("%d pages swap cached\n",cached);
82
}
83
 
84
/* Cached pte of the first kmap fixmap slot; initialised by kmap_init(). */
pte_t *kmap_pte;
/* Protection bits used for kmap mappings (PAGE_KERNEL after kmap_init()). */
pgprot_t kmap_prot;

/* Walk the kernel page tables and return the pte that maps 'vaddr'. */
#define kmap_get_fixmap_pte(vaddr)                                      \
        pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
89
 
90
/*
 * Cache the pte backing the first kmap fixmap slot and select the
 * page protection used for subsequent kmap mappings.
 */
static void __init kmap_init(void)
{
        unsigned long first_slot = __fix_to_virt(FIX_KMAP_BEGIN);

        /* cache the pte of the first kmap slot */
        kmap_pte = kmap_get_fixmap_pte(first_slot);
        kmap_prot = PAGE_KERNEL;
}
100
 
101
/*
 * Build the page-table structure (pmd and pte levels) covering the
 * virtual range [start, end) under pgd_base, allocating zeroed pte
 * pages from bootmem where none exist yet.  Only the structure is
 * created here; the actual mappings are entered later via set_fixmap().
 * 'end' may be 0 (address-space wrap): the loops then stop at the
 * PTRS_PER_* limits instead.
 */
static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                /* folded two-level tables: the pgd entry acts as the pmd */
                pmd = (pmd_t *)pgd;

                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        if (pmd_none(*pmd)) {
                                /* allocate a zeroed pte page and hook it into the pmd */
                                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
                                /* sanity: the pte we installed must be the one a lookup finds */
                                if (pte != pte_offset_kernel(pmd, 0))
                                        BUG();
                        }
                        vaddr += PMD_SIZE;
                }
                /* after the first (possibly partial) pgd, restart at pmd index 0 */
                j = 0;
        }
}
129
 
130
/*
 * Compute the per-zone page counts (ZONE_DMA / ZONE_NORMAL) from the
 * DMA address limit and max_low_pfn, then hand them to free_area_init().
 */
static void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
        unsigned int dma_limit_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        unsigned int low_pfn = max_low_pfn;

        if (low_pfn < dma_limit_pfn) {
                /* all of low memory fits below the DMA boundary */
                zones_size[ZONE_DMA] = low_pfn;
        } else {
                zones_size[ZONE_DMA] = dma_limit_pfn;
                zones_size[ZONE_NORMAL] = low_pfn - dma_limit_pfn;
        }

        free_area_init(zones_size);
}
146
 
147
/*
 * Create a 1:1 (virtual == physical) mapping for the range
 * [start, start + size) in swapper_pg_dir, used for memory-mapped
 * I/O regions.  'page_attrs' supplies extra pte attribute bits; the
 * kernel read/write/exec and shared/dirty bits are always OR-ed in.
 * pte pages are allocated from bootmem.
 */
static void __init identical_mapping(unsigned long start, unsigned long size,
                                     unsigned long page_attrs)
{

        unsigned long vaddr, end;
        pgd_t *pgd, *pgd_base;
        int i, j, k;
        pmd_t *pmd;
        pte_t *pte, *pte_base;

        printk("Setting up identical mapping (0x%lx - 0x%lx)\n",
               start, start + size);

        /* force kernel r/w/x plus shared/dirty on top of caller's bits */
        page_attrs |= _PAGE_ALL | _PAGE_SRE | _PAGE_SWE |
                _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC;
        /*
         * This can be zero as well - no problem, in that case we exit
         * the loops anyway due to the PTRS_PER_* conditions.
         */
        end = start + size;

        pgd_base = swapper_pg_dir;
        i = __pgd_offset(start);
        pgd = pgd_base + i;

        for (; i < PTRS_PER_PGD; pgd++, i++) {
                vaddr = i*PGDIR_SIZE;
                if (end && (vaddr >= end))
                        break;
                /* folded two-level tables: the pgd entry acts as the pmd */
                pmd = (pmd_t *)pgd;

                if (pmd != pmd_offset(pgd, 0))
                        BUG();
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
                        vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
                        if (end && (vaddr >= end))
                                break;

                        pte_base = pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

                        for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
                                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
                                if (end && (vaddr >= end))
                                        break;
                                /* identity map: vaddr is used as the physical address */
                                *pte = mk_pte_phys(vaddr, __pgprot(page_attrs));
                        }
                        set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));

                        /* sanity: installed pte page must be what a lookup finds */
                        if (pte_base != pte_offset_kernel(pmd, 0))
                          BUG();
                }
        }
}
200
 
201
/*
 * Set up the kernel page tables and TLB handling at boot:
 *  - clear swapper_pg_dir and point current_pgd at init_mm's pgd,
 *  - initialise the TLB (tlb_init() in tlb.c),
 *  - linearly map all low memory at PAGE_OFFSET,
 *  - create identity mappings for the on-chip peripheral windows,
 *  - size the memory zones and build the fixmap table structure,
 *  - patch the d/i-TLB miss exception vectors, then init kmap.
 */
void __init paging_init(void)
{
        extern void tlb_init(void);

        unsigned long vaddr, end;
        pgd_t *pgd, *pgd_base;
        int i, j, k;
        pmd_t *pmd;
        pte_t *pte, *pte_base;

        printk("Setting up paging and PTEs.\n");

        /* clear out the init_mm.pgd that will contain the kernel's mappings */

        for(i = 0; i < PTRS_PER_PGD; i++)
                swapper_pg_dir[i] = __pgd(0);

        /* make sure the current pgd table points to something sane
         * (even if it is most probably not used until the next
         *  switch_mm)
         */
         current_pgd = init_mm.pgd;

         /* initialise the TLB (tlb.c) */
         tlb_init();

        /*
         * This can be zero as well - no problem, in that case we exit
         * the loops anyway due to the PTRS_PER_* conditions.
         */
        end = (unsigned long)__va(max_low_pfn*PAGE_SIZE);

        pgd_base = swapper_pg_dir;
        i = __pgd_offset(PAGE_OFFSET);
        pgd = pgd_base + i;

        for (; i < PTRS_PER_PGD; pgd++, i++) {
                vaddr = i*PGDIR_SIZE;
                if (end && (vaddr >= end))
                        break;
                /* folded two-level tables: the pgd entry acts as the pmd */
                pmd = (pmd_t *)pgd;

                if (pmd != pmd_offset(pgd, 0))
                        BUG();
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
                        vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
                        if (end && (vaddr >= end))
                                break;

                        pte_base = pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

                        for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
                                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
                                if (end && (vaddr >= end))
                                        break;
#ifdef CONFIG_OR32_GUARD_PROTECTED_CORE
                                {
                                        extern char _e_protected_core;
                                        unsigned long page_attrs, offset;

                                        offset = i*PGDIR_SIZE + j*PMD_SIZE;
                                        /* read+exec only: _PAGE_SWE deliberately omitted */
                                        page_attrs = _PAGE_ALL | _PAGE_SRE |
                                                _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC;

                                        /* make all but first and last page of .text and .rodata
                                         * sections write protected.
                                         */
                                        if ((vaddr > (PAGE_SIZE + offset)) &&
                                            ((vaddr + PAGE_SIZE) < ((unsigned long)&(_e_protected_core))))
                                                *pte = mk_pte_phys(__pa(vaddr), __pgprot(page_attrs));
                                        else
                                                *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
                                }
#else
                                /* linear map: kernel virtual -> physical via __pa() */
                                *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
#endif /* CONFIG_OR32_GUARD_PROTECTED_CORE */
                        }
                        set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));

                        /* sanity: installed pte page must be what a lookup finds */
                        if (pte_base != pte_offset_kernel(pmd, 0))
                          BUG();
                }
        }

#ifdef CONFIG_OR32_GUARD_PROTECTED_CORE
        {
                extern char _e_protected_core;

                printk("write protecting ro sections (0x%lx - 0x%lx)\n",
                       PAGE_OFFSET + PAGE_SIZE, PAGE_MASK&((unsigned long)&(_e_protected_core)));
        }
#endif /* CONFIG_OR32_GUARD_PROTECTED_CORE */

        /* __PHX__: fixme,
         * - detect units via UPR,
         * - set up only apropriate mappings
         * - make oeth_probe not poke around if ethernet is not present in upr
         *   or make sure that it doesn't kill of the kernel when no oeth
         *   present
         */

        /* map the UART address space */
        identical_mapping(0x80000000, 0x10000000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0x92000000, 0x2000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0xb8070000, 0x2000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0x97000000, 0x2000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0x99000000, 0x1000000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0x93000000, 0x2000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0xa6000000, 0x100000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);
        identical_mapping(0x1e50000, 0x150000, _PAGE_CI |
                          _PAGE_URE | _PAGE_UWE);

        zone_sizes_init();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, 0, pgd_base);


        /*
         * enable EA translations via PT mechanism
         */

        /* self modifing code ;) */
        {
          extern unsigned long dtlb_miss_handler;
          extern unsigned long itlb_miss_handler;

          /* exception vectors at physical 0x900 (dtlb) / 0xa00 (itlb) */
          unsigned long *dtlb_vector = __va(0x900);
          unsigned long *itlb_vector = __va(0xa00);

          /* store the handler's pc-relative offset (>>2: word units)
           * into each vector slot */
          printk("dtlb_miss_handler %p\n", &dtlb_miss_handler);
          *dtlb_vector = ((unsigned long)&dtlb_miss_handler -
                          (unsigned long)dtlb_vector) >> 2;

          printk("itlb_miss_handler %p\n", &itlb_miss_handler);
          *itlb_vector = ((unsigned long)&itlb_miss_handler -
                          (unsigned long)itlb_vector) >> 2;
        }
        kmap_init();
}
352
 
353
 
354
/* References to section boundaries */
355
 
356
extern char _stext, _etext, _edata, __bss_start, _end;
357
extern char __init_begin, __init_end;
358
 
359
/*unsigned long loops_per_usec;  Removed by JPB*/
360
 
361
/*
 * Hand all low memory from bootmem to the page allocator and count
 * the pages that stayed reserved.  Returns that reserved-page count.
 */
static int __init free_pages_init(void)
{
        int nr_reserved = 0;
        int pfn;

        /* this will put all low memory onto the freelists */
        totalram_pages = free_all_bootmem();

        /* only count reserved RAM pages */
        for (pfn = 0; pfn < max_low_pfn; pfn++)
                if (PageReserved(mem_map + pfn))
                        nr_reserved++;

        return nr_reserved;
}
379
 
380
/* Derive max_mapnr and num_physpages from the highest low-memory pfn. */
static void __init set_max_mapnr_init(void)
{
        num_physpages = max_low_pfn;
        max_mapnr = max_low_pfn;
}
384
 
385
/*
 * Final arch-level memory initialisation: publish max_mapnr /
 * high_memory, clear the zero page, release bootmem to the buddy
 * allocator, and log a memory summary.  Sets mem_init_done so the
 * rest of the kernel knows the page allocator is live.
 *
 * Fix vs. original: dropped the redundant bare `return;` at the end
 * of this void function.
 */
void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;

        phx_warn("mem_map %p", mem_map);
        if (!mem_map)
                BUG();

        set_max_mapnr_init();

        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* clear the zero-page */
        phx_printk("empty_zero_page %p", empty_zero_page);
        memset((void*)empty_zero_page, 0, PAGE_SIZE);

        reservedpages = free_pages_init();

        /* section sizes from the linker-provided boundary symbols */
        codesize =  (unsigned long) &_etext - (unsigned long) &_stext;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                max_mapnr << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (0 << (PAGE_SHIFT-10)) /* no highmem on this port */
               );

        mem_init_done = 1;
}
420
 
421
#ifdef CONFIG_BLK_DEV_INITRD
422
void free_initrd_mem(unsigned long start, unsigned long end)
423
{
424
        printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
425
 
426
        for (; start < end; start += PAGE_SIZE) {
427
                ClearPageReserved(virt_to_page(start));
428
                init_page_count(virt_to_page(start));
429
                free_page(start);
430
                totalram_pages++;
431
        }
432
}
433
#endif
434
 
435
void free_initmem(void)
436
{
437
        unsigned long addr;
438
 
439
        addr = (unsigned long)(&__init_begin);
440
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
441
                ClearPageReserved(virt_to_page(addr));
442
                init_page_count(virt_to_page(addr));
443
                free_page(addr);
444
                totalram_pages++;
445
        }
446
        printk (KERN_INFO "Freeing unused kernel memory: %luk freed\n",
447
                ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
448
}
449
 
450
/*
451
 * Associate a virtual page frame with a given physical page frame
452
 * and protection flags for that frame.
453
 */
454
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
455
{
456
        pgd_t *pgd;
457
        pud_t *pud;
458
        pmd_t *pmd;
459
        pte_t *pte;
460
 
461
        pgd = swapper_pg_dir + pgd_index(vaddr);
462
        if (pgd_none(*pgd)) {
463
                BUG();
464
                return;
465
        }
466
        pud = pud_offset(pgd, vaddr);
467
        if (pud_none(*pud)) {
468
                BUG();
469
                return;
470
        }
471
        pmd = pmd_offset(pud, vaddr);
472
        if (pmd_none(*pmd)) {
473
                BUG();
474
                return;
475
        }
476
        pte = pte_offset_kernel(pmd, vaddr);
477
        /* <pfn,flags> stored as-is, to permit clearing entries */
478
        set_pte(pte, pfn_pte(pfn, flags));
479
}
480
 
481
/*
 * Map the fixmap slot 'idx' to physical address 'phys' with protection
 * 'flags', then flush the TLB so the new translation takes effect.
 */
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        set_pte_pfn(__fix_to_virt(idx), phys >> PAGE_SHIFT, flags);
        flush_tlb_all();
}
492
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.