OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [uclinux/] [uClinux-2.0.x/] [arch/] [i386/] [mm/] [init.c] - Blame information for rev 199

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 199 simons
/*
2
 *  linux/arch/i386/mm/init.c
3
 *
4
 *  Copyright (C) 1995  Linus Torvalds
5
 */
6
 
7
#include <linux/config.h>
8
#include <linux/signal.h>
9
#include <linux/sched.h>
10
#include <linux/head.h>
11
#include <linux/kernel.h>
12
#include <linux/errno.h>
13
#include <linux/string.h>
14
#include <linux/types.h>
15
#include <linux/ptrace.h>
16
#include <linux/mman.h>
17
#include <linux/mm.h>
18
#include <linux/swap.h>
19
#include <linux/smp.h>
20
#ifdef CONFIG_BLK_DEV_INITRD
21
#include <linux/blk.h>
22
#endif
23
 
24
#include <asm/system.h>
25
#include <asm/segment.h>
26
#include <asm/pgtable.h>
27
#include <asm/dma.h>
28
 
29
#if 0
30
/*
31
 * The SMP kernel can't handle the 4MB page table optimizations yet
32
 */
33
#ifdef __SMP__
34
#undef USE_PENTIUM_MM
35
#endif
36
#endif
37
 
38
const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
39
 
40
extern void die_if_kernel(char *,struct pt_regs *,long);
41
extern void show_net_buffers(void);
42
 
43
/*
44
 * BAD_PAGE is the page that is used for page faults when linux
45
 * is out-of-memory. Older versions of linux just did a
46
 * do_exit(), but using this instead means there is less risk
47
 * for a process dying in kernel mode, possibly leaving a inode
48
 * unused etc..
49
 *
50
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
51
 * to point to BAD_PAGE entries.
52
 *
53
 * ZERO_PAGE is a special page that is used for zero-initialized
54
 * data and COW.
55
 */
56
/*
 * Return the shared "bad" page table: a page-sized table whose entries
 * all map BAD_PAGE.  Handed out by pte_alloc paths when the system is
 * out of memory (see the BAD_PAGE/BAD_PAGETABLE comment above).
 *
 * The "cld ; rep ; stosl" fills empty_bad_page_table with PAGE_SIZE/4
 * copies of the 32-bit pte value for BAD_PAGE:
 *   %eax ("a") = fill value, %edi ("D") = destination, %ecx ("c") = long count.
 * NOTE(review): "di"/"cx" appear both as inputs and in the clobber list;
 * old GCCs accepted this, modern GCC rejects it -- leave as-is for the
 * era's toolchain.
 */
pte_t * __bad_pagetable(void)
{
        extern char empty_bad_page_table[PAGE_SIZE];

        __asm__ __volatile__("cld ; rep ; stosl":
                :"a" (pte_val(BAD_PAGE)),
                 "D" ((long) empty_bad_page_table),
                 "c" (PAGE_SIZE/4)
                :"di","cx");
        return (pte_t *) empty_bad_page_table;
}
67
 
68
/*
 * Return the pte for the shared "bad" page: empty_bad_page is wiped to
 * zeroes with "rep stosl" (%eax=0, %edi=page, %ecx=PAGE_SIZE/4 longs)
 * and then mapped writable-shared and pre-dirtied, so an OOM process
 * that faults gets a zeroed page instead of being killed outright.
 * NOTE(review): same input-in-clobber-list asm idiom as
 * __bad_pagetable() above -- valid only for the era's GCC.
 */
pte_t __bad_page(void)
{
        extern char empty_bad_page[PAGE_SIZE];

        __asm__ __volatile__("cld ; rep ; stosl":
                :"a" (0),
                 "D" ((long) empty_bad_page),
                 "c" (PAGE_SIZE/4)
                :"di","cx");
        return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
}
79
 
80
void show_mem(void)
81
{
82
        int i,free = 0,total = 0,reserved = 0;
83
        int shared = 0;
84
 
85
        printk("Mem-info:\n");
86
        show_free_areas();
87
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
88
        i = high_memory >> PAGE_SHIFT;
89
        while (i-- > 0) {
90
                total++;
91
                if (PageReserved(mem_map+i))
92
                        reserved++;
93
                else if (!mem_map[i].count)
94
                        free++;
95
                else
96
                        shared += mem_map[i].count-1;
97
        }
98
        printk("%d pages of RAM\n",total);
99
        printk("%d free pages\n",free);
100
        printk("%d reserved pages\n",reserved);
101
        printk("%d pages shared\n",shared);
102
        show_buffers();
103
#ifdef CONFIG_NET
104
        show_net_buffers();
105
#endif
106
}
107
 
108
extern unsigned long free_area_init(unsigned long, unsigned long);
109
 
110
/*
111
 * paging_init() sets up the page tables - note that the first 4MB are
112
 * already mapped by head.S.
113
 *
114
 * This routines also unmaps the page at virtual kernel address 0, so
115
 * that we can trap those pesky NULL-reference errors in the kernel.
116
 */
117
/*
 * Build the kernel page tables covering all physical memory up to
 * end_mem, consuming page-table pages from start_mem as needed.
 * Returns the (advanced) start_mem after handing the rest to
 * free_area_init().  The first 4MB are already mapped by head.S.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        pgd_t * pg_dir;
        pte_t * pg_table;
        unsigned long tmp;            /* pte index within one page table */
        unsigned long address;        /* physical address being mapped */

/*
 * Physical page 0 is special; it's not touched by Linux since BIOS
 * and SMM (for laptops with [34]86/SL chips) may need it.  It is read
 * and write protected to detect null pointer references in the
 * kernel.
 * It may also hold the MP configuration table when we are booting SMP.
 */
#if 0
        memset((void *) 0, 0, PAGE_SIZE);
#endif
#ifdef __SMP__
        /*
         * Look for the Intel MP floating-pointer structure in the three
         * standard locations, in order (bottom 1K, top of base RAM,
         * BIOS ROM), falling back to the EBDA pointed to by 0x40E.
         */
        if (!smp_scan_config(0x0,0x400))        /* Scan the bottom 1K for a signature */
        {
                /*
                 *      FIXME: Linux assumes you have 640K of base ram.. this continues
                 *      the error...
                 */
                if (!smp_scan_config(639*0x400,0x400))  /* Scan the top 1K of base RAM */
                {
                        if(!smp_scan_config(0xF0000,0x10000)) /* Scan the 64K of bios */
                        {
                                /*
                                 * If it is an SMP machine we should know now, unless the
                                 * configuration is in an EISA/MCA bus machine with an
                                 * extended bios data area.
                                 *
                                 * there is a real-mode segmented pointer pointing to the
                                 * 4K EBDA area at 0x40E, calculate and scan it here:
                                 */
                                address = *(unsigned short *)phys_to_virt(0x40E);
                                address<<=4;   /* real-mode segment -> physical address */
                                smp_scan_config(address, 0x1000);
                        }
                }
        }
        /*
         *      If it is an SMP machine we should know now, unless the configuration
         *      is in an EISA/MCA bus machine with an extended bios data area. I don't
         *      have such a machine so someone else can fill in the check of the EBDA
         *      here.
         */
/*      smp_alloc_memory(8192); */
#endif
#ifdef TEST_VERIFY_AREA
        wp_works_ok = 0;
#endif
        start_mem = PAGE_ALIGN(start_mem);
        address = 0;
        pg_dir = swapper_pg_dir;
        while (address < end_mem) {
#ifdef USE_PENTIUM_MM
                /*
                 * This will create page tables that
                 * span up to the next 4MB virtual
                 * memory boundary, but that's ok,
                 * we won't use that memory anyway.
                 */
                /* x86_capability bit 3: CPU supports 4MB pages (PSE) */
                if (x86_capability & 8) {
#ifdef GAS_KNOWS_CR4
                        /* Set CR4.PSE (bit 4) to enable 4MB pages. */
                        __asm__("movl %%cr4,%%eax\n\t"
                                "orl $16,%%eax\n\t"
                                "movl %%eax,%%cr4"
                                : : :"ax");
#else
                        /* Same as above, hand-assembled for old binutils
                         * that can't encode mov to/from %cr4. */
                        __asm__(".byte 0x0f,0x20,0xe0\n\t"
                                "orl $16,%%eax\n\t"
                                ".byte 0x0f,0x22,0xe0"
                                : : :"ax");
#endif
                        wp_works_ok = 1;
                        /* One 4MB pgd entry, both at vaddr 0 and at the
                         * kernel mapping (PAGE_OFFSET) slot. */
                        pgd_val(pg_dir[0]) = _PAGE_TABLE | _PAGE_4M | address;
                        pgd_val(pg_dir[USER_PGD_PTRS]) = _PAGE_TABLE | _PAGE_4M | address;
                        pg_dir++;
                        address += 4*1024*1024;
                        continue;
                }
#endif
                /* map the memory at virtual addr PAGE_OFFSET */
                /* Reuse the page table head.S installed, if any;
                 * otherwise carve a fresh one out of start_mem. */
                pg_table = (pte_t *) (PAGE_MASK & pgd_val(pg_dir[USER_PGD_PTRS]));
                if (!pg_table) {
                        pg_table = (pte_t *) start_mem;
                        start_mem += PAGE_SIZE;
                }

                /* also map it temporarily at 0x0000000 for init */
                pgd_val(pg_dir[0])   = _PAGE_TABLE | (unsigned long) pg_table;
                pgd_val(pg_dir[USER_PGD_PTRS]) = _PAGE_TABLE | (unsigned long) pg_table;
                pg_dir++;
                /* Fill the table: identity ptes up to end_mem, clear
                 * ptes for the remainder of the 4MB span. */
                for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
                        if (address < end_mem)
                                set_pte(pg_table, mk_pte(address, PAGE_SHARED));
                        else
                                pte_clear(pg_table);
                        address += PAGE_SIZE;
                }
        }
        local_flush_tlb();
        return free_area_init(start_mem, end_mem);
}
223
 
224
/*
 * Final memory initialization: account every physical page, release
 * the usable ones to the free lists, print the memory banner, and
 * probe whether the CPU honours the WP bit in supervisor mode.
 * [start_mem, end_mem) is the physical range not occupied by the
 * kernel image / boot data.
 */
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        unsigned long start_low_mem = PAGE_SIZE;  /* skip page 0 (BIOS/SMM) */
        int codepages = 0;
        int reservedpages = 0;
        int datapages = 0;
        unsigned long tmp;
        extern int _etext;   /* linker symbol: end of kernel text */

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /* mark usable pages in the mem_map[] */
        start_low_mem = PAGE_ALIGN(start_low_mem);

#ifdef __SMP__
        /*
         * But first pinch a few for the stack/trampoline stuff
         */
        start_low_mem += PAGE_SIZE;                             /* 32bit startup code */
        start_low_mem = smp_alloc_memory(start_low_mem);        /* AP processor stacks */
#endif
        start_mem = PAGE_ALIGN(start_mem);

        /*
         * IBM messed up *AGAIN* in their thinkpad: 0xA0000 -> 0x9F000.
         * They seem to have done something stupid with the floppy
         * controller as well..
         */
        /* Un-reserve conventional memory below 0x9f000... */
        while (start_low_mem < 0x9f000) {
                clear_bit(PG_reserved, &mem_map[MAP_NR(start_low_mem)].flags);
                start_low_mem += PAGE_SIZE;
        }

        /* ...and everything above the kernel image. */
        while (start_mem < high_memory) {
                clear_bit(PG_reserved, &mem_map[MAP_NR(start_mem)].flags);
                start_mem += PAGE_SIZE;
        }
        /* Classify every page and free the non-reserved ones. */
        for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
                if (tmp >= MAX_DMA_ADDRESS)
                        clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
                if (PageReserved(mem_map+MAP_NR(tmp))) {
                        /* VGA/BIOS hole counts as "reserved"; below
                         * _etext is kernel code; the rest is data. */
                        if (tmp >= 0xA0000 && tmp < 0x100000)
                                reservedpages++;
                        else if (tmp < (unsigned long) &_etext)
                                codepages++;
                        else
                                datapages++;
                        continue;
                }
                mem_map[MAP_NR(tmp)].count = 1;
#ifdef CONFIG_BLK_DEV_INITRD
                /* Don't free pages holding the initrd image. */
                if (!initrd_start || (tmp < initrd_start || tmp >=
                    initrd_end))
#endif
                        free_page(tmp);
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
                tmp >> 10,
                high_memory >> 10,
                codepages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datapages << (PAGE_SHIFT-10));
/* test if the WP bit is honoured in supervisor mode */
        if (wp_works_ok < 0) {
                /* Map page 0 read-only, then write to address 0; the
                 * fault handler (do_wp_page path) sets wp_works_ok if
                 * the write traps.  NOTE(review): presumably the fault
                 * handler updates wp_works_ok -- not visible here. */
                pg0[0] = pte_val(mk_pte(0, PAGE_READONLY));
                local_flush_tlb();
                __asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
                pg0[0] = 0;   /* unmap page 0 again */
                local_flush_tlb();
                if (wp_works_ok < 0)
                        wp_works_ok = 0;  /* no fault: WP not honoured */
        }
        return;
}
303
 
304
void si_meminfo(struct sysinfo *val)
305
{
306
        int i;
307
 
308
        i = high_memory >> PAGE_SHIFT;
309
        val->totalram = 0;
310
        val->sharedram = 0;
311
        val->freeram = nr_free_pages << PAGE_SHIFT;
312
        val->bufferram = buffermem;
313
        while (i-- > 0)  {
314
                if (PageReserved(mem_map+i))
315
                        continue;
316
                val->totalram++;
317
                if (!mem_map[i].count)
318
                        continue;
319
                val->sharedram += mem_map[i].count-1;
320
        }
321
        val->totalram <<= PAGE_SHIFT;
322
        val->sharedram <<= PAGE_SHIFT;
323
        return;
324
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.