/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/efi.h>
#include <linux/mmzone.h>

#include <asm/bitops.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/mca.h>

/* References to section boundaries: */
extern char _stext, _etext, _edata, __init_begin, __init_end;

extern void ia64_tlb_init (void);
extern int filter_rsvd_memory (unsigned long, unsigned long, void *);

/* Note - may be changed by platform_setup */
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
#define LARGE_GAP	0x40000000	/* use virtual mem map if a hole is larger than this */

static unsigned long totalram_pages, reserved_pages;
struct page *zero_page_memmap_ptr;	/* map entry for zero page */

unsigned long vmalloc_end = VMALLOC_END_INIT;

static struct page *vmem_map;
static unsigned long num_dma_physpages;

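/*
 * Trim the per-CPU page-table quicklists: once the cache holds more than
 * "high" pages, return pages to the system until it drops back to "low".
 * Returns the number of pages freed.
 */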
int
do_check_pgt_cache (int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist)
				free_page((unsigned long)pgd_alloc_one_fast(0)), ++freed;
			if (pmd_quicklist)
				free_page((unsigned long)pmd_alloc_one_fast(0, 0)), ++freed;
			if (pte_quicklist)
				free_page((unsigned long)pte_alloc_one_fast(0, 0)), ++freed;
		} while (pgtable_cache_size > low);
	}
	return freed;
}

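/*
 * Place the bottom of the register backing store: take the stack rlimit
 * (rounded down to a 16-byte multiple by the "& -16"), clamp it to
 * MAX_USER_STACK_SIZE, and put rbs_bot that far below STACK_TOP.
 */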
inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = STACK_TOP - stack_size;
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = PAGE_COPY;
		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
		vma->vm_ops = NULL;
		vma->vm_pgoff = 0;
		vma->vm_file = NULL;
		vma->vm_private_data = NULL;
		insert_vm_struct(current->mm, vma);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (vma) {
			memset(vma, 0, sizeof(*vma));
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			insert_vm_struct(current->mm, vma);
		}
	}
}

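/*
 * Release the memory occupied by __init code and data: clear each page's
 * reserved bit, reset its reference count, and hand it back to the page
 * allocator.
 */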
void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(&__init_begin);
	eaddr = (unsigned long) ia64_imva(&__init_end);
	for (; addr < eaddr; addr += PAGE_SIZE) {
		clear_bit(PG_reserved, &virt_to_page((void *)addr)->flags);
		set_page_count(virt_to_page((void *)addr), 1);
		free_page(addr);
		++totalram_pages;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (&__init_end - &__init_begin) >> 10);
}

void
free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!VALID_PAGE(virt_to_page((void *)start)))
			continue;
		clear_bit(PG_reserved, &virt_to_page((void *)start)->flags);
		set_page_count(virt_to_page((void *)start), 1);
		free_page(start);
		++totalram_pages;
	}
}

void
si_meminfo (struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);
	val->totalhigh = 0;
	val->freehigh = 0;
	val->mem_unit = PAGE_SIZE;
	return;
}

void
show_mem(void)
{
	int i, reserved;
	int shared, cached;
	pg_data_t *pgdat;
	char *tchar = (numnodes > 1) ? "\t" : "";

	printk("Mem-info:\n");
	show_free_areas();

	printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		reserved = 0;
		cached = 0;
		shared = 0;
		if (numnodes > 1)
			printk("Node ID: %d\n", pgdat->node_id);
		for (i = 0; i < pgdat->node_size; i++) {
			if (!VALID_PAGE(pgdat->node_mem_map + i))
				continue;
			if (PageReserved(pgdat->node_mem_map + i))
				reserved++;
			else if (PageSwapCache(pgdat->node_mem_map + i))
				cached++;
			else if (page_count(pgdat->node_mem_map + i))
				shared += page_count(pgdat->node_mem_map + i) - 1;
		}
		printk("%s%ld pages of RAM\n", tchar, pgdat->node_size);
		printk("%s%d reserved pages\n", tchar, reserved);
		printk("%s%d pages shared\n", tchar, shared);
		printk("%s%d pages swap cached\n", tchar, cached);
	}
	printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
	show_buffers();
	printk("%d free buffer pages\n", nr_free_buffer_pages());
}

/*
 * This is like put_dirty_page() but installs a clean page with PAGE_GATE protection
 * (execute-only, typically).
 */
struct page *
put_gate_page (struct page *page, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_gate_page: gate page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */

	spin_lock(&init_mm.page_table_lock);
	{
		pmd = pmd_alloc(&init_mm, pgd, address);
		if (!pmd)
			goto out;
		pte = pte_alloc(&init_mm, pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte)) {
			pte_ERROR(*pte);
			goto out;
		}
		flush_page_to_ram(page);
		set_pte(pte, mk_pte(page, PAGE_GATE));
	}
  out:	spin_unlock(&init_mm.page_table_lock);
	/* no need for flush_tlb */
	return page;
}

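/*
 * Set up CPU-local MMU state: region registers for the uncached and vmalloc
 * regions, a pinned translation for the per-CPU data area, and the VHPT
 * walker with its virtually mapped linear page table (VMLPT).
 */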
void __init
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long psr, rid, pta, impl_va_bits;
	extern void __init tlb_init (void);
#ifdef CONFIG_IA64_MCA
	int cpu;
#endif

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Set up the kernel identity mapping for regions 6 and 5.  The mapping for
	 * region 7 is set up in _start().
	 */
	psr = ia64_clear_ic();

	rid = ia64_rid(IA64_REGION_ID_KERNEL, __IA64_UNCACHED_OFFSET);
	ia64_set_rr(__IA64_UNCACHED_OFFSET, (rid << 8) | (IA64_GRANULE_SHIFT << 2));

	rid = ia64_rid(IA64_REGION_ID_KERNEL, VMALLOC_START);
	ia64_set_rr(VMALLOC_START, (rid << 8) | (PAGE_SHIFT << 2) | 1);

	/* ensure rr6 is up-to-date before inserting the PERCPU_ADDR translation: */
	ia64_srlz_d();

	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
		 pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL)), PAGE_SHIFT);

	ia64_set_psr(psr);
	ia64_srlz_i();

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * the VMLPT.  I assume that once we run on machines big enough to warrant 64KB
	 * pages, IMPL_VA_MSB will be significantly bigger, so this is unlikely to become
	 * a problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

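	/*
	 * Worked example (assuming 16KB pages, i.e. PAGE_SHIFT == 14, and a CPU
	 * implementing 51 bits of virtual address space per region): the
	 * three-level page table maps 3*(14 - 3) + 14 = 47 bits, vmlpt_bits =
	 * 51 - 14 + 3 = 40, and pta = 2^61 - 2^40, so the 2^47-byte mapped
	 * space ends well below the VMLPT and the overlap check below passes.
	 */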
	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	if (POW2(mapped_space_bits) >= pta)
		panic("mm/init: overlap between virtually mapped linear page table and "
		      "mapped kernel space!");
	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef CONFIG_IA64_MCA
	cpu = smp_processor_id();

	/* mca handler uses cr.lid as key to pick the right entry */
	ia64_mca_tlb_list[cpu].cr_lid = ia64_get_lid();

	/* insert this percpu data information into our list for MCA recovery purposes */
	ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
	/* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
	ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
	ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
	ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
	ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
	ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
#endif
}

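/*
 * Build the kernel page tables that back the virtually mapped mem_map
 * (vmem_map): for each page of map entries in [start, end), allocate any
 * missing pgd/pmd/pte levels and the backing data page itself from the
 * bootmem pool of the node that owns the underlying memory.
 */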
static int
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page, next_blk_page;
	unsigned long blk_start;
	struct page *map_start, *map_end;
	int node = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	/* should we use platform_map_nr here? */

	map_start = vmem_map + MAP_NR_DENSE(start);
	map_end = vmem_map + MAP_NR_DENSE(end);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);

	/* force the first iteration to get node id */
	blk_start = start;
	next_blk_page = 0;

	for (address = start_page; address < end_page; address += PAGE_SIZE) {

		/* if we went across a node boundary, get new nid */
		if (address >= next_blk_page) {
			struct page *map_next_blk;

			node = paddr_to_nid(__pa(blk_start));

			/* get end addr of this memblk as next blk_start */
			blk_start = (unsigned long) __va(min(end, memblk_endpaddr(__pa(blk_start))));
			map_next_blk = vmem_map + MAP_NR_DENSE(blk_start);
			next_blk_page = PAGE_ALIGN((unsigned long) map_next_blk);
		}

		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pgd, address);

		if (pmd_none(*pmd))
			pmd_populate(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, mk_pte_phys(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)),
						 PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	memmap_init_callback_t *memmap_init;
	struct page *start;
	struct page *end;
	int zone;
	int highmem;
};

struct memmap_count_callback_data {
	int node;
	unsigned long num_physpages;
	unsigned long num_dma_physpages;
	unsigned long min_pfn;
	unsigned long max_pfn;
} cdata;

static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;

	/* Should we use platform_map_nr here? */

	map_start = mem_map + MAP_NR_DENSE(start);
	map_end = mem_map + MAP_NR_DENSE(end);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements
	 * that fit completely on the same pages that were allocated
	 * for the "in bounds" elements because they may be referenced
	 * later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
		/ sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) -
		     (unsigned long) map_end)
		/ sizeof(struct page));

	if (map_start < map_end)
		(*args->memmap_init)(map_start, map_end, args->zone,
				     page_to_phys(map_start), args->highmem);

	return 0;
}

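/*
 * Architecture hook for mem_map initialization: with a dense mem_map the whole
 * range can be initialized directly, but with a virtual mem_map only the
 * ranges actually backed by memory may be touched, so walk the EFI memory map
 * and initialize each chunk separately.
 */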
unsigned long
arch_memmap_init (memmap_init_callback_t *memmap_init, struct page *start,
		  struct page *end, int zone, unsigned long start_paddr, int highmem)
{
	if (!vmem_map)
		memmap_init(start, end, zone, page_to_phys(start), highmem);
	else {
		struct memmap_init_callback_data args;

		args.memmap_init = memmap_init;
		args.start = start;
		args.end = end;
		args.zone = zone;
		args.highmem = highmem;

		efi_memmap_walk(virtual_memmap_init, &args);
	}

	return page_to_phys(end - 1) + PAGE_SIZE;
}

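/*
 * With a virtual mem_map, not every struct page is backed by a real page of
 * memory.  Probe the first and last byte of the entry with __get_user(),
 * whose exception fixup makes the probe fail gracefully (nonzero return)
 * instead of faulting when the backing page is absent.
 */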
int
ia64_page_valid (struct page *page)
{
	char byte;

	return (__get_user(byte, (char *) page) == 0)
		&& (__get_user(byte, (char *) (page + 1) - 1) == 0);
}

#define GRANULEROUNDDOWN(n)	((n) & ~(IA64_GRANULE_SIZE-1))
#define GRANULEROUNDUP(n)	(((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
#define ORDERROUNDDOWN(n)	((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))

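/*
 * Count this node's physical pages (total and DMA-able) and track its
 * min/max pfn.  The min/max are rounded out to granule and buddy-allocator
 * (MAX_ORDER) boundaries so the node's mem_map covers whole allocation
 * blocks.
 */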
static int
count_pages (u64 start, u64 end, int node)
{
	start = __pa(start);
	end = __pa(end);
	if (node == cdata.node) {
		cdata.num_physpages += (end - start) >> PAGE_SHIFT;
		if (start <= __pa(MAX_DMA_ADDRESS))
			cdata.num_dma_physpages += (min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
		start = GRANULEROUNDDOWN(start);
		start = ORDERROUNDDOWN(start);
		end = GRANULEROUNDUP(end);
		cdata.max_pfn = max(cdata.max_pfn, end >> PAGE_SHIFT);
		cdata.min_pfn = min(cdata.min_pfn, start >> PAGE_SHIFT);
	}
	return 0;
}

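/*
 * Track the largest gap between successive EFI memory descriptors;
 * paging_init() uses the result to decide whether a dense mem_map would waste
 * too much space on the hole and a virtually mapped mem_map should be used
 * instead (see LARGE_GAP).
 */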
static int
find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;
	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

/*
 * Set up the page tables.
 */
void
paging_init (void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long max_gap;
	int node;

	/* initialize mem_map[] */

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_gap = 0;
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);

	for (node = 0; node < numnodes; node++) {
		memset(zones_size, 0, sizeof(zones_size));
		memset(zholes_size, 0, sizeof(zholes_size));
		memset(&cdata, 0, sizeof(cdata));

		cdata.node = node;
		cdata.min_pfn = ~0;

		efi_memmap_walk(filter_rsvd_memory, count_pages);
		num_dma_physpages += cdata.num_dma_physpages;
		num_physpages += cdata.num_physpages;

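		/*
		 * Carve this node's pfn range into zones: entirely above the
		 * DMA limit gets only ZONE_NORMAL, entirely below it only
		 * ZONE_DMA, and a range straddling max_dma is split.  The
		 * zholes arrays account for pfns in the range that have no
		 * backing memory.
		 */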
		if (cdata.min_pfn >= max_dma) {
			zones_size[ZONE_NORMAL] = cdata.max_pfn - cdata.min_pfn;
			zholes_size[ZONE_NORMAL] = cdata.max_pfn - cdata.min_pfn - cdata.num_physpages;
		} else if (cdata.max_pfn < max_dma) {
			zones_size[ZONE_DMA] = cdata.max_pfn - cdata.min_pfn;
			zholes_size[ZONE_DMA] = cdata.max_pfn - cdata.min_pfn - cdata.num_dma_physpages;
		} else {
			zones_size[ZONE_DMA] = max_dma - cdata.min_pfn;
			zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - cdata.num_dma_physpages;
			zones_size[ZONE_NORMAL] = cdata.max_pfn - max_dma;
			zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] - (cdata.num_physpages - cdata.num_dma_physpages);
		}

		if (numnodes == 1 && max_gap < LARGE_GAP) {
			vmem_map = (struct page *) 0;
			zones_size[ZONE_DMA] += cdata.min_pfn;
			zholes_size[ZONE_DMA] += cdata.min_pfn;
			free_area_init_core(0, NODE_DATA(node), &mem_map, zones_size, 0, zholes_size, NULL);
		} else {

			/* allocate virtual mem_map */

			if (node == 0) {
				unsigned long map_size;
				map_size = PAGE_ALIGN(max_low_pfn*sizeof(struct page));
				vmalloc_end -= map_size;
				mem_map = vmem_map = (struct page *) vmalloc_end;
				efi_memmap_walk(create_mem_map_page_table, 0);
				printk(KERN_INFO "Virtual mem_map starts at 0x%p\n", mem_map);
			}

			free_area_init_node(node, NODE_DATA(node), vmem_map + cdata.min_pfn, zones_size,
					    cdata.min_pfn << PAGE_SHIFT, zholes_size);
		}
	}

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	struct page *pg;

	for (pg = virt_to_page((void *)start); pg < virt_to_page((void *)end); ++pg)
		if (PageReserved(pg))
			++num_reserved;
	reserved_pages += num_reserved;
	return 0;
}

void
mem_init (void)
{
	extern char __start_gate_section[];
	long codesize, datasize, initsize;
	unsigned long num_pgt_pages;
	pg_data_t *pgdat;

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_pci_dma_init();
#endif

	if (!mem_map)
		BUG();

	max_mapnr = max_low_pfn;
	high_memory = __va(max_low_pfn * PAGE_SIZE);

	for_each_pgdat(pgdat)
		totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(filter_rsvd_memory, count_reserved_pages);

	codesize = (unsigned long) &_etext - (unsigned long) &_stext;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, %luk data, %luk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * Allow for enough (cached) page table pages so that we can map the entire memory
	 * at least once.  Each task also needs a couple of page table pages, so add in a
	 * fudge factor for that (don't use "threads-max" here; that would be wrong!).
	 * Don't allow the cache to be more than 10% of total memory, though.
	 */
#	define NUM_TASKS	500	/* typical number of tasks */
	num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
	if (num_pgt_pages > nr_free_pages() / 10)
		num_pgt_pages = nr_free_pages() / 10;
	if (num_pgt_pages > pgt_cache_water[1])
		pgt_cache_water[1] = num_pgt_pages;

	/* install the gate page in the global page table: */
	put_gate_page(virt_to_page(ia64_imva(__start_gate_section)), GATE_ADDR);

#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif
}