/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 */
#include <linux/config.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/console.h>
#include <linux/ioport.h>
#include <linux/efi.h>

#ifdef CONFIG_BLK_DEV_RAM
# include <linux/blk.h>
#endif

#include <asm/ia32.h>
#include <asm/page.h>
#include <asm/machvec.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/mca.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlb.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#define MIN(a,b)	((a) < (b) ? (a) : (b))
#define MAX(a,b)	((a) > (b) ? (a) : (b))

extern char _end;

#ifdef CONFIG_NUMA
struct cpuinfo_ia64 *_cpu_data[NR_CPUS];
#else
struct cpuinfo_ia64 _cpu_data[NR_CPUS] __attribute__ ((section ("__special_page_section")));
mmu_gather_t mmu_gathers[NR_CPUS];
#endif

unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
struct io_space io_space[MAX_IO_SPACES];
unsigned int num_io_spaces;

unsigned char aux_device_present = 0xaa;	/* XXX remove this when legacy I/O is gone */

#define COMMAND_LINE_SIZE	512

char saved_command_line[COMMAND_LINE_SIZE];	/* used in proc filesystem */

/*
 * Reserved address ranges that must not be handed to the bootmem allocator.
 * Entries defined so far:
 *	- boot param structure itself
 *	- memory map
 *	- initrd (optional)
 *	- command line string
 *	- kernel code & data
 *
 * More could be added if necessary
 */
#define IA64_MAX_RSVD_REGIONS 5

struct rsvd_region {
	unsigned long start;	/* virtual address of beginning of element */
	unsigned long end;	/* virtual address of end of element + 1 */
};

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
static struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
static int num_rsvd_regions;

#ifndef CONFIG_DISCONTIGMEM
static unsigned long bootmap_start;	/* physical address where the bootmem map is located */
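
/*
 * Callback for efi_memmap_walk(): track the highest page frame number
 * covered by any usable memory range handed to us.
 */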
static int
find_max_pfn (unsigned long start, unsigned long end, void *arg)
{
	unsigned long *max_pfn = arg, pfn;

	pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
	if (pfn > *max_pfn)
		*max_pfn = pfn;
	return 0;
}
#endif /* !CONFIG_DISCONTIGMEM */

#define IGNORE_PFN0	1	/* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */

#ifdef CONFIG_DISCONTIGMEM
/*
 * efi_memmap_walk() knows nothing about the layout of memory across nodes.  Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void
call_pernode_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long rs, re;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_memblks) {
		/* this machine doesn't have SRAT, so call func with nid=0, bank=0 */
		if (start < end)
			(*func)(start, end, 0);
		return;
	}

	for (i = 0; i < num_memblks; i++) {
		rs = MAX(__pa(start), node_memblk[i].start_paddr);
		re = MIN(__pa(end), node_memblk[i].start_paddr + node_memblk[i].size);

		if (rs < re)
			(*func)((unsigned long) __va(rs), (unsigned long) __va(re),
				node_memblk[i].nid);
		if ((unsigned long) __va(re) == end)
			break;
	}
}

#else /* CONFIG_DISCONTIGMEM */
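
/*
 * Callback handed to filter_rsvd_memory(): give each remaining free range
 * back to the bootmem allocator.
 */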
static int
free_available_memory (unsigned long start, unsigned long end, void *arg)
{
	free_bootmem(__pa(start), end - start);
	return 0;
}
#endif /* CONFIG_DISCONTIGMEM */

/*
 * Filter incoming memory segments based on the primitive map created from
 * the boot parameters.  Segments contained in the map are removed from the
 * memory ranges.  A caller-specified function is called with the memory
 * ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;
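
	/*
	 * Walk the gap below each reserved region in turn; sort_regions()
	 * keeps rsvd_region[] ordered by start address, and the ~0UL
	 * end-of-memory marker guarantees the tail of the segment is covered.
	 */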
	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = MAX(start, prev_start);
		range_end   = MIN(end, rsvd_region[i].start);

		if (range_start < range_end)
#ifdef CONFIG_DISCONTIGMEM
			call_pernode_memory(range_start, range_end, func);
#else
			(*func)(range_start, range_end, 0);
#endif

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}

#ifndef CONFIG_DISCONTIGMEM
/*
 * Find a place to put the bootmap and return its starting address in
 * bootmap_start.  This address must be page-aligned.
 */
static int
find_bootmap_location (unsigned long start, unsigned long end, void *arg)
{
	unsigned long needed = *(unsigned long *)arg;
	unsigned long range_start, range_end, free_start;
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif

	free_start = PAGE_OFFSET;
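
	/*
	 * First-fit search: clip this segment against the gap below each
	 * reserved region and take the first gap big enough to hold the
	 * "needed" number of bytes.
	 */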
	for (i = 0; i < num_rsvd_regions; i++) {
		range_start = MAX(start, free_start);
		range_end   = MIN(end, rsvd_region[i].start & PAGE_MASK);

		if (range_end <= range_start)
			continue;	/* skip over empty range */

		if (range_end - range_start >= needed) {
			bootmap_start = __pa(range_start);
			return 1;	/* done */
		}

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		free_start = PAGE_ALIGN(rsvd_region[i].end);
	}
	return 0;
}
#endif /* CONFIG_DISCONTIGMEM */

static void
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

static void
find_memory (void)
{
#	define KERNEL_END	((unsigned long) &_end)
	unsigned long bootmap_size;
	unsigned long max_pfn;
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = ia64_imva(KERNEL_START);
	rsvd_region[n].end   = ia64_imva(KERNEL_END);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
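
	/*
	 * filter_rsvd_memory() and find_bootmap_location() walk this table
	 * front to back, so keep it sorted by start address.
	 */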
	sort_regions(rsvd_region, num_rsvd_regions);

#ifdef CONFIG_DISCONTIGMEM
	{
		extern void discontig_mem_init(void);

		bootmap_size = max_pfn = 0;	/* stop gcc warnings */
		discontig_mem_init();
	}
#else /* !CONFIG_DISCONTIGMEM */

	/* first find highest page frame number */
	max_pfn = 0;
	efi_memmap_walk(find_max_pfn, &max_pfn);

	/* how many bytes to cover all the pages */
	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;

	/* look for a location to hold the bootmap */
	bootmap_start = ~0UL;
	efi_memmap_walk(find_bootmap_location, &bootmap_size);
	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);

	/* Free all available memory, then mark bootmem-map as being in use. */
	efi_memmap_walk(filter_rsvd_memory, free_available_memory);
	reserve_bootmem(bootmap_start, bootmap_size);
#endif /* !CONFIG_DISCONTIGMEM */

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long) __va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

void __init
setup_arch (char **cmdline_p)
{
	extern unsigned long ia64_iobase;
	unsigned long phys_iobase;

	unw_init();

	*cmdline_p = __va(ia64_boot_param->command_line);
	strncpy(saved_command_line, *cmdline_p, sizeof(saved_command_line));
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';		/* for safety */

	efi_init();

#ifdef CONFIG_ACPI_BOOT
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();

# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI_BOOT */

	iomem_resource.end = ~0UL;	/* FIXME probably belongs elsewhere */
	find_memory();

#if 0
	/* XXX fix me */
	init_mm.start_code = (unsigned long) &_stext;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_bus(&_text);
	code_resource.end = virt_to_bus(&_etext) - 1;
	data_resource.start = virt_to_bus(&_etext);
	data_resource.end = virt_to_bus(&_edata) - 1;
#endif

	/* process SAL system table: */
	ia64_sal_init(efi.sal_systab);

#ifdef CONFIG_IA64_GENERIC
	machvec_init(acpi_get_sysname());
#endif

	/*
	 * Set `iobase' to the appropriate address in region 6 (uncached access range).
	 *
	 * The EFI memory map is the "preferred" location to get the I/O port space base,
	 * rather than relying on AR.KR0.  This should become clearer in future SAL
	 * specs.  We'll fall back to getting it out of AR.KR0 if no appropriate entry is
	 * found in the memory map.
	 */
	phys_iobase = efi_get_iobase();
	if (phys_iobase)
		/* set AR.KR0 since this is all we use it for anyway */
		ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
	else {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
		       "to AR.KR0\n");
		printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
	}
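
	/*
	 * ioremap() here just yields the uncached (region 6) alias of the
	 * physical base; the length argument is effectively unused on ia64
	 * at this point.
	 */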
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();
#endif

	cpu_init();	/* initialize the bootstrap CPU */

#ifdef CONFIG_ACPI_BOOT
	acpi_boot_init();
#endif
#ifdef CONFIG_SERIAL_HCDP
	if (efi.hcdp) {
		void setup_serial_hcdp(void *);

		/* Setup the serial ports described by HCDP */
		setup_serial_hcdp(efi.hcdp);
	}
#endif
#ifdef CONFIG_VT
# if defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
	/*
	 * Non-legacy systems may route legacy VGA MMIO range to system
	 * memory.  vga_con probes the MMIO hole, so memory looks like
	 * a VGA device to it.  The EFI memory map can tell us if it's
	 * memory so we can avoid this problem.
	 */
	if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
		conswitchp = &vga_con;
# endif
#endif

#ifdef CONFIG_IA64_MCA
	/* enable IA-64 Machine Check Abort Handling */
	ia64_mca_init();
#endif

	platform_setup(cmdline_p);
	paging_init();

	unw_create_gate_table();
}

/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
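	/*
	 * On SMP the loop count and cpu number come from the cpuinfo entry
	 * being shown; on UP there is only the global loops_per_jiffy and
	 * cpu 0.
	 */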
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpu	c->processor
#else
#	define lpj	loops_per_jiffy
#	define cpu	0
#endif
	char family[32], features[128], *cp;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;

	mask = c->features;

	switch (c->family) {
	      case 0x07: memcpy(family, "Itanium", 8); break;
	      case 0x1f: memcpy(family, "Itanium 2", 10); break;
	      default: sprintf(family, "%u", c->family); break;
	}

	/* build the feature string: */
	memcpy(features, " standard", 10);
	cp = features;
	if (mask & 1) {
		strcpy(cp, " branchlong");
		cp = strchr(cp, '\0');
		mask &= ~1UL;
	}
	if (mask)
		sprintf(cp, " 0x%lx", mask);

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %s\n"
		   "model      : %u\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   :%s\n"	/* don't change this---it _is_ right! */
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%06lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n\n",
		   cpu, c->vendor, family, c->model, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   c->proc_freq / 1000000, c->proc_freq % 1000000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
	return 0;
#undef lpj
#undef cpu
}
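
/*
 * seq_file iterator for /proc/cpuinfo: skip CPUs that are not online.
 */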
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !(cpu_online_map & (1UL << *pos)))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};

void
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;	/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->processor = smp_processor_id();
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	printk(KERN_INFO "CPU %d: %lu virtual and %lu physical address bits\n",
	       smp_processor_id(), impl_va_msb + 1, phys_addr_size);
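	/*
	 * Bits above impl_va_msb are unimplemented, except for the three
	 * region-number bits (63-61); likewise for physical addresses,
	 * where bit 63 is kept (it selects uncached accesses in physical
	 * addressing mode).
	 */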
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
	extern void __init ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *my_cpu_data;
#ifdef CONFIG_NUMA
	int cpu;

	/*
	 * If NUMA is configured, the cpu_data array is not preallocated.  The boot cpu
	 * allocates entries for every possible cpu.  As the remaining cpus come online,
	 * they reallocate a new cpu_data structure on their local node.  This extra work
	 * is required because some boot code references all cpu_data structures
	 * before the cpus are actually started.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node_cpuid[cpu].phys_id == hard_smp_processor_id())
			break;
	my_cpu_data = _cpu_data[cpu];
	my_cpu_data->node_data->active_cpu_count++;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		_cpu_data[cpu]->cpu_data[smp_processor_id()] = my_cpu_data;
#else
	my_cpu_data = cpu_data(smp_processor_id());
	my_cpu_data->mmu_gathers = &mmu_gathers[smp_processor_id()];
#endif

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() the old way, through identity mapped space.
	 */
	identify_cpu(my_cpu_data);
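
	/*
	 * McKinley (family 0x1f) PAL setup: PAL_PROC_GET_FEATURES returns the
	 * implemented (v0), currently enabled (v1) and controllable (v2)
	 * feature bits; if feature bit 7 (0x80), presumably a chip-specific
	 * workaround, is both implemented and controllable, enable it via
	 * PAL_PROC_SET_FEATURES.
	 */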
#ifdef CONFIG_MCKINLEY
	{
#define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (my_cpu_data->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80)) {
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
			}
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize default control register to defer all speculative faults.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_set_dcr(  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
		     | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC);
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	ia64_mmu_init(my_cpu_data);

#ifdef CONFIG_IA32_SUPPORT
	/* initialize global ia32 state - CR0 and CR4 */
	asm volatile ("mov ar.cflg = %0" :: "r" (((ulong) IA32_CR4 << 32) | IA32_CR0));
#endif
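
	/*
	 * Bit 16 of the ITV, LRR, PMV and CMCV registers is the mask bit;
	 * writing vector 0x10000 therefore leaves each source masked off.
	 */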
	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_set_pmv(1 << 16);
	ia64_set_cmcv(1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_set_tpr(0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif
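
	/*
	 * Each mm context is encoded into region ids together with the
	 * 3-bit region number, which is why only rid_size - 3 bits are
	 * usable as context numbers below.
	 */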
	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, 0) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed, assuming 96 physical stacked regs\n");
		num_phys_stacked = 96;
	}
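	/*
	 * Record the size of the physical stacked register file in bytes,
	 * plus 8 (presumably to leave room for an RNAT collection slot);
	 * the low-level RSE handling code uses this value.
	 */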
	local_cpu_data->phys_stacked_size_p8 = num_phys_stacked*8 + 8;

	platform_cpu_init();
}
|