/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * Round an address down/up to a GRANULE boundary.
 */
#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1))
#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
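
/*
 * Note: IA64_GRANULE_SIZE is the unit in which the ia64 kernel maps memory
 * (typically 16MB), so the rounding above keeps each node's bootmem range
 * aligned to granule boundaries.
 */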

/*
 * Used to locate boot data (the boot-time pg_data_t for a node) prior to
 * initializing the node data area.
 */
#define BOOT_NODE_DATA(node) pg_data_ptr[node]

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define NODEDATA_ALIGN(addr, node) ((((addr) + 1024*1024-1) & ~(1024*1024-1)) + (node)*PAGE_SIZE)
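/*
 * Example (assuming a 16KB PAGE_SIZE): NODEDATA_ALIGN(0x4280000, 2) rounds
 * 0x4280000 up to the next 1MB boundary (0x4300000) and adds 2*PAGE_SIZE,
 * yielding 0x4308000.
 */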
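
/*
 * Boot-time bookkeeping, one entry per node:
 *   boot_node_data[]   - pointer to the node's ia64_node_data
 *   pg_data_ptr[]      - pointer to the node's pg_data_t
 *   bdata[]            - bootmem descriptor covering the node's memory
 *   boot_pernode[]     - physical base of the node's per-node data area
 *   boot_pernodesize[] - size of that area in bytes
 */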
static struct ia64_node_data *boot_node_data[NR_NODES] __initdata;
static pg_data_t *pg_data_ptr[NR_NODES] __initdata;
static bootmem_data_t bdata[NR_NODES] __initdata;
static unsigned long boot_pernode[NR_NODES] __initdata;
static unsigned long boot_pernodesize[NR_NODES] __initdata;

extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
extern struct cpuinfo_ia64 *_cpu_data[NR_CPUS];


/*
 * We allocate one of the bootmem_data_t structs for each piece of memory
 * that we wish to treat as a contiguous block. Each such block must start
 * on a GRANULE boundary. Multiple banks per node are not supported.
 * (Note: on SN2, all memory on a node is treated as a single bank.
 * Holes within the bank are supported. This works because memory
 * from different banks is not interleaved. The bootmap bitmap
 * for the node is somewhat large but not too large.)
 */
static int __init
build_maps(unsigned long start, unsigned long end, int node)
{
        bootmem_data_t *bdp;
        unsigned long cstart, epfn;

        bdp = &bdata[node];
        epfn = GRANULEROUNDUP(__pa(end)) >> PAGE_SHIFT;
        cstart = GRANULEROUNDDOWN(__pa(start));
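
        /*
         * Establish or widen this node's bootmem range so that it covers
         * the granule-rounded block we were just handed.
         */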
        if (!bdp->node_low_pfn) {
                bdp->node_boot_start = cstart;
                bdp->node_low_pfn = epfn;
        } else {
                bdp->node_boot_start = min(cstart, bdp->node_boot_start);
                bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
        }

        min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
        max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);

        return 0;
}


/*
 * Count the number of cpus on the node.
 */
static __inline__ int
count_cpus(int node)
{
        int cpu, n=0;

        for (cpu=0; cpu < NR_CPUS; cpu++)
                if (node == node_cpuid[cpu].nid)
                        n++;
        return n;
}


/*
 * Find space on each node for the bootmem map & other per-node data structures.
 *
 * Called by efi_memmap_walk to find boot memory on each node. Note that
 * only blocks that are free are passed to this routine (currently filtered by
 * filter_rsvd_memory).
 */
static int __init
find_pernode_space(unsigned long start, unsigned long end, int node)
{
        unsigned long mapsize, pages, epfn, map=0, cpu, cpus;
        unsigned long pernodesize=0, pernode;
        unsigned long cpu_data, mmu_gathers;
        unsigned long pstart, length;
        bootmem_data_t *bdp;

        pstart = __pa(start);
        length = end - start;
        epfn = (pstart + length) >> PAGE_SHIFT;
        bdp = &bdata[node];

        if (pstart < bdp->node_boot_start || epfn > bdp->node_low_pfn)
                return 0;

        if (!boot_pernode[node]) {
                cpus = count_cpus(node);
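                /*
                 * Per-node data area layout, in allocation order:
                 *   - one page-aligned struct cpuinfo_ia64 per cpu on the node
                 *   - one cache-line aligned mmu_gather_t per cpu
                 *   - the node's pg_data_t
                 *   - the node's ia64_node_data
                 */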
                pernodesize += PAGE_ALIGN(sizeof(struct cpuinfo_ia64)) * cpus;
                pernodesize += L1_CACHE_ALIGN(sizeof(mmu_gather_t)) * cpus;
                pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
                pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
                pernodesize = PAGE_ALIGN(pernodesize);
                pernode = NODEDATA_ALIGN(pstart, node);

                if (pstart + length > (pernode + pernodesize)) {
                        boot_pernode[node] = pernode;
                        boot_pernodesize[node] = pernodesize;
                        memset(__va(pernode), 0, pernodesize);

                        cpu_data = pernode;
                        pernode += PAGE_ALIGN(sizeof(struct cpuinfo_ia64)) * cpus;

                        mmu_gathers = pernode;
                        pernode += L1_CACHE_ALIGN(sizeof(mmu_gather_t)) * cpus;

                        pg_data_ptr[node] = __va(pernode);
                        pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

                        boot_node_data[node] = __va(pernode);
                        pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

                        pg_data_ptr[node]->bdata = &bdata[node];
                        pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
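
                        /*
                         * Point each cpu on this node at its cpuinfo and
                         * mmu_gather slots within the per-node area.
                         */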
                        for (cpu=0; cpu < NR_CPUS; cpu++) {
                                if (node == node_cpuid[cpu].nid) {
                                        _cpu_data[cpu] = __va(cpu_data);
                                        _cpu_data[cpu]->node_data = boot_node_data[node];
                                        _cpu_data[cpu]->nodeid = node;
                                        _cpu_data[cpu]->mmu_gathers = __va(mmu_gathers);
                                        cpu_data += PAGE_ALIGN(sizeof(struct cpuinfo_ia64));
                                        mmu_gathers += L1_CACHE_ALIGN(sizeof(mmu_gather_t));
                                }
                        }
                }
        }

        pernode = boot_pernode[node];
        pernodesize = boot_pernodesize[node];
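
        /*
         * If the per-node area has been placed but the node's bootmem bitmap
         * has not, look for room for the bitmap in this block: either below
         * the per-node area or just above it.
         */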
        if (pernode && !bdp->node_bootmem_map) {
                pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
                mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

                if (pernode - pstart > mapsize)
                        map = pstart;
                else if (pstart + length - pernode - pernodesize > mapsize)
                        map = pernode + pernodesize;

                if (map) {
                        init_bootmem_node(
                                BOOT_NODE_DATA(node),
                                map>>PAGE_SHIFT,
                                bdp->node_boot_start>>PAGE_SHIFT,
                                bdp->node_low_pfn);
                }
        }

        return 0;
}


/*
 * Free available memory to the bootmem allocator.
 *
 * Note that only blocks that are free are passed to this routine (currently
 * filtered by filter_rsvd_memory).
 */
static int __init
discontig_free_bootmem_node(unsigned long start, unsigned long end, int node)
{
        free_bootmem_node(BOOT_NODE_DATA(node), __pa(start), end - start);

        return 0;
}


/*
 * Reserve the space used by the bootmem maps and the per-node data areas.
 */
static void __init
discontig_reserve_bootmem(void)
{
        int node;
        unsigned long base, size, pages;
        bootmem_data_t *bdp;

        for (node = 0; node < numnodes; node++) {
                bdp = BOOT_NODE_DATA(node)->bdata;

                pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
                size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
                base = __pa(bdp->node_bootmem_map);
                reserve_bootmem_node(BOOT_NODE_DATA(node), base, size);
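
                /* also reserve the per-node data area carved out in find_pernode_space() */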
                size = boot_pernodesize[node];
                base = __pa(boot_pernode[node]);
                reserve_bootmem_node(BOOT_NODE_DATA(node), base, size);
        }
}

/*
 * Initialize per-node data.
 *
 * Finish setting up the node data for node 0, then replicate it to the other
 * nodes so that each node has a local copy of the pointer tables.
 */
static void __init
initialize_pernode_data(void)
{
        int cpu, node;

        memcpy(boot_node_data[0]->pg_data_ptrs, pg_data_ptr, sizeof(pg_data_ptr));
        memcpy(boot_node_data[0]->node_data_ptrs, boot_node_data, sizeof(boot_node_data));
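
        /* copy node 0's fully populated ia64_node_data to every other node */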
        for (node=1; node < numnodes; node++) {
                memcpy(boot_node_data[node], boot_node_data[0], sizeof(struct ia64_node_data));
                boot_node_data[node]->node = node;
        }

        for (cpu=0; cpu < NR_CPUS; cpu++) {
                node = node_cpuid[cpu].nid;
                _cpu_data[cpu]->node_data = boot_node_data[node];
                _cpu_data[cpu]->nodeid = node;
        }
}


/*
 * Called early in boot to set up the boot memory allocator and to allocate
 * the node-local pg_data & node-directory data structures.
 */
void __init
discontig_mem_init(void)
{
        if (numnodes == 0) {
                printk("node info missing!\n");
                numnodes = 1;
        }

        min_low_pfn = -1;
        max_low_pfn = 0;
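
        /*
         * Three passes over the (reserved-filtered) EFI memory map: size each
         * node's bootmem range, carve out the per-node data area and bootmem
         * bitmap, then hand the remaining free memory to the bootmem allocator.
         */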
        efi_memmap_walk(filter_rsvd_memory, build_maps);
        efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
        efi_memmap_walk(filter_rsvd_memory, discontig_free_bootmem_node);

        discontig_reserve_bootmem();
        initialize_pernode_data();
}