/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <[email protected]>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/sparsemem.h>
#include <asm/lmb.h>
#include <asm/system.h>
#include <asm/smp.h>
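
/*
 * Both flags below are driven by the "numa=" early parameter parsed
 * in early_numa() at the bottom of this file: "numa=off" clears
 * numa_enabled and "numa=debug" turns on the dbg() messages.
 */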
static int numa_enabled = 1;

static int numa_debug;
/* do/while keeps dbg() safe inside un-braced if/else constructs */
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;

static void __cpuinit map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

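/*
 * Map a logical cpu back to its device tree node: match the hardware id
 * against the "ibm,ppc-interrupt-server#s" list (typically one entry
 * per hardware thread), falling back to the "reg" property on device
 * trees that lack it.
 */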
static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	const unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		len = len / sizeof(u32);

		if (interrupt_server && (len > 0)) {
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = get_property(cpu_node, "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return get_property(dev, "ibm,associativity", NULL);
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	if (min_common_depth == -1)
		goto out;

	tmp = of_get_associativity(device);
	if (!tmp)
		goto out;

	if (tmp[0] >= min_common_depth)
		nid = tmp[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;
out:
	return nid;
}

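/*
 * Illustrative example (made-up values): with min_common_depth == 4
 * and an "ibm,associativity" property of { 5, 0, 0, 0, 2, 1 }, five
 * entries follow the leading count, tmp[0] >= min_common_depth holds,
 * and the extracted node id is tmp[4] == 2.
 */
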
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

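/*
 * A device node without its own "ibm,associativity" property thus
 * inherits the node id of the closest ancestor that has one, which is
 * why of_node_to_nid() walks towards the root rather than giving up
 * on the first miss.
 */
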
/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <[email protected]>
 */
static int __init find_min_common_depth(void)
{
	int depth;
	const unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * this property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes. The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	ref_points = get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	/* we dereference ref_points[1], so require at least two cells */
	if (ref_points && (len >= 2 * sizeof(unsigned int))) {
		depth = ref_points[1];
	} else {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = prom_n_addr_cells(memory);
	*n_size_cells = prom_n_size_cells(memory);
	of_node_put(memory);
}

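/*
 * prom_n_addr_cells()/prom_n_size_cells() resolve the standard Open
 * Firmware #address-cells/#size-cells values in effect for the memory
 * node; read_n_cells() below consumes that many 32-bit cells per value.
 */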
static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

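/*
 * Worked example: with n == 2 and *buf pointing at the two cells
 * { 0x00000001, 0x00000002 }, read_n_cells() returns 0x100000002 and
 * leaves *buf advanced past both cells.
 */
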
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = any_online_node(NODE_MASK_ALL);
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
				       unsigned long action,
				       void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */

	if (! memory_limit)
		return size;

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const unsigned int *lm, *dm, *aa;
	unsigned int ls, ld, la;
	unsigned int n, aam, aalen;
	unsigned long lmb_size, size;
	unsigned long start;	/* physical address; may exceed 32 bits */
	int nid, default_nid = 0;
	unsigned int ai, flags;

	lm = get_property(memory, "ibm,lmb-size", &ls);
	dm = get_property(memory, "ibm,dynamic-memory", &ld);
	aa = get_property(memory, "ibm,associativity-lookup-arrays", &la);
	if (!lm || !dm || !aa ||
	    ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
	    la < 2 * sizeof(unsigned int))
		return;

	lmb_size = read_n_cells(n_mem_size_cells, &lm);
	n = *dm++;		/* number of LMBs */
	aam = *aa++;		/* number of associativity lists */
	aalen = *aa++;		/* length of each associativity list */
	if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
	    la < (aam * aalen + 2) * sizeof(unsigned int))
		return;

	for (; n != 0; --n) {
		start = read_n_cells(n_mem_addr_cells, &dm);
		ai = dm[2];
		flags = dm[3];
		dm += 4;
		/* 0x80 == reserved, 0x8 = assigned to us */
		if ((flags & 0x80) || !(flags & 0x8))
			continue;
		nid = default_nid;
		/* flags & 0x40 means associativity index is invalid */
		if (min_common_depth > 0 && min_common_depth <= aalen &&
		    (flags & 0x40) == 0 && ai < aam) {
			/* this is like of_node_to_nid_single */
			nid = aa[ai * aalen + min_common_depth - 1];
			if (nid == 0xffff || nid >= MAX_NUMNODES)
				nid = default_nid;
		}
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, lmb_size);
		if (!size)
			continue;

		add_active_range(nid, start >> PAGE_SHIFT,
				 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
	}
}

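/*
 * Layout consumed above for each "ibm,dynamic-memory" entry after the
 * leading LMB count: n_mem_addr_cells cells of physical start address,
 * then four further cells of which dm[2] is the index into the
 * associativity lookup arrays and dm[3] carries the flags word.
 */
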
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = find_cpu_node(i);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = get_property(memory,
					   "linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

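/*
 * For the ranges computation above: with n_mem_addr_cells == 2 and
 * n_mem_size_cells == 2, a 32-byte "reg" property is len >> 2 == 8
 * cells, i.e. 8 / (2 + 2) == 2 (start, size) pairs.
 */
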
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int i;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for (i = 0; i < lmb.memory.cnt; ++i) {
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}
	node_set_online(0);
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", NR_CPUS - 1);
		printk("\n");
	}
}

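/*
 * Sample output: a node whose cpumask covers cpus 0-3 and 8-11 is
 * printed as "Node 0 CPUs: 0-3 8-11"; a lone cpu prints without a
 * "-<last>" suffix since the range is only closed when count > 1.
 */
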
static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	int new_nid;
	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret)
		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, new_nid);

		ret = __pa(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return (void *)ret;
}

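/*
 * Note on the new_nid < nid test above: do_init_bootmem() initializes
 * nodes in ascending order, so memory that lands on a lower-numbered
 * node already belongs to a live bootmem arena and must be handed out
 * by __alloc_bootmem_node() instead of being taken from the lmb.
 */
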
static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

void __init do_init_bootmem(void)
{
	int nid;
	unsigned int i;

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		NODE_DATA(nid) = __va(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_paddr = (unsigned long)careful_allocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);

		/* Mark reserved regions on this node */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].base;
			unsigned long size = lmb.reserved.region[i].size;
			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
			unsigned long end_paddr = end_pfn << PAGE_SHIFT;

			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}

		sparse_memory_present_with_active_regions(nid);
	}
}

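/*
 * Per-node sequence above, in brief: allocate the pglist_data as
 * node-locally as careful_allocation() can manage, allocate and zero a
 * bootmem bitmap (one bit per page, sized via bootmem_bootmap_pages()),
 * release the node's active ranges to bootmem as free pages, then
 * re-reserve every lmb reservation that intersects the node.
 */
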
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	nodemask_t nodes;
	int default_nid = any_online_node(NODE_MASK_ALL);
	int nid;

	if (!numa_enabled || (min_common_depth < 0))
		return default_nid;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);
		nid = of_node_to_nid_single(memory);

		/* Domains not present at boot default to 0 */
		if (nid < 0 || !node_online(nid))
			nid = default_nid;

		if ((scn_addr >= start) && (scn_addr < (start + size))) {
			of_node_put(memory);
			goto got_nid;
		}

		if (--ranges)		/* process all ranges in cell */
			goto ha_new_range;
	}
	BUG();	/* section address should be found above */
	return 0;

	/* Temporary code to ensure that returned node is not empty */
got_nid:
	nodes_setall(nodes);
	while (NODE_DATA(nid)->node_spanned_pages == 0) {
		node_clear(nid, nodes);
		nid = any_online_node(nodes);
	}
	return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */