/*
 * NUMA parameter parsing routines
 *
 * Copyright (c) 2014 Fujitsu Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysemu/numa.h"
#include "exec/cpu-common.h"
#include "qemu/bitmap.h"
#include "qom/cpu.h"
#include "qemu/error-report.h"
#include "include/exec/cpu-common.h" /* for RAM_ADDR_FMT */
#include "qapi-visit.h"
#include "qapi/opts-visitor.h"
#include "qapi/dealloc-visitor.h"
#include "hw/boards.h"
#include "sysemu/hostmem.h"
#include "qmp-commands.h"
#include "hw/mem/pc-dimm.h"
#include "qemu/option.h"
#include "qemu/config-file.h"

QemuOptsList qemu_numa_opts = {
    .name = "numa",
    .implied_opt_name = "type",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head),
    .desc = { { 0 } } /* validated with OptsVisitor */
};

static int have_memdevs = -1;
static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
                             * For all nodes, nodeid < max_numa_nodeid
                             */
int nb_numa_nodes;
NodeInfo numa_info[MAX_NODES];

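/*
 * Record that the [addr, addr + size) range of guest RAM belongs to
 * NUMA @node, by prepending it to the node's address range list.
 */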
void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
{
    struct numa_addr_range *range;

    /*
     * Memory-less nodes can come here with 0 size, in which case
     * there is nothing to do.
     */
    if (!size) {
        return;
    }

    range = g_malloc0(sizeof(*range));
    range->mem_start = addr;
    range->mem_end = addr + size - 1;
    QLIST_INSERT_HEAD(&numa_info[node].addr, range, entry);
}

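/*
 * Remove the range that numa_set_mem_node_id() recorded for @node,
 * matching on the exact [addr, addr + size) boundaries.
 */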
void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
{
    struct numa_addr_range *range, *next;

    QLIST_FOREACH_SAFE(range, &numa_info[node].addr, entry, next) {
        if (addr == range->mem_start && (addr + size - 1) == range->mem_end) {
            QLIST_REMOVE(range, entry);
            g_free(range);
            return;
        }
    }
}

static void numa_set_mem_ranges(void)
{
    int i;
    ram_addr_t mem_start = 0;

    /*
     * Deduce the start address of each node and use it to store
     * the address range info in the numa_info address range list.
     */
    for (i = 0; i < nb_numa_nodes; i++) {
        numa_set_mem_node_id(mem_start, numa_info[i].node_mem, i);
        mem_start += numa_info[i].node_mem;
    }
}

/*
 * Check if @addr falls under NUMA @node.
 */
static bool numa_addr_belongs_to_node(ram_addr_t addr, uint32_t node)
{
    struct numa_addr_range *range;

    QLIST_FOREACH(range, &numa_info[node].addr, entry) {
        if (addr >= range->mem_start && addr <= range->mem_end) {
            return true;
        }
    }
    return false;
}

/*
 * Given an address, return the index of the NUMA node to which the
 * address belongs.
 */
uint32_t numa_get_node(ram_addr_t addr, Error **errp)
{
    uint32_t i;

    /* For non-NUMA configurations, check if the addr falls under node 0 */
    if (!nb_numa_nodes) {
        if (numa_addr_belongs_to_node(addr, 0)) {
            return 0;
        }
    }

    for (i = 0; i < nb_numa_nodes; i++) {
        if (numa_addr_belongs_to_node(addr, i)) {
            return i;
        }
    }

    error_setg(errp, "Address 0x" RAM_ADDR_FMT " doesn't belong to any "
               "NUMA node", addr);
    return -1;
}

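/*
 * Apply one "-numa node,..." option to numa_info[].  A typical invocation
 * (for illustration only) looks like:
 *
 *   -numa node,nodeid=0,cpus=0-3,mem=2G
 *   -numa node,nodeid=1,cpus=4-7,memdev=mem1
 *
 * mem= and memdev= are mutually exclusive, and memdev= must be used for
 * either all nodes or none.
 */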
static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp)
{
    uint16_t nodenr;
    uint16List *cpus = NULL;

    if (node->has_nodeid) {
        nodenr = node->nodeid;
    } else {
        nodenr = nb_numa_nodes;
    }

    if (nodenr >= MAX_NODES) {
        error_setg(errp, "Max number of NUMA nodes reached: %"
                   PRIu16 "", nodenr);
        return;
    }

    if (numa_info[nodenr].present) {
        error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr);
        return;
    }

    for (cpus = node->cpus; cpus; cpus = cpus->next) {
        if (cpus->value >= max_cpus) {
            error_setg(errp,
                       "CPU index (%" PRIu16 ")"
                       " should be smaller than maxcpus (%d)",
                       cpus->value, max_cpus);
            return;
        }
        bitmap_set(numa_info[nodenr].node_cpu, cpus->value, 1);
    }

    if (node->has_mem && node->has_memdev) {
        error_setg(errp, "cannot specify both mem= and memdev=");
        return;
    }

    if (have_memdevs == -1) {
        have_memdevs = node->has_memdev;
    }
    if (node->has_memdev != have_memdevs) {
        error_setg(errp, "memdev option must be specified for either "
                   "all or no nodes");
        return;
    }

    if (node->has_mem) {
        uint64_t mem_size = node->mem;
        const char *mem_str = qemu_opt_get(opts, "mem");
        /* Fix up legacy suffix-less format */
        if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) {
            mem_size <<= 20;
        }
        numa_info[nodenr].node_mem = mem_size;
    }
    if (node->has_memdev) {
        Object *o;
        o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL);
        if (!o) {
            error_setg(errp, "memdev=%s is ambiguous", node->memdev);
            return;
        }

        object_ref(o);
        numa_info[nodenr].node_mem = object_property_get_int(o, "size", NULL);
        numa_info[nodenr].node_memdev = MEMORY_BACKEND(o);
    }
    numa_info[nodenr].present = true;
    max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1);
}

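/*
 * qemu_opts_foreach() callback: convert one -numa QemuOpts into a
 * NumaOptions QAPI object via the OptsVisitor and dispatch on its type.
 * Returns 0 on success, -1 (after reporting the error) on failure.
 */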
static int parse_numa(void *opaque, QemuOpts *opts, Error **errp)
{
    NumaOptions *object = NULL;
    Error *err = NULL;

    {
        OptsVisitor *ov = opts_visitor_new(opts);
        visit_type_NumaOptions(opts_get_visitor(ov), &object, NULL, &err);
        opts_visitor_cleanup(ov);
    }

    if (err) {
        goto error;
    }

    switch (object->type) {
    case NUMA_OPTIONS_KIND_NODE:
        numa_node_parse(object->u.node, opts, &err);
        if (err) {
            goto error;
        }
        nb_numa_nodes++;
        break;
    default:
        abort();
    }

    return 0;

error:
    error_report_err(err);

    if (object) {
        QapiDeallocVisitor *dv = qapi_dealloc_visitor_new();
        visit_type_NumaOptions(qapi_dealloc_get_visitor(dv),
                               &object, NULL, NULL);
        qapi_dealloc_visitor_cleanup(dv);
    }

    return -1;
}

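/*
 * Render the CPU indexes set in @cpus as a space-separated string,
 * e.g. "0 1 5".  The caller owns the returned buffer.
 */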
static char *enumerate_cpus(unsigned long *cpus, int max_cpus)
{
    int cpu;
    bool first = true;
    GString *s = g_string_new(NULL);

    for (cpu = find_first_bit(cpus, max_cpus);
         cpu < max_cpus;
         cpu = find_next_bit(cpus, max_cpus, cpu + 1)) {
        g_string_append_printf(s, "%s%d", first ? "" : " ", cpu);
        first = false;
    }
    return g_string_free(s, FALSE);
}

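/*
 * Ensure every CPU is assigned to at most one NUMA node.  Overlapping
 * assignments are fatal; CPUs left out of all nodes only warn.
 */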
static void validate_numa_cpus(void)
{
    int i;
    DECLARE_BITMAP(seen_cpus, MAX_CPUMASK_BITS);

    bitmap_zero(seen_cpus, MAX_CPUMASK_BITS);
    for (i = 0; i < nb_numa_nodes; i++) {
        if (bitmap_intersects(seen_cpus, numa_info[i].node_cpu,
                              MAX_CPUMASK_BITS)) {
            bitmap_and(seen_cpus, seen_cpus,
                       numa_info[i].node_cpu, MAX_CPUMASK_BITS);
            error_report("CPU(s) present in multiple NUMA nodes: %s",
                         enumerate_cpus(seen_cpus, max_cpus));
            exit(EXIT_FAILURE);
        }
        bitmap_or(seen_cpus, seen_cpus,
                  numa_info[i].node_cpu, MAX_CPUMASK_BITS);
    }

    if (!bitmap_full(seen_cpus, max_cpus)) {
        char *msg;
        bitmap_complement(seen_cpus, seen_cpus, max_cpus);
        msg = enumerate_cpus(seen_cpus, max_cpus);
        error_report("warning: CPU(s) not present in any NUMA nodes: %s", msg);
        error_report("warning: All CPU(s) up to maxcpus should be described "
                     "in NUMA config");
        g_free(msg);
    }
}

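/*
 * Main entry point for -numa handling, called once at startup: parse all
 * -numa options, reject sparse node IDs, distribute RAM across nodes when
 * no per-node size was given, and assign VCPUs to nodes by default.
 */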
void parse_numa_opts(MachineClass *mc)
{
    int i;

    if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, NULL, NULL)) {
        exit(1);
    }

    assert(max_numa_nodeid <= MAX_NODES);

    /* No support for sparse NUMA node IDs yet: */
    for (i = max_numa_nodeid - 1; i >= 0; i--) {
        /* Report large node IDs first, to make mistakes easier to spot */
        if (!numa_info[i].present) {
            error_report("numa: Node ID missing: %d", i);
            exit(1);
        }
    }

    /* This must always be true if all nodes are present: */
    assert(nb_numa_nodes == max_numa_nodeid);

    if (nb_numa_nodes > 0) {
        uint64_t numa_total;

        if (nb_numa_nodes > MAX_NODES) {
            nb_numa_nodes = MAX_NODES;
        }

        /* If no memory size is given for any node, assume the default case
         * and distribute the available memory equally across all nodes
         */
        for (i = 0; i < nb_numa_nodes; i++) {
            if (numa_info[i].node_mem != 0) {
                break;
            }
        }
        if (i == nb_numa_nodes) {
            uint64_t usedmem = 0;

            /* On Linux, each node's boundary has to be 8MB aligned;
             * the final node gets the rest.
             */
            for (i = 0; i < nb_numa_nodes - 1; i++) {
                numa_info[i].node_mem = (ram_size / nb_numa_nodes) &
                                        ~((1UL << 23) - 1);
                usedmem += numa_info[i].node_mem;
            }
            numa_info[i].node_mem = ram_size - usedmem;
        }

        numa_total = 0;
        for (i = 0; i < nb_numa_nodes; i++) {
            numa_total += numa_info[i].node_mem;
        }
        if (numa_total != ram_size) {
            error_report("total memory for NUMA nodes (0x%" PRIx64 ")"
                         " should equal RAM size (0x" RAM_ADDR_FMT ")",
                         numa_total, ram_size);
            exit(1);
        }

        for (i = 0; i < nb_numa_nodes; i++) {
            QLIST_INIT(&numa_info[i].addr);
        }

        numa_set_mem_ranges();

        for (i = 0; i < nb_numa_nodes; i++) {
            if (!bitmap_empty(numa_info[i].node_cpu, MAX_CPUMASK_BITS)) {
                break;
            }
        }
        /* Historically VCPUs were assigned in round-robin order to NUMA
         * nodes. However, this can confuse guests when cores/threads of
         * a multicore CPU appear on different nodes. So allow boards to
         * override the default distribution rule by grouping VCPUs by
         * socket, so that VCPUs from the same socket end up on the same
         * node.
         */
        if (i == nb_numa_nodes) {
            for (i = 0; i < max_cpus; i++) {
                unsigned node_id = i % nb_numa_nodes;
                if (mc->cpu_index_to_socket_id) {
                    node_id = mc->cpu_index_to_socket_id(i) % nb_numa_nodes;
                }

                set_bit(i, numa_info[node_id].node_cpu);
            }
        }

        validate_numa_cpus();
    } else {
        numa_set_mem_node_id(0, ram_size, 0);
    }
}

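/*
 * After the machine is built, propagate the node assignment from
 * numa_info[].node_cpu into each CPUState's numa_node field.
 */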
void numa_post_machine_init(void)
{
    CPUState *cpu;
    int i;

    CPU_FOREACH(cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, numa_info[i].node_cpu)) {
                cpu->numa_node = i;
            }
        }
    }
}

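/*
 * Allocate guest RAM as a single region, honouring -mem-path (Linux only)
 * with a fallback to normal allocation unless -mem-prealloc was given.
 */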
static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner,
                                           const char *name,
                                           uint64_t ram_size)
{
    if (mem_path) {
#ifdef __linux__
        Error *err = NULL;
        memory_region_init_ram_from_file(mr, owner, name, ram_size, false,
                                         mem_path, &err);
        if (err) {
            error_report_err(err);
            if (mem_prealloc) {
                exit(1);
            }

            /* Legacy behavior: if allocation failed, fall back to
             * regular RAM allocation.
             */
            memory_region_init_ram(mr, owner, name, ram_size, &error_fatal);
        }
#else
        error_report("-mem-path not supported on this host");
        exit(1);
#endif
    } else {
        memory_region_init_ram(mr, owner, name, ram_size, &error_fatal);
    }
    vmstate_register_ram_global(mr);
}

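/*
 * Allocate guest RAM for the machine.  Without memdev-based -numa options
 * this is a single RAM region; with them, the backends are mapped back to
 * back as subregions of an empty container, one per node.  A matching
 * command line (for illustration only) could be:
 *
 *   -object memory-backend-ram,id=mem0,size=2G \
 *   -numa node,nodeid=0,memdev=mem0
 */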
void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
                                          const char *name,
                                          uint64_t ram_size)
{
    uint64_t addr = 0;
    int i;

    if (nb_numa_nodes == 0 || !have_memdevs) {
        allocate_system_memory_nonnuma(mr, owner, name, ram_size);
        return;
    }

    memory_region_init(mr, owner, name, ram_size);
    for (i = 0; i < MAX_NODES; i++) {
        uint64_t size = numa_info[i].node_mem;
        HostMemoryBackend *backend = numa_info[i].node_memdev;
        if (!backend) {
            continue;
        }
        MemoryRegion *seg = host_memory_backend_get_memory(backend,
                                                           &error_fatal);

        if (memory_region_is_mapped(seg)) {
            char *path = object_get_canonical_path_component(OBJECT(backend));
            error_report("memory backend %s is used multiple times. Each "
                         "-numa option must use a different memdev value.",
                         path);
            exit(1);
        }

        memory_region_add_subregion(mr, addr, seg);
        vmstate_register_ram_global(seg);
        addr += size;
    }
}

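/*
 * Add the size of each hot-plugged DIMM to the node_mem[] entry of the
 * node it was plugged into.
 */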
static void numa_stat_memory_devices(uint64_t node_mem[])
{
    MemoryDeviceInfoList *info_list = NULL;
    MemoryDeviceInfoList **prev = &info_list;
    MemoryDeviceInfoList *info;

    qmp_pc_dimm_device_list(qdev_get_machine(), &prev);
    for (info = info_list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value) {
            switch (value->type) {
            case MEMORY_DEVICE_INFO_KIND_DIMM:
                node_mem[value->u.dimm->node] += value->u.dimm->size;
                break;
            default:
                break;
            }
        }
    }
    qapi_free_MemoryDeviceInfoList(info_list);
}

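/*
 * Fill node_mem[] with the total memory of each node: the configured
 * node_mem plus any hot-plugged memory devices.  No-op without NUMA.
 */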
void query_numa_node_mem(uint64_t node_mem[])
{
    int i;

    if (nb_numa_nodes <= 0) {
        return;
    }

    numa_stat_memory_devices(node_mem);
    for (i = 0; i < nb_numa_nodes; i++) {
        node_mem[i] += numa_info[i].node_mem;
    }
}

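/*
 * object_child_foreach() callback for qmp_query_memdev(): if @obj is a
 * host memory backend, prepend its properties to the result list.
 */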
static int query_memdev(Object *obj, void *opaque)
{
    MemdevList **list = opaque;
    MemdevList *m = NULL;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        m = g_malloc0(sizeof(*m));

        m->value = g_malloc0(sizeof(*m->value));

        m->value->size = object_property_get_int(obj, "size",
                                                 &error_abort);
        m->value->merge = object_property_get_bool(obj, "merge",
                                                   &error_abort);
        m->value->dump = object_property_get_bool(obj, "dump",
                                                  &error_abort);
        m->value->prealloc = object_property_get_bool(obj,
                                                      "prealloc",
                                                      &error_abort);
        m->value->policy = object_property_get_enum(obj,
                                                    "policy",
                                                    "HostMemPolicy",
                                                    &error_abort);
        object_property_get_uint16List(obj, "host-nodes",
                                       &m->value->host_nodes,
                                       &error_abort);

        m->next = *list;
        *list = m;
    }

    return 0;
}

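/*
 * QMP handler for the query-memdev command.  An illustrative exchange
 * (return values depend on the configured backends):
 *
 *   -> { "execute": "query-memdev" }
 *   <- { "return": [ { "size": 536870912, "merge": false, "dump": true,
 *                      "prealloc": false, "policy": "default",
 *                      "host-nodes": [] } ] }
 */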
MemdevList *qmp_query_memdev(Error **errp)
{
    Object *obj = object_get_objects_root();
    MemdevList *list = NULL;

    object_child_foreach(obj, query_memdev, &list);
    return list;
}