/*
 * NUMA parameter parsing routines
 *
 * Copyright (c) 2014 Fujitsu Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
24 | ||
#include "qemu/osdep.h"
#include "sysemu/numa.h"
#include "exec/cpu-common.h"
#include "exec/ramlist.h"
#include "qemu/bitmap.h"
#include "qom/cpu.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-visit-misc.h"
#include "hw/boards.h"
#include "sysemu/hostmem.h"
#include "hw/mem/pc-dimm.h"
#include "hw/mem/memory-device.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/cutils.h"
43 | ||
/* Option group backing the "-numa ..." command line option.  The .desc
 * table is deliberately empty: entries are validated later with an
 * OptsVisitor against the QAPI NumaOptions schema. */
QemuOptsList qemu_numa_opts = {
    .name = "numa",
    .implied_opt_name = "type",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head),
    .desc = { { 0 } } /* validated with OptsVisitor */
};
50 | ||
/* Tri-state: -1 = no node parsed yet; afterwards a boolean recording
 * whether nodes use memdev= (all nodes must agree). */
static int have_memdevs = -1;
static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
                             * For all nodes, nodeid < max_numa_nodeid
                             */
int nb_numa_nodes;          /* number of NUMA nodes declared so far */
bool have_numa_distance;    /* true once any '-numa dist' was parsed */
NodeInfo numa_info[MAX_NODES];
58 | ||
59 | ||
60 | static void parse_numa_node(MachineState *ms, NumaNodeOptions *node, | |
61 | Error **errp) | |
62 | { | |
63 | uint16_t nodenr; | |
64 | uint16List *cpus = NULL; | |
65 | MachineClass *mc = MACHINE_GET_CLASS(ms); | |
66 | ||
67 | if (node->has_nodeid) { | |
68 | nodenr = node->nodeid; | |
69 | } else { | |
70 | nodenr = nb_numa_nodes; | |
71 | } | |
72 | ||
73 | if (nodenr >= MAX_NODES) { | |
74 | error_setg(errp, "Max number of NUMA nodes reached: %" | |
75 | PRIu16 "", nodenr); | |
76 | return; | |
77 | } | |
78 | ||
79 | if (numa_info[nodenr].present) { | |
80 | error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr); | |
81 | return; | |
82 | } | |
83 | ||
84 | if (!mc->cpu_index_to_instance_props || !mc->get_default_cpu_node_id) { | |
85 | error_report("NUMA is not supported by this machine-type"); | |
86 | exit(1); | |
87 | } | |
88 | for (cpus = node->cpus; cpus; cpus = cpus->next) { | |
89 | CpuInstanceProperties props; | |
90 | if (cpus->value >= max_cpus) { | |
91 | error_setg(errp, | |
92 | "CPU index (%" PRIu16 ")" | |
93 | " should be smaller than maxcpus (%d)", | |
94 | cpus->value, max_cpus); | |
95 | return; | |
96 | } | |
97 | props = mc->cpu_index_to_instance_props(ms, cpus->value); | |
98 | props.node_id = nodenr; | |
99 | props.has_node_id = true; | |
100 | machine_set_cpu_numa_node(ms, &props, &error_fatal); | |
101 | } | |
102 | ||
103 | if (node->has_mem && node->has_memdev) { | |
104 | error_setg(errp, "cannot specify both mem= and memdev="); | |
105 | return; | |
106 | } | |
107 | ||
108 | if (have_memdevs == -1) { | |
109 | have_memdevs = node->has_memdev; | |
110 | } | |
111 | if (node->has_memdev != have_memdevs) { | |
112 | error_setg(errp, "memdev option must be specified for either " | |
113 | "all or no nodes"); | |
114 | return; | |
115 | } | |
116 | ||
117 | if (node->has_mem) { | |
118 | numa_info[nodenr].node_mem = node->mem; | |
119 | } | |
120 | if (node->has_memdev) { | |
121 | Object *o; | |
122 | o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL); | |
123 | if (!o) { | |
124 | error_setg(errp, "memdev=%s is ambiguous", node->memdev); | |
125 | return; | |
126 | } | |
127 | ||
128 | object_ref(o); | |
129 | numa_info[nodenr].node_mem = object_property_get_uint(o, "size", NULL); | |
130 | numa_info[nodenr].node_memdev = MEMORY_BACKEND(o); | |
131 | } | |
132 | numa_info[nodenr].present = true; | |
133 | max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1); | |
134 | nb_numa_nodes++; | |
135 | } | |
136 | ||
137 | static void parse_numa_distance(NumaDistOptions *dist, Error **errp) | |
138 | { | |
139 | uint16_t src = dist->src; | |
140 | uint16_t dst = dist->dst; | |
141 | uint8_t val = dist->val; | |
142 | ||
143 | if (src >= MAX_NODES || dst >= MAX_NODES) { | |
144 | error_setg(errp, "Parameter '%s' expects an integer between 0 and %d", | |
145 | src >= MAX_NODES ? "src" : "dst", MAX_NODES - 1); | |
146 | return; | |
147 | } | |
148 | ||
149 | if (!numa_info[src].present || !numa_info[dst].present) { | |
150 | error_setg(errp, "Source/Destination NUMA node is missing. " | |
151 | "Please use '-numa node' option to declare it first."); | |
152 | return; | |
153 | } | |
154 | ||
155 | if (val < NUMA_DISTANCE_MIN) { | |
156 | error_setg(errp, "NUMA distance (%" PRIu8 ") is invalid, " | |
157 | "it shouldn't be less than %d.", | |
158 | val, NUMA_DISTANCE_MIN); | |
159 | return; | |
160 | } | |
161 | ||
162 | if (src == dst && val != NUMA_DISTANCE_MIN) { | |
163 | error_setg(errp, "Local distance of node %d should be %d.", | |
164 | src, NUMA_DISTANCE_MIN); | |
165 | return; | |
166 | } | |
167 | ||
168 | numa_info[src].distance[dst] = val; | |
169 | have_numa_distance = true; | |
170 | } | |
171 | ||
172 | static | |
173 | void set_numa_options(MachineState *ms, NumaOptions *object, Error **errp) | |
174 | { | |
175 | Error *err = NULL; | |
176 | ||
177 | switch (object->type) { | |
178 | case NUMA_OPTIONS_TYPE_NODE: | |
179 | parse_numa_node(ms, &object->u.node, &err); | |
180 | if (err) { | |
181 | goto end; | |
182 | } | |
183 | break; | |
184 | case NUMA_OPTIONS_TYPE_DIST: | |
185 | parse_numa_distance(&object->u.dist, &err); | |
186 | if (err) { | |
187 | goto end; | |
188 | } | |
189 | break; | |
190 | case NUMA_OPTIONS_TYPE_CPU: | |
191 | if (!object->u.cpu.has_node_id) { | |
192 | error_setg(&err, "Missing mandatory node-id property"); | |
193 | goto end; | |
194 | } | |
195 | if (!numa_info[object->u.cpu.node_id].present) { | |
196 | error_setg(&err, "Invalid node-id=%" PRId64 ", NUMA node must be " | |
197 | "defined with -numa node,nodeid=ID before it's used with " | |
198 | "-numa cpu,node-id=ID", object->u.cpu.node_id); | |
199 | goto end; | |
200 | } | |
201 | ||
202 | machine_set_cpu_numa_node(ms, qapi_NumaCpuOptions_base(&object->u.cpu), | |
203 | &err); | |
204 | break; | |
205 | default: | |
206 | abort(); | |
207 | } | |
208 | ||
209 | end: | |
210 | error_propagate(errp, err); | |
211 | } | |
212 | ||
213 | int parse_numa(void *opaque, QemuOpts *opts, Error **errp) | |
214 | { | |
215 | NumaOptions *object = NULL; | |
216 | MachineState *ms = MACHINE(opaque); | |
217 | Error *err = NULL; | |
218 | Visitor *v = opts_visitor_new(opts); | |
219 | ||
220 | visit_type_NumaOptions(v, NULL, &object, &err); | |
221 | visit_free(v); | |
222 | if (err) { | |
223 | goto end; | |
224 | } | |
225 | ||
226 | /* Fix up legacy suffix-less format */ | |
227 | if ((object->type == NUMA_OPTIONS_TYPE_NODE) && object->u.node.has_mem) { | |
228 | const char *mem_str = qemu_opt_get(opts, "mem"); | |
229 | qemu_strtosz_MiB(mem_str, NULL, &object->u.node.mem); | |
230 | } | |
231 | ||
232 | set_numa_options(ms, object, &err); | |
233 | ||
234 | end: | |
235 | qapi_free_NumaOptions(object); | |
236 | if (err) { | |
237 | error_report_err(err); | |
238 | return -1; | |
239 | } | |
240 | ||
241 | return 0; | |
242 | } | |
243 | ||
244 | /* If all node pair distances are symmetric, then only distances | |
245 | * in one direction are enough. If there is even one asymmetric | |
246 | * pair, though, then all distances must be provided. The | |
247 | * distance from a node to itself is always NUMA_DISTANCE_MIN, | |
248 | * so providing it is never necessary. | |
249 | */ | |
250 | static void validate_numa_distance(void) | |
251 | { | |
252 | int src, dst; | |
253 | bool is_asymmetrical = false; | |
254 | ||
255 | for (src = 0; src < nb_numa_nodes; src++) { | |
256 | for (dst = src; dst < nb_numa_nodes; dst++) { | |
257 | if (numa_info[src].distance[dst] == 0 && | |
258 | numa_info[dst].distance[src] == 0) { | |
259 | if (src != dst) { | |
260 | error_report("The distance between node %d and %d is " | |
261 | "missing, at least one distance value " | |
262 | "between each nodes should be provided.", | |
263 | src, dst); | |
264 | exit(EXIT_FAILURE); | |
265 | } | |
266 | } | |
267 | ||
268 | if (numa_info[src].distance[dst] != 0 && | |
269 | numa_info[dst].distance[src] != 0 && | |
270 | numa_info[src].distance[dst] != | |
271 | numa_info[dst].distance[src]) { | |
272 | is_asymmetrical = true; | |
273 | } | |
274 | } | |
275 | } | |
276 | ||
277 | if (is_asymmetrical) { | |
278 | for (src = 0; src < nb_numa_nodes; src++) { | |
279 | for (dst = 0; dst < nb_numa_nodes; dst++) { | |
280 | if (src != dst && numa_info[src].distance[dst] == 0) { | |
281 | error_report("At least one asymmetrical pair of " | |
282 | "distances is given, please provide distances " | |
283 | "for both directions of all node pairs."); | |
284 | exit(EXIT_FAILURE); | |
285 | } | |
286 | } | |
287 | } | |
288 | } | |
289 | } | |
290 | ||
291 | static void complete_init_numa_distance(void) | |
292 | { | |
293 | int src, dst; | |
294 | ||
295 | /* Fixup NUMA distance by symmetric policy because if it is an | |
296 | * asymmetric distance table, it should be a complete table and | |
297 | * there would not be any missing distance except local node, which | |
298 | * is verified by validate_numa_distance above. | |
299 | */ | |
300 | for (src = 0; src < nb_numa_nodes; src++) { | |
301 | for (dst = 0; dst < nb_numa_nodes; dst++) { | |
302 | if (numa_info[src].distance[dst] == 0) { | |
303 | if (src == dst) { | |
304 | numa_info[src].distance[dst] = NUMA_DISTANCE_MIN; | |
305 | } else { | |
306 | numa_info[src].distance[dst] = numa_info[dst].distance[src]; | |
307 | } | |
308 | } | |
309 | } | |
310 | } | |
311 | } | |
312 | ||
313 | void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes, | |
314 | int nb_nodes, ram_addr_t size) | |
315 | { | |
316 | int i; | |
317 | uint64_t usedmem = 0; | |
318 | ||
319 | /* Align each node according to the alignment | |
320 | * requirements of the machine class | |
321 | */ | |
322 | ||
323 | for (i = 0; i < nb_nodes - 1; i++) { | |
324 | nodes[i].node_mem = (size / nb_nodes) & | |
325 | ~((1 << mc->numa_mem_align_shift) - 1); | |
326 | usedmem += nodes[i].node_mem; | |
327 | } | |
328 | nodes[i].node_mem = size - usedmem; | |
329 | } | |
330 | ||
331 | void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes, | |
332 | int nb_nodes, ram_addr_t size) | |
333 | { | |
334 | int i; | |
335 | uint64_t usedmem = 0, node_mem; | |
336 | uint64_t granularity = size / nb_nodes; | |
337 | uint64_t propagate = 0; | |
338 | ||
339 | for (i = 0; i < nb_nodes - 1; i++) { | |
340 | node_mem = (granularity + propagate) & | |
341 | ~((1 << mc->numa_mem_align_shift) - 1); | |
342 | propagate = granularity + propagate - node_mem; | |
343 | nodes[i].node_mem = node_mem; | |
344 | usedmem += node_mem; | |
345 | } | |
346 | nodes[i].node_mem = size - usedmem; | |
347 | } | |
348 | ||
/*
 * Finalize global NUMA state once all '-numa' options have been parsed:
 * implicitly create a node when memory hotplug requires one, reject
 * sparse node IDs, auto-distribute RAM when no node got an explicit
 * size, verify the per-node total equals ram_size, and validate and
 * complete the distance table.  Exits on fatal configuration errors.
 */
void numa_complete_configuration(MachineState *ms)
{
    int i;
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    /*
     * If memory hotplug is enabled (slots > 0) but without '-numa'
     * options explicitly on CLI, guests will break.
     *
     *   Windows: won't enable memory hotplug without SRAT table at all
     *
     *   Linux: if QEMU is started with initial memory all below 4Gb
     *   and no SRAT table present, guest kernel will use nommu DMA ops,
     *   which breaks 32bit hw drivers when memory is hotplugged and
     *   guest tries to use it with that drivers.
     *
     * Enable NUMA implicitly by adding a new NUMA node automatically.
     */
    if (ms->ram_slots > 0 && nb_numa_nodes == 0 &&
        mc->auto_enable_numa_with_memhp) {
            NumaNodeOptions node = { };
            parse_numa_node(ms, &node, NULL);
    }

    assert(max_numa_nodeid <= MAX_NODES);

    /* No support for sparse NUMA node IDs yet: */
    for (i = max_numa_nodeid - 1; i >= 0; i--) {
        /* Report large node IDs first, to make mistakes easier to spot */
        if (!numa_info[i].present) {
            error_report("numa: Node ID missing: %d", i);
            exit(1);
        }
    }

    /* This must be always true if all nodes are present: */
    assert(nb_numa_nodes == max_numa_nodeid);

    if (nb_numa_nodes > 0) {
        uint64_t numa_total;

        /* Defensive clamp; parse_numa_node() already rejects nodeids
         * >= MAX_NODES, so this should never trigger. */
        if (nb_numa_nodes > MAX_NODES) {
            nb_numa_nodes = MAX_NODES;
        }

        /* If no memory size is given for any node, assume the default case
         * and distribute the available memory equally across all nodes
         */
        for (i = 0; i < nb_numa_nodes; i++) {
            if (numa_info[i].node_mem != 0) {
                break;
            }
        }
        if (i == nb_numa_nodes) {
            assert(mc->numa_auto_assign_ram);
            mc->numa_auto_assign_ram(mc, numa_info, nb_numa_nodes, ram_size);
        }

        /* The per-node sizes must account for all of guest RAM. */
        numa_total = 0;
        for (i = 0; i < nb_numa_nodes; i++) {
            numa_total += numa_info[i].node_mem;
        }
        if (numa_total != ram_size) {
            error_report("total memory for NUMA nodes (0x%" PRIx64 ")"
                         " should equal RAM size (0x" RAM_ADDR_FMT ")",
                         numa_total, ram_size);
            exit(1);
        }

        /* QEMU needs at least all unique node pair distances to build
         * the whole NUMA distance table. QEMU treats the distance table
         * as symmetric by default, i.e. distance A->B == distance B->A.
         * Thus, QEMU is able to complete the distance table
         * initialization even though only distance A->B is provided and
         * distance B->A is not. QEMU knows the distance of a node to
         * itself is always 10, so A->A distances may be omitted. When
         * the distances of two nodes of a pair differ, i.e. distance
         * A->B != distance B->A, then that means the distance table is
         * asymmetric. In this case, the distances for both directions
         * of all node pairs are required.
         */
        if (have_numa_distance) {
            /* Validate enough NUMA distance information was provided. */
            validate_numa_distance();

            /* Validation succeeded, now fill in any missing distances. */
            complete_init_numa_distance();
        }
    }
}
439 | ||
440 | void parse_numa_opts(MachineState *ms) | |
441 | { | |
442 | if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, NULL)) { | |
443 | exit(1); | |
444 | } | |
445 | } | |
446 | ||
447 | void qmp_set_numa_node(NumaOptions *cmd, Error **errp) | |
448 | { | |
449 | if (!runstate_check(RUN_STATE_PRECONFIG)) { | |
450 | error_setg(errp, "The command is permitted only in '%s' state", | |
451 | RunState_str(RUN_STATE_PRECONFIG)); | |
452 | return; | |
453 | } | |
454 | ||
455 | set_numa_options(MACHINE(qdev_get_machine()), cmd, errp); | |
456 | } | |
457 | ||
458 | void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp) | |
459 | { | |
460 | int node_id = object_property_get_int(OBJECT(dev), "node-id", &error_abort); | |
461 | ||
462 | if (node_id == CPU_UNSET_NUMA_NODE_ID) { | |
463 | /* due to bug in libvirt, it doesn't pass node-id from props on | |
464 | * device_add as expected, so we have to fix it up here */ | |
465 | if (slot->props.has_node_id) { | |
466 | object_property_set_int(OBJECT(dev), slot->props.node_id, | |
467 | "node-id", errp); | |
468 | } | |
469 | } else if (node_id != slot->props.node_id) { | |
470 | error_setg(errp, "node-id=%d must match numa node specified " | |
471 | "with -numa option", node_id); | |
472 | } | |
473 | } | |
474 | ||
475 | static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner, | |
476 | const char *name, | |
477 | uint64_t ram_size) | |
478 | { | |
479 | if (mem_path) { | |
480 | #ifdef __linux__ | |
481 | Error *err = NULL; | |
482 | memory_region_init_ram_from_file(mr, owner, name, ram_size, 0, 0, | |
483 | mem_path, &err); | |
484 | if (err) { | |
485 | error_report_err(err); | |
486 | if (mem_prealloc) { | |
487 | exit(1); | |
488 | } | |
489 | error_report("falling back to regular RAM allocation."); | |
490 | ||
491 | /* Legacy behavior: if allocation failed, fall back to | |
492 | * regular RAM allocation. | |
493 | */ | |
494 | mem_path = NULL; | |
495 | memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal); | |
496 | } | |
497 | #else | |
498 | fprintf(stderr, "-mem-path not supported on this host\n"); | |
499 | exit(1); | |
500 | #endif | |
501 | } else { | |
502 | memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal); | |
503 | } | |
504 | vmstate_register_ram_global(mr); | |
505 | } | |
506 | ||
507 | void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner, | |
508 | const char *name, | |
509 | uint64_t ram_size) | |
510 | { | |
511 | uint64_t addr = 0; | |
512 | int i; | |
513 | ||
514 | if (nb_numa_nodes == 0 || !have_memdevs) { | |
515 | allocate_system_memory_nonnuma(mr, owner, name, ram_size); | |
516 | return; | |
517 | } | |
518 | ||
519 | memory_region_init(mr, owner, name, ram_size); | |
520 | for (i = 0; i < nb_numa_nodes; i++) { | |
521 | uint64_t size = numa_info[i].node_mem; | |
522 | HostMemoryBackend *backend = numa_info[i].node_memdev; | |
523 | if (!backend) { | |
524 | continue; | |
525 | } | |
526 | MemoryRegion *seg = host_memory_backend_get_memory(backend); | |
527 | ||
528 | if (memory_region_is_mapped(seg)) { | |
529 | char *path = object_get_canonical_path_component(OBJECT(backend)); | |
530 | error_report("memory backend %s is used multiple times. Each " | |
531 | "-numa option must use a different memdev value.", | |
532 | path); | |
533 | exit(1); | |
534 | } | |
535 | ||
536 | host_memory_backend_set_mapped(backend, true); | |
537 | memory_region_add_subregion(mr, addr, seg); | |
538 | vmstate_register_ram_global(seg); | |
539 | addr += size; | |
540 | } | |
541 | } | |
542 | ||
543 | static void numa_stat_memory_devices(NumaNodeMem node_mem[]) | |
544 | { | |
545 | MemoryDeviceInfoList *info_list = qmp_memory_device_list(); | |
546 | MemoryDeviceInfoList *info; | |
547 | PCDIMMDeviceInfo *pcdimm_info; | |
548 | ||
549 | for (info = info_list; info; info = info->next) { | |
550 | MemoryDeviceInfo *value = info->value; | |
551 | ||
552 | if (value) { | |
553 | switch (value->type) { | |
554 | case MEMORY_DEVICE_INFO_KIND_DIMM: | |
555 | pcdimm_info = value->u.dimm.data; | |
556 | break; | |
557 | ||
558 | case MEMORY_DEVICE_INFO_KIND_NVDIMM: | |
559 | pcdimm_info = value->u.nvdimm.data; | |
560 | break; | |
561 | ||
562 | default: | |
563 | pcdimm_info = NULL; | |
564 | break; | |
565 | } | |
566 | ||
567 | if (pcdimm_info) { | |
568 | node_mem[pcdimm_info->node].node_mem += pcdimm_info->size; | |
569 | node_mem[pcdimm_info->node].node_plugged_mem += | |
570 | pcdimm_info->size; | |
571 | } | |
572 | } | |
573 | } | |
574 | qapi_free_MemoryDeviceInfoList(info_list); | |
575 | } | |
576 | ||
577 | void query_numa_node_mem(NumaNodeMem node_mem[]) | |
578 | { | |
579 | int i; | |
580 | ||
581 | if (nb_numa_nodes <= 0) { | |
582 | return; | |
583 | } | |
584 | ||
585 | numa_stat_memory_devices(node_mem); | |
586 | for (i = 0; i < nb_numa_nodes; i++) { | |
587 | node_mem[i].node_mem += numa_info[i].node_mem; | |
588 | } | |
589 | } | |
590 | ||
591 | static int query_memdev(Object *obj, void *opaque) | |
592 | { | |
593 | MemdevList **list = opaque; | |
594 | MemdevList *m = NULL; | |
595 | ||
596 | if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { | |
597 | m = g_malloc0(sizeof(*m)); | |
598 | ||
599 | m->value = g_malloc0(sizeof(*m->value)); | |
600 | ||
601 | m->value->id = object_get_canonical_path_component(obj); | |
602 | m->value->has_id = !!m->value->id; | |
603 | ||
604 | m->value->size = object_property_get_uint(obj, "size", | |
605 | &error_abort); | |
606 | m->value->merge = object_property_get_bool(obj, "merge", | |
607 | &error_abort); | |
608 | m->value->dump = object_property_get_bool(obj, "dump", | |
609 | &error_abort); | |
610 | m->value->prealloc = object_property_get_bool(obj, | |
611 | "prealloc", | |
612 | &error_abort); | |
613 | m->value->policy = object_property_get_enum(obj, | |
614 | "policy", | |
615 | "HostMemPolicy", | |
616 | &error_abort); | |
617 | object_property_get_uint16List(obj, "host-nodes", | |
618 | &m->value->host_nodes, | |
619 | &error_abort); | |
620 | ||
621 | m->next = *list; | |
622 | *list = m; | |
623 | } | |
624 | ||
625 | return 0; | |
626 | } | |
627 | ||
628 | MemdevList *qmp_query_memdev(Error **errp) | |
629 | { | |
630 | Object *obj = object_get_objects_root(); | |
631 | MemdevList *list = NULL; | |
632 | ||
633 | object_child_foreach(obj, query_memdev, &list); | |
634 | return list; | |
635 | } | |
636 | ||
/* Register @n to be called back when RAM blocks are added/removed. */
void ram_block_notifier_add(RAMBlockNotifier *n)
{
    QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next);
}
641 | ||
/* Unregister a notifier previously added with ram_block_notifier_add(). */
void ram_block_notifier_remove(RAMBlockNotifier *n)
{
    QLIST_REMOVE(n, next);
}
646 | ||
647 | void ram_block_notify_add(void *host, size_t size) | |
648 | { | |
649 | RAMBlockNotifier *notifier; | |
650 | ||
651 | QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) { | |
652 | notifier->ram_block_added(notifier, host, size); | |
653 | } | |
654 | } | |
655 | ||
656 | void ram_block_notify_remove(void *host, size_t size) | |
657 | { | |
658 | RAMBlockNotifier *notifier; | |
659 | ||
660 | QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) { | |
661 | notifier->ram_block_removed(notifier, host, size); | |
662 | } | |
663 | } |