/*
 * NUMA parameter parsing routines
 *
 * Copyright (c) 2014 Fujitsu Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/numa.h"
#include "exec/cpu-common.h"
#include "exec/ramlist.h"
#include "qemu/bitmap.h"
#include "qom/cpu.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-visit-misc.h"
#include "hw/boards.h"
#include "sysemu/hostmem.h"
#include "hw/mem/pc-dimm.h"
#include "hw/mem/memory-device.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/cutils.h"

QemuOptsList qemu_numa_opts = {
    .name = "numa",
    .implied_opt_name = "type",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head),
    .desc = { { 0 } } /* validated with OptsVisitor */
};

static int have_memdevs = -1;
static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
                             * For all nodes, nodeid < max_numa_nodeid
                             */
int nb_numa_nodes;
bool have_numa_distance;
NodeInfo numa_info[MAX_NODES];

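/*
 * For illustration only (not an exhaustive list), the -numa option forms
 * parsed by this file look like:
 *
 *   -numa node,nodeid=0,cpus=0-3,mem=2G
 *   -numa node,nodeid=1,cpus=4-7,memdev=mem1
 *   -numa dist,src=0,dst=1,val=20
 *   -numa cpu,node-id=0,socket-id=0
 *
 * The exact properties accepted depend on the NumaOptions QAPI schema and
 * the machine type in use.
 */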
static void parse_numa_node(MachineState *ms, NumaNodeOptions *node,
                            Error **errp)
{
    uint16_t nodenr;
    uint16List *cpus = NULL;
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    if (node->has_nodeid) {
        nodenr = node->nodeid;
    } else {
        nodenr = nb_numa_nodes;
    }

    if (nodenr >= MAX_NODES) {
        error_setg(errp, "Max number of NUMA nodes reached: %"
                   PRIu16 "", nodenr);
        return;
    }

    if (numa_info[nodenr].present) {
        error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr);
        return;
    }

    if (!mc->cpu_index_to_instance_props || !mc->get_default_cpu_node_id) {
        error_report("NUMA is not supported by this machine-type");
        exit(1);
    }
    for (cpus = node->cpus; cpus; cpus = cpus->next) {
        CpuInstanceProperties props;
        if (cpus->value >= max_cpus) {
            error_setg(errp,
                       "CPU index (%" PRIu16 ")"
                       " should be smaller than maxcpus (%d)",
                       cpus->value, max_cpus);
            return;
        }
        props = mc->cpu_index_to_instance_props(ms, cpus->value);
        props.node_id = nodenr;
        props.has_node_id = true;
        machine_set_cpu_numa_node(ms, &props, &error_fatal);
    }

    if (node->has_mem && node->has_memdev) {
        error_setg(errp, "cannot specify both mem= and memdev=");
        return;
    }

    if (have_memdevs == -1) {
        have_memdevs = node->has_memdev;
    }
    if (node->has_memdev != have_memdevs) {
        error_setg(errp, "memdev option must be specified for either "
                   "all or no nodes");
        return;
    }

    if (node->has_mem) {
        numa_info[nodenr].node_mem = node->mem;
    }
    if (node->has_memdev) {
        Object *o;
        o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL);
        if (!o) {
            error_setg(errp, "memdev=%s is ambiguous or not found",
                       node->memdev);
            return;
        }

        object_ref(o);
        numa_info[nodenr].node_mem = object_property_get_uint(o, "size", NULL);
        numa_info[nodenr].node_memdev = MEMORY_BACKEND(o);
    }
    numa_info[nodenr].present = true;
    max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1);
    nb_numa_nodes++;
}
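
/*
 * Note on nodeid defaults: with "-numa node -numa node" (no explicit
 * nodeid), the parser above assigns IDs 0 and 1 in order, because nodenr
 * falls back to the current nb_numa_nodes.
 */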

static void parse_numa_distance(NumaDistOptions *dist, Error **errp)
{
    uint16_t src = dist->src;
    uint16_t dst = dist->dst;
    uint8_t val = dist->val;

    if (src >= MAX_NODES || dst >= MAX_NODES) {
        error_setg(errp,
                   "Invalid node %d, node IDs must be less than %d",
                   MAX(src, dst), MAX_NODES);
        return;
    }

    if (!numa_info[src].present || !numa_info[dst].present) {
        error_setg(errp, "Source/Destination NUMA node is missing. "
                   "Please use the '-numa node' option to declare it first.");
        return;
    }

    if (val < NUMA_DISTANCE_MIN) {
        error_setg(errp, "NUMA distance (%" PRIu8 ") is invalid, "
                   "it must be at least %d.",
                   val, NUMA_DISTANCE_MIN);
        return;
    }

    if (src == dst && val != NUMA_DISTANCE_MIN) {
        error_setg(errp, "Local distance of node %d must be %d.",
                   src, NUMA_DISTANCE_MIN);
        return;
    }

    numa_info[src].distance[dst] = val;
    have_numa_distance = true;
}
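
/*
 * Worked example: "-numa dist,src=0,dst=1,val=20" records
 * numa_info[0].distance[1] = 20. If the reverse direction is never given
 * and no asymmetric pair exists, complete_init_numa_distance() below
 * mirrors it into numa_info[1].distance[0].
 */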

static int parse_numa(void *opaque, QemuOpts *opts, Error **errp)
{
    NumaOptions *object = NULL;
    MachineState *ms = opaque;
    Error *err = NULL;

    {
        Visitor *v = opts_visitor_new(opts);
        visit_type_NumaOptions(v, NULL, &object, &err);
        visit_free(v);
    }

    if (err) {
        goto end;
    }

    /* Fix up legacy suffix-less format */
    if ((object->type == NUMA_OPTIONS_TYPE_NODE) && object->u.node.has_mem) {
        const char *mem_str = qemu_opt_get(opts, "mem");
        qemu_strtosz_MiB(mem_str, NULL, &object->u.node.mem);
    }

    switch (object->type) {
    case NUMA_OPTIONS_TYPE_NODE:
        parse_numa_node(ms, &object->u.node, &err);
        if (err) {
            goto end;
        }
        break;
    case NUMA_OPTIONS_TYPE_DIST:
        parse_numa_distance(&object->u.dist, &err);
        if (err) {
            goto end;
        }
        break;
    case NUMA_OPTIONS_TYPE_CPU:
        if (!object->u.cpu.has_node_id) {
            error_setg(&err, "Missing mandatory node-id property");
            goto end;
        }
        if (!numa_info[object->u.cpu.node_id].present) {
            error_setg(&err, "Invalid node-id=%" PRId64 ", NUMA node must be "
                       "defined with -numa node,nodeid=ID before it's used with "
                       "-numa cpu,node-id=ID", object->u.cpu.node_id);
            goto end;
        }

        machine_set_cpu_numa_node(ms, qapi_NumaCpuOptions_base(&object->u.cpu),
                                  &err);
        break;
    default:
        abort();
    }

end:
    qapi_free_NumaOptions(object);
    if (err) {
        error_report_err(err);
        return -1;
    }

    return 0;
}
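
/*
 * Legacy fixup example: "-numa node,mem=128" (no size suffix) is re-parsed
 * with qemu_strtosz_MiB() above and therefore means 128 MiB, while
 * "mem=128M" or "mem=1G" keep their explicit suffixes.
 */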

/* If all node pair distances are symmetric, then only distances
 * in one direction are enough. If there is even one asymmetric
 * pair, though, then all distances must be provided. The
 * distance from a node to itself is always NUMA_DISTANCE_MIN,
 * so providing it is never necessary.
 */
static void validate_numa_distance(void)
{
    int src, dst;
    bool is_asymmetrical = false;

    for (src = 0; src < nb_numa_nodes; src++) {
        for (dst = src; dst < nb_numa_nodes; dst++) {
            if (numa_info[src].distance[dst] == 0 &&
                numa_info[dst].distance[src] == 0) {
                if (src != dst) {
                    error_report("The distance between node %d and %d is "
                                 "missing, at least one distance value "
                                 "between each pair of nodes should be "
                                 "provided.", src, dst);
                    exit(EXIT_FAILURE);
                }
            }

            if (numa_info[src].distance[dst] != 0 &&
                numa_info[dst].distance[src] != 0 &&
                numa_info[src].distance[dst] !=
                numa_info[dst].distance[src]) {
                is_asymmetrical = true;
            }
        }
    }

    if (is_asymmetrical) {
        for (src = 0; src < nb_numa_nodes; src++) {
            for (dst = 0; dst < nb_numa_nodes; dst++) {
                if (src != dst && numa_info[src].distance[dst] == 0) {
                    error_report("At least one asymmetrical pair of "
                                 "distances is given, please provide distances "
                                 "for both directions of all node pairs.");
                    exit(EXIT_FAILURE);
                }
            }
        }
    }
}
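
/*
 * For a two-node guest, giving only the 0->1 distance passes validation
 * (the table is treated as symmetric). Giving 0->1 = 20 and 1->0 = 30
 * marks the table asymmetric, so every off-diagonal entry must then be
 * supplied explicitly.
 */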

static void complete_init_numa_distance(void)
{
    int src, dst;

    /* Fix up the NUMA distance table using the symmetric policy. If the
     * table were asymmetric it would have to be complete, so (as verified
     * by validate_numa_distance() above) the only entries that can still
     * be missing are local distances and the mirror images of distances
     * given in one direction only.
     */
    for (src = 0; src < nb_numa_nodes; src++) {
        for (dst = 0; dst < nb_numa_nodes; dst++) {
            if (numa_info[src].distance[dst] == 0) {
                if (src == dst) {
                    numa_info[src].distance[dst] = NUMA_DISTANCE_MIN;
                } else {
                    numa_info[src].distance[dst] = numa_info[dst].distance[src];
                }
            }
        }
    }
}

void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
                                 int nb_nodes, ram_addr_t size)
{
    int i;
    uint64_t usedmem = 0;

    /* Align each node according to the alignment
     * requirements of the machine class
     */

    for (i = 0; i < nb_nodes - 1; i++) {
        nodes[i].node_mem = (size / nb_nodes) &
                            ~((1 << mc->numa_mem_align_shift) - 1);
        usedmem += nodes[i].node_mem;
    }
    nodes[i].node_mem = size - usedmem;
}

void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
                                  int nb_nodes, ram_addr_t size)
{
    int i;
    uint64_t usedmem = 0, node_mem;
    uint64_t granularity = size / nb_nodes;
    uint64_t propagate = 0;

    for (i = 0; i < nb_nodes - 1; i++) {
        node_mem = (granularity + propagate) &
                   ~((1 << mc->numa_mem_align_shift) - 1);
        propagate = granularity + propagate - node_mem;
        nodes[i].node_mem = node_mem;
        usedmem += node_mem;
    }
    nodes[i].node_mem = size - usedmem;
}
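
/*
 * Worked example (hypothetical numbers): size = 4097 MiB, nb_nodes = 2,
 * numa_mem_align_shift = 23 (i.e. 8 MiB alignment). granularity is
 * 2048.5 MiB; node 0 gets that rounded down to 2048 MiB, the 0.5 MiB
 * remainder propagates, and node 1 receives the rest, 2049 MiB. The
 * legacy variant instead rounds every non-final node down and puts the
 * whole remainder on the last node.
 */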

void parse_numa_opts(MachineState *ms)
{
    int i;
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, NULL)) {
        exit(1);
    }

    /*
     * If memory hotplug is enabled (slots > 0) but no '-numa' options
     * are given explicitly on the CLI, guests will break.
     *
     * Windows: won't enable memory hotplug without an SRAT table at all
     *
     * Linux: if QEMU is started with initial memory all below 4GB
     * and no SRAT table is present, the guest kernel will use nommu
     * DMA ops, which breaks 32bit hw drivers when memory is hotplugged
     * and the guest tries to use it with those drivers.
     *
     * Enable NUMA implicitly by adding a new NUMA node automatically.
     */
    if (ms->ram_slots > 0 && nb_numa_nodes == 0 &&
        mc->auto_enable_numa_with_memhp) {
        NumaNodeOptions node = { };
        parse_numa_node(ms, &node, NULL);
    }

    assert(max_numa_nodeid <= MAX_NODES);

    /* No support for sparse NUMA node IDs yet: */
    for (i = max_numa_nodeid - 1; i >= 0; i--) {
        /* Report large node IDs first, to make mistakes easier to spot */
        if (!numa_info[i].present) {
            error_report("numa: Node ID missing: %d", i);
            exit(1);
        }
    }

    /* This must be always true if all nodes are present: */
    assert(nb_numa_nodes == max_numa_nodeid);

    if (nb_numa_nodes > 0) {
        uint64_t numa_total;

        if (nb_numa_nodes > MAX_NODES) {
            nb_numa_nodes = MAX_NODES;
        }

        /* If no memory size is given for any node, assume the default case
         * and distribute the available memory equally across all nodes
         */
        for (i = 0; i < nb_numa_nodes; i++) {
            if (numa_info[i].node_mem != 0) {
                break;
            }
        }
        if (i == nb_numa_nodes) {
            assert(mc->numa_auto_assign_ram);
            mc->numa_auto_assign_ram(mc, numa_info, nb_numa_nodes, ram_size);
        }

        numa_total = 0;
        for (i = 0; i < nb_numa_nodes; i++) {
            numa_total += numa_info[i].node_mem;
        }
        if (numa_total != ram_size) {
            error_report("total memory for NUMA nodes (0x%" PRIx64 ")"
                         " should equal RAM size (0x" RAM_ADDR_FMT ")",
                         numa_total, ram_size);
            exit(1);
        }

        /* QEMU needs at least all unique node pair distances to build
         * the whole NUMA distance table. QEMU treats the distance table
         * as symmetric by default, i.e. distance A->B == distance B->A.
         * Thus, QEMU is able to complete the distance table
         * initialization even though only distance A->B is provided and
         * distance B->A is not. QEMU knows the distance of a node to
         * itself is always 10, so A->A distances may be omitted. When
         * the two directions of a node pair differ, i.e. distance
         * A->B != distance B->A, the distance table is asymmetric.
         * In this case, the distances for both directions of all node
         * pairs are required.
         */
        if (have_numa_distance) {
            /* Validate enough NUMA distance information was provided. */
            validate_numa_distance();

            /* Validation succeeded, now fill in any missing distances. */
            complete_init_numa_distance();
        }
    }
}
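
/*
 * Example of the implicit-NUMA path above: "-m 2G,slots=4,maxmem=16G"
 * with no -numa option, on a machine type that sets
 * auto_enable_numa_with_memhp, creates a single implicit node 0 holding
 * all initial RAM, so the guest sees an SRAT table and hotplug works.
 */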

void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp)
{
    int node_id = object_property_get_int(OBJECT(dev), "node-id", &error_abort);

    if (node_id == CPU_UNSET_NUMA_NODE_ID) {
        /* due to a bug in libvirt, it doesn't pass node-id from props on
         * device_add as expected, so we have to fix it up here */
        if (slot->props.has_node_id) {
            object_property_set_int(OBJECT(dev), slot->props.node_id,
                                    "node-id", errp);
        }
    } else if (node_id != slot->props.node_id) {
        error_setg(errp, "node-id=%d must match the NUMA node specified "
                   "with the -numa option", node_id);
    }
}
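
/*
 * Illustrative hotplug flow (exact syntax depends on the target): a CPU
 * added with "device_add qemu64-x86_64-cpu,socket-id=1,core-id=0,thread-id=0"
 * and no node-id gets the node-id recorded for that slot via -numa cpu;
 * an explicit node-id must match the slot's mapping or plugging fails.
 */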

static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner,
                                           const char *name,
                                           uint64_t ram_size)
{
    if (mem_path) {
#ifdef __linux__
        Error *err = NULL;
        memory_region_init_ram_from_file(mr, owner, name, ram_size, 0, false,
                                         mem_path, &err);
        if (err) {
            error_report_err(err);
            if (mem_prealloc) {
                exit(1);
            }
            error_report("falling back to regular RAM allocation.");

            /* Legacy behavior: if allocation failed, fall back to
             * regular RAM allocation.
             */
            mem_path = NULL;
            memory_region_init_ram_nomigrate(mr, owner, name, ram_size,
                                             &error_fatal);
        }
#else
        fprintf(stderr, "-mem-path not supported on this host\n");
        exit(1);
#endif
    } else {
        memory_region_init_ram_nomigrate(mr, owner, name, ram_size,
                                         &error_fatal);
    }
    vmstate_register_ram_global(mr);
}

void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
                                          const char *name,
                                          uint64_t ram_size)
{
    uint64_t addr = 0;
    int i;

    if (nb_numa_nodes == 0 || !have_memdevs) {
        allocate_system_memory_nonnuma(mr, owner, name, ram_size);
        return;
    }

    memory_region_init(mr, owner, name, ram_size);
    for (i = 0; i < nb_numa_nodes; i++) {
        uint64_t size = numa_info[i].node_mem;
        HostMemoryBackend *backend = numa_info[i].node_memdev;
        if (!backend) {
            continue;
        }
        MemoryRegion *seg = host_memory_backend_get_memory(backend,
                                                           &error_fatal);

        if (memory_region_is_mapped(seg)) {
            char *path = object_get_canonical_path_component(OBJECT(backend));
            error_report("memory backend %s is used multiple times. Each "
                         "-numa option must use a different memdev value.",
                         path);
            exit(1);
        }

        host_memory_backend_set_mapped(backend, true);
        memory_region_add_subregion(mr, addr, seg);
        vmstate_register_ram_global(seg);
        addr += size;
    }
}
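
/*
 * Illustrative memdev-backed layout consumed by the loop above:
 *
 *   -object memory-backend-ram,id=mem0,size=2G \
 *   -object memory-backend-ram,id=mem1,size=2G \
 *   -numa node,nodeid=0,memdev=mem0 -numa node,nodeid=1,memdev=mem1
 *
 * Each backend becomes one subregion, placed back to back from offset 0
 * of the system memory region.
 */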

static void numa_stat_memory_devices(NumaNodeMem node_mem[])
{
    MemoryDeviceInfoList *info_list = qmp_memory_device_list();
    MemoryDeviceInfoList *info;
    PCDIMMDeviceInfo *pcdimm_info;

    for (info = info_list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value) {
            switch (value->type) {
            case MEMORY_DEVICE_INFO_KIND_DIMM:
                pcdimm_info = value->u.dimm.data;
                break;

            case MEMORY_DEVICE_INFO_KIND_NVDIMM:
                pcdimm_info = value->u.nvdimm.data;
                break;

            default:
                pcdimm_info = NULL;
                break;
            }

            if (pcdimm_info) {
                node_mem[pcdimm_info->node].node_mem += pcdimm_info->size;
                if (pcdimm_info->hotpluggable && pcdimm_info->hotplugged) {
                    node_mem[pcdimm_info->node].node_plugged_mem +=
                        pcdimm_info->size;
                }
            }
        }
    }
    qapi_free_MemoryDeviceInfoList(info_list);
}

void query_numa_node_mem(NumaNodeMem node_mem[])
{
    int i;

    if (nb_numa_nodes <= 0) {
        return;
    }

    numa_stat_memory_devices(node_mem);
    for (i = 0; i < nb_numa_nodes; i++) {
        node_mem[i].node_mem += numa_info[i].node_mem;
    }
}

static int query_memdev(Object *obj, void *opaque)
{
    MemdevList **list = opaque;
    MemdevList *m = NULL;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        m = g_malloc0(sizeof(*m));

        m->value = g_malloc0(sizeof(*m->value));

        m->value->id = object_get_canonical_path_component(obj);
        m->value->has_id = !!m->value->id;

        m->value->size = object_property_get_uint(obj, "size",
                                                  &error_abort);
        m->value->merge = object_property_get_bool(obj, "merge",
                                                   &error_abort);
        m->value->dump = object_property_get_bool(obj, "dump",
                                                  &error_abort);
        m->value->prealloc = object_property_get_bool(obj,
                                                      "prealloc",
                                                      &error_abort);
        m->value->policy = object_property_get_enum(obj,
                                                    "policy",
                                                    "HostMemPolicy",
                                                    &error_abort);
        object_property_get_uint16List(obj, "host-nodes",
                                       &m->value->host_nodes,
                                       &error_abort);

        m->next = *list;
        *list = m;
    }

    return 0;
}

MemdevList *qmp_query_memdev(Error **errp)
{
    Object *obj = object_get_objects_root();
    MemdevList *list = NULL;

    object_child_foreach(obj, query_memdev, &list);
    return list;
}
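
/*
 * QMP usage sketch (field values are illustrative):
 *
 *   -> { "execute": "query-memdev" }
 *   <- { "return": [ { "id": "mem0", "size": 2147483648, "merge": true,
 *                      "dump": true, "prealloc": false,
 *                      "policy": "default", "host-nodes": [] } ] }
 */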

void ram_block_notifier_add(RAMBlockNotifier *n)
{
    QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next);
}

void ram_block_notifier_remove(RAMBlockNotifier *n)
{
    QLIST_REMOVE(n, next);
}

void ram_block_notify_add(void *host, size_t size)
{
    RAMBlockNotifier *notifier;

    QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
        notifier->ram_block_added(notifier, host, size);
    }
}

void ram_block_notify_remove(void *host, size_t size)
{
    RAMBlockNotifier *notifier;

    QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
        notifier->ram_block_removed(notifier, host, size);
    }
}
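
/*
 * Minimal consumer sketch (hypothetical names; both callbacks should be
 * set, since the notify helpers above invoke them unconditionally):
 *
 *   static void my_ram_added(RAMBlockNotifier *n, void *host, size_t size)
 *   {
 *       // react to the new RAM block at [host, host + size)
 *   }
 *
 *   static void my_ram_removed(RAMBlockNotifier *n, void *host, size_t size)
 *   {
 *       // undo whatever my_ram_added did for this range
 *   }
 *
 *   static RAMBlockNotifier my_notifier = {
 *       .ram_block_added   = my_ram_added,
 *       .ram_block_removed = my_ram_removed,
 *   };
 *
 *   ram_block_notifier_add(&my_notifier);
 */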