/*
 * QEMU Host Memory Backend
 *
 * Copyright (C) 2013-2014 Red Hat Inc
 *
 * Authors:
 *   Igor Mammedov <imammedo@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/hostmem.h"
#include "sysemu/sysemu.h"
#include "hw/boards.h"
#include "qapi/error.h"
#include "qapi/qapi-builtin-visit.h"
#include "qapi/visitor.h"
#include "qemu/config-file.h"
#include "qom/object_interfaces.h"
#include "qemu/mmap-alloc.h"

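/*
 * The QAPI-generated HostMemPolicy values are expected to match the kernel's
 * MPOL_* constants, so backend->policy can be handed to mbind() without
 * translation; the build-time assertions below enforce that assumption.
 */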
#ifdef CONFIG_NUMA
#include <numaif.h>
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_DEFAULT != MPOL_DEFAULT);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_PREFERRED != MPOL_PREFERRED);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_BIND != MPOL_BIND);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE);
#endif

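/*
 * Name used for the backend's RAM block: the plain QOM path component by
 * default, or the full canonical QOM path when the
 * "x-use-canonical-path-for-ramblock-id" property is enabled.  The returned
 * string is newly allocated and must be freed by the caller.
 */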
char *
host_memory_backend_get_name(HostMemoryBackend *backend)
{
    if (!backend->use_canonical_path) {
        return g_strdup(object_get_canonical_path_component(OBJECT(backend)));
    }

    return object_get_canonical_path(OBJECT(backend));
}

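/*
 * "size" property accessors.  The size can only be set while the backing
 * memory region has not been created yet (see
 * host_memory_backend_memory_complete()), and a value of zero is rejected
 * because a zero-sized backend is used to mean "not initialized yet".
 */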
static void
host_memory_backend_get_size(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint64_t value = backend->size;

    visit_type_size(v, name, &value, errp);
}

static void
host_memory_backend_set_size(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint64_t value;

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(errp, "cannot change property %s of %s", name,
                   object_get_typename(obj));
        return;
    }

    if (!visit_type_size(v, name, &value, errp)) {
        return;
    }
    if (!value) {
        error_setg(errp,
                   "property '%s' of %s doesn't take value '%" PRIu64 "'",
                   name, object_get_typename(obj), value);
        return;
    }
    backend->size = value;
}

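/*
 * "host-nodes" property accessors.  The nodes are stored as a bitmap of up
 * to MAX_NODES bits in backend->host_nodes; the getter converts the set bits
 * back into a uint16List, and the setter validates that every node id is
 * below MAX_NODES before marking it in the bitmap.
 */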
static void
host_memory_backend_get_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *host_nodes = NULL;
    uint16List **node = &host_nodes;
    unsigned long value;

    value = find_first_bit(backend->host_nodes, MAX_NODES);
    if (value == MAX_NODES) {
        goto ret;
    }

    *node = g_malloc0(sizeof(**node));
    (*node)->value = value;
    node = &(*node)->next;

    do {
        value = find_next_bit(backend->host_nodes, MAX_NODES, value + 1);
        if (value == MAX_NODES) {
            break;
        }

        *node = g_malloc0(sizeof(**node));
        (*node)->value = value;
        node = &(*node)->next;
    } while (true);

ret:
    visit_type_uint16List(v, name, &host_nodes, errp);
}

static void
host_memory_backend_set_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
#ifdef CONFIG_NUMA
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *l, *host_nodes = NULL;

    visit_type_uint16List(v, name, &host_nodes, errp);

    for (l = host_nodes; l; l = l->next) {
        if (l->value >= MAX_NODES) {
            error_setg(errp, "Invalid host-nodes value: %d", l->value);
            goto out;
        }
    }

    for (l = host_nodes; l; l = l->next) {
        bitmap_set(backend->host_nodes, l->value, 1);
    }

out:
    qapi_free_uint16List(host_nodes);
#else
    error_setg(errp, "NUMA node binding is not supported by this QEMU");
#endif
}

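/*
 * "policy" property accessors.  The value is a HostMemPolicy enum constant;
 * thanks to the build-time checks above it is numerically identical to the
 * corresponding MPOL_* value and is applied with mbind() when the backend
 * completes.  Without CONFIG_NUMA only the default policy is accepted.
 */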
static int
host_memory_backend_get_policy(Object *obj, Error **errp G_GNUC_UNUSED)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    return backend->policy;
}

static void
host_memory_backend_set_policy(Object *obj, int policy, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    backend->policy = policy;

#ifndef CONFIG_NUMA
    if (policy != HOST_MEM_POLICY_DEFAULT) {
        error_setg(errp, "NUMA policies are not supported by this QEMU");
    }
#endif
}

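/*
 * "merge" and "dump" property accessors.  If the backing memory region has
 * already been allocated, changing either property takes effect immediately
 * via qemu_madvise() (MERGEABLE/UNMERGEABLE for KSM page merging,
 * DODUMP/DONTDUMP for core-dump inclusion); otherwise the flag is only
 * recorded and applied later in host_memory_backend_memory_complete().
 */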
static bool host_memory_backend_get_merge(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->merge;
}

static void host_memory_backend_set_merge(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!host_memory_backend_mr_inited(backend)) {
        backend->merge = value;
        return;
    }

    if (value != backend->merge) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_MERGEABLE : QEMU_MADV_UNMERGEABLE);
        backend->merge = value;
    }
}

static bool host_memory_backend_get_dump(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->dump;
}

static void host_memory_backend_set_dump(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!host_memory_backend_mr_inited(backend)) {
        backend->dump = value;
        return;
    }

    if (value != backend->dump) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_DODUMP : QEMU_MADV_DONTDUMP);
        backend->dump = value;
    }
}

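/*
 * "prealloc" property accessors.  Enabling preallocation on a backend whose
 * memory region already exists triggers os_mem_prealloc() right away, using
 * "prealloc-threads" worker threads; the setter only acts on an off->on
 * transition, since already-touched pages cannot be un-preallocated.
 */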
static bool host_memory_backend_get_prealloc(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->prealloc;
}

static void host_memory_backend_set_prealloc(Object *obj, bool value,
                                             Error **errp)
{
    Error *local_err = NULL;
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!host_memory_backend_mr_inited(backend)) {
        backend->prealloc = value;
        return;
    }

    if (value && !backend->prealloc) {
        int fd = memory_region_get_fd(&backend->mr);
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        os_mem_prealloc(fd, ptr, sz, backend->prealloc_threads, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        backend->prealloc = true;
    }
}

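/*
 * "prealloc-threads" property accessors.  The value is the number of threads
 * os_mem_prealloc() uses to touch pages; it must be non-zero and defaults to
 * 1 (set in host_memory_backend_init() below).
 */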
static void host_memory_backend_get_prealloc_threads(Object *obj, Visitor *v,
    const char *name, void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    visit_type_uint32(v, name, &backend->prealloc_threads, errp);
}

static void host_memory_backend_set_prealloc_threads(Object *obj, Visitor *v,
    const char *name, void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }
    if (value <= 0) {
        error_setg(errp, "property '%s' of %s doesn't take value '%d'", name,
                   object_get_typename(obj), value);
        return;
    }
    backend->prealloc_threads = value;
}

static void host_memory_backend_init(Object *obj)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    MachineState *machine = MACHINE(qdev_get_machine());

    /* TODO: convert access to globals to compat properties */
    backend->merge = machine_mem_merge(machine);
    backend->dump = machine_dump_guest_core(machine);
    backend->prealloc_threads = 1;
}

static void host_memory_backend_post_init(Object *obj)
{
    object_apply_compat_props(obj);
}

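/*
 * Helpers for code that consumes a backend: host_memory_backend_mr_inited()
 * reports whether the backing memory region has been created, and the
 * is_mapped flag records whether a frontend (e.g. a memory device) is
 * currently using the backend, which in turn blocks deletion via
 * host_memory_backend_can_be_deleted() below.
 */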
bool host_memory_backend_mr_inited(HostMemoryBackend *backend)
{
    /*
     * NOTE: We forbid zero-length memory backend, so here zero means
     * "we haven't inited the backend memory region yet".
     */
    return memory_region_size(&backend->mr) != 0;
}

MemoryRegion *host_memory_backend_get_memory(HostMemoryBackend *backend)
{
    return host_memory_backend_mr_inited(backend) ? &backend->mr : NULL;
}

void host_memory_backend_set_mapped(HostMemoryBackend *backend, bool mapped)
{
    backend->is_mapped = mapped;
}

bool host_memory_backend_is_mapped(HostMemoryBackend *backend)
{
    return backend->is_mapped;
}

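/*
 * Page size backing the memory.  On Linux this is derived from the
 * "mem-path" property (so hugetlbfs-backed files report their huge page
 * size); on other hosts the real host page size is returned.
 */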
#ifdef __linux__
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
{
    Object *obj = OBJECT(memdev);
    char *path = object_property_get_str(obj, "mem-path", NULL);
    size_t pagesize = qemu_mempath_getpagesize(path);

    g_free(path);
    return pagesize;
}
#else
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
{
    return qemu_real_host_page_size;
}
#endif

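/*
 * Completion handler invoked by the UserCreatable machinery once all
 * properties have been set (-object / object-add).  The order below is
 * deliberate: the subclass allocates backend->mr, the merge/dump madvise
 * hints are applied, the NUMA policy is installed with mbind(), and only
 * then is memory optionally preallocated so pages are faulted in under the
 * requested policy.
 *
 * Illustrative sketch only (not part of this file): a concrete backend is
 * expected to provide HostMemoryBackendClass::alloc roughly as follows,
 * creating backend->mr with the name from host_memory_backend_get_name():
 *
 *     static void my_backend_alloc(HostMemoryBackend *backend, Error **errp)
 *     {
 *         char *name = host_memory_backend_get_name(backend);
 *
 *         memory_region_init_ram(&backend->mr, OBJECT(backend), name,
 *                                backend->size, errp);
 *         g_free(name);
 *     }
 */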
static void
host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(uc);
    HostMemoryBackendClass *bc = MEMORY_BACKEND_GET_CLASS(uc);
    Error *local_err = NULL;
    void *ptr;
    uint64_t sz;

    if (bc->alloc) {
        bc->alloc(backend, &local_err);
        if (local_err) {
            goto out;
        }

        ptr = memory_region_get_ram_ptr(&backend->mr);
        sz = memory_region_size(&backend->mr);

        if (backend->merge) {
            qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
        }
        if (!backend->dump) {
            qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
        }
#ifdef CONFIG_NUMA
        unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
        /* lastbit == MAX_NODES means maxnode = 0 */
        unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
        /* ensure policy won't be ignored in case memory is preallocated
         * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
         * this doesn't catch hugepage case. */
        unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;

        /* check for invalid host-nodes and policies and give more verbose
         * error messages than mbind(). */
        if (maxnode && backend->policy == MPOL_DEFAULT) {
            error_setg(errp, "host-nodes must be empty for policy default,"
                       " or you should explicitly specify a policy other"
                       " than default");
            return;
        } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
            error_setg(errp, "host-nodes must be set for policy %s",
                       HostMemPolicy_str(backend->policy));
            return;
        }

        /* We can have up to MAX_NODES nodes, but we need to pass maxnode+1
         * as argument to mbind() due to an old Linux bug (feature?) which
         * cuts off the last specified node. This means backend->host_nodes
         * must have MAX_NODES+1 bits available.
         */
        assert(sizeof(backend->host_nodes) >=
               BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
        assert(maxnode <= MAX_NODES);

        if (maxnode &&
            mbind(ptr, sz, backend->policy, backend->host_nodes, maxnode + 1,
                  flags)) {
            if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
                error_setg_errno(errp, errno,
                                 "cannot bind memory to host NUMA nodes");
                return;
            }
        }
#endif
        /* Preallocate memory after the NUMA policy has been instantiated.
         * This is necessary to guarantee memory is allocated with
         * specified NUMA policy in place.
         */
        if (backend->prealloc) {
            os_mem_prealloc(memory_region_get_fd(&backend->mr), ptr, sz,
                            backend->prealloc_threads, &local_err);
            if (local_err) {
                goto out;
            }
        }
    }
out:
    error_propagate(errp, local_err);
}

static bool
host_memory_backend_can_be_deleted(UserCreatable *uc)
{
    if (host_memory_backend_is_mapped(MEMORY_BACKEND(uc))) {
        return false;
    } else {
        return true;
    }
}

static bool host_memory_backend_get_share(Object *o, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);

    return backend->share;
}

static void host_memory_backend_set_share(Object *o, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(errp, "cannot change property value");
        return;
    }
    backend->share = value;
}

static bool
host_memory_backend_get_use_canonical_path(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->use_canonical_path;
}

static void
host_memory_backend_set_use_canonical_path(Object *obj, bool value,
                                           Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    backend->use_canonical_path = value;
}

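/*
 * The properties registered below are what users set on the command line or
 * via QMP when creating a concrete backend, for example (assuming the
 * memory-backend-ram subclass implemented elsewhere):
 *
 *     -object memory-backend-ram,id=mem0,size=4G,policy=bind,host-nodes=0,\
 *             prealloc=on,prealloc-threads=4
 */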
static void
host_memory_backend_class_init(ObjectClass *oc, void *data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);

    ucc->complete = host_memory_backend_memory_complete;
    ucc->can_be_deleted = host_memory_backend_can_be_deleted;

    object_class_property_add_bool(oc, "merge",
        host_memory_backend_get_merge,
        host_memory_backend_set_merge);
    object_class_property_set_description(oc, "merge",
        "Mark memory as mergeable");
    object_class_property_add_bool(oc, "dump",
        host_memory_backend_get_dump,
        host_memory_backend_set_dump);
    object_class_property_set_description(oc, "dump",
        "Set to 'off' to exclude from core dump");
    object_class_property_add_bool(oc, "prealloc",
        host_memory_backend_get_prealloc,
        host_memory_backend_set_prealloc);
    object_class_property_set_description(oc, "prealloc",
        "Preallocate memory");
    object_class_property_add(oc, "prealloc-threads", "int",
        host_memory_backend_get_prealloc_threads,
        host_memory_backend_set_prealloc_threads,
        NULL, NULL);
    object_class_property_set_description(oc, "prealloc-threads",
        "Number of CPU threads to use for prealloc");
    object_class_property_add(oc, "size", "int",
        host_memory_backend_get_size,
        host_memory_backend_set_size,
        NULL, NULL);
    object_class_property_set_description(oc, "size",
        "Size of the memory region (ex: 500M)");
    object_class_property_add(oc, "host-nodes", "int",
        host_memory_backend_get_host_nodes,
        host_memory_backend_set_host_nodes,
        NULL, NULL);
    object_class_property_set_description(oc, "host-nodes",
        "Binds memory to the list of NUMA host nodes");
    object_class_property_add_enum(oc, "policy", "HostMemPolicy",
        &HostMemPolicy_lookup,
        host_memory_backend_get_policy,
        host_memory_backend_set_policy);
    object_class_property_set_description(oc, "policy",
        "Set the NUMA policy");
    object_class_property_add_bool(oc, "share",
        host_memory_backend_get_share, host_memory_backend_set_share);
    object_class_property_set_description(oc, "share",
        "Mark the memory as private to QEMU or shared");
    object_class_property_add_bool(oc, "x-use-canonical-path-for-ramblock-id",
        host_memory_backend_get_use_canonical_path,
        host_memory_backend_set_use_canonical_path);
}

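/*
 * TYPE_MEMORY_BACKEND itself is abstract; concrete subclasses supply the
 * alloc() hook and become user-creatable through the TYPE_USER_CREATABLE
 * interface declared here.
 */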
static const TypeInfo host_memory_backend_info = {
    .name = TYPE_MEMORY_BACKEND,
    .parent = TYPE_OBJECT,
    .abstract = true,
    .class_size = sizeof(HostMemoryBackendClass),
    .class_init = host_memory_backend_class_init,
    .instance_size = sizeof(HostMemoryBackend),
    .instance_init = host_memory_backend_init,
    .instance_post_init = host_memory_backend_post_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};

static void register_types(void)
{
    type_register_static(&host_memory_backend_info);
}

type_init(register_types);