/*
 * vhost-user-blk host device
 *
 * Copyright(C) 2017 Intel Corporation.
 *
 * Authors:
 *  Changpeng Liu <[email protected]>
 *
 * Largely based on the "vhost-user-scsi.c" and "vhost-scsi.c" implemented by:
 *  Felipe Franciosi <[email protected]>
 *  Stefan Hajnoczi <[email protected]>
 *  Nicholas Bellinger <[email protected]>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user-blk.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"

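/*
 * Feature bits that depend on the vhost-user backend: vhost_get_features()
 * clears any bit in this list that the backend did not advertise.
 */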
static const int user_feature_bits[] = {
    VIRTIO_BLK_F_SIZE_MAX,
    VIRTIO_BLK_F_SEG_MAX,
    VIRTIO_BLK_F_GEOMETRY,
    VIRTIO_BLK_F_BLK_SIZE,
    VIRTIO_BLK_F_TOPOLOGY,
    VIRTIO_BLK_F_MQ,
    VIRTIO_BLK_F_RO,
    VIRTIO_BLK_F_FLUSH,
    VIRTIO_BLK_F_CONFIG_WCE,
    VIRTIO_BLK_F_DISCARD,
    VIRTIO_BLK_F_WRITE_ZEROES,
    VIRTIO_F_VERSION_1,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_IOMMU_PLATFORM,
    VHOST_INVALID_FEATURE_BIT
};

static void vhost_user_blk_event(void *opaque, QEMUChrEvent event);

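/* virtio get_config: return the block config cached from the backend */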
static void vhost_user_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);

    /* Our num_queues overrides the device backend */
    virtio_stw_p(vdev, &s->blkcfg.num_queues, s->num_queues);

    memcpy(config, &s->blkcfg, sizeof(struct virtio_blk_config));
}

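/*
 * virtio set_config: only the writeback cache enable (wce) field is acted
 * on here; a change is forwarded to the vhost-user backend.
 */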
static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    struct virtio_blk_config *blkcfg = (struct virtio_blk_config *)config;
    int ret;

    if (blkcfg->wce == s->blkcfg.wce) {
        return;
    }

    ret = vhost_dev_set_config(&s->dev, &blkcfg->wce,
                               offsetof(struct virtio_blk_config, wce),
                               sizeof(blkcfg->wce),
                               VHOST_SET_CONFIG_TYPE_MASTER);
    if (ret) {
        error_report("set device config space failed");
        return;
    }

    s->blkcfg.wce = blkcfg->wce;
}

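/*
 * Config-change notifier called by the vhost code when the backend signals
 * a configuration change. Only a capacity change (resize) is propagated to
 * the guest.
 */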
static int vhost_user_blk_handle_config_change(struct vhost_dev *dev)
{
    int ret;
    struct virtio_blk_config blkcfg;
    VHostUserBlk *s = VHOST_USER_BLK(dev->vdev);

    ret = vhost_dev_get_config(dev, (uint8_t *)&blkcfg,
                               sizeof(struct virtio_blk_config));
    if (ret < 0) {
        error_report("get config space failed");
        return -1;
    }

    /* valid for resize only */
    if (blkcfg.capacity != s->blkcfg.capacity) {
        s->blkcfg.capacity = blkcfg.capacity;
        memcpy(dev->vdev->config, &s->blkcfg, sizeof(struct virtio_blk_config));
        virtio_notify_config(dev->vdev);
    }

    return 0;
}

const VhostDevConfigOps blk_ops = {
    .vhost_dev_config_notifier = vhost_user_blk_handle_config_change,
};

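/*
 * Start the vhost-user device: enable host and guest notifiers, set up the
 * inflight I/O tracking region, then hand the virtqueues over to the
 * backend and unmask all queues.
 */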
static int vhost_user_blk_start(VirtIODevice *vdev)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i, ret;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    ret = vhost_dev_enable_notifiers(&s->dev, vdev);
    if (ret < 0) {
        error_report("Error enabling host notifiers: %d", -ret);
        return ret;
    }

    ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, true);
    if (ret < 0) {
        error_report("Error binding guest notifier: %d", -ret);
        goto err_host_notifiers;
    }

    s->dev.acked_features = vdev->guest_features;

    ret = vhost_dev_prepare_inflight(&s->dev, vdev);
    if (ret < 0) {
        error_report("Error set inflight format: %d", -ret);
        goto err_guest_notifiers;
    }

    if (!s->inflight->addr) {
        ret = vhost_dev_get_inflight(&s->dev, s->queue_size, s->inflight);
        if (ret < 0) {
            error_report("Error get inflight: %d", -ret);
            goto err_guest_notifiers;
        }
    }

    ret = vhost_dev_set_inflight(&s->dev, s->inflight);
    if (ret < 0) {
        error_report("Error set inflight: %d", -ret);
        goto err_guest_notifiers;
    }

    ret = vhost_dev_start(&s->dev, vdev);
    if (ret < 0) {
        error_report("Error starting vhost: %d", -ret);
        goto err_guest_notifiers;
    }
    s->started_vu = true;

    /* guest_notifier_mask/pending not used yet, so just unmask
     * everything here. virtio-pci will do the right thing by
     * enabling/disabling irqfd.
     */
    for (i = 0; i < s->dev.nvqs; i++) {
        vhost_virtqueue_mask(&s->dev, vdev, i, false);
    }

    return ret;

err_guest_notifiers:
    k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
err_host_notifiers:
    vhost_dev_disable_notifiers(&s->dev, vdev);
    return ret;
}

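/*
 * Stop the vhost-user device and return the virtqueues to QEMU by tearing
 * down the guest and host notifiers.
 */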
static void vhost_user_blk_stop(VirtIODevice *vdev)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;

    if (!s->started_vu) {
        return;
    }
    s->started_vu = false;

    if (!k->set_guest_notifiers) {
        return;
    }

    vhost_dev_stop(&s->dev, vdev);

    ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
    if (ret < 0) {
        error_report("vhost guest notifier cleanup failed: %d", ret);
        return;
    }

    vhost_dev_disable_notifiers(&s->dev, vdev);
}

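/*
 * DRIVER_OK (together with vm_running) decides whether the backend should
 * be running; start or stop it when that state changes.
 */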
static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    bool should_start = virtio_device_started(vdev, status);
    int ret;

    if (!vdev->vm_running) {
        should_start = false;
    }

    if (!s->connected) {
        return;
    }

    if (s->dev.started == should_start) {
        return;
    }

    if (should_start) {
        ret = vhost_user_blk_start(vdev);
        if (ret < 0) {
            error_report("vhost-user-blk: vhost start failed: %s",
                         strerror(-ret));
            qemu_chr_fe_disconnect(&s->chardev);
        }
    } else {
        vhost_user_blk_stop(vdev);
    }

}

static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev,
                                            uint64_t features,
                                            Error **errp)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);

    /* Turn on pre-defined features */
    virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
    virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
    virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
    virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
    virtio_add_feature(&features, VIRTIO_BLK_F_FLUSH);
    virtio_add_feature(&features, VIRTIO_BLK_F_RO);
    virtio_add_feature(&features, VIRTIO_BLK_F_DISCARD);
    virtio_add_feature(&features, VIRTIO_BLK_F_WRITE_ZEROES);

    if (s->config_wce) {
        virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE);
    }
    if (s->num_queues > 1) {
        virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
    }

    return vhost_get_features(&s->dev, user_feature_bits, features);
}

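/*
 * Virtqueue kick handler. Once vhost is running, kicks reach the backend
 * through the host notifiers, so this only matters for the start_on_kick
 * case handled below.
 */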
static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    int i, ret;

    if (!vdev->start_on_kick) {
        return;
    }

    if (!s->connected) {
        return;
    }

    if (s->dev.started) {
        return;
    }

    /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
     * vhost here instead of waiting for .set_status().
     */
    ret = vhost_user_blk_start(vdev);
    if (ret < 0) {
        error_report("vhost-user-blk: vhost start failed: %s",
                     strerror(-ret));
        qemu_chr_fe_disconnect(&s->chardev);
        return;
    }

    /* Kick right away to begin processing requests already in vring */
    for (i = 0; i < s->dev.nvqs; i++) {
        VirtQueue *kick_vq = virtio_get_queue(vdev, i);

        if (!virtio_queue_get_desc_addr(vdev, i)) {
            continue;
        }
        event_notifier_set(virtio_queue_get_host_notifier(kick_vq));
    }
}

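/* Device reset: drop the inflight region so a fresh one is set up on restart */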
static void vhost_user_blk_reset(VirtIODevice *vdev)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);

    vhost_dev_free_inflight(s->inflight);
}

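/*
 * Called when the vhost-user socket is (re)connected: initialise the vhost
 * device and, if the guest has already driven the device to DRIVER_OK,
 * restore the running state.
 */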
static int vhost_user_blk_connect(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    int ret = 0;

    if (s->connected) {
        return 0;
    }
    s->connected = true;

    s->dev.num_queues = s->num_queues;
    s->dev.nvqs = s->num_queues;
    s->dev.vqs = s->vhost_vqs;
    s->dev.vq_index = 0;
    s->dev.backend_features = 0;

    vhost_dev_set_config_notifier(&s->dev, &blk_ops);

    ret = vhost_dev_init(&s->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "vhost initialization failed");
        return ret;
    }

    /* restore vhost state */
    if (virtio_device_started(vdev, vdev->status)) {
        ret = vhost_user_blk_start(vdev);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "vhost start failed");
            return ret;
        }
    }

    return 0;
}

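/* Counterpart of vhost_user_blk_connect(): stop vhost and release the vhost_dev */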
static void vhost_user_blk_disconnect(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);

    if (!s->connected) {
        return;
    }
    s->connected = false;

    vhost_user_blk_stop(vdev);

    vhost_dev_cleanup(&s->dev);
}

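/*
 * Bottom half scheduled from the CHR_EVENT_CLOSED handler: perform the
 * actual disconnect outside the chardev callback and re-arm the event
 * handler for a later reconnect.
 */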
static void vhost_user_blk_chr_closed_bh(void *opaque)
{
    DeviceState *dev = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);

    vhost_user_blk_disconnect(dev);
    qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
                             NULL, opaque, NULL, true);
}

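/* Chardev event handler: connect on OPENED, tear down (via a BH) on CLOSED */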
static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
{
    DeviceState *dev = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    Error *local_err = NULL;

    switch (event) {
    case CHR_EVENT_OPENED:
        if (vhost_user_blk_connect(dev, &local_err) < 0) {
            error_report_err(local_err);
            qemu_chr_fe_disconnect(&s->chardev);
            return;
        }
        break;
    case CHR_EVENT_CLOSED:
        if (!runstate_check(RUN_STATE_SHUTDOWN)) {
            /*
             * A close event may happen during a read/write, but vhost
             * code assumes the vhost_dev remains setup, so delay the
             * stop & clear.
             */
            AioContext *ctx = qemu_get_current_aio_context();

            qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL, NULL,
                                     NULL, NULL, false);
            aio_bh_schedule_oneshot(ctx, vhost_user_blk_chr_closed_bh, opaque);

            /*
             * Move the vhost device to the stopped state. The vhost-user
             * device will be cleaned up and disconnected in the BH. This can
             * be useful in the vhost migration code: if a disconnect is
             * caught there, the general vhost code can get the device state
             * without knowing its type (in this case vhost-user).
             */
            s->dev.started = false;
        }
        break;
    case CHR_EVENT_BREAK:
    case CHR_EVENT_MUX_IN:
    case CHR_EVENT_MUX_OUT:
        /* Ignore */
        break;
    }
}

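/*
 * Realize: validate the device properties, create the virtqueues, wait for
 * the vhost-user socket to connect and fetch the initial block config from
 * the backend.
 */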
static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    int i, ret;

    if (!s->chardev.chr) {
        error_setg(errp, "chardev is mandatory");
        return;
    }

    if (s->num_queues == VHOST_USER_BLK_AUTO_NUM_QUEUES) {
        s->num_queues = 1;
    }
    if (!s->num_queues || s->num_queues > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "invalid number of IO queues");
        return;
    }

    if (!s->queue_size) {
        error_setg(errp, "queue size must be non-zero");
        return;
    }
    if (s->queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "queue size must not exceed %d",
                   VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!vhost_user_init(&s->vhost_user, &s->chardev, errp)) {
        return;
    }

    virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
                sizeof(struct virtio_blk_config));

    s->virtqs = g_new(VirtQueue *, s->num_queues);
    for (i = 0; i < s->num_queues; i++) {
        s->virtqs[i] = virtio_add_queue(vdev, s->queue_size,
                                        vhost_user_blk_handle_output);
    }

    s->inflight = g_new0(struct vhost_inflight, 1);
    s->vhost_vqs = g_new0(struct vhost_virtqueue, s->num_queues);
    s->connected = false;

    if (qemu_chr_fe_wait_connected(&s->chardev, errp) < 0) {
        goto virtio_err;
    }

    if (vhost_user_blk_connect(dev, errp) < 0) {
        qemu_chr_fe_disconnect(&s->chardev);
        goto virtio_err;
    }
    assert(s->connected);

    ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg,
                               sizeof(struct virtio_blk_config));
    if (ret < 0) {
        error_setg(errp, "vhost-user-blk: get block config failed");
        goto vhost_err;
    }

    /* we're fully initialized, now we can operate, so add the handler */
    qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL,
                             vhost_user_blk_event, NULL, (void *)dev,
                             NULL, true);
    return;

vhost_err:
    vhost_dev_cleanup(&s->dev);
virtio_err:
    g_free(s->vhost_vqs);
    s->vhost_vqs = NULL;
    g_free(s->inflight);
    s->inflight = NULL;
    for (i = 0; i < s->num_queues; i++) {
        virtio_delete_queue(s->virtqs[i]);
    }
    g_free(s->virtqs);
    virtio_cleanup(vdev);
    vhost_user_cleanup(&s->vhost_user);
}

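/* Unrealize: stop the device and release everything set up in realize */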
static void vhost_user_blk_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(dev);
    int i;

    virtio_set_status(vdev, 0);
    qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL,
                             NULL, NULL, NULL, false);
    vhost_dev_cleanup(&s->dev);
    vhost_dev_free_inflight(s->inflight);
    g_free(s->vhost_vqs);
    s->vhost_vqs = NULL;
    g_free(s->inflight);
    s->inflight = NULL;

    for (i = 0; i < s->num_queues; i++) {
        virtio_delete_queue(s->virtqs[i]);
    }
    g_free(s->virtqs);
    virtio_cleanup(vdev);
    vhost_user_cleanup(&s->vhost_user);
}

static void vhost_user_blk_instance_init(Object *obj)
{
    VHostUserBlk *s = VHOST_USER_BLK(obj);

    device_add_bootindex_property(obj, &s->bootindex, "bootindex",
                                  "/disk@0,0", DEVICE(obj));
}

static const VMStateDescription vmstate_vhost_user_blk = {
    .name = "vhost-user-blk",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static Property vhost_user_blk_properties[] = {
    DEFINE_PROP_CHR("chardev", VHostUserBlk, chardev),
    DEFINE_PROP_UINT16("num-queues", VHostUserBlk, num_queues,
                       VHOST_USER_BLK_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT32("queue-size", VHostUserBlk, queue_size, 128),
    DEFINE_PROP_BIT("config-wce", VHostUserBlk, config_wce, 0, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void vhost_user_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, vhost_user_blk_properties);
    dc->vmsd = &vmstate_vhost_user_blk;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = vhost_user_blk_device_realize;
    vdc->unrealize = vhost_user_blk_device_unrealize;
    vdc->get_config = vhost_user_blk_update_config;
    vdc->set_config = vhost_user_blk_set_config;
    vdc->get_features = vhost_user_blk_get_features;
    vdc->set_status = vhost_user_blk_set_status;
    vdc->reset = vhost_user_blk_reset;
}

static const TypeInfo vhost_user_blk_info = {
    .name = TYPE_VHOST_USER_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VHostUserBlk),
    .instance_init = vhost_user_blk_instance_init,
    .class_init = vhost_user_blk_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&vhost_user_blk_info);
}

type_init(virtio_register_types)