/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <qemu-common.h>
#include "virtio-blk.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif

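/*
 * Per-device state.  'rq' chains requests that failed under a "stop" error
 * policy so they can be retried after the VM resumes; 'bh' is the bottom
 * half that drains that list (see virtio_blk_dma_restart_bh() below).
 */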
typedef struct VirtIOBlock
{
    VirtIODevice vdev;
    BlockDriverState *bs;
    VirtQueue *vq;
    void *rq;
    QEMUBH *bh;
    BlockConf *conf;
    unsigned short sector_mask;
} VirtIOBlock;

static VirtIOBlock *to_virtio_blk(VirtIODevice *vdev)
{
    return (VirtIOBlock *)vdev;
}

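/*
 * Per-request state.  'elem' carries the descriptor chain popped from the
 * virtqueue; 'out' points at the request header in the first out iovec,
 * 'in' at the status byte in the last in iovec, and 'next' links requests
 * parked on VirtIOBlock->rq.
 */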
typedef struct VirtIOBlockReq
{
    VirtIOBlock *dev;
    VirtQueueElement elem;
    struct virtio_blk_inhdr *in;
    struct virtio_blk_outhdr *out;
    struct virtio_scsi_inhdr *scsi;
    QEMUIOVector qiov;
    struct VirtIOBlockReq *next;
} VirtIOBlockReq;

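/*
 * Fill in the status byte, return the descriptor chain to the guest and
 * notify it, then free the request.
 */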
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
{
    VirtIOBlock *s = req->dev;

    req->in->status = status;
    virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
    virtio_notify(&s->vdev, s->vq);

    qemu_free(req);
}

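/*
 * Apply the drive's configured error policy to a failed read or write.
 * Returns 1 if the request was handled here (completed with an error
 * status, or parked on s->rq with the VM stopped for later retry), and 0
 * if the error is ignored and the caller should complete the request
 * normally.
 */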
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
    int is_read)
{
    BlockInterfaceErrorAction action =
        drive_get_on_error(req->dev->bs, is_read);
    VirtIOBlock *s = req->dev;

    if (action == BLOCK_ERR_IGNORE) {
        bdrv_mon_event(s->bs, BDRV_ACTION_IGNORE, is_read);
        return 0;
    }

    if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC)
            || action == BLOCK_ERR_STOP_ANY) {
        req->next = s->rq;
        s->rq = req;
        bdrv_mon_event(s->bs, BDRV_ACTION_STOP, is_read);
        vm_stop(0);
    } else {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        bdrv_mon_event(s->bs, BDRV_ACTION_REPORT, is_read);
    }

    return 1;
}

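/*
 * Common AIO completion callback for reads and batched writes; errors are
 * routed through the error policy before the request is completed.
 */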
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    if (ret) {
        int is_read = !(req->out->type & VIRTIO_BLK_T_OUT);
        if (virtio_blk_handle_rw_error(req, -ret, is_read))
            return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
}

static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    virtio_blk_req_complete(req, ret ? VIRTIO_BLK_S_IOERR : VIRTIO_BLK_S_OK);
}

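/*
 * virtio_blk_alloc_request() creates an empty request;
 * virtio_blk_get_request() additionally pops the next descriptor chain from
 * the virtqueue and returns NULL when the queue is empty.
 */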
static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = qemu_malloc(sizeof(*req));
    req->dev = s;
    req->qiov.size = 0;
    req->next = NULL;
    return req;
}

static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = virtio_blk_alloc_request(s);

    if (req != NULL) {
        if (!virtqueue_pop(s->vq, &req->elem)) {
            qemu_free(req);
            return NULL;
        }
    }

    return req;
}

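/*
 * SCSI passthrough (VIRTIO_BLK_T_SCSI_CMD): translate the guest's segments
 * into a struct sg_io_hdr and issue it with bdrv_ioctl(SG_IO).  Only
 * available on Linux hosts; the stub below fails such requests with
 * VIRTIO_BLK_S_UNSUPP everywhere else.
 */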
#ifdef __linux__
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    struct sg_io_hdr hdr;
    int ret;
    int status;
    int i;

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (req->elem.out_num < 2 || req->elem.in_num < 3) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        return;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (req->elem.out_num > 2 && req->elem.in_num > 3) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        return;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    req->scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    memset(&hdr, 0, sizeof(struct sg_io_hdr));
    hdr.interface_id = 'S';
    hdr.cmd_len = req->elem.out_sg[1].iov_len;
    hdr.cmdp = req->elem.out_sg[1].iov_base;
    hdr.dxfer_len = 0;

    if (req->elem.out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        hdr.dxfer_direction = SG_DXFER_TO_DEV;
        hdr.iovec_count = req->elem.out_num - 2;

        for (i = 0; i < hdr.iovec_count; i++)
            hdr.dxfer_len += req->elem.out_sg[i + 2].iov_len;

        hdr.dxferp = req->elem.out_sg + 2;

    } else if (req->elem.in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.iovec_count = req->elem.in_num - 3;
        for (i = 0; i < hdr.iovec_count; i++)
            hdr.dxfer_len += req->elem.in_sg[i].iov_len;

        hdr.dxferp = req->elem.in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        hdr.dxfer_direction = SG_DXFER_NONE;
    }

    hdr.sbp = req->elem.in_sg[req->elem.in_num - 3].iov_base;
    hdr.mx_sb_len = req->elem.in_sg[req->elem.in_num - 3].iov_len;

    ret = bdrv_ioctl(req->dev->bs, SG_IO, &hdr);
    if (ret) {
        status = VIRTIO_BLK_S_UNSUPP;
        hdr.status = ret;
        hdr.resid = hdr.dxfer_len;
    } else if (hdr.status) {
        status = VIRTIO_BLK_S_IOERR;
    } else {
        status = VIRTIO_BLK_S_OK;
    }

    req->scsi->errors = hdr.status;
    req->scsi->residual = hdr.resid;
    req->scsi->sense_len = hdr.sb_len_wr;
    req->scsi->data_len = hdr.dxfer_len;

    virtio_blk_req_complete(req, status);
}
#else
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
}
#endif /* __linux__ */

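/*
 * Submit a batch of writes with bdrv_aio_multiwrite().  If submission
 * fails, complete each request flagged with an error here; the remaining
 * requests finish through their virtio_blk_rw_complete() callbacks.
 */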
static void do_multiwrite(BlockDriverState *bs, BlockRequest *blkreq,
    int num_writes)
{
    int i, ret;
    ret = bdrv_aio_multiwrite(bs, blkreq, num_writes);

    if (ret != 0) {
        for (i = 0; i < num_writes; i++) {
            if (blkreq[i].error) {
                virtio_blk_rw_complete(blkreq[i].opaque, -EIO);
            }
        }
    }
}

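/*
 * VIRTIO_BLK_T_FLUSH: push out the pending write batch first so the flush
 * covers it, then start an asynchronous flush of the backing device.
 */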
static void virtio_blk_handle_flush(BlockRequest *blkreq, int *num_writes,
    VirtIOBlockReq *req)
{
    BlockDriverAIOCB *acb;

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (*num_writes > 0) {
        do_multiwrite(req->dev->bs, blkreq, *num_writes);
    }
    *num_writes = 0;

    acb = bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
    if (!acb) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
    }
}

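/*
 * Queue a write into the caller's BlockRequest array rather than submitting
 * it immediately; the batch is submitted via do_multiwrite() once it holds
 * 32 requests or when the caller runs out of work.
 */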
static void virtio_blk_handle_write(BlockRequest *blkreq, int *num_writes,
    VirtIOBlockReq *req)
{
    if (req->out->sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }

    if (*num_writes == 32) {
        do_multiwrite(req->dev->bs, blkreq, *num_writes);
        *num_writes = 0;
    }

    blkreq[*num_writes].sector = req->out->sector;
    blkreq[*num_writes].nb_sectors = req->qiov.size / BDRV_SECTOR_SIZE;
    blkreq[*num_writes].qiov = &req->qiov;
    blkreq[*num_writes].cb = virtio_blk_rw_complete;
    blkreq[*num_writes].opaque = req;
    blkreq[*num_writes].error = 0;

    (*num_writes)++;
}

static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
    BlockDriverAIOCB *acb;

    if (req->out->sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }

    acb = bdrv_aio_readv(req->dev->bs, req->out->sector, &req->qiov,
                         req->qiov.size / BDRV_SECTOR_SIZE,
                         virtio_blk_rw_complete, req);
    if (!acb) {
        virtio_blk_rw_complete(req, -EIO);
    }
}

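/*
 * Write batch handed to do_multiwrite(); sized to match the 32-request
 * limit enforced in virtio_blk_handle_write().
 */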
typedef struct MultiReqBuffer {
    BlockRequest blkreq[32];
    int num_writes;
} MultiReqBuffer;

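/*
 * Validate the header and status segments of a popped request, then
 * dispatch by type: flush, SCSI passthrough, write (payload in the
 * remaining out iovecs) or read (payload in the leading in iovecs).
 */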
static void virtio_blk_handle_request(VirtIOBlockReq *req,
    MultiReqBuffer *mrb)
{
    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        fprintf(stderr, "virtio-blk missing headers\n");
        exit(1);
    }

    if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
        req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
        fprintf(stderr, "virtio-blk header not in correct element\n");
        exit(1);
    }

    req->out = (void *)req->elem.out_sg[0].iov_base;
    req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;

    if (req->out->type & VIRTIO_BLK_T_FLUSH) {
        virtio_blk_handle_flush(mrb->blkreq, &mrb->num_writes, req);
    } else if (req->out->type & VIRTIO_BLK_T_SCSI_CMD) {
        virtio_blk_handle_scsi(req);
    } else if (req->out->type & VIRTIO_BLK_T_OUT) {
        qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
                                 req->elem.out_num - 1);
        virtio_blk_handle_write(mrb->blkreq, &mrb->num_writes, req);
    } else {
        qemu_iovec_init_external(&req->qiov, &req->elem.in_sg[0],
                                 req->elem.in_num - 1);
        virtio_blk_handle_read(req);
    }
}

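/*
 * Virtqueue notification handler: drain the queue, batching writes in
 * 'mrb', and submit any remaining batched writes at the end.
 */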
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

    while ((req = virtio_blk_get_request(s))) {
        virtio_blk_handle_request(req, &mrb);
    }

    if (mrb.num_writes > 0) {
        do_multiwrite(s->bs, mrb.blkreq, mrb.num_writes);
    }

    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible. But
     * that should be done in the generic block layer.
     */
}

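/*
 * Bottom half run when the VM resumes: re-issue every request that
 * virtio_blk_handle_rw_error() parked on s->rq while the VM was stopped.
 */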
static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    s->rq = NULL;

    while (req) {
        virtio_blk_handle_request(req, &mrb);
        req = req->next;
    }

    if (mrb.num_writes > 0) {
        do_multiwrite(s->bs, mrb.blkreq, mrb.num_writes);
    }
}

static void virtio_blk_dma_restart_cb(void *opaque, int running, int reason)
{
    VirtIOBlock *s = opaque;

    if (!running)
        return;

    if (!s->bh) {
        s->bh = qemu_bh_new(virtio_blk_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
    /*
     * This should cancel pending requests, but can't do nicely until there
     * are per-device request lists.
     */
    qemu_aio_flush();
}

/* coalesce internal state, copy to pci i/o region 0
 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int cylinders, heads, secs;

    bdrv_get_geometry(s->bs, &capacity);
    bdrv_get_geometry_hint(s->bs, &cylinders, &heads, &secs);
    memset(&blkcfg, 0, sizeof(blkcfg));
    stq_raw(&blkcfg.capacity, capacity);
    stl_raw(&blkcfg.seg_max, 128 - 2);
    stw_raw(&blkcfg.cylinders, cylinders);
    blkcfg.heads = heads;
    blkcfg.sectors = secs & ~s->sector_mask;
    blkcfg.blk_size = s->conf->logical_block_size;
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(s->conf);
    blkcfg.alignment_offset = 0;
    blkcfg.min_io_size = s->conf->min_io_size / blkcfg.blk_size;
    blkcfg.opt_io_size = s->conf->opt_io_size / blkcfg.blk_size;
    memcpy(config, &blkcfg, sizeof(struct virtio_blk_config));
}

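/*
 * Advertise host features: segment limit, geometry, topology and block
 * size unconditionally; write cache and read-only depending on the
 * backing device.
 */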
static uint32_t virtio_blk_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIOBlock *s = to_virtio_blk(vdev);

    features |= (1 << VIRTIO_BLK_F_SEG_MAX);
    features |= (1 << VIRTIO_BLK_F_GEOMETRY);
    features |= (1 << VIRTIO_BLK_F_TOPOLOGY);
    features |= (1 << VIRTIO_BLK_F_BLK_SIZE);

    if (bdrv_enable_write_cache(s->bs))
        features |= (1 << VIRTIO_BLK_F_WCACHE);

    if (bdrv_is_read_only(s->bs))
        features |= 1 << VIRTIO_BLK_F_RO;

    return features;
}

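/*
 * Migration: after the common virtio state, each parked request is written
 * as a 1 byte followed by its VirtQueueElement, terminated by a 0 byte;
 * virtio_blk_load() rebuilds the list onto s->rq.
 */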
static void virtio_blk_save(QEMUFile *f, void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;

    virtio_save(&s->vdev, f);

    while (req) {
        qemu_put_sbyte(f, 1);
        qemu_put_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}

static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOBlock *s = opaque;

    if (version_id != 2)
        return -EINVAL;

    virtio_load(&s->vdev, f);
    while (qemu_get_sbyte(f)) {
        VirtIOBlockReq *req = virtio_blk_alloc_request(s);
        qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
        req->next = s->rq;
        s->rq = req;
    }

    return 0;
}

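/*
 * Create one virtio-blk device: common virtio init, config/feature/reset
 * callbacks, a single 128-entry virtqueue, the VM state-change handler used
 * for error recovery, and savevm registration (version 2).
 */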
VirtIODevice *virtio_blk_init(DeviceState *dev, BlockConf *conf)
{
    VirtIOBlock *s;
    int cylinders, heads, secs;
    static int virtio_blk_id;

    s = (VirtIOBlock *)virtio_common_init("virtio-blk", VIRTIO_ID_BLOCK,
                                          sizeof(struct virtio_blk_config),
                                          sizeof(VirtIOBlock));

    s->vdev.get_config = virtio_blk_update_config;
    s->vdev.get_features = virtio_blk_get_features;
    s->vdev.reset = virtio_blk_reset;
    s->bs = conf->dinfo->bdrv;
    s->conf = conf;
    s->rq = NULL;
    s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1;
    bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);

    s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);

    qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    register_savevm("virtio-blk", virtio_blk_id++, 2,
                    virtio_blk_save, virtio_blk_load, s);

    return &s->vdev;
}