static void virtio_blk_free_request(VirtIOBlockReq *req)
{
- if (req) {
- g_free(req);
- }
+ g_free(req);
}
static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
VirtIOBlock *s = req->dev;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
- trace_virtio_blk_req_complete(req, status);
+ trace_virtio_blk_req_complete(vdev, req, status);
stb_p(&req->in->status, status);
virtqueue_push(req->vq, &req->elem, req->in_len);
static void virtio_blk_rw_complete(void *opaque, int ret)
{
VirtIOBlockReq *next = opaque;
+ VirtIOBlock *s = next->dev;
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
while (next) {
VirtIOBlockReq *req = next;
next = req->mr_next;
- trace_virtio_blk_rw_complete(req, ret);
+ trace_virtio_blk_rw_complete(vdev, req, ret);
if (req->qiov.nalloc != -1) {
/* If nalloc is != -1 req->qiov is a local copy of the original
 * external iovec. It was allocated in virtio_blk_submit_multireq
 * in order to merge requests. */
block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
virtio_blk_free_request(req);
}
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
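
The completion callbacks in this patch all follow the same shape: take the AioContext of the request's BlockBackend, complete the request into the virtqueue, then release. A minimal sketch of that pattern (not part of the patch; MyReq and my_complete_cb() are hypothetical stand-ins, while blk_get_aio_context() and aio_context_acquire()/aio_context_release() are the helpers used above):

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "standard-headers/linux/virtio_blk.h"

typedef struct MyReq {              /* hypothetical request type (sketch only) */
    BlockBackend *blk;
    unsigned char status;
} MyReq;

static void my_complete_cb(void *opaque, int ret)
{
    MyReq *req = opaque;
    AioContext *ctx = blk_get_aio_context(req->blk);

    /* Completion may run in an IOThread or in the main loop, so take the
     * BlockBackend's AioContext before touching device/virtqueue state. */
    aio_context_acquire(ctx);
    req->status = ret ? VIRTIO_BLK_S_IOERR : VIRTIO_BLK_S_OK;
    /* ... virtqueue_push() the element, notify the guest, free req ... */
    aio_context_release(ctx);
}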
static void virtio_blk_flush_complete(void *opaque, int ret)
{
VirtIOBlockReq *req = opaque;
+ VirtIOBlock *s = req->dev;
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
if (ret) {
if (virtio_blk_handle_rw_error(req, -ret, 0)) {
- return;
+ goto out;
}
}
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
virtio_blk_free_request(req);
+
+out:
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
#ifdef __linux__
static void virtio_blk_ioctl_complete(void *opaque, int status)
{
VirtIOBlockIoctlReq *ioctl_req = opaque;
VirtIOBlockReq *req = ioctl_req->req;
- VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
+ VirtIOBlock *s = req->dev;
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
struct virtio_scsi_inhdr *scsi;
struct sg_io_hdr *hdr;
virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);
out:
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
virtio_blk_req_complete(req, status);
virtio_blk_free_request(req);
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
g_free(ioctl_req);
}
mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
}
- trace_virtio_blk_submit_multireq(mrb, start, num_reqs,
+ trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
+ mrb, start, num_reqs,
sector_num << BDRV_SECTOR_BITS,
qiov->size, is_write);
block_acct_merge_done(blk_get_stats(blk),
if (is_write) {
qemu_iovec_init_external(&req->qiov, iov, out_num);
- trace_virtio_blk_handle_write(req, req->sector_num,
+ trace_virtio_blk_handle_write(vdev, req, req->sector_num,
req->qiov.size / BDRV_SECTOR_SIZE);
} else {
qemu_iovec_init_external(&req->qiov, in_iov, in_num);
- trace_virtio_blk_handle_read(req, req->sector_num,
+ trace_virtio_blk_handle_read(vdev, req, req->sector_num,
req->qiov.size / BDRV_SECTOR_SIZE);
}
return 0;
}
-void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
+bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
{
VirtIOBlockReq *req;
MultiReqBuffer mrb = {};
+ bool progress = false;
+ aio_context_acquire(blk_get_aio_context(s->blk));
blk_io_plug(s->blk);
- while ((req = virtio_blk_get_request(s, vq))) {
- if (virtio_blk_handle_request(req, &mrb)) {
- virtqueue_detach_element(req->vq, &req->elem, 0);
- virtio_blk_free_request(req);
- break;
+ do {
+ virtio_queue_set_notification(vq, 0);
+
+ while ((req = virtio_blk_get_request(s, vq))) {
+ progress = true;
+ if (virtio_blk_handle_request(req, &mrb)) {
+ virtqueue_detach_element(req->vq, &req->elem, 0);
+ virtio_blk_free_request(req);
+ break;
+ }
}
- }
+
+ virtio_queue_set_notification(vq, 1);
+ } while (!virtio_queue_empty(vq));
if (mrb.num_reqs) {
virtio_blk_submit_multireq(s->blk, &mrb);
}
blk_io_unplug(s->blk);
+ aio_context_release(blk_get_aio_context(s->blk));
+ return progress;
+}
+
+static void virtio_blk_handle_output_do(VirtIOBlock *s, VirtQueue *vq)
+{
+ virtio_blk_handle_vq(s, vq);
}
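
virtio_blk_handle_vq() above now wraps request processing in a disable-notifications/drain/re-enable/re-check loop. A standalone sketch of that loop (not part of the patch; my_drain_vq() and my_process_request() are hypothetical, while virtqueue_pop(), virtio_queue_set_notification() and virtio_queue_empty() are the helpers used above):

#include "qemu/osdep.h"
#include "hw/virtio/virtio.h"

static void my_process_request(VirtQueueElement *elem)
{
    /* hypothetical: parse and submit the request; on completion the element
     * is returned with virtqueue_push() and freed with g_free(). */
}

static bool my_drain_vq(VirtQueue *vq)
{
    VirtQueueElement *elem;
    bool progress = false;

    do {
        /* Suppress guest->host notifications while draining: every kick we
         * avoid is one fewer vmexit/event-loop wakeup. */
        virtio_queue_set_notification(vq, 0);

        while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
            progress = true;
            my_process_request(elem);
        }

        /* Re-enable notifications, then poll once more for requests the
         * guest queued while they were off, so no kick is missed. */
        virtio_queue_set_notification(vq, 1);
    } while (!virtio_queue_empty(vq));

    return progress;
}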
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
/* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
* dataplane here instead of waiting for .set_status().
*/
- virtio_blk_data_plane_start(s->dataplane);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_disabled) {
return;
}
}
- virtio_blk_handle_vq(s, vq);
+ virtio_blk_handle_output_do(s, vq);
}
static void virtio_blk_dma_restart_bh(void *opaque)
s->rq = NULL;
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
while (req) {
VirtIOBlockReq *next = req->next;
if (virtio_blk_handle_request(req, &mrb)) {
if (mrb.num_reqs) {
virtio_blk_submit_multireq(s->blk, &mrb);
}
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
static void virtio_blk_dma_restart_cb(void *opaque, int running,
                                      RunState state)
virtio_blk_free_request(req);
}
- if (s->dataplane) {
- virtio_blk_data_plane_stop(s->dataplane);
- }
aio_context_release(ctx);
+ assert(!s->dataplane_started);
blk_set_enable_write_cache(s->blk, s->original_wce);
}
static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
- if (s->dataplane && !(status & (VIRTIO_CONFIG_S_DRIVER |
- VIRTIO_CONFIG_S_DRIVER_OK))) {
- virtio_blk_data_plane_stop(s->dataplane);
+ if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
+ assert(!s->dataplane_started);
}
if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
}
}
-static void virtio_blk_save(QEMUFile *f, void *opaque, size_t size)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
-
- virtio_save(vdev, f);
-}
-
static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
qemu_put_sbyte(f, 0);
}
-static int virtio_blk_load(QEMUFile *f, void *opaque, size_t size)
-{
- VirtIOBlock *s = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(s);
-
- return virtio_load(vdev, f, 2);
-}
-
static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
int version_id)
{
}
}
- req = qemu_get_virtqueue_element(f, sizeof(VirtIOBlockReq));
+ req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
req->next = s->rq;
s->rq = req;
}
blkconf_serial(&conf->conf, &conf->serial);
- blkconf_apply_backend_options(&conf->conf);
+ blkconf_apply_backend_options(&conf->conf,
+ blk_is_read_only(conf->conf.blk), true,
+ &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
s->original_wce = blk_enable_write_cache(conf->conf.blk);
blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, &err);
if (err) {
s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
for (i = 0; i < conf->num_queues; i++) {
- virtio_add_queue_aio(vdev, 128, virtio_blk_handle_output);
+ virtio_add_queue(vdev, 128, virtio_blk_handle_output);
}
virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
if (err != NULL) {
static void virtio_blk_instance_init(Object *obj)
{
VirtIOBlock *s = VIRTIO_BLK(obj);
- object_property_add_link(obj, "iothread", TYPE_IOTHREAD,
- (Object **)&s->conf.iothread,
- qdev_prop_allow_set_link_before_realize,
- OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
device_add_bootindex_property(obj, &s->conf.conf.bootindex,
"bootindex", "/disk@0,0",
DEVICE(obj), NULL);
}
-VMSTATE_VIRTIO_DEVICE(blk, 2, virtio_blk_load, virtio_blk_save);
+static const VMStateDescription vmstate_virtio_blk = {
+ .name = "virtio-blk",
+ .minimum_version_id = 2,
+ .version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+};
static Property virtio_blk_properties[] = {
DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
true),
DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1),
+ DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
+ IOThread *),
DEFINE_PROP_END_OF_LIST(),
};
vdc->reset = virtio_blk_reset;
vdc->save = virtio_blk_save_device;
vdc->load = virtio_blk_load_device;
+ vdc->start_ioeventfd = virtio_blk_data_plane_start;
+ vdc->stop_ioeventfd = virtio_blk_data_plane_stop;
}
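
The vmstate_virtio_blk descriptor introduced earlier only takes effect once the device class points at it; the hookup presumably sits in the same class_init shown above, along these lines (sketch only, surrounding assignments elided):

static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* ... property/realize/vdc assignments as in the hunk above ... */
    dc->vmsd = &vmstate_virtio_blk;   /* generic VMState drives save/load */
}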
static const TypeInfo virtio_blk_info = {