/*
 * xen paravirt block device backend
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include <sys/ioctl.h>
#include <sys/types.h>

#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    struct XenDevice    xendev;  /* must be first */
    const char          *fileproto;
    blkif_back_rings_t  rings;

    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    BlockDriverState    *bs;
/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    memset(ioreq->page, 0, sizeof(ioreq->page));

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}
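
/*
 * int_cmp() is the GCompareDataFunc used to order the persistent_gnts GTree,
 * which is keyed by grant reference.  The expression (ua > ub) - (ua < ub)
 * yields -1, 0 or +1 and avoids the wrap-around that a plain subtraction of
 * unsigned values could produce.
 */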
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}
static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}
/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }
    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }
    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }
        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;
        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
}
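
/*
 * ioreq_parse() stored only the in-page offset of each segment in iov_base
 * (first_sect * file_blk) and the segment length in iov_len.  ioreq_map()
 * below adds the virtual address of the mapped (or persistently mapped)
 * grant page to each iov_base, turning those offsets into pointers usable
 * by bdrv_aio_readv()/bdrv_aio_writev().
 */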
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (regardless of whether it is a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));
            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent) {
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
               && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}
static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }
    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }
    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
    case BLKIF_OP_READ:
        bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }
    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }
    qemu_aio_complete(ioreq, 0);
    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}
/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }
        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}
/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
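
/*
 * Worked example (assuming BLKIF_MAX_SEGMENTS_PER_REQUEST is 11, its usual
 * blkif ABI value): with the default max_requests = 32 the exact worst case
 * above is 32 * 11 + 31 * 10 + 1 = 663 grants, while the simplified bound
 * reserves MAX_GRANTS(32, 11) = 704 slots.
 */
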
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL ||
        blkdev->type == NULL ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }
    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }

    /* init qemu block driver */
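    /*
     * xendev.dev is the virtual device number requested by the frontend;
     * 202 is the conventional xvd major and each disk spans 16 minors, so
     * this recovers the disk index (e.g. 51712 -> xvda -> 0, 51728 -> xvdb -> 1).
     */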
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);

        Error *local_err = NULL;
        BlockDriver *drv = bdrv_find_whitelisted_format(blkdev->fileproto,
                                                        readonly);
        if (bdrv_open(blkdev->bs,
                      blkdev->filename, NULL, qflags, drv, &local_err) != 0)
        {
            xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            bdrv_unref(blkdev->bs);
            blkdev->bs = NULL;
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
        if (bdrv_is_read_only(blkdev->bs) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->bs = NULL;
            return -1;
        }
        /* blkdev->bs was not created by us, we get a reference
         * so we can bdrv_unref() unconditionally */
        bdrv_ref(blkdev->bs);
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        bdrv_detach_dev(blkdev->bs, blkdev);
        bdrv_unref(blkdev->bs);
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->sring = NULL;
    }
}
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    /* Free persistent grants */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};