/*
 * xen paravirt block device backend
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
22 #include "qemu/osdep.h"
23 #include <sys/ioctl.h>
28 #include "hw/xen/xen_backend.h"
29 #include "xen_blkif.h"
30 #include "sysemu/blockdev.h"
31 #include "sysemu/block-backend.h"
32 #include "qapi/qmp/qdict.h"
33 #include "qapi/qmp/qstring.h"
/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
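
/*
 * Worked example (illustrative, not in the original source): with
 * BLOCK_SIZE = 512 and XC_PAGE_SIZE = 4096 there are 4096 / 512 = 8
 * sectors per granted page, so a segment's last_sect can be at most 7.
 * ioreq_parse() below rejects anything larger as a "page crossing".
 */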
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct PersistentRegion {
    void *addr;
    int num;
};

typedef struct PersistentRegion PersistentRegion;
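
/*
 * Note (summary, not in the original source): a PersistentGrant tracks a
 * single persistently mapped page, keyed by grant reference in the
 * persistent_gnts GTree; a PersistentRegion records one contiguous
 * batch-mapped area (addr, num pages) so blk_disconnect() can unmap it
 * with a single xc_gnttab_munmap() call.
 */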
struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie      acct;
};
struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    bool                directiosafe;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_discard;
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    GSList              *persistent_regions;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockBackend        *blk;
    QEMUBH              *bh;
};
/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub); /* -1, 0 or +1, without overflow */
}
static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}
static void remove_persistent_region(gpointer data, gpointer dev)
{
    PersistentRegion *region = data;
    struct XenBlkDev *blkdev = dev;
    XenGnttab gnt = blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, region->addr, region->num) != 0) {
        xen_be_printf(&blkdev->xendev, 0,
                      "xc_gnttab_munmap region %p failed: %s\n",
                      region->addr, strerror(errno));
    }
    xen_be_printf(&blkdev->xendev, 3,
                  "unmapped grant region %p with %d pages\n",
                  region->addr, region->num);
    g_free(region);
}
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}
/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1)
            * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void *)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
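
/*
 * Illustrative example (not in the original source): a 4 KiB read at
 * sector 16, carried in one segment with first_sect = 0 and last_sect = 7,
 * parses to start = 16 * 512 = 8192 and one iovec entry of length
 * (7 - 0 + 1) * 512 = 4096. At this point iov_base only holds the
 * intra-page byte offset (0 here); ioreq_map() later turns it into a
 * real address by adding the mapped page's base.
 */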
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "xc_gnttab_munmap failed: %s\n", strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "xc_gnttab_munmap failed: %s\n", strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    PersistentRegion *region;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));
            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
        ioreq->blkdev->max_grants))) {
        /*
         * If we are using persistent grants and batch mappings only
         * add the new maps to the list of persistent grants if the whole
         * area can be persistently mapped.
         */
        if (batch_maps) {
            region = g_malloc0(sizeof(*region));
            region->addr = ioreq->pages;
            region->num = new_maps;
            ioreq->blkdev->persistent_regions = g_slist_append(
                                            ioreq->blkdev->persistent_regions,
                                            region);
        }
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent this granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
        assert(!batch_maps || new_maps == 0);
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}
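
/*
 * Illustrative walk-through (not in the original source): with
 * feature_persistent on, a 4-segment request where segments 0 and 2 hit
 * the persistent_gnts tree leaves new_maps == 2; only the two misses are
 * mapped (contiguously when batch_maps is set), and they are then
 * promoted to persistent grants while quota remains, decrementing
 * new_maps so that ioreq_unmap() leaves them mapped.
 */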
static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (ioreq->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        } else {
            block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_readv(blkdev->blk, ioreq->start / BLOCK_SIZE,
                      &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                      qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_writev(blkdev->blk, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
        ioreq->aio_inflight++;
        blk_aio_discard(blkdev->blk,
                        discard_req->sector_number, discard_req->nr_sectors,
                        qemu_aio_complete, ioreq);
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native,
                                blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}
/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq,
                           RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {

            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
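
/*
 * Illustrative scenario (not in the original source): if all max_requests
 * ioreq slots are in flight, ioreq_start() returns NULL, more_work is
 * bumped and the loop stops. The bottom half is not rescheduled here
 * (requests_inflight == max_requests); instead qemu_aio_complete()
 * schedules it when a request finishes, and blk_send_response_all() then
 * recycles the freed slot.
 */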
/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}
/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
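
/*
 * Worked example (illustrative, not in the original source): with the
 * defaults max_requests = 32 and BLKIF_MAX_SEGMENTS_PER_REQUEST = 11,
 * the worst case above is 32 * 11 + 31 * 10 + 1 = 663, while the
 * simplified bound reserves MAX_GRANTS(32, 11) = 2 * 32 * 11 = 704 slots.
 */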
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}
static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h + 1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (!strcmp("vhd", blkdev->fileproto)) {
        blkdev->fileproto = "vpc";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}
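
/*
 * Illustrative xenstore layout consumed above (assumed typical toolstack
 * output; only the relative keys are confirmed by the code):
 *   .../params = "qcow2:/path/disk.qcow2"
 *   .../mode   = "w"
 *   .../type   = "phy"
 *   .../dev    = "xvda"
 * Everything before the ':' in params selects fileproto; the remainder
 * is the filename.
 */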
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
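    /*
     * Worked example (illustrative, not in the original source): Xen
     * virtual block devices use Linux major 202 with 16 minors per disk,
     * so xendev.dev = 202 * 256 = 51712 yields index 0 (xvda) below,
     * 51728 yields index 1 (xvdb), and so on.
     */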
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        QDict *options = NULL;

        if (strcmp(blkdev->fileproto, "<unset>")) {
            options = qdict_new();
            qdict_put(options, "driver", qstring_from_str(blkdev->fileproto));
        }

        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->blk = blk_new_open(blkdev->dev, blkdev->filename, NULL, options,
                                   qflags, &local_err);
        if (!blkdev->blk) {
            xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2,
                      "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk is not created by us, we get a reference
         * so we can blk_unref() unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_nofail(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        BlockDriverState *bs = blk_bs(blkdev->blk);
        const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
        xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      drv_name ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);
    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref",
                             &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }
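
    /*
     * Sizing note (illustrative, not in the original source; the exact
     * figure comes from the blkif ABI headers): BACK_RING_INIT sizes the
     * ring from the 4 KiB shared page, which for the native blkif ABI
     * works out to 32 request/response slots, matching the max_requests
     * default of 32 above.
     */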
    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  batch_maps ?
                                                  (GDestroyNotify)g_free :
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_regions = NULL;
        blkdev->persistent_gnt_count = 0;
    }
    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->blk) {
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }

    /*
     * Unmap persistent grants before switching to the closed state
     * so the frontend can free them.
     *
     * In the !batch_maps case g_tree_destroy will take care of unmapping
     * the grant, but in the batch_maps case we need to iterate over every
     * region in persistent_regions and unmap it.
     */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
        assert(batch_maps || blkdev->persistent_gnt_count == 0);
        if (batch_maps) {
            blkdev->persistent_gnt_count = 0;
            g_slist_foreach(blkdev->persistent_regions,
                            (GFunc)remove_persistent_region, blkdev);
            g_slist_free(blkdev->persistent_regions);
        }
        blkdev->feature_persistent = false;
    }
}
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->blk || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};
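
/*
 * Lifecycle sketch (summary, not in the original source): the xen_backend
 * core drives these hooks roughly as alloc -> init (advertise features in
 * xenstore) -> initialise (open the image, map the shared ring, bind the
 * event channel) -> event/bottom half (process ring requests) ->
 * disconnect -> free.
 */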