hw/block/xen_disk.c (qemu.git)
/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <[email protected]>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"

/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct PersistentRegion {
    void *addr;
    int num;
};

typedef struct PersistentRegion PersistentRegion;

struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

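/*
 * Per-instance backend state: xenstore-derived configuration, the mapped
 * shared ring, the three ioreq lists (inflight/finished/freelist) and the
 * BlockBackend used for the actual I/O.
 */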
struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    bool                directiosafe;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_discard;
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    GSList              *persistent_regions;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* Grant copy */
    gboolean            feature_grant_copy;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockBackend        *blk;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

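/* Scrub an ioreq so it can be recycled via the freelist without reallocation. */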
static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

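/* GDestroyNotify callback: unmap a single persistent grant when its tree entry is dropped. */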
static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
        xen_pv_printf(&grant->blkdev->xendev, 0,
                      "xengnttab_unmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_pv_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

static void remove_persistent_region(gpointer data, gpointer dev)
{
    PersistentRegion *region = data;
    struct XenBlkDev *blkdev = dev;
    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
        xen_pv_printf(&blkdev->xendev, 0,
                      "xengnttab_unmap region %p failed: %s\n",
                      region->addr, strerror(errno));
    }
    xen_pv_printf(&blkdev->xendev, 3,
                  "unmapped grant region %p with %d pages\n",
                  region->addr, region->num);
    g_free(region);
}

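/*
 * ioreq life cycle: ioreq_start() takes a request from the freelist (or
 * allocates a new one, up to max_requests) and puts it on the inflight
 * list; ioreq_finish() moves it to the finished list once I/O completes;
 * ioreq_release() resets it and returns it to the freelist.
 */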
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_pv_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_pv_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_pv_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_pv_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_pv_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_pv_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void *)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_pv_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

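/* Drop the grant mappings of a request, either as one batch or page by page. */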
static void ioreq_unmap(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_pv_printf(&ioreq->blkdev->xendev, 0,
                          "xengnttab_unmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
                xen_pv_printf(&ioreq->blkdev->xendev, 0,
                              "xengnttab_unmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

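/* Map the granted pages of a request, reusing persistent grants where possible. */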
static int ioreq_map(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    PersistentRegion *region;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_pv_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xengnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_pv_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xengnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_pv_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
        ioreq->blkdev->max_grants))) {
        /*
         * If we are using persistent grants and batch mappings only
         * add the new maps to the list of persistent grants if the whole
         * area can be persistently mapped.
         */
        if (batch_maps) {
            region = g_malloc0(sizeof(*region));
            region->addr = ioreq->pages;
            region->num = new_maps;
            ioreq->blkdev->persistent_regions = g_slist_append(
                                            ioreq->blkdev->persistent_regions,
                                            region);
        }
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_pv_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
        assert(!batch_maps || new_maps == 0);
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 480

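/*
 * Grant-copy path: instead of mapping the frontend's pages, the request
 * data is bounced through locally allocated buffers and transferred with
 * xengnttab_grant_copy().
 */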
static void ioreq_free_copy_buffers(struct ioreq *ioreq)
{
    int i;

    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->page[i] = NULL;
    }

    qemu_vfree(ioreq->pages);
}

static int ioreq_init_copy_buffers(struct ioreq *ioreq)
{
    int i;

    if (ioreq->v.niov == 0) {
        return 0;
    }

    ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);

    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
        ioreq->v.iov[i].iov_base = ioreq->page[i];
    }

    return 0;
}

static int ioreq_grant_copy(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count, rc;
    int64_t file_blk = ioreq->blkdev->file_blk;

    if (ioreq->v.niov == 0) {
        return 0;
    }

    count = ioreq->v.niov;

    for (i = 0; i < count; i++) {
        if (ioreq->req.operation == BLKIF_OP_READ) {
            segs[i].flags = GNTCOPY_dest_gref;
            segs[i].dest.foreign.ref = ioreq->refs[i];
            segs[i].dest.foreign.domid = ioreq->domids[i];
            segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
            segs[i].source.virt = ioreq->v.iov[i].iov_base;
        } else {
            segs[i].flags = GNTCOPY_source_gref;
            segs[i].source.foreign.ref = ioreq->refs[i];
            segs[i].source.foreign.domid = ioreq->domids[i];
            segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
            segs[i].dest.virt = ioreq->v.iov[i].iov_base;
        }
        segs[i].len = (ioreq->req.seg[i].last_sect
                       - ioreq->req.seg[i].first_sect + 1) * file_blk;
    }

    rc = xengnttab_grant_copy(gnt, count, segs);

    if (rc) {
        xen_pv_printf(&ioreq->blkdev->xendev, 0,
                      "failed to copy data %d\n", rc);
        ioreq->aio_errors++;
        return -1;
    }

    for (i = 0; i < count; i++) {
        if (segs[i].status != GNTST_okay) {
            xen_pv_printf(&ioreq->blkdev->xendev, 3,
                          "failed to copy data %d for gref %d, domid %d\n",
                          segs[i].status, ioreq->refs[i], ioreq->domids[i]);
            ioreq->aio_errors++;
            rc = -1;
        }
    }

    return rc;
}
#else
static void ioreq_free_copy_buffers(struct ioreq *ioreq)
{
    abort();
}

static int ioreq_init_copy_buffers(struct ioreq *ioreq)
{
    abort();
}

static int ioreq_grant_copy(struct ioreq *ioreq)
{
    abort();
}
#endif

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

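/* Completion callback for all aio operations issued on behalf of an ioreq. */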
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_pv_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }

    if (ioreq->blkdev->feature_grant_copy) {
        switch (ioreq->req.operation) {
        case BLKIF_OP_READ:
            /* in case of failure ioreq->aio_errors is increased */
            if (ret == 0) {
                ioreq_grant_copy(ioreq);
            }
            ioreq_free_copy_buffers(ioreq);
            break;
        case BLKIF_OP_WRITE:
        case BLKIF_OP_FLUSH_DISKCACHE:
            if (!ioreq->req.nr_segments) {
                break;
            }
            ioreq_free_copy_buffers(ioreq);
            break;
        default:
            break;
        }
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    if (!ioreq->blkdev->feature_grant_copy) {
        ioreq_unmap(ioreq);
    }
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
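        /* fall through */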
    case BLKIF_OP_READ:
        if (ioreq->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        } else {
            block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}

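/*
 * Discards larger than BDRV_REQUEST_MAX_SECTORS are split into several
 * blk_aio_pdiscard() calls; returns false if the sector range wraps or
 * exceeds the byte limit.
 */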
static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
                              uint64_t nr_sectors)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining, limit;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX >> BDRV_SECTOR_BITS) {
        return false;
    }

    limit = BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS;
    byte_offset = sec_start << BDRV_SECTOR_BITS;
    byte_remaining = sec_count << BDRV_SECTOR_BITS;

    do {
        byte_chunk = byte_remaining > limit ? limit : byte_remaining;
        ioreq->aio_inflight++;
        blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
                         qemu_aio_complete, ioreq);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}

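/* Submit a parsed request to the block layer as asynchronous I/O. */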
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->blkdev->feature_grant_copy) {
        ioreq_init_copy_buffers(ioreq);
        if (ioreq->req.nr_segments &&
            (ioreq->req.operation == BLKIF_OP_WRITE ||
             ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            ioreq_grant_copy(ioreq)) {
            ioreq_free_copy_buffers(ioreq);
            goto err;
        }
    } else {
        if (ioreq->req.nr_segments && ioreq_map(ioreq)) {
            goto err;
        }
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
                        qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&ioreq->req;
        if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        if (!ioreq->blkdev->feature_grant_copy) {
            ioreq_unmap(ioreq);
        }
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

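/* Put a response on the ring; returns whether the frontend needs a notification. */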
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native,
                                blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_pv_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}

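/* Main request loop: flush finished responses, then pull and submit new requests. */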
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {

            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
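                /* fall through */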
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                xen_pv_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))

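/* XenDevOps .alloc hook: initialise the request lists, the bottom half and the grant-table limit. */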
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xengnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}

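/* XenDevOps .init hook: read the backend configuration from xenstore and advertise features. */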
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h + 1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (!strcmp("vhd", blkdev->fileproto)) {
        blkdev->fileproto = "vpc";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info  |= VDISK_CDROM;
    }

    blkdev->file_blk  = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

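/*
 * XenDevOps .initialise hook: open (or look up) the BlockBackend, read the
 * frontend's ring-ref and event-channel, map the shared ring and bind the
 * event channel.
 */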
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;
    bool writethrough = true;

    /* caching and AIO mode, depending on direct-io-safe */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = 0;
        writethrough = false;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        QDict *options = NULL;

        if (strcmp(blkdev->fileproto, "<unset>")) {
            options = qdict_new();
            qdict_put(options, "driver", qstring_from_str(blkdev->fileproto));
        }

        /* setup via xenbus -> create new block driver instance */
        xen_pv_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
                                   qflags, &local_err);
        if (!blkdev->blk) {
            xen_pv_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return -1;
        }
        blk_set_enable_write_cache(blkdev->blk, !writethrough);
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_pv_printf(&blkdev->xendev, 2,
                      "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_pv_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk is not created by us, we get a reference
         * so we can blk_unref() unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_legacy(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        BlockDriverState *bs = blk_bs(blkdev->blk);
        const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
        xen_pv_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      drv_name ?: "-");
        blkdev->file_size = 0;
    }

    xen_pv_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    if (!blkdev->xendev.protocol) {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_NATIVE) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_X86_32;
    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
        blkdev->protocol = BLKIF_PROTOCOL_X86_64;
    } else {
        blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    }

    blkdev->sring = xengnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                             NULL, NULL,
                                             batch_maps ?
                                             (GDestroyNotify)g_free :
                                             (GDestroyNotify)destroy_grant);
        blkdev->persistent_regions = NULL;
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    blkdev->feature_grant_copy =
                (xengnttab_grant_copy(blkdev->xendev.gnttabdev, 0, NULL) == 0);

    xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
                  blkdev->feature_grant_copy ? "enabled" : "disabled");

    xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

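/* Tear down the connection: release the BlockBackend, shared ring and persistent grants. */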
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->blk) {
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_pv_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }

    /*
     * Unmap persistent grants before switching to the closed state
     * so the frontend can free them.
     *
     * In the !batch_maps case g_tree_destroy will take care of unmapping
     * the grant, but in the batch_maps case we need to iterate over every
     * region in persistent_regions and unmap it.
     */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
        assert(batch_maps || blkdev->persistent_gnt_count == 0);
        if (batch_maps) {
            blkdev->persistent_gnt_count = 0;
            g_slist_foreach(blkdev->persistent_regions,
                            (GFunc)remove_persistent_region, blkdev);
            g_slist_free(blkdev->persistent_regions);
        }
        blkdev->feature_persistent = false;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->blk || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise    = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};