qemu.git: hw/block/xen_disk.c
1 /*
2  *  xen paravirt block device backend
3  *
4  *  (c) Gerd Hoffmann <[email protected]>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; under version 2 of the License.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  *  GNU General Public License for more details.
14  *
15  *  You should have received a copy of the GNU General Public License along
16  *  with this program; if not, see <http://www.gnu.org/licenses/>.
17  *
18  *  Contributions after 2012-01-13 are licensed under the terms of the
19  *  GNU GPL, version 2 or (at your option) any later version.
20  */
21
22 #include "qemu/osdep.h"
23 #include <sys/ioctl.h>
24 #include <sys/uio.h>
25
26 #include "hw/hw.h"
27 #include "hw/xen/xen_backend.h"
28 #include "xen_blkif.h"
29 #include "sysemu/blockdev.h"
30 #include "sysemu/block-backend.h"
31 #include "qapi/error.h"
32 #include "qapi/qmp/qdict.h"
33 #include "qapi/qmp/qstring.h"
34
35 /* ------------------------------------------------------------- */
36
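/*
 * Tunables: when batch_maps is nonzero, all new grants needed by a request
 * are mapped with a single xengnttab_map_grant_refs() call in ioreq_map();
 * otherwise each grant is mapped individually.  max_requests bounds how many
 * struct ioreq a device will allocate in total (see ioreq_start()).
 */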
37 static int batch_maps   = 0;
38
39 static int max_requests = 32;
40
41 /* ------------------------------------------------------------- */
42
43 #define BLOCK_SIZE  512
44 #define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
45
46 struct PersistentGrant {
47     void *page;
48     struct XenBlkDev *blkdev;
49 };
50
51 typedef struct PersistentGrant PersistentGrant;
52
53 struct PersistentRegion {
54     void *addr;
55     int num;
56 };
57
58 typedef struct PersistentRegion PersistentRegion;
59
60 struct ioreq {
61     blkif_request_t     req;
62     int16_t             status;
63
64     /* parsed request */
65     off_t               start;
66     QEMUIOVector        v;
67     int                 presync;
68     uint8_t             mapped;
69
70     /* grant mapping */
71     uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
72     uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
73     int                 prot;
74     void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
75     void                *pages;
76     int                 num_unmap;
77
78     /* aio status */
79     int                 aio_inflight;
80     int                 aio_errors;
81
82     struct XenBlkDev    *blkdev;
83     QLIST_ENTRY(ioreq)   list;
84     BlockAcctCookie     acct;
85 };
86
87 struct XenBlkDev {
88     struct XenDevice    xendev;  /* must be first */
89     char                *params;
90     char                *mode;
91     char                *type;
92     char                *dev;
93     char                *devtype;
94     bool                directiosafe;
95     const char          *fileproto;
96     const char          *filename;
97     int                 ring_ref;
98     void                *sring;
99     int64_t             file_blk;
100     int64_t             file_size;
101     int                 protocol;
102     blkif_back_rings_t  rings;
103     int                 more_work;
104     int                 cnt_map;
105
106     /* request lists */
107     QLIST_HEAD(inflight_head, ioreq) inflight;
108     QLIST_HEAD(finished_head, ioreq) finished;
109     QLIST_HEAD(freelist_head, ioreq) freelist;
110     int                 requests_total;
111     int                 requests_inflight;
112     int                 requests_finished;
113
114     /* Persistent grants extension */
115     gboolean            feature_discard;
116     gboolean            feature_persistent;
117     GTree               *persistent_gnts;
118     GSList              *persistent_regions;
119     unsigned int        persistent_gnt_count;
120     unsigned int        max_grants;
121
122     /* Grant copy */
123     gboolean            feature_grant_copy;
124
125     /* qemu block driver */
126     DriveInfo           *dinfo;
127     BlockBackend        *blk;
128     QEMUBH              *bh;
129 };
130
131 /* ------------------------------------------------------------- */
132
133 static void ioreq_reset(struct ioreq *ioreq)
134 {
135     memset(&ioreq->req, 0, sizeof(ioreq->req));
136     ioreq->status = 0;
137     ioreq->start = 0;
138     ioreq->presync = 0;
139     ioreq->mapped = 0;
140
141     memset(ioreq->domids, 0, sizeof(ioreq->domids));
142     memset(ioreq->refs, 0, sizeof(ioreq->refs));
143     ioreq->prot = 0;
144     memset(ioreq->page, 0, sizeof(ioreq->page));
145     ioreq->pages = NULL;
146
147     ioreq->aio_inflight = 0;
148     ioreq->aio_errors = 0;
149
150     ioreq->blkdev = NULL;
151     memset(&ioreq->list, 0, sizeof(ioreq->list));
152     memset(&ioreq->acct, 0, sizeof(ioreq->acct));
153
154     qemu_iovec_reset(&ioreq->v);
155 }
156
157 static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
158 {
159     uint ua = GPOINTER_TO_UINT(a);
160     uint ub = GPOINTER_TO_UINT(b);
161     return (ua > ub) - (ua < ub);
162 }
163
164 static void destroy_grant(gpointer pgnt)
165 {
166     PersistentGrant *grant = pgnt;
167     xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;
168
169     if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
170         xen_be_printf(&grant->blkdev->xendev, 0,
171                       "xengnttab_unmap failed: %s\n",
172                       strerror(errno));
173     }
174     grant->blkdev->persistent_gnt_count--;
175     xen_be_printf(&grant->blkdev->xendev, 3,
176                   "unmapped grant %p\n", grant->page);
177     g_free(grant);
178 }
179
180 static void remove_persistent_region(gpointer data, gpointer dev)
181 {
182     PersistentRegion *region = data;
183     struct XenBlkDev *blkdev = dev;
184     xengnttab_handle *gnt = blkdev->xendev.gnttabdev;
185
186     if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
187         xen_be_printf(&blkdev->xendev, 0,
188                       "xengnttab_unmap region %p failed: %s\n",
189                       region->addr, strerror(errno));
190     }
191     xen_be_printf(&blkdev->xendev, 3,
192                   "unmapped grant region %p with %d pages\n",
193                   region->addr, region->num);
194     g_free(region);
195 }
196
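/*
 * Request lifecycle, as implemented by the helpers below: ioreq_start()
 * takes a struct ioreq from the freelist (allocating a new one while fewer
 * than max_requests exist) and puts it on the inflight list; ioreq_finish()
 * moves a completed request from inflight to finished so that its response
 * can be pushed onto the ring; ioreq_release() resets the request and
 * returns it to the freelist.
 */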
197 static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
198 {
199     struct ioreq *ioreq = NULL;
200
201     if (QLIST_EMPTY(&blkdev->freelist)) {
202         if (blkdev->requests_total >= max_requests) {
203             goto out;
204         }
205         /* allocate new struct */
206         ioreq = g_malloc0(sizeof(*ioreq));
207         ioreq->blkdev = blkdev;
208         blkdev->requests_total++;
209         qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
210     } else {
211         /* get one from freelist */
212         ioreq = QLIST_FIRST(&blkdev->freelist);
213         QLIST_REMOVE(ioreq, list);
214     }
215     QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
216     blkdev->requests_inflight++;
217
218 out:
219     return ioreq;
220 }
221
222 static void ioreq_finish(struct ioreq *ioreq)
223 {
224     struct XenBlkDev *blkdev = ioreq->blkdev;
225
226     QLIST_REMOVE(ioreq, list);
227     QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
228     blkdev->requests_inflight--;
229     blkdev->requests_finished++;
230 }
231
232 static void ioreq_release(struct ioreq *ioreq, bool finish)
233 {
234     struct XenBlkDev *blkdev = ioreq->blkdev;
235
236     QLIST_REMOVE(ioreq, list);
237     ioreq_reset(ioreq);
238     ioreq->blkdev = blkdev;
239     QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
240     if (finish) {
241         blkdev->requests_finished--;
242     } else {
243         blkdev->requests_inflight--;
244     }
245 }
246
247 /*
248  * translate request into iovec + start offset
249  * do sanity checks along the way
250  */
251 static int ioreq_parse(struct ioreq *ioreq)
252 {
253     struct XenBlkDev *blkdev = ioreq->blkdev;
254     uintptr_t mem;
255     size_t len;
256     int i;
257
258     xen_be_printf(&blkdev->xendev, 3,
259                   "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
260                   ioreq->req.operation, ioreq->req.nr_segments,
261                   ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
262     switch (ioreq->req.operation) {
263     case BLKIF_OP_READ:
264         ioreq->prot = PROT_WRITE; /* to memory */
265         break;
266     case BLKIF_OP_FLUSH_DISKCACHE:
267         ioreq->presync = 1;
268         if (!ioreq->req.nr_segments) {
269             return 0;
270         }
271         /* fall through */
272     case BLKIF_OP_WRITE:
273         ioreq->prot = PROT_READ; /* from memory */
274         break;
275     case BLKIF_OP_DISCARD:
276         return 0;
277     default:
278         xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
279                       ioreq->req.operation);
280         goto err;
281     }
282
283     if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
284         xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
285         goto err;
286     }
287
288     ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
289     for (i = 0; i < ioreq->req.nr_segments; i++) {
290         if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
291             xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
292             goto err;
293         }
294         if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
295             xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
296             goto err;
297         }
298         if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
299             xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
300             goto err;
301         }
302
303         ioreq->domids[i] = blkdev->xendev.dom;
304         ioreq->refs[i]   = ioreq->req.seg[i].gref;
305
306         mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
307         len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
308         qemu_iovec_add(&ioreq->v, (void*)mem, len);
309     }
310     if (ioreq->start + ioreq->v.size > blkdev->file_size) {
311         xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
312         goto err;
313     }
314     return 0;
315
316 err:
317     ioreq->status = BLKIF_RSP_ERROR;
318     return -1;
319 }
320
321 static void ioreq_unmap(struct ioreq *ioreq)
322 {
323     xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
324     int i;
325
326     if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
327         return;
328     }
329     if (batch_maps) {
330         if (!ioreq->pages) {
331             return;
332         }
333         if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
334             xen_be_printf(&ioreq->blkdev->xendev, 0,
335                           "xengnttab_unmap failed: %s\n",
336                           strerror(errno));
337         }
338         ioreq->blkdev->cnt_map -= ioreq->num_unmap;
339         ioreq->pages = NULL;
340     } else {
341         for (i = 0; i < ioreq->num_unmap; i++) {
342             if (!ioreq->page[i]) {
343                 continue;
344             }
345             if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
346                 xen_be_printf(&ioreq->blkdev->xendev, 0,
347                               "xengnttab_unmap failed: %s\n",
348                               strerror(errno));
349             }
350             ioreq->blkdev->cnt_map--;
351             ioreq->page[i] = NULL;
352         }
353     }
354     ioreq->mapped = 0;
355 }
356
357 static int ioreq_map(struct ioreq *ioreq)
358 {
359     xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
360     uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
361     uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
362     void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
363     int i, j, new_maps = 0;
364     PersistentGrant *grant;
365     PersistentRegion *region;
366     /* domids and refs variables will contain the information necessary
367      * to map the grants that are needed to fulfill this request.
368      *
369      * After mapping the needed grants, the page array will contain the
370      * memory address of each granted page in the order specified in ioreq
371      * (regardless of whether each one is a persistent grant or not).
372      */
373
374     if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
375         return 0;
376     }
377     if (ioreq->blkdev->feature_persistent) {
378         for (i = 0; i < ioreq->v.niov; i++) {
379             grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
380                                     GUINT_TO_POINTER(ioreq->refs[i]));
381
382             if (grant != NULL) {
383                 page[i] = grant->page;
384                 xen_be_printf(&ioreq->blkdev->xendev, 3,
385                               "using persistent-grant %" PRIu32 "\n",
386                               ioreq->refs[i]);
387             } else {
388                     /* Add the grant to the list of grants that
389                      * should be mapped
390                      */
391                     domids[new_maps] = ioreq->domids[i];
392                     refs[new_maps] = ioreq->refs[i];
393                     page[i] = NULL;
394                     new_maps++;
395             }
396         }
397         /* Set the protection to RW, since grants may be reused later
398          * with a different protection than the one needed for this request
399          */
400         ioreq->prot = PROT_WRITE | PROT_READ;
401     } else {
402         /* All grants in the request should be mapped */
403         memcpy(refs, ioreq->refs, sizeof(refs));
404         memcpy(domids, ioreq->domids, sizeof(domids));
405         memset(page, 0, sizeof(page));
406         new_maps = ioreq->v.niov;
407     }
408
409     if (batch_maps && new_maps) {
410         ioreq->pages = xengnttab_map_grant_refs
411             (gnt, new_maps, domids, refs, ioreq->prot);
412         if (ioreq->pages == NULL) {
413             xen_be_printf(&ioreq->blkdev->xendev, 0,
414                           "can't map %d grant refs (%s, %d maps)\n",
415                           new_maps, strerror(errno), ioreq->blkdev->cnt_map);
416             return -1;
417         }
418         for (i = 0, j = 0; i < ioreq->v.niov; i++) {
419             if (page[i] == NULL) {
420                 page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
421             }
422         }
423         ioreq->blkdev->cnt_map += new_maps;
424     } else if (new_maps)  {
425         for (i = 0; i < new_maps; i++) {
426             ioreq->page[i] = xengnttab_map_grant_ref
427                 (gnt, domids[i], refs[i], ioreq->prot);
428             if (ioreq->page[i] == NULL) {
429                 xen_be_printf(&ioreq->blkdev->xendev, 0,
430                               "can't map grant ref %d (%s, %d maps)\n",
431                               refs[i], strerror(errno), ioreq->blkdev->cnt_map);
432                 ioreq->mapped = 1;
433                 ioreq_unmap(ioreq);
434                 return -1;
435             }
436             ioreq->blkdev->cnt_map++;
437         }
438         for (i = 0, j = 0; i < ioreq->v.niov; i++) {
439             if (page[i] == NULL) {
440                 page[i] = ioreq->page[j++];
441             }
442         }
443     }
444     if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
445         (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
446         ioreq->blkdev->max_grants))) {
447         /*
448          * If we are using persistent grants and batch mappings, only
449          * add the new maps to the list of persistent grants if the whole
450          * area can be persistently mapped.
451          */
452         if (batch_maps) {
453             region = g_malloc0(sizeof(*region));
454             region->addr = ioreq->pages;
455             region->num = new_maps;
456             ioreq->blkdev->persistent_regions = g_slist_append(
457                                             ioreq->blkdev->persistent_regions,
458                                             region);
459         }
460         while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
461               && new_maps) {
462             /* Go through the list of newly mapped grants and add as many
463              * as possible to the list of persistently mapped grants.
464              *
465              * Since we start at the end of ioreq->page(s), we only need
466              * to decrease new_maps to prevent this granted pages from
467              * to decrease new_maps to prevent these granted pages from
468              */
469             grant = g_malloc0(sizeof(*grant));
470             new_maps--;
471             if (batch_maps) {
472                 grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
473             } else {
474                 grant->page = ioreq->page[new_maps];
475             }
476             grant->blkdev = ioreq->blkdev;
477             xen_be_printf(&ioreq->blkdev->xendev, 3,
478                           "adding grant %" PRIu32 " page: %p\n",
479                           refs[new_maps], grant->page);
480             g_tree_insert(ioreq->blkdev->persistent_gnts,
481                           GUINT_TO_POINTER(refs[new_maps]),
482                           grant);
483             ioreq->blkdev->persistent_gnt_count++;
484         }
485         assert(!batch_maps || new_maps == 0);
486     }
487     for (i = 0; i < ioreq->v.niov; i++) {
488         ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
489     }
490     ioreq->mapped = 1;
491     ioreq->num_unmap = new_maps;
492     return 0;
493 }
494
495 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 480
496
497 static void ioreq_free_copy_buffers(struct ioreq *ioreq)
498 {
499     int i;
500
501     for (i = 0; i < ioreq->v.niov; i++) {
502         ioreq->page[i] = NULL;
503     }
504
505     qemu_vfree(ioreq->pages);
506 }
507
508 static int ioreq_init_copy_buffers(struct ioreq *ioreq)
509 {
510     int i;
511
512     if (ioreq->v.niov == 0) {
513         return 0;
514     }
515
516     ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);
517
518     for (i = 0; i < ioreq->v.niov; i++) {
519         ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
520         ioreq->v.iov[i].iov_base = ioreq->page[i];
521     }
522
523     return 0;
524 }
525
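/*
 * Grant-copy mode: instead of mapping the frontend's granted pages, the
 * request data is staged in locally allocated bounce buffers (set up by
 * ioreq_init_copy_buffers() above) and transferred to or from the guest
 * with a single xengnttab_grant_copy() call, one segment per iovec element.
 */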
526 static int ioreq_grant_copy(struct ioreq *ioreq)
527 {
528     xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
529     xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
530     int i, count, rc;
531     int64_t file_blk = ioreq->blkdev->file_blk;
532
533     if (ioreq->v.niov == 0) {
534         return 0;
535     }
536
537     count = ioreq->v.niov;
538
539     for (i = 0; i < count; i++) {
540         if (ioreq->req.operation == BLKIF_OP_READ) {
541             segs[i].flags = GNTCOPY_dest_gref;
542             segs[i].dest.foreign.ref = ioreq->refs[i];
543             segs[i].dest.foreign.domid = ioreq->domids[i];
544             segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
545             segs[i].source.virt = ioreq->v.iov[i].iov_base;
546         } else {
547             segs[i].flags = GNTCOPY_source_gref;
548             segs[i].source.foreign.ref = ioreq->refs[i];
549             segs[i].source.foreign.domid = ioreq->domids[i];
550             segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
551             segs[i].dest.virt = ioreq->v.iov[i].iov_base;
552         }
553         segs[i].len = (ioreq->req.seg[i].last_sect
554                        - ioreq->req.seg[i].first_sect + 1) * file_blk;
555     }
556
557     rc = xengnttab_grant_copy(gnt, count, segs);
558
559     if (rc) {
560         xen_be_printf(&ioreq->blkdev->xendev, 0,
561                       "failed to copy data %d\n", rc);
562         ioreq->aio_errors++;
563         return -1;
564     }
565
566     for (i = 0; i < count; i++) {
567         if (segs[i].status != GNTST_okay) {
568             xen_be_printf(&ioreq->blkdev->xendev, 3,
569                           "failed to copy data %d for gref %d, domid %d\n",
570                           segs[i].status, ioreq->refs[i], ioreq->domids[i]);
571             ioreq->aio_errors++;
572             rc = -1;
573         }
574     }
575
576     return rc;
577 }
578 #else
579 static void ioreq_free_copy_buffers(struct ioreq *ioreq)
580 {
581     abort();
582 }
583
584 static int ioreq_init_copy_buffers(struct ioreq *ioreq)
585 {
586     abort();
587 }
588
589 static int ioreq_grant_copy(struct ioreq *ioreq)
590 {
591     abort();
592 }
593 #endif
594
595 static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
596
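/*
 * Completion bookkeeping: ioreq_runio_qemu_aio() holds one aio_inflight
 * reference for the duration of request submission and takes another for
 * every AIO it issues; qemu_aio_complete() drops one reference per call and
 * only finalises the request once the count reaches zero, so a request with
 * several outstanding AIOs is completed exactly once.
 */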
597 static void qemu_aio_complete(void *opaque, int ret)
598 {
599     struct ioreq *ioreq = opaque;
600
601     if (ret != 0) {
602         xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
603                       ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
604         ioreq->aio_errors++;
605     }
606
607     ioreq->aio_inflight--;
608     if (ioreq->presync) {
609         ioreq->presync = 0;
610         ioreq_runio_qemu_aio(ioreq);
611         return;
612     }
613     if (ioreq->aio_inflight > 0) {
614         return;
615     }
616
617     if (ioreq->blkdev->feature_grant_copy) {
618         switch (ioreq->req.operation) {
619         case BLKIF_OP_READ:
620             /* in case of failure ioreq->aio_errors is increased */
621             if (ret == 0) {
622                 ioreq_grant_copy(ioreq);
623             }
624             ioreq_free_copy_buffers(ioreq);
625             break;
626         case BLKIF_OP_WRITE:
627         case BLKIF_OP_FLUSH_DISKCACHE:
628             if (!ioreq->req.nr_segments) {
629                 break;
630             }
631             ioreq_free_copy_buffers(ioreq);
632             break;
633         default:
634             break;
635         }
636     }
637
638     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
639     if (!ioreq->blkdev->feature_grant_copy) {
640         ioreq_unmap(ioreq);
641     }
642     ioreq_finish(ioreq);
643     switch (ioreq->req.operation) {
644     case BLKIF_OP_WRITE:
645     case BLKIF_OP_FLUSH_DISKCACHE:
646         if (!ioreq->req.nr_segments) {
647             break;
648         }
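        /* fall through */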
649     case BLKIF_OP_READ:
650         if (ioreq->status == BLKIF_RSP_OKAY) {
651             block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
652         } else {
653             block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
654         }
655         break;
656     case BLKIF_OP_DISCARD:
657     default:
658         break;
659     }
660     qemu_bh_schedule(ioreq->blkdev->bh);
661 }
662
663 static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
664 {
665     struct XenBlkDev *blkdev = ioreq->blkdev;
666
667     if (ioreq->blkdev->feature_grant_copy) {
668         ioreq_init_copy_buffers(ioreq);
669         if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
670             ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
671             ioreq_grant_copy(ioreq)) {
672                 ioreq_free_copy_buffers(ioreq);
673                 goto err;
674         }
675     } else {
676         if (ioreq->req.nr_segments && ioreq_map(ioreq)) {
677             goto err;
678         }
679     }
680
681     ioreq->aio_inflight++;
682     if (ioreq->presync) {
683         blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
684         return 0;
685     }
686
687     switch (ioreq->req.operation) {
688     case BLKIF_OP_READ:
689         block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
690                          ioreq->v.size, BLOCK_ACCT_READ);
691         ioreq->aio_inflight++;
692         blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
693                        qemu_aio_complete, ioreq);
694         break;
695     case BLKIF_OP_WRITE:
696     case BLKIF_OP_FLUSH_DISKCACHE:
697         if (!ioreq->req.nr_segments) {
698             break;
699         }
700
701         block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
702                          ioreq->v.size,
703                          ioreq->req.operation == BLKIF_OP_WRITE ?
704                          BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
705         ioreq->aio_inflight++;
706         blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
707                         qemu_aio_complete, ioreq);
708         break;
709     case BLKIF_OP_DISCARD:
710     {
711         struct blkif_request_discard *discard_req = (void *)&ioreq->req;
712         ioreq->aio_inflight++;
713         blk_aio_pdiscard(blkdev->blk,
714                          discard_req->sector_number << BDRV_SECTOR_BITS,
715                          discard_req->nr_sectors << BDRV_SECTOR_BITS,
716                          qemu_aio_complete, ioreq);
717         break;
718     }
719     default:
720         /* unknown operation (shouldn't happen -- parse catches this) */
721         if (!ioreq->blkdev->feature_grant_copy) {
722             ioreq_unmap(ioreq);
723         }
724         goto err;
725     }
726
727     qemu_aio_complete(ioreq, 0);
728
729     return 0;
730
731 err:
732     ioreq_finish(ioreq);
733     ioreq->status = BLKIF_RSP_ERROR;
734     return -1;
735 }
736
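/*
 * Build the blkif response for one finished request and place it on the
 * shared ring.  The three protocol variants (native, 32-bit and 64-bit x86
 * frontends) share the common ring counters but differ in structure layout,
 * hence the per-protocol RING_GET_RESPONSE() below.  Returns nonzero if the
 * frontend should be notified.
 */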
737 static int blk_send_response_one(struct ioreq *ioreq)
738 {
739     struct XenBlkDev  *blkdev = ioreq->blkdev;
740     int               send_notify   = 0;
741     int               have_requests = 0;
742     blkif_response_t  resp;
743     void              *dst;
744
745     resp.id        = ioreq->req.id;
746     resp.operation = ioreq->req.operation;
747     resp.status    = ioreq->status;
748
749     /* Place on the response ring for the relevant domain. */
750     switch (blkdev->protocol) {
751     case BLKIF_PROTOCOL_NATIVE:
752         dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
753         break;
754     case BLKIF_PROTOCOL_X86_32:
755         dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
756                                 blkdev->rings.x86_32_part.rsp_prod_pvt);
757         break;
758     case BLKIF_PROTOCOL_X86_64:
759         dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
760                                 blkdev->rings.x86_64_part.rsp_prod_pvt);
761         break;
762     default:
763         dst = NULL;
764         return 0;
765     }
766     memcpy(dst, &resp, sizeof(resp));
767     blkdev->rings.common.rsp_prod_pvt++;
768
769     RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
770     if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
771         /*
772          * Tail check for pending requests. Allows frontend to avoid
773          * notifications if requests are already in flight (lower
774          * overheads and promotes batching).
775          */
776         RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
777     } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
778         have_requests = 1;
779     }
780
781     if (have_requests) {
782         blkdev->more_work++;
783     }
784     return send_notify;
785 }
786
787 /* walk finished list, send outstanding responses, free requests */
788 static void blk_send_response_all(struct XenBlkDev *blkdev)
789 {
790     struct ioreq *ioreq;
791     int send_notify = 0;
792
793     while (!QLIST_EMPTY(&blkdev->finished)) {
794         ioreq = QLIST_FIRST(&blkdev->finished);
795         send_notify += blk_send_response_one(ioreq);
796         ioreq_release(ioreq, true);
797     }
798     if (send_notify) {
799         xen_be_send_notify(&blkdev->xendev);
800     }
801 }
802
803 static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
804 {
805     switch (blkdev->protocol) {
806     case BLKIF_PROTOCOL_NATIVE:
807         memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
808                sizeof(ioreq->req));
809         break;
810     case BLKIF_PROTOCOL_X86_32:
811         blkif_get_x86_32_req(&ioreq->req,
812                              RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
813         break;
814     case BLKIF_PROTOCOL_X86_64:
815         blkif_get_x86_64_req(&ioreq->req,
816                              RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
817         break;
818     }
819     /* Prevent the compiler from accessing the on-ring fields instead. */
820     barrier();
821     return 0;
822 }
823
824 static void blk_handle_requests(struct XenBlkDev *blkdev)
825 {
826     RING_IDX rc, rp;
827     struct ioreq *ioreq;
828
829     blkdev->more_work = 0;
830
831     rc = blkdev->rings.common.req_cons;
832     rp = blkdev->rings.common.sring->req_prod;
833     xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
834
835     blk_send_response_all(blkdev);
836     while (rc != rp) {
837         /* pull request from ring */
838         if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
839             break;
840         }
841         ioreq = ioreq_start(blkdev);
842         if (ioreq == NULL) {
843             blkdev->more_work++;
844             break;
845         }
846         blk_get_request(blkdev, ioreq, rc);
847         blkdev->rings.common.req_cons = ++rc;
848
849         /* parse them */
850         if (ioreq_parse(ioreq) != 0) {
851
852             switch (ioreq->req.operation) {
853             case BLKIF_OP_READ:
854                 block_acct_invalid(blk_get_stats(blkdev->blk),
855                                    BLOCK_ACCT_READ);
856                 break;
857             case BLKIF_OP_WRITE:
858                 block_acct_invalid(blk_get_stats(blkdev->blk),
859                                    BLOCK_ACCT_WRITE);
860                 break;
861             case BLKIF_OP_FLUSH_DISKCACHE:
862                 block_acct_invalid(blk_get_stats(blkdev->blk),
863                                    BLOCK_ACCT_FLUSH);
864             default:
865                 break;
866             }
867
868             if (blk_send_response_one(ioreq)) {
869                 xen_be_send_notify(&blkdev->xendev);
870             }
871             ioreq_release(ioreq, false);
872             continue;
873         }
874
875         ioreq_runio_qemu_aio(ioreq);
876     }
877
878     if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
879         qemu_bh_schedule(blkdev->bh);
880     }
881 }
882
883 /* ------------------------------------------------------------- */
884
885 static void blk_bh(void *opaque)
886 {
887     struct XenBlkDev *blkdev = opaque;
888     blk_handle_requests(blkdev);
889 }
890
891 /*
892  * We need to account for the grant allocations requiring contiguous
893  * chunks; the worst case number would be
894  *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
895  * but in order to keep things simple just use
896  *     2 * max_req * max_seg.
897  */
898 #define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
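/*
 * A rough sanity check of the bound above, as a sketch only: assuming the
 * usual BLKIF_MAX_SEGMENTS_PER_REQUEST of 11 from the Xen public headers
 * and the default max_requests of 32 defined at the top of this file,
 *
 *     worst case:   32 * 11 + 31 * 10 + 1 = 663 grants
 *     bound used:   MAX_GRANTS(32, 11) = 2 * 32 * 11 = 704 grants
 *
 * so the simplified bound comfortably covers the worst case.
 */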
899
900 static void blk_alloc(struct XenDevice *xendev)
901 {
902     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
903
904     QLIST_INIT(&blkdev->inflight);
905     QLIST_INIT(&blkdev->finished);
906     QLIST_INIT(&blkdev->freelist);
907     blkdev->bh = qemu_bh_new(blk_bh, blkdev);
908     if (xen_mode != XEN_EMULATE) {
909         batch_maps = 1;
910     }
911     if (xengnttab_set_max_grants(xendev->gnttabdev,
912             MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
913         xen_be_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
914                       strerror(errno));
915     }
916 }
917
918 static void blk_parse_discard(struct XenBlkDev *blkdev)
919 {
920     int enable;
921
922     blkdev->feature_discard = true;
923
924     if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
925         blkdev->feature_discard = !!enable;
926     }
927
928     if (blkdev->feature_discard) {
929         xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
930     }
931 }
932
933 static int blk_init(struct XenDevice *xendev)
934 {
935     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
936     int info = 0;
937     char *directiosafe = NULL;
938
939     /* read xenstore entries */
940     if (blkdev->params == NULL) {
941         char *h = NULL;
942         blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
943         if (blkdev->params != NULL) {
944             h = strchr(blkdev->params, ':');
945         }
946         if (h != NULL) {
947             blkdev->fileproto = blkdev->params;
948             blkdev->filename  = h+1;
949             *h = 0;
950         } else {
951             blkdev->fileproto = "<unset>";
952             blkdev->filename  = blkdev->params;
953         }
954     }
955     if (!strcmp("aio", blkdev->fileproto)) {
956         blkdev->fileproto = "raw";
957     }
958     if (!strcmp("vhd", blkdev->fileproto)) {
959         blkdev->fileproto = "vpc";
960     }
961     if (blkdev->mode == NULL) {
962         blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
963     }
964     if (blkdev->type == NULL) {
965         blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
966     }
967     if (blkdev->dev == NULL) {
968         blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
969     }
970     if (blkdev->devtype == NULL) {
971         blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
972     }
973     directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
974     blkdev->directiosafe = (directiosafe && atoi(directiosafe));
975
976     /* do we have all we need? */
977     if (blkdev->params == NULL ||
978         blkdev->mode == NULL   ||
979         blkdev->type == NULL   ||
980         blkdev->dev == NULL) {
981         goto out_error;
982     }
983
984     /* read-only ? */
985     if (strcmp(blkdev->mode, "w")) {
986         info  |= VDISK_READONLY;
987     }
988
989     /* cdrom ? */
990     if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
991         info  |= VDISK_CDROM;
992     }
993
994     blkdev->file_blk  = BLOCK_SIZE;
995
996     /* fill info
997      * blk_connect supplies sector-size and sectors
998      */
999     xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
1000     xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
1001     xenstore_write_be_int(&blkdev->xendev, "info", info);
1002
1003     blk_parse_discard(blkdev);
1004
1005     g_free(directiosafe);
1006     return 0;
1007
1008 out_error:
1009     g_free(blkdev->params);
1010     blkdev->params = NULL;
1011     g_free(blkdev->mode);
1012     blkdev->mode = NULL;
1013     g_free(blkdev->type);
1014     blkdev->type = NULL;
1015     g_free(blkdev->dev);
1016     blkdev->dev = NULL;
1017     g_free(blkdev->devtype);
1018     blkdev->devtype = NULL;
1019     g_free(directiosafe);
1020     blkdev->directiosafe = false;
1021     return -1;
1022 }
1023
1024 static int blk_connect(struct XenDevice *xendev)
1025 {
1026     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1027     int pers, index, qflags;
1028     bool readonly = true;
1029     bool writethrough = true;
1030
1031     /* set up open flags: caching mode, read/write access and discard */
1032     if (blkdev->directiosafe) {
1033         qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
1034     } else {
1035         qflags = 0;
1036         writethrough = false;
1037     }
1038     if (strcmp(blkdev->mode, "w") == 0) {
1039         qflags |= BDRV_O_RDWR;
1040         readonly = false;
1041     }
1042     if (blkdev->feature_discard) {
1043         qflags |= BDRV_O_UNMAP;
1044     }
1045
1046     /* init qemu block driver */
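    /*
     * Recover the disk index from the xenbus device number.  This assumes
     * the conventional Linux numbering for Xen virtual disks: major 202
     * (xvd) shifted into the upper bits (202 * 256) and 16 minor numbers
     * reserved per disk, so xvda maps to index 0, xvdb to index 1, etc.
     */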
1047     index = (blkdev->xendev.dev - 202 * 256) / 16;
1048     blkdev->dinfo = drive_get(IF_XEN, 0, index);
1049     if (!blkdev->dinfo) {
1050         Error *local_err = NULL;
1051         QDict *options = NULL;
1052
1053         if (strcmp(blkdev->fileproto, "<unset>")) {
1054             options = qdict_new();
1055             qdict_put(options, "driver", qstring_from_str(blkdev->fileproto));
1056         }
1057
1058         /* setup via xenbus -> create new block driver instance */
1059         xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
1060         blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
1061                                    qflags, &local_err);
1062         if (!blkdev->blk) {
1063             xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
1064                           error_get_pretty(local_err));
1065             error_free(local_err);
1066             return -1;
1067         }
1068         blk_set_enable_write_cache(blkdev->blk, !writethrough);
1069     } else {
1070         /* setup via qemu cmdline -> already setup for us */
1071         xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
1072         blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
1073         if (blk_is_read_only(blkdev->blk) && !readonly) {
1074             xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive\n");
1075             blkdev->blk = NULL;
1076             return -1;
1077         }
1078         /* blkdev->blk is not created by us, we get a reference
1079          * so we can blk_unref() unconditionally */
1080         blk_ref(blkdev->blk);
1081     }
1082     blk_attach_dev_nofail(blkdev->blk, blkdev);
1083     blkdev->file_size = blk_getlength(blkdev->blk);
1084     if (blkdev->file_size < 0) {
1085         BlockDriverState *bs = blk_bs(blkdev->blk);
1086         const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
1087         xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
1088                       (int)blkdev->file_size, strerror(-blkdev->file_size),
1089                       drv_name ?: "-");
1090         blkdev->file_size = 0;
1091     }
1092
1093     xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
1094                   " size %" PRId64 " (%" PRId64 " MB)\n",
1095                   blkdev->type, blkdev->fileproto, blkdev->filename,
1096                   blkdev->file_size, blkdev->file_size >> 20);
1097
1098     /* Fill in the sector size and the number of sectors */
1099     xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
1100     xenstore_write_be_int64(&blkdev->xendev, "sectors",
1101                             blkdev->file_size / blkdev->file_blk);
1102
1103     if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
1104         return -1;
1105     }
1106     if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
1107                              &blkdev->xendev.remote_port) == -1) {
1108         return -1;
1109     }
1110     if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
1111         blkdev->feature_persistent = FALSE;
1112     } else {
1113         blkdev->feature_persistent = !!pers;
1114     }
1115
1116     if (!blkdev->xendev.protocol) {
1117         blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
1118     } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_NATIVE) == 0) {
1119         blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
1120     } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
1121         blkdev->protocol = BLKIF_PROTOCOL_X86_32;
1122     } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
1123         blkdev->protocol = BLKIF_PROTOCOL_X86_64;
1124     } else {
1125         blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
1126     }
1127
1128     blkdev->sring = xengnttab_map_grant_ref(blkdev->xendev.gnttabdev,
1129                                             blkdev->xendev.dom,
1130                                             blkdev->ring_ref,
1131                                             PROT_READ | PROT_WRITE);
1132     if (!blkdev->sring) {
1133         return -1;
1134     }
1135     blkdev->cnt_map++;
1136
1137     switch (blkdev->protocol) {
1138     case BLKIF_PROTOCOL_NATIVE:
1139     {
1140         blkif_sring_t *sring_native = blkdev->sring;
1141         BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
1142         break;
1143     }
1144     case BLKIF_PROTOCOL_X86_32:
1145     {
1146         blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
1147
1148         BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
1149         break;
1150     }
1151     case BLKIF_PROTOCOL_X86_64:
1152     {
1153         blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
1154
1155         BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
1156         break;
1157     }
1158     }
1159
1160     if (blkdev->feature_persistent) {
1161         /* Init persistent grants */
1162         blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
1163         blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
1164                                              NULL, NULL,
1165                                              batch_maps ?
1166                                              (GDestroyNotify)g_free :
1167                                              (GDestroyNotify)destroy_grant);
1168         blkdev->persistent_regions = NULL;
1169         blkdev->persistent_gnt_count = 0;
1170     }
1171
1172     xen_be_bind_evtchn(&blkdev->xendev);
1173
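    /*
     * Probe for the grant-copy interface by issuing a zero-length copy; if
     * the call succeeds, the I/O paths use ioreq_grant_copy() instead of
     * mapping grants.
     */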
1174     blkdev->feature_grant_copy =
1175                 (xengnttab_grant_copy(blkdev->xendev.gnttabdev, 0, NULL) == 0);
1176
1177     xen_be_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
1178                   blkdev->feature_grant_copy ? "enabled" : "disabled");
1179
1180     xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
1181                   "remote port %d, local port %d\n",
1182                   blkdev->xendev.protocol, blkdev->ring_ref,
1183                   blkdev->xendev.remote_port, blkdev->xendev.local_port);
1184     return 0;
1185 }
1186
1187 static void blk_disconnect(struct XenDevice *xendev)
1188 {
1189     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1190
1191     if (blkdev->blk) {
1192         blk_detach_dev(blkdev->blk, blkdev);
1193         blk_unref(blkdev->blk);
1194         blkdev->blk = NULL;
1195     }
1196     xen_be_unbind_evtchn(&blkdev->xendev);
1197
1198     if (blkdev->sring) {
1199         xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
1200         blkdev->cnt_map--;
1201         blkdev->sring = NULL;
1202     }
1203
1204     /*
1205      * Unmap persistent grants before switching to the closed state
1206      * so the frontend can free them.
1207      *
1208      * In the !batch_maps case g_tree_destroy will take care of unmapping
1209      * the grant, but in the batch_maps case we need to iterate over every
1210      * region in persistent_regions and unmap it.
1211      */
1212     if (blkdev->feature_persistent) {
1213         g_tree_destroy(blkdev->persistent_gnts);
1214         assert(batch_maps || blkdev->persistent_gnt_count == 0);
1215         if (batch_maps) {
1216             blkdev->persistent_gnt_count = 0;
1217             g_slist_foreach(blkdev->persistent_regions,
1218                             (GFunc)remove_persistent_region, blkdev);
1219             g_slist_free(blkdev->persistent_regions);
1220         }
1221         blkdev->feature_persistent = false;
1222     }
1223 }
1224
1225 static int blk_free(struct XenDevice *xendev)
1226 {
1227     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1228     struct ioreq *ioreq;
1229
1230     if (blkdev->blk || blkdev->sring) {
1231         blk_disconnect(xendev);
1232     }
1233
1234     while (!QLIST_EMPTY(&blkdev->freelist)) {
1235         ioreq = QLIST_FIRST(&blkdev->freelist);
1236         QLIST_REMOVE(ioreq, list);
1237         qemu_iovec_destroy(&ioreq->v);
1238         g_free(ioreq);
1239     }
1240
1241     g_free(blkdev->params);
1242     g_free(blkdev->mode);
1243     g_free(blkdev->type);
1244     g_free(blkdev->dev);
1245     g_free(blkdev->devtype);
1246     qemu_bh_delete(blkdev->bh);
1247     return 0;
1248 }
1249
1250 static void blk_event(struct XenDevice *xendev)
1251 {
1252     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1253
1254     qemu_bh_schedule(blkdev->bh);
1255 }
1256
1257 struct XenDevOps xen_blkdev_ops = {
1258     .size       = sizeof(struct XenBlkDev),
1259     .flags      = DEVOPS_FLAG_NEED_GNTDEV,
1260     .alloc      = blk_alloc,
1261     .init       = blk_init,
1262     .initialise    = blk_connect,
1263     .disconnect = blk_disconnect,
1264     .event      = blk_event,
1265     .free       = blk_free,
1266 };