hw/block/xen_disk.c
/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"

/* ------------------------------------------------------------- */

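/*
 * Tunables: batch_maps selects whether all grants of a request are mapped
 * with a single xengnttab_map_grant_refs() call instead of one
 * xengnttab_map_grant_ref() call per segment (it is enabled in blk_alloc()
 * when not running in Xen emulation mode).  max_requests caps how many
 * ioreq structs are allocated per device, i.e. how many requests may be
 * in flight at once.
 */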
37 static int batch_maps   = 0;
38
39 static int max_requests = 32;
40
41 /* ------------------------------------------------------------- */
42
43 #define BLOCK_SIZE  512
44 #define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
45
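/*
 * Bookkeeping for persistently mapped grants: a PersistentGrant tracks a
 * single guest page, keyed by grant reference in blkdev->persistent_gnts;
 * a PersistentRegion remembers a contiguous area created by a batched
 * xengnttab_map_grant_refs() call, so the whole region can be unmapped in
 * one go on disconnect.
 */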
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct PersistentRegion {
    void *addr;
    int num;
};

typedef struct PersistentRegion PersistentRegion;

struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    bool                directiosafe;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_discard;
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    GSList              *persistent_regions;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockBackend        *blk;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xengnttab_unmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

static void remove_persistent_region(gpointer data, gpointer dev)
{
    PersistentRegion *region = data;
    struct XenBlkDev *blkdev = dev;
    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
        xen_be_printf(&blkdev->xendev, 0,
                      "xengnttab_unmap region %p failed: %s\n",
                      region->addr, strerror(errno));
    }
    xen_be_printf(&blkdev->xendev, 3,
                  "unmapped grant region %p with %d pages\n",
                  region->addr, region->num);
    g_free(region);
}

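/*
 * ioreq lifecycle: ioreq_start() hands out a request struct, recycling one
 * from the freelist or allocating a new one until max_requests is reached;
 * ioreq_finish() moves a completed request from the inflight list to the
 * finished list; ioreq_release() resets it and returns it to the freelist.
 */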
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

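/*
 * Undo the grant mappings of a request. Only the ioreq->num_unmap pages
 * that were freshly mapped for this request are unmapped here; pages that
 * were promoted to persistent grants in ioreq_map() stay mapped until
 * blk_disconnect().
 */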
static void ioreq_unmap(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "xengnttab_unmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "xengnttab_unmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

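/*
 * Resolve the grant references of a request to mapped pages in three steps:
 * reuse any grants already in the persistent tree, map the remaining refs
 * (batched or one by one), and finally promote as many of the new mappings
 * as possible to persistent grants, up to blkdev->max_grants.
 */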
static int ioreq_map(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    PersistentRegion *region;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xengnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xengnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
        ioreq->blkdev->max_grants))) {
        /*
         * If we are using persistent grants and batch mappings, only
         * add the new maps to the list of persistent grants if the whole
         * area can be persistently mapped.
         */
        if (batch_maps) {
            region = g_malloc0(sizeof(*region));
            region->addr = ioreq->pages;
            region->num = new_maps;
            ioreq->blkdev->persistent_regions = g_slist_append(
                                            ioreq->blkdev->persistent_regions,
                                            region);
        }
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
        assert(!batch_maps || new_maps == 0);
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

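/*
 * Completion callback shared by all AIO submitted for a request. Each
 * completion drops aio_inflight; a pending presync flush re-enters
 * ioreq_runio_qemu_aio() to issue the actual I/O, and only the last
 * completion unmaps the grants, queues the response and kicks the
 * bottom half.
 */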
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (ioreq->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        } else {
            block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}

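/*
 * Submit a parsed request as QEMU AIO. aio_inflight is incremented once
 * before submission and matched by the final qemu_aio_complete(ioreq, 0)
 * call below, so the request cannot complete while operations are still
 * being issued.
 */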
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
                        qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
        ioreq->aio_inflight++;
        blk_aio_discard(blkdev->blk,
                        discard_req->sector_number, discard_req->nr_sectors,
                        qemu_aio_complete, ioreq);
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

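/*
 * Put the response for one request on the shared ring, using the layout
 * (native, x86_32 or x86_64) negotiated with the frontend, and return
 * whether an event channel notification is needed.
 */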
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}

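/*
 * Main request loop, run from the bottom half: flush finished responses,
 * then consume requests up to the producer index snapshotted under
 * xen_rmb(). If we run out of ioreq slots, more_work makes sure the
 * bottom half is re-scheduled once inflight requests drain.
 */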
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xengnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

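/*
 * Discard is enabled by default and can be vetoed by the toolstack via
 * the backend xenstore node "discard-enable"; when it stays enabled the
 * backend advertises "feature-discard" to the frontend.
 */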
static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}

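/*
 * First-stage setup from xenstore. The backend "params" node carries an
 * optional image format and the path separated by ':', e.g. (illustrative)
 * "qcow2:/var/lib/xen/images/disk0" or just "/dev/vg0/guest-disk"; legacy
 * protocol names "aio" and "vhd" are translated to "raw" and "vpc".
 */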
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (!strcmp("vhd", blkdev->fileproto)) {
        blkdev->fileproto = "vpc";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info  |= VDISK_CDROM;
    }

    blkdev->file_blk  = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

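/*
 * Second-stage setup: open (or look up) the block backend, publish the
 * disk size to xenstore, then map the shared ring and bind the event
 * channel. The index computed from xendev.dev matches the conventional
 * Xen virtual-block-device numbering (major 202, 16 minors per disk).
 */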
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;
    bool writethrough = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = 0;
        writethrough = false;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        QDict *options = NULL;

        if (strcmp(blkdev->fileproto, "<unset>")) {
            options = qdict_new();
            qdict_put(options, "driver", qstring_from_str(blkdev->fileproto));
        }

        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
                                   qflags, &local_err);
        if (!blkdev->blk) {
            xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return -1;
        }
        blk_set_enable_write_cache(blkdev->blk, !writethrough);
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive\n");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk is not created by us, take a reference
         * so we can blk_unref() it unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_nofail(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        BlockDriverState *bs = blk_bs(blkdev->blk);
        const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
        xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      drv_name ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xengnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                             NULL, NULL,
                                             batch_maps ?
                                             (GDestroyNotify)g_free :
                                             (GDestroyNotify)destroy_grant);
        blkdev->persistent_regions = NULL;
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->blk) {
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }

    /*
     * Unmap persistent grants before switching to the closed state
     * so the frontend can free them.
     *
     * In the !batch_maps case g_tree_destroy will take care of unmapping
     * the grant, but in the batch_maps case we need to iterate over every
     * region in persistent_regions and unmap it.
     */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
        assert(batch_maps || blkdev->persistent_gnt_count == 0);
        if (batch_maps) {
            blkdev->persistent_gnt_count = 0;
            g_slist_foreach(blkdev->persistent_regions,
                            (GFunc)remove_persistent_region, blkdev);
            g_slist_free(blkdev->persistent_regions);
        }
        blkdev->feature_persistent = false;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->blk || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

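/*
 * Entry points wired into the generic Xen backend core;
 * DEVOPS_FLAG_NEED_GNTDEV asks the core to open a grant-table handle
 * (xendev.gnttabdev) for this device before the callbacks run.
 */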
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};