qemu.git / hw/block/xen_disk.c
1 /*
2  *  xen paravirt block device backend
3  *
4  *  (c) Gerd Hoffmann <[email protected]>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; under version 2 of the License.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  *  GNU General Public License for more details.
14  *
15  *  You should have received a copy of the GNU General Public License along
16  *  with this program; if not, see <http://www.gnu.org/licenses/>.
17  *
18  *  Contributions after 2012-01-13 are licensed under the terms of the
19  *  GNU GPL, version 2 or (at your option) any later version.
20  */
21
22 #include "qemu/osdep.h"
23 #include <sys/ioctl.h>
24 #include <sys/mman.h>
25 #include <sys/uio.h>
26
27 #include "hw/hw.h"
28 #include "hw/xen/xen_backend.h"
29 #include "xen_blkif.h"
30 #include "sysemu/blockdev.h"
31 #include "sysemu/block-backend.h"
32 #include "qapi/error.h"
33 #include "qapi/qmp/qdict.h"
34 #include "qapi/qmp/qstring.h"
35
36 /* ------------------------------------------------------------- */
37
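/* When nonzero, map all grant refs of a request with a single
 * xengnttab_map_grant_refs() call into one contiguous region instead of
 * mapping each page individually (enabled in blk_alloc() when running
 * on real Xen, i.e. xen_mode != XEN_EMULATE). */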
38 static int batch_maps   = 0;
39
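/* Upper bound on the number of ioreq structs allocated per device; once
 * reached, ioreq_start() returns NULL and the ring is re-polled later
 * (see blk_handle_requests()). */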
40 static int max_requests = 32;
41
42 /* ------------------------------------------------------------- */
43
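/* The blkif protocol addresses the disk in fixed 512-byte sectors. */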
44 #define BLOCK_SIZE  512
45 #define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
46
47 struct PersistentGrant {
48     void *page;
49     struct XenBlkDev *blkdev;
50 };
51
52 typedef struct PersistentGrant PersistentGrant;
53
54 struct PersistentRegion {
55     void *addr;
56     int num;
57 };
58
59 typedef struct PersistentRegion PersistentRegion;
60
61 struct ioreq {
62     blkif_request_t     req;
63     int16_t             status;
64
65     /* parsed request */
66     off_t               start;
67     QEMUIOVector        v;
68     int                 presync;
69     uint8_t             mapped;
70
71     /* grant mapping */
72     uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
73     uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
74     int                 prot;
75     void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
76     void                *pages;
77     int                 num_unmap;
78
79     /* aio status */
80     int                 aio_inflight;
81     int                 aio_errors;
82
83     struct XenBlkDev    *blkdev;
84     QLIST_ENTRY(ioreq)   list;
85     BlockAcctCookie     acct;
86 };
87
88 struct XenBlkDev {
89     struct XenDevice    xendev;  /* must be first */
90     char                *params;
91     char                *mode;
92     char                *type;
93     char                *dev;
94     char                *devtype;
95     bool                directiosafe;
96     const char          *fileproto;
97     const char          *filename;
98     int                 ring_ref;
99     void                *sring;
100     int64_t             file_blk;
101     int64_t             file_size;
102     int                 protocol;
103     blkif_back_rings_t  rings;
104     int                 more_work;
105     int                 cnt_map;
106
107     /* request lists */
108     QLIST_HEAD(inflight_head, ioreq) inflight;
109     QLIST_HEAD(finished_head, ioreq) finished;
110     QLIST_HEAD(freelist_head, ioreq) freelist;
111     int                 requests_total;
112     int                 requests_inflight;
113     int                 requests_finished;
114
115     /* Persistent grants extension */
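    /* When the frontend negotiates feature-persistent, grants stay mapped
     * across requests (tracked below in persistent_gnts and, with
     * batch_maps, persistent_regions) instead of being mapped and
     * unmapped for every single I/O request. */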
116     gboolean            feature_discard;
117     gboolean            feature_persistent;
118     GTree               *persistent_gnts;
119     GSList              *persistent_regions;
120     unsigned int        persistent_gnt_count;
121     unsigned int        max_grants;
122
123     /* qemu block driver */
124     DriveInfo           *dinfo;
125     BlockBackend        *blk;
126     QEMUBH              *bh;
127 };
128
129 /* ------------------------------------------------------------- */
130
131 static void ioreq_reset(struct ioreq *ioreq)
132 {
133     memset(&ioreq->req, 0, sizeof(ioreq->req));
134     ioreq->status = 0;
135     ioreq->start = 0;
136     ioreq->presync = 0;
137     ioreq->mapped = 0;
138
139     memset(ioreq->domids, 0, sizeof(ioreq->domids));
140     memset(ioreq->refs, 0, sizeof(ioreq->refs));
141     ioreq->prot = 0;
142     memset(ioreq->page, 0, sizeof(ioreq->page));
143     ioreq->pages = NULL;
144
145     ioreq->aio_inflight = 0;
146     ioreq->aio_errors = 0;
147
148     ioreq->blkdev = NULL;
149     memset(&ioreq->list, 0, sizeof(ioreq->list));
150     memset(&ioreq->acct, 0, sizeof(ioreq->acct));
151
152     qemu_iovec_reset(&ioreq->v);
153 }
154
155 static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
156 {
157     uint ua = GPOINTER_TO_UINT(a);
158     uint ub = GPOINTER_TO_UINT(b);
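    /* Three-way compare for the persistent-grant GTree: yields -1, 0 or +1
     * without risking integer overflow. */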
159     return (ua > ub) - (ua < ub);
160 }
161
162 static void destroy_grant(gpointer pgnt)
163 {
164     PersistentGrant *grant = pgnt;
165     xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;
166
167     if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
168         xen_be_printf(&grant->blkdev->xendev, 0,
169                       "xengnttab_unmap failed: %s\n",
170                       strerror(errno));
171     }
172     grant->blkdev->persistent_gnt_count--;
173     xen_be_printf(&grant->blkdev->xendev, 3,
174                   "unmapped grant %p\n", grant->page);
175     g_free(grant);
176 }
177
178 static void remove_persistent_region(gpointer data, gpointer dev)
179 {
180     PersistentRegion *region = data;
181     struct XenBlkDev *blkdev = dev;
182     xengnttab_handle *gnt = blkdev->xendev.gnttabdev;
183
184     if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
185         xen_be_printf(&blkdev->xendev, 0,
186                       "xengnttab_unmap region %p failed: %s\n",
187                       region->addr, strerror(errno));
188     }
189     xen_be_printf(&blkdev->xendev, 3,
190                   "unmapped grant region %p with %d pages\n",
191                   region->addr, region->num);
192     g_free(region);
193 }
194
195 static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
196 {
197     struct ioreq *ioreq = NULL;
198
199     if (QLIST_EMPTY(&blkdev->freelist)) {
200         if (blkdev->requests_total >= max_requests) {
201             goto out;
202         }
203         /* allocate new struct */
204         ioreq = g_malloc0(sizeof(*ioreq));
205         ioreq->blkdev = blkdev;
206         blkdev->requests_total++;
207         qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
208     } else {
209         /* get one from freelist */
210         ioreq = QLIST_FIRST(&blkdev->freelist);
211         QLIST_REMOVE(ioreq, list);
212     }
213     QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
214     blkdev->requests_inflight++;
215
216 out:
217     return ioreq;
218 }
219
220 static void ioreq_finish(struct ioreq *ioreq)
221 {
222     struct XenBlkDev *blkdev = ioreq->blkdev;
223
224     QLIST_REMOVE(ioreq, list);
225     QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
226     blkdev->requests_inflight--;
227     blkdev->requests_finished++;
228 }
229
230 static void ioreq_release(struct ioreq *ioreq, bool finish)
231 {
232     struct XenBlkDev *blkdev = ioreq->blkdev;
233
234     QLIST_REMOVE(ioreq, list);
235     ioreq_reset(ioreq);
236     ioreq->blkdev = blkdev;
237     QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
238     if (finish) {
239         blkdev->requests_finished--;
240     } else {
241         blkdev->requests_inflight--;
242     }
243 }
244
245 /*
246  * translate request into iovec + start offset
247  * do sanity checks along the way
248  */
249 static int ioreq_parse(struct ioreq *ioreq)
250 {
251     struct XenBlkDev *blkdev = ioreq->blkdev;
252     uintptr_t mem;
253     size_t len;
254     int i;
255
256     xen_be_printf(&blkdev->xendev, 3,
257                   "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
258                   ioreq->req.operation, ioreq->req.nr_segments,
259                   ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
260     switch (ioreq->req.operation) {
261     case BLKIF_OP_READ:
262         ioreq->prot = PROT_WRITE; /* to memory */
263         break;
264     case BLKIF_OP_FLUSH_DISKCACHE:
265         ioreq->presync = 1;
266         if (!ioreq->req.nr_segments) {
267             return 0;
268         }
269         /* fall through */
270     case BLKIF_OP_WRITE:
271         ioreq->prot = PROT_READ; /* from memory */
272         break;
273     case BLKIF_OP_DISCARD:
274         return 0;
275     default:
276         xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
277                       ioreq->req.operation);
278         goto err;
279     }
280
281     if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
282         xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
283         goto err;
284     }
285
286     ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
287     for (i = 0; i < ioreq->req.nr_segments; i++) {
288         if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
289             xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
290             goto err;
291         }
292         if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
293             xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
294             goto err;
295         }
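        /* Each segment must stay within its single granted page:
         * at most XC_PAGE_SIZE / BLOCK_SIZE sectors (8 for 4 KiB pages). */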
296         if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
297             xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
298             goto err;
299         }
300
301         ioreq->domids[i] = blkdev->xendev.dom;
302         ioreq->refs[i]   = ioreq->req.seg[i].gref;
303
304         mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
305         len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
306         qemu_iovec_add(&ioreq->v, (void*)mem, len);
307     }
308     if (ioreq->start + ioreq->v.size > blkdev->file_size) {
309         xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
310         goto err;
311     }
312     return 0;
313
314 err:
315     ioreq->status = BLKIF_RSP_ERROR;
316     return -1;
317 }
318
319 static void ioreq_unmap(struct ioreq *ioreq)
320 {
321     xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
322     int i;
323
324     if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
325         return;
326     }
327     if (batch_maps) {
328         if (!ioreq->pages) {
329             return;
330         }
331         if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
332             xen_be_printf(&ioreq->blkdev->xendev, 0,
333                           "xengnttab_unmap failed: %s\n",
334                           strerror(errno));
335         }
336         ioreq->blkdev->cnt_map -= ioreq->num_unmap;
337         ioreq->pages = NULL;
338     } else {
339         for (i = 0; i < ioreq->num_unmap; i++) {
340             if (!ioreq->page[i]) {
341                 continue;
342             }
343             if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
344                 xen_be_printf(&ioreq->blkdev->xendev, 0,
345                               "xengnttab_unmap failed: %s\n",
346                               strerror(errno));
347             }
348             ioreq->blkdev->cnt_map--;
349             ioreq->page[i] = NULL;
350         }
351     }
352     ioreq->mapped = 0;
353 }
354
355 static int ioreq_map(struct ioreq *ioreq)
356 {
357     xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
358     uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
359     uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
360     void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
361     int i, j, new_maps = 0;
362     PersistentGrant *grant;
363     PersistentRegion *region;
364     /* domids and refs variables will contain the information necessary
365      * to map the grants that are needed to fulfill this request.
366      *
367      * After mapping the needed grants, the page array will contain the
368      * memory address of each granted page in the order specified in ioreq
369      * (disregarding if it's a persistent grant or not).
370      */
371
372     if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
373         return 0;
374     }
375     if (ioreq->blkdev->feature_persistent) {
376         for (i = 0; i < ioreq->v.niov; i++) {
377             grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
378                                     GUINT_TO_POINTER(ioreq->refs[i]));
379
380             if (grant != NULL) {
381                 page[i] = grant->page;
382                 xen_be_printf(&ioreq->blkdev->xendev, 3,
383                               "using persistent-grant %" PRIu32 "\n",
384                               ioreq->refs[i]);
385             } else {
386                 /* Add the grant to the list of grants that
387                  * should be mapped
388                  */
389                 domids[new_maps] = ioreq->domids[i];
390                 refs[new_maps] = ioreq->refs[i];
391                 page[i] = NULL;
392                 new_maps++;
393             }
394         }
395         /* Set the protection to RW, since grants may be reused later
396          * with a different protection than the one needed for this request
397          */
398         ioreq->prot = PROT_WRITE | PROT_READ;
399     } else {
400         /* All grants in the request should be mapped */
401         memcpy(refs, ioreq->refs, sizeof(refs));
402         memcpy(domids, ioreq->domids, sizeof(domids));
403         memset(page, 0, sizeof(page));
404         new_maps = ioreq->v.niov;
405     }
406
407     if (batch_maps && new_maps) {
408         ioreq->pages = xengnttab_map_grant_refs
409             (gnt, new_maps, domids, refs, ioreq->prot);
410         if (ioreq->pages == NULL) {
411             xen_be_printf(&ioreq->blkdev->xendev, 0,
412                           "can't map %d grant refs (%s, %d maps)\n",
413                           new_maps, strerror(errno), ioreq->blkdev->cnt_map);
414             return -1;
415         }
416         for (i = 0, j = 0; i < ioreq->v.niov; i++) {
417             if (page[i] == NULL) {
418                 page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
419             }
420         }
421         ioreq->blkdev->cnt_map += new_maps;
422     } else if (new_maps)  {
423         for (i = 0; i < new_maps; i++) {
424             ioreq->page[i] = xengnttab_map_grant_ref
425                 (gnt, domids[i], refs[i], ioreq->prot);
426             if (ioreq->page[i] == NULL) {
427                 xen_be_printf(&ioreq->blkdev->xendev, 0,
428                               "can't map grant ref %d (%s, %d maps)\n",
429                               refs[i], strerror(errno), ioreq->blkdev->cnt_map);
430                 ioreq->mapped = 1;
431                 ioreq_unmap(ioreq);
432                 return -1;
433             }
434             ioreq->blkdev->cnt_map++;
435         }
436         for (i = 0, j = 0; i < ioreq->v.niov; i++) {
437             if (page[i] == NULL) {
438                 page[i] = ioreq->page[j++];
439             }
440         }
441     }
442     if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
443         (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
444         ioreq->blkdev->max_grants))) {
445         /*
446          * If we are using persistent grants and batch mappings only
447          * add the new maps to the list of persistent grants if the whole
448          * area can be persistently mapped.
449          */
450         if (batch_maps) {
451             region = g_malloc0(sizeof(*region));
452             region->addr = ioreq->pages;
453             region->num = new_maps;
454             ioreq->blkdev->persistent_regions = g_slist_append(
455                                             ioreq->blkdev->persistent_regions,
456                                             region);
457         }
458         while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
459               && new_maps) {
460             /* Go through the list of newly mapped grants and add as many
461              * as possible to the list of persistently mapped grants.
462              *
463              * Since we start at the end of ioreq->page(s), we only need
464              * to decrease new_maps to prevent these granted pages from
465              * being unmapped in ioreq_unmap.
466              */
467             grant = g_malloc0(sizeof(*grant));
468             new_maps--;
469             if (batch_maps) {
470                 grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
471             } else {
472                 grant->page = ioreq->page[new_maps];
473             }
474             grant->blkdev = ioreq->blkdev;
475             xen_be_printf(&ioreq->blkdev->xendev, 3,
476                           "adding grant %" PRIu32 " page: %p\n",
477                           refs[new_maps], grant->page);
478             g_tree_insert(ioreq->blkdev->persistent_gnts,
479                           GUINT_TO_POINTER(refs[new_maps]),
480                           grant);
481             ioreq->blkdev->persistent_gnt_count++;
482         }
483         assert(!batch_maps || new_maps == 0);
484     }
485     for (i = 0; i < ioreq->v.niov; i++) {
486         ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
487     }
488     ioreq->mapped = 1;
489     ioreq->num_unmap = new_maps;
490     return 0;
491 }
492
493 static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
494
495 static void qemu_aio_complete(void *opaque, int ret)
496 {
497     struct ioreq *ioreq = opaque;
498
499     if (ret != 0) {
500         xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
501                       ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
502         ioreq->aio_errors++;
503     }
504
505     ioreq->aio_inflight--;
506     if (ioreq->presync) {
507         ioreq->presync = 0;
508         ioreq_runio_qemu_aio(ioreq);
509         return;
510     }
511     if (ioreq->aio_inflight > 0) {
512         return;
513     }
514
515     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
516     ioreq_unmap(ioreq);
517     ioreq_finish(ioreq);
518     switch (ioreq->req.operation) {
519     case BLKIF_OP_WRITE:
520     case BLKIF_OP_FLUSH_DISKCACHE:
521         if (!ioreq->req.nr_segments) {
522             break;
523         }
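        /* fall through */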
524     case BLKIF_OP_READ:
525         if (ioreq->status == BLKIF_RSP_OKAY) {
526             block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
527         } else {
528             block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
529         }
530         break;
531     case BLKIF_OP_DISCARD:
532     default:
533         break;
534     }
535     qemu_bh_schedule(ioreq->blkdev->bh);
536 }
537
538 static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
539 {
540     struct XenBlkDev *blkdev = ioreq->blkdev;
541
542     if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
543         goto err_no_map;
544     }
545
546     ioreq->aio_inflight++;
547     if (ioreq->presync) {
548         blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
549         return 0;
550     }
551
552     switch (ioreq->req.operation) {
553     case BLKIF_OP_READ:
554         block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
555                          ioreq->v.size, BLOCK_ACCT_READ);
556         ioreq->aio_inflight++;
557         blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
558                        qemu_aio_complete, ioreq);
559         break;
560     case BLKIF_OP_WRITE:
561     case BLKIF_OP_FLUSH_DISKCACHE:
562         if (!ioreq->req.nr_segments) {
563             break;
564         }
565
566         block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
567                          ioreq->v.size,
568                          ioreq->req.operation == BLKIF_OP_WRITE ?
569                          BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
570         ioreq->aio_inflight++;
571         blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
572                         qemu_aio_complete, ioreq);
573         break;
574     case BLKIF_OP_DISCARD:
575     {
576         struct blkif_request_discard *discard_req = (void *)&ioreq->req;
577         ioreq->aio_inflight++;
578         blk_aio_discard(blkdev->blk,
579                         discard_req->sector_number, discard_req->nr_sectors,
580                         qemu_aio_complete, ioreq);
581         break;
582     }
583     default:
584         /* unknown operation (shouldn't happen -- parse catches this) */
585         goto err;
586     }
587
588     qemu_aio_complete(ioreq, 0);
589
590     return 0;
591
592 err:
593     ioreq_unmap(ioreq);
594 err_no_map:
595     ioreq_finish(ioreq);
596     ioreq->status = BLKIF_RSP_ERROR;
597     return -1;
598 }
599
600 static int blk_send_response_one(struct ioreq *ioreq)
601 {
602     struct XenBlkDev  *blkdev = ioreq->blkdev;
603     int               send_notify   = 0;
604     int               have_requests = 0;
605     blkif_response_t  resp;
606     void              *dst;
607
608     resp.id        = ioreq->req.id;
609     resp.operation = ioreq->req.operation;
610     resp.status    = ioreq->status;
611
612     /* Place on the response ring for the relevant domain. */
613     switch (blkdev->protocol) {
614     case BLKIF_PROTOCOL_NATIVE:
615         dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
616         break;
617     case BLKIF_PROTOCOL_X86_32:
618         dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
619                                 blkdev->rings.x86_32_part.rsp_prod_pvt);
620         break;
621     case BLKIF_PROTOCOL_X86_64:
622         dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
623                                 blkdev->rings.x86_64_part.rsp_prod_pvt);
624         break;
625     default:
626         dst = NULL;
627         return 0;
628     }
629     memcpy(dst, &resp, sizeof(resp));
630     blkdev->rings.common.rsp_prod_pvt++;
631
632     RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
633     if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
634         /*
635          * Tail check for pending requests. Allows frontend to avoid
636          * notifications if requests are already in flight (lower
637          * overheads and promotes batching).
638          */
639         RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
640     } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
641         have_requests = 1;
642     }
643
644     if (have_requests) {
645         blkdev->more_work++;
646     }
647     return send_notify;
648 }
649
650 /* walk finished list, send outstanding responses, free requests */
651 static void blk_send_response_all(struct XenBlkDev *blkdev)
652 {
653     struct ioreq *ioreq;
654     int send_notify = 0;
655
656     while (!QLIST_EMPTY(&blkdev->finished)) {
657         ioreq = QLIST_FIRST(&blkdev->finished);
658         send_notify += blk_send_response_one(ioreq);
659         ioreq_release(ioreq, true);
660     }
661     if (send_notify) {
662         xen_be_send_notify(&blkdev->xendev);
663     }
664 }
665
666 static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
667 {
668     switch (blkdev->protocol) {
669     case BLKIF_PROTOCOL_NATIVE:
670         memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
671                sizeof(ioreq->req));
672         break;
673     case BLKIF_PROTOCOL_X86_32:
674         blkif_get_x86_32_req(&ioreq->req,
675                              RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
676         break;
677     case BLKIF_PROTOCOL_X86_64:
678         blkif_get_x86_64_req(&ioreq->req,
679                              RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
680         break;
681     }
682     return 0;
683 }
684
685 static void blk_handle_requests(struct XenBlkDev *blkdev)
686 {
687     RING_IDX rc, rp;
688     struct ioreq *ioreq;
689
690     blkdev->more_work = 0;
691
692     rc = blkdev->rings.common.req_cons;
693     rp = blkdev->rings.common.sring->req_prod;
694     xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
695
696     blk_send_response_all(blkdev);
697     while (rc != rp) {
698         /* pull request from ring */
699         if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
700             break;
701         }
702         ioreq = ioreq_start(blkdev);
703         if (ioreq == NULL) {
704             blkdev->more_work++;
705             break;
706         }
707         blk_get_request(blkdev, ioreq, rc);
708         blkdev->rings.common.req_cons = ++rc;
709
710         /* parse them */
711         if (ioreq_parse(ioreq) != 0) {
712
713             switch (ioreq->req.operation) {
714             case BLKIF_OP_READ:
715                 block_acct_invalid(blk_get_stats(blkdev->blk),
716                                    BLOCK_ACCT_READ);
717                 break;
718             case BLKIF_OP_WRITE:
719                 block_acct_invalid(blk_get_stats(blkdev->blk),
720                                    BLOCK_ACCT_WRITE);
721                 break;
722             case BLKIF_OP_FLUSH_DISKCACHE:
723                 block_acct_invalid(blk_get_stats(blkdev->blk),
724                                    BLOCK_ACCT_FLUSH);
725             default:
726                 break;
727             }
728
729             if (blk_send_response_one(ioreq)) {
730                 xen_be_send_notify(&blkdev->xendev);
731             }
732             ioreq_release(ioreq, false);
733             continue;
734         }
735
736         ioreq_runio_qemu_aio(ioreq);
737     }
738
739     if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
740         qemu_bh_schedule(blkdev->bh);
741     }
742 }
743
744 /* ------------------------------------------------------------- */
745
746 static void blk_bh(void *opaque)
747 {
748     struct XenBlkDev *blkdev = opaque;
749     blk_handle_requests(blkdev);
750 }
751
752 /*
753  * We need to account for the grant allocations requiring contiguous
754  * chunks; the worst case number would be
755  *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
756  * but in order to keep things simple just use
757  *     2 * max_req * max_seg.
758  */
759 #define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
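/* Worked example, assuming the usual BLKIF_MAX_SEGMENTS_PER_REQUEST of 11:
 * with max_requests = 32 the exact worst case above is
 *     32 * 11 + 31 * 10 + 1 = 663 grants,
 * while the simplified bound reserves 2 * 32 * 11 = 704. */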
760
761 static void blk_alloc(struct XenDevice *xendev)
762 {
763     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
764
765     QLIST_INIT(&blkdev->inflight);
766     QLIST_INIT(&blkdev->finished);
767     QLIST_INIT(&blkdev->freelist);
768     blkdev->bh = qemu_bh_new(blk_bh, blkdev);
769     if (xen_mode != XEN_EMULATE) {
770         batch_maps = 1;
771     }
772     if (xengnttab_set_max_grants(xendev->gnttabdev,
773             MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
774         xen_be_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
775                       strerror(errno));
776     }
777 }
778
779 static void blk_parse_discard(struct XenBlkDev *blkdev)
780 {
781     int enable;
782
783     blkdev->feature_discard = true;
784
785     if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
786         blkdev->feature_discard = !!enable;
787     }
788
789     if (blkdev->feature_discard) {
790         xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
791     }
792 }
793
794 static int blk_init(struct XenDevice *xendev)
795 {
796     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
797     int info = 0;
798     char *directiosafe = NULL;
799
800     /* read xenstore entries */
801     if (blkdev->params == NULL) {
802         char *h = NULL;
803         blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
804         if (blkdev->params != NULL) {
805             h = strchr(blkdev->params, ':');
806         }
807         if (h != NULL) {
808             blkdev->fileproto = blkdev->params;
809             blkdev->filename  = h+1;
810             *h = 0;
811         } else {
812             blkdev->fileproto = "<unset>";
813             blkdev->filename  = blkdev->params;
814         }
815     }
816     if (!strcmp("aio", blkdev->fileproto)) {
817         blkdev->fileproto = "raw";
818     }
819     if (!strcmp("vhd", blkdev->fileproto)) {
820         blkdev->fileproto = "vpc";
821     }
822     if (blkdev->mode == NULL) {
823         blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
824     }
825     if (blkdev->type == NULL) {
826         blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
827     }
828     if (blkdev->dev == NULL) {
829         blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
830     }
831     if (blkdev->devtype == NULL) {
832         blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
833     }
834     directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
835     blkdev->directiosafe = (directiosafe && atoi(directiosafe));
836
837     /* do we have all we need? */
838     if (blkdev->params == NULL ||
839         blkdev->mode == NULL   ||
840         blkdev->type == NULL   ||
841         blkdev->dev == NULL) {
842         goto out_error;
843     }
844
845     /* read-only ? */
846     if (strcmp(blkdev->mode, "w")) {
847         info  |= VDISK_READONLY;
848     }
849
850     /* cdrom ? */
851     if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
852         info  |= VDISK_CDROM;
853     }
854
855     blkdev->file_blk  = BLOCK_SIZE;
856
857     /* fill info
858      * blk_connect supplies sector-size and sectors
859      */
860     xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
861     xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
862     xenstore_write_be_int(&blkdev->xendev, "info", info);
863
864     blk_parse_discard(blkdev);
865
866     g_free(directiosafe);
867     return 0;
868
869 out_error:
870     g_free(blkdev->params);
871     blkdev->params = NULL;
872     g_free(blkdev->mode);
873     blkdev->mode = NULL;
874     g_free(blkdev->type);
875     blkdev->type = NULL;
876     g_free(blkdev->dev);
877     blkdev->dev = NULL;
878     g_free(blkdev->devtype);
879     blkdev->devtype = NULL;
880     g_free(directiosafe);
881     blkdev->directiosafe = false;
882     return -1;
883 }
884
885 static int blk_connect(struct XenDevice *xendev)
886 {
887     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
888     int pers, index, qflags;
889     bool readonly = true;
890     bool writethrough = true;
891
892     /* choose block-driver open flags: caching, read-only, discard */
893     if (blkdev->directiosafe) {
894         qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
895     } else {
896         qflags = 0;
897         writethrough = false;
898     }
899     if (strcmp(blkdev->mode, "w") == 0) {
900         qflags |= BDRV_O_RDWR;
901         readonly = false;
902     }
903     if (blkdev->feature_discard) {
904         qflags |= BDRV_O_UNMAP;
905     }
906
907     /* init qemu block driver */
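    /* For the classic xvd numbering the xenbus device number encodes
     * (202 << 8) + (disk << 4) + partition, 202 being the xvd major with
     * 16 minors per disk, so this recovers the disk index. */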
908     index = (blkdev->xendev.dev - 202 * 256) / 16;
909     blkdev->dinfo = drive_get(IF_XEN, 0, index);
910     if (!blkdev->dinfo) {
911         Error *local_err = NULL;
912         QDict *options = NULL;
913
914         if (strcmp(blkdev->fileproto, "<unset>")) {
915             options = qdict_new();
916             qdict_put(options, "driver", qstring_from_str(blkdev->fileproto));
917         }
918
919         /* setup via xenbus -> create new block driver instance */
920         xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
921         blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
922                                    qflags, &local_err);
923         if (!blkdev->blk) {
924             xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
925                           error_get_pretty(local_err));
926             error_free(local_err);
927             return -1;
928         }
929         blk_set_enable_write_cache(blkdev->blk, !writethrough);
930     } else {
931         /* setup via qemu cmdline -> already setup for us */
932         xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
933         blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
934         if (blk_is_read_only(blkdev->blk) && !readonly) {
935             xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
936             blkdev->blk = NULL;
937             return -1;
938         }
939         /* blkdev->blk is not created by us, we get a reference
940          * so we can blk_unref() unconditionally */
941         blk_ref(blkdev->blk);
942     }
943     blk_attach_dev_nofail(blkdev->blk, blkdev);
944     blkdev->file_size = blk_getlength(blkdev->blk);
945     if (blkdev->file_size < 0) {
946         BlockDriverState *bs = blk_bs(blkdev->blk);
947         const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
948         xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
949                       (int)blkdev->file_size, strerror(-blkdev->file_size),
950                       drv_name ?: "-");
951         blkdev->file_size = 0;
952     }
953
954     xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
955                   " size %" PRId64 " (%" PRId64 " MB)\n",
956                   blkdev->type, blkdev->fileproto, blkdev->filename,
957                   blkdev->file_size, blkdev->file_size >> 20);
958
959     /* Fill in the sector size and number of sectors */
960     xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
961     xenstore_write_be_int64(&blkdev->xendev, "sectors",
962                             blkdev->file_size / blkdev->file_blk);
963
964     if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
965         return -1;
966     }
967     if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
968                              &blkdev->xendev.remote_port) == -1) {
969         return -1;
970     }
971     if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
972         blkdev->feature_persistent = FALSE;
973     } else {
974         blkdev->feature_persistent = !!pers;
975     }
976
977     blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
978     if (blkdev->xendev.protocol) {
979         if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
980             blkdev->protocol = BLKIF_PROTOCOL_X86_32;
981         }
982         if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
983             blkdev->protocol = BLKIF_PROTOCOL_X86_64;
984         }
985     }
986
987     blkdev->sring = xengnttab_map_grant_ref(blkdev->xendev.gnttabdev,
988                                             blkdev->xendev.dom,
989                                             blkdev->ring_ref,
990                                             PROT_READ | PROT_WRITE);
991     if (!blkdev->sring) {
992         return -1;
993     }
994     blkdev->cnt_map++;
995
996     switch (blkdev->protocol) {
997     case BLKIF_PROTOCOL_NATIVE:
998     {
999         blkif_sring_t *sring_native = blkdev->sring;
1000         BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
1001         break;
1002     }
1003     case BLKIF_PROTOCOL_X86_32:
1004     {
1005         blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
1006
1007         BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
1008         break;
1009     }
1010     case BLKIF_PROTOCOL_X86_64:
1011     {
1012         blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
1013
1014         BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
1015         break;
1016     }
1017     }
1018
1019     if (blkdev->feature_persistent) {
1020         /* Init persistent grants */
1021         blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
1022         blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
1023                                              NULL, NULL,
1024                                              batch_maps ?
1025                                              (GDestroyNotify)g_free :
1026                                              (GDestroyNotify)destroy_grant);
1027         blkdev->persistent_regions = NULL;
1028         blkdev->persistent_gnt_count = 0;
1029     }
1030
1031     xen_be_bind_evtchn(&blkdev->xendev);
1032
1033     xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
1034                   "remote port %d, local port %d\n",
1035                   blkdev->xendev.protocol, blkdev->ring_ref,
1036                   blkdev->xendev.remote_port, blkdev->xendev.local_port);
1037     return 0;
1038 }
1039
1040 static void blk_disconnect(struct XenDevice *xendev)
1041 {
1042     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1043
1044     if (blkdev->blk) {
1045         blk_detach_dev(blkdev->blk, blkdev);
1046         blk_unref(blkdev->blk);
1047         blkdev->blk = NULL;
1048     }
1049     xen_be_unbind_evtchn(&blkdev->xendev);
1050
1051     if (blkdev->sring) {
1052         xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
1053         blkdev->cnt_map--;
1054         blkdev->sring = NULL;
1055     }
1056
1057     /*
1058      * Unmap persistent grants before switching to the closed state
1059      * so the frontend can free them.
1060      *
1061      * In the !batch_maps case g_tree_destroy will take care of unmapping
1062      * the grant, but in the batch_maps case we need to iterate over every
1063      * region in persistent_regions and unmap it.
1064      */
1065     if (blkdev->feature_persistent) {
1066         g_tree_destroy(blkdev->persistent_gnts);
1067         assert(batch_maps || blkdev->persistent_gnt_count == 0);
1068         if (batch_maps) {
1069             blkdev->persistent_gnt_count = 0;
1070             g_slist_foreach(blkdev->persistent_regions,
1071                             (GFunc)remove_persistent_region, blkdev);
1072             g_slist_free(blkdev->persistent_regions);
1073         }
1074         blkdev->feature_persistent = false;
1075     }
1076 }
1077
1078 static int blk_free(struct XenDevice *xendev)
1079 {
1080     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1081     struct ioreq *ioreq;
1082
1083     if (blkdev->blk || blkdev->sring) {
1084         blk_disconnect(xendev);
1085     }
1086
1087     while (!QLIST_EMPTY(&blkdev->freelist)) {
1088         ioreq = QLIST_FIRST(&blkdev->freelist);
1089         QLIST_REMOVE(ioreq, list);
1090         qemu_iovec_destroy(&ioreq->v);
1091         g_free(ioreq);
1092     }
1093
1094     g_free(blkdev->params);
1095     g_free(blkdev->mode);
1096     g_free(blkdev->type);
1097     g_free(blkdev->dev);
1098     g_free(blkdev->devtype);
1099     qemu_bh_delete(blkdev->bh);
1100     return 0;
1101 }
1102
1103 static void blk_event(struct XenDevice *xendev)
1104 {
1105     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1106
1107     qemu_bh_schedule(blkdev->bh);
1108 }
1109
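/* Backend callbacks for this device type, invoked by the generic Xen
 * backend core as the frontend moves through its xenbus states
 * (presumably registered elsewhere as the "qdisk" backend). */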
1110 struct XenDevOps xen_blkdev_ops = {
1111     .size       = sizeof(struct XenBlkDev),
1112     .flags      = DEVOPS_FLAG_NEED_GNTDEV,
1113     .alloc      = blk_alloc,
1114     .init       = blk_init,
1115     .initialise    = blk_connect,
1116     .disconnect = blk_disconnect,
1117     .event      = blk_event,
1118     .free       = blk_free,
1119 };