1 /*
2  *  xen paravirt block device backend
3  *
4  *  (c) Gerd Hoffmann <[email protected]>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; under version 2 of the License.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  *  GNU General Public License for more details.
14  *
15  *  You should have received a copy of the GNU General Public License along
16  *  with this program; if not, see <http://www.gnu.org/licenses/>.
17  *
18  *  Contributions after 2012-01-13 are licensed under the terms of the
19  *  GNU GPL, version 2 or (at your option) any later version.
20  */
21
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <signal.h>
28 #include <inttypes.h>
29 #include <time.h>
30 #include <fcntl.h>
31 #include <errno.h>
32 #include <sys/ioctl.h>
33 #include <sys/types.h>
34 #include <sys/stat.h>
35 #include <sys/mman.h>
36 #include <sys/uio.h>
37
38 #include "hw/hw.h"
39 #include "hw/xen/xen_backend.h"
40 #include "xen_blkif.h"
41 #include "sysemu/blockdev.h"
42 #include "sysemu/block-backend.h"
43
44 /* ------------------------------------------------------------- */
45
46 static int batch_maps   = 0;
47
48 static int max_requests = 32;
49
50 /* ------------------------------------------------------------- */
51
52 #define BLOCK_SIZE  512
53 #define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
54
55 struct PersistentGrant {
56     void *page;
57     struct XenBlkDev *blkdev;
58 };
59
60 typedef struct PersistentGrant PersistentGrant;
61
62 struct ioreq {
63     blkif_request_t     req;
64     int16_t             status;
65
66     /* parsed request */
67     off_t               start;
68     QEMUIOVector        v;
69     int                 presync;
70     int                 postsync;
71     uint8_t             mapped;
72
73     /* grant mapping */
74     uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
75     uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
76     int                 prot;
77     void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
78     void                *pages;
79     int                 num_unmap;
80
81     /* aio status */
82     int                 aio_inflight;
83     int                 aio_errors;
84
85     struct XenBlkDev    *blkdev;
86     QLIST_ENTRY(ioreq)   list;
87     BlockAcctCookie     acct;
88 };
89
90 struct XenBlkDev {
91     struct XenDevice    xendev;  /* must be first */
92     char                *params;
93     char                *mode;
94     char                *type;
95     char                *dev;
96     char                *devtype;
97     bool                directiosafe;
98     const char          *fileproto;
99     const char          *filename;
100     int                 ring_ref;
101     void                *sring;
102     int64_t             file_blk;
103     int64_t             file_size;
104     int                 protocol;
105     blkif_back_rings_t  rings;
106     int                 more_work;
107     int                 cnt_map;
108
109     /* request lists */
110     QLIST_HEAD(inflight_head, ioreq) inflight;
111     QLIST_HEAD(finished_head, ioreq) finished;
112     QLIST_HEAD(freelist_head, ioreq) freelist;
113     int                 requests_total;
114     int                 requests_inflight;
115     int                 requests_finished;
116
117     /* Protocol extensions: discard and persistent grants */
118     gboolean            feature_discard;
119     gboolean            feature_persistent;
120     GTree               *persistent_gnts;
121     unsigned int        persistent_gnt_count;
122     unsigned int        max_grants;
123
124     /* qemu block driver */
125     DriveInfo           *dinfo;
126     BlockDriverState    *bs;
127     QEMUBH              *bh;
128 };
129
130 /* ------------------------------------------------------------- */
131
132 static void ioreq_reset(struct ioreq *ioreq)
133 {
134     memset(&ioreq->req, 0, sizeof(ioreq->req));
135     ioreq->status = 0;
136     ioreq->start = 0;
137     ioreq->presync = 0;
138     ioreq->postsync = 0;
139     ioreq->mapped = 0;
140
141     memset(ioreq->domids, 0, sizeof(ioreq->domids));
142     memset(ioreq->refs, 0, sizeof(ioreq->refs));
143     ioreq->prot = 0;
144     memset(ioreq->page, 0, sizeof(ioreq->page));
145     ioreq->pages = NULL;
146
147     ioreq->aio_inflight = 0;
148     ioreq->aio_errors = 0;
149
150     ioreq->blkdev = NULL;
151     memset(&ioreq->list, 0, sizeof(ioreq->list));
152     memset(&ioreq->acct, 0, sizeof(ioreq->acct));
153
154     qemu_iovec_reset(&ioreq->v);
155 }
156
157 static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
158 {
159     uint ua = GPOINTER_TO_UINT(a);
160     uint ub = GPOINTER_TO_UINT(b);
161     return (ua > ub) - (ua < ub);
162 }
163
164 static void destroy_grant(gpointer pgnt)
165 {
166     PersistentGrant *grant = pgnt;
167     XenGnttab gnt = grant->blkdev->xendev.gnttabdev;
168
169     if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
170         xen_be_printf(&grant->blkdev->xendev, 0,
171                       "xc_gnttab_munmap failed: %s\n",
172                       strerror(errno));
173     }
174     grant->blkdev->persistent_gnt_count--;
175     xen_be_printf(&grant->blkdev->xendev, 3,
176                   "unmapped grant %p\n", grant->page);
177     g_free(grant);
178 }
179
180 static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
181 {
182     struct ioreq *ioreq = NULL;
183
184     if (QLIST_EMPTY(&blkdev->freelist)) {
185         if (blkdev->requests_total >= max_requests) {
186             goto out;
187         }
188         /* allocate new struct */
189         ioreq = g_malloc0(sizeof(*ioreq));
190         ioreq->blkdev = blkdev;
191         blkdev->requests_total++;
192         qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
193     } else {
194         /* get one from freelist */
195         ioreq = QLIST_FIRST(&blkdev->freelist);
196         QLIST_REMOVE(ioreq, list);
197     }
198     QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
199     blkdev->requests_inflight++;
200
201 out:
202     return ioreq;
203 }
204
205 static void ioreq_finish(struct ioreq *ioreq)
206 {
207     struct XenBlkDev *blkdev = ioreq->blkdev;
208
209     QLIST_REMOVE(ioreq, list);
210     QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
211     blkdev->requests_inflight--;
212     blkdev->requests_finished++;
213 }
214
215 static void ioreq_release(struct ioreq *ioreq, bool finish)
216 {
217     struct XenBlkDev *blkdev = ioreq->blkdev;
218
219     QLIST_REMOVE(ioreq, list);
220     ioreq_reset(ioreq);
221     ioreq->blkdev = blkdev;
222     QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
223     if (finish) {
224         blkdev->requests_finished--;
225     } else {
226         blkdev->requests_inflight--;
227     }
228 }
229
230 /*
231  * translate request into iovec + start offset
232  * do sanity checks along the way
233  */
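/*
 * Illustration (not part of the original code): with the 512-byte BLOCK_SIZE
 * used here, a segment with first_sect = 0 and last_sect = 7 contributes an
 * iovec entry of 8 * 512 = 4096 bytes, i.e. one full granted page, and the
 * request as a whole starts at byte offset sector_number * 512 in the image.
 */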
234 static int ioreq_parse(struct ioreq *ioreq)
235 {
236     struct XenBlkDev *blkdev = ioreq->blkdev;
237     uintptr_t mem;
238     size_t len;
239     int i;
240
241     xen_be_printf(&blkdev->xendev, 3,
242                   "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
243                   ioreq->req.operation, ioreq->req.nr_segments,
244                   ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
245     switch (ioreq->req.operation) {
246     case BLKIF_OP_READ:
247         ioreq->prot = PROT_WRITE; /* to memory */
248         break;
249     case BLKIF_OP_FLUSH_DISKCACHE:
250         ioreq->presync = 1;
251         if (!ioreq->req.nr_segments) {
252             return 0;
253         }
254         /* fall through */
255     case BLKIF_OP_WRITE:
256         ioreq->prot = PROT_READ; /* from memory */
257         break;
258     case BLKIF_OP_DISCARD:
259         return 0;
260     default:
261         xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
262                       ioreq->req.operation);
263         goto err;
264     }
265
266     if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
267         xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
268         goto err;
269     }
270
271     ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
272     for (i = 0; i < ioreq->req.nr_segments; i++) {
273         if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
274             xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
275             goto err;
276         }
277         if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
278             xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
279             goto err;
280         }
281         if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
282             xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
283             goto err;
284         }
285
286         ioreq->domids[i] = blkdev->xendev.dom;
287         ioreq->refs[i]   = ioreq->req.seg[i].gref;
288
289         mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
290         len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
291         qemu_iovec_add(&ioreq->v, (void*)mem, len);
292     }
293     if (ioreq->start + ioreq->v.size > blkdev->file_size) {
294         xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
295         goto err;
296     }
297     return 0;
298
299 err:
300     ioreq->status = BLKIF_RSP_ERROR;
301     return -1;
302 }
303
304 static void ioreq_unmap(struct ioreq *ioreq)
305 {
306     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
307     int i;
308
309     if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
310         return;
311     }
312     if (batch_maps) {
313         if (!ioreq->pages) {
314             return;
315         }
316         if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
317             xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
318                           strerror(errno));
319         }
320         ioreq->blkdev->cnt_map -= ioreq->num_unmap;
321         ioreq->pages = NULL;
322     } else {
323         for (i = 0; i < ioreq->num_unmap; i++) {
324             if (!ioreq->page[i]) {
325                 continue;
326             }
327             if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
328                 xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
329                               strerror(errno));
330             }
331             ioreq->blkdev->cnt_map--;
332             ioreq->page[i] = NULL;
333         }
334     }
335     ioreq->mapped = 0;
336 }
337
338 static int ioreq_map(struct ioreq *ioreq)
339 {
340     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
341     uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
342     uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
343     void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
344     int i, j, new_maps = 0;
345     PersistentGrant *grant;
346     /* domids and refs variables will contain the information necessary
347      * to map the grants that are needed to fulfill this request.
348      *
349      * After mapping the needed grants, the page array will contain the
350      * memory address of each granted page in the order specified in ioreq
351      * (disregarding if it's a persistent grant or not).
352      */
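    /*
     * Illustration (not part of the original code): with niov == 3 and only
     * the grant of segment 1 already present in the persistent grant tree,
     * page[1] points at the persistently mapped page, while the refs of
     * segments 0 and 2 are packed into the local refs[]/domids[] arrays
     * (new_maps == 2) and their page[] slots stay NULL until mapped below.
     */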
353
354     if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
355         return 0;
356     }
357     if (ioreq->blkdev->feature_persistent) {
358         for (i = 0; i < ioreq->v.niov; i++) {
359             grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
360                                     GUINT_TO_POINTER(ioreq->refs[i]));
361
362             if (grant != NULL) {
363                 page[i] = grant->page;
364                 xen_be_printf(&ioreq->blkdev->xendev, 3,
365                               "using persistent-grant %" PRIu32 "\n",
366                               ioreq->refs[i]);
367             } else {
368                 /* Add the grant to the list of grants that
369                  * should be mapped
370                  */
371                 domids[new_maps] = ioreq->domids[i];
372                 refs[new_maps] = ioreq->refs[i];
373                 page[i] = NULL;
374                 new_maps++;
375             }
376         }
377         /* Set the protection to RW, since grants may be reused later
378          * with a different protection than the one needed for this request
379          */
380         ioreq->prot = PROT_WRITE | PROT_READ;
381     } else {
382         /* All grants in the request should be mapped */
383         memcpy(refs, ioreq->refs, sizeof(refs));
384         memcpy(domids, ioreq->domids, sizeof(domids));
385         memset(page, 0, sizeof(page));
386         new_maps = ioreq->v.niov;
387     }
388
389     if (batch_maps && new_maps) {
390         ioreq->pages = xc_gnttab_map_grant_refs
391             (gnt, new_maps, domids, refs, ioreq->prot);
392         if (ioreq->pages == NULL) {
393             xen_be_printf(&ioreq->blkdev->xendev, 0,
394                           "can't map %d grant refs (%s, %d maps)\n",
395                           new_maps, strerror(errno), ioreq->blkdev->cnt_map);
396             return -1;
397         }
398         for (i = 0, j = 0; i < ioreq->v.niov; i++) {
399             if (page[i] == NULL) {
400                 page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
401             }
402         }
403         ioreq->blkdev->cnt_map += new_maps;
404     } else if (new_maps)  {
405         for (i = 0; i < new_maps; i++) {
406             ioreq->page[i] = xc_gnttab_map_grant_ref
407                 (gnt, domids[i], refs[i], ioreq->prot);
408             if (ioreq->page[i] == NULL) {
409                 xen_be_printf(&ioreq->blkdev->xendev, 0,
410                               "can't map grant ref %d (%s, %d maps)\n",
411                               refs[i], strerror(errno), ioreq->blkdev->cnt_map);
412                 ioreq->mapped = 1;
413                 ioreq_unmap(ioreq);
414                 return -1;
415             }
416             ioreq->blkdev->cnt_map++;
417         }
418         for (i = 0, j = 0; i < ioreq->v.niov; i++) {
419             if (page[i] == NULL) {
420                 page[i] = ioreq->page[j++];
421             }
422         }
423     }
424     if (ioreq->blkdev->feature_persistent) {
425         while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
426               && new_maps) {
427             /* Go through the list of newly mapped grants and add as many
428              * as possible to the list of persistently mapped grants.
429              *
430              * Since we start at the end of ioreq->page(s), we only need
431          * to decrease new_maps to prevent these granted pages from
432              * being unmapped in ioreq_unmap.
433              */
434             grant = g_malloc0(sizeof(*grant));
435             new_maps--;
436             if (batch_maps) {
437                 grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
438             } else {
439                 grant->page = ioreq->page[new_maps];
440             }
441             grant->blkdev = ioreq->blkdev;
442             xen_be_printf(&ioreq->blkdev->xendev, 3,
443                           "adding grant %" PRIu32 " page: %p\n",
444                           refs[new_maps], grant->page);
445             g_tree_insert(ioreq->blkdev->persistent_gnts,
446                           GUINT_TO_POINTER(refs[new_maps]),
447                           grant);
448             ioreq->blkdev->persistent_gnt_count++;
449         }
450     }
451     for (i = 0; i < ioreq->v.niov; i++) {
452         ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
453     }
454     ioreq->mapped = 1;
455     ioreq->num_unmap = new_maps;
456     return 0;
457 }
458
459 static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
460
461 static void qemu_aio_complete(void *opaque, int ret)
462 {
463     struct ioreq *ioreq = opaque;
464
465     if (ret != 0) {
466         xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
467                       ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
468         ioreq->aio_errors++;
469     }
470
471     ioreq->aio_inflight--;
472     if (ioreq->presync) {
473         ioreq->presync = 0;
474         ioreq_runio_qemu_aio(ioreq);
475         return;
476     }
477     if (ioreq->aio_inflight > 0) {
478         return;
479     }
480     if (ioreq->postsync) {
481         ioreq->postsync = 0;
482         ioreq->aio_inflight++;
483         bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
484         return;
485     }
486
487     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
488     ioreq_unmap(ioreq);
489     ioreq_finish(ioreq);
490     switch (ioreq->req.operation) {
491     case BLKIF_OP_WRITE:
492     case BLKIF_OP_FLUSH_DISKCACHE:
493         if (!ioreq->req.nr_segments) {
494             break;
495         }
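        /* fall through */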
496     case BLKIF_OP_READ:
497         block_acct_done(bdrv_get_stats(ioreq->blkdev->bs), &ioreq->acct);
498         break;
499     case BLKIF_OP_DISCARD:
500     default:
501         break;
502     }
503     qemu_bh_schedule(ioreq->blkdev->bh);
504 }
505
506 static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
507 {
508     struct XenBlkDev *blkdev = ioreq->blkdev;
509
510     if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
511         goto err_no_map;
512     }
513
514     ioreq->aio_inflight++;
515     if (ioreq->presync) {
516         bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
517         return 0;
518     }
519
520     switch (ioreq->req.operation) {
521     case BLKIF_OP_READ:
522         block_acct_start(bdrv_get_stats(blkdev->bs), &ioreq->acct,
523                          ioreq->v.size, BLOCK_ACCT_READ);
524         ioreq->aio_inflight++;
525         bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
526                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
527                        qemu_aio_complete, ioreq);
528         break;
529     case BLKIF_OP_WRITE:
530     case BLKIF_OP_FLUSH_DISKCACHE:
531         if (!ioreq->req.nr_segments) {
532             break;
533         }
534
535         block_acct_start(bdrv_get_stats(blkdev->bs), &ioreq->acct,
536                          ioreq->v.size, BLOCK_ACCT_WRITE);
537         ioreq->aio_inflight++;
538         bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
539                         &ioreq->v, ioreq->v.size / BLOCK_SIZE,
540                         qemu_aio_complete, ioreq);
541         break;
542     case BLKIF_OP_DISCARD:
543     {
544         struct blkif_request_discard *discard_req = (void *)&ioreq->req;
545         ioreq->aio_inflight++;
546         bdrv_aio_discard(blkdev->bs,
547                         discard_req->sector_number, discard_req->nr_sectors,
548                         qemu_aio_complete, ioreq);
549         break;
550     }
551     default:
552         /* unknown operation (shouldn't happen -- parse catches this) */
553         goto err;
554     }
555
556     qemu_aio_complete(ioreq, 0);
557
558     return 0;
559
560 err:
561     ioreq_unmap(ioreq);
562 err_no_map:
563     ioreq_finish(ioreq);
564     ioreq->status = BLKIF_RSP_ERROR;
565     return -1;
566 }
567
568 static int blk_send_response_one(struct ioreq *ioreq)
569 {
570     struct XenBlkDev  *blkdev = ioreq->blkdev;
571     int               send_notify   = 0;
572     int               have_requests = 0;
573     blkif_response_t  resp;
574     void              *dst;
575
576     resp.id        = ioreq->req.id;
577     resp.operation = ioreq->req.operation;
578     resp.status    = ioreq->status;
579
580     /* Place on the response ring for the relevant domain. */
581     switch (blkdev->protocol) {
582     case BLKIF_PROTOCOL_NATIVE:
583         dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
584         break;
585     case BLKIF_PROTOCOL_X86_32:
586         dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
587                                 blkdev->rings.x86_32_part.rsp_prod_pvt);
588         break;
589     case BLKIF_PROTOCOL_X86_64:
590         dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
591                                 blkdev->rings.x86_64_part.rsp_prod_pvt);
592         break;
593     default:
594         dst = NULL;
595         return 0;
596     }
597     memcpy(dst, &resp, sizeof(resp));
598     blkdev->rings.common.rsp_prod_pvt++;
599
600     RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
601     if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
602         /*
603          * Tail check for pending requests. Allows frontend to avoid
604          * notifications if requests are already in flight (lower
605          * overheads and promotes batching).
606          */
607         RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
608     } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
609         have_requests = 1;
610     }
611
612     if (have_requests) {
613         blkdev->more_work++;
614     }
615     return send_notify;
616 }
617
618 /* walk finished list, send outstanding responses, free requests */
619 static void blk_send_response_all(struct XenBlkDev *blkdev)
620 {
621     struct ioreq *ioreq;
622     int send_notify = 0;
623
624     while (!QLIST_EMPTY(&blkdev->finished)) {
625         ioreq = QLIST_FIRST(&blkdev->finished);
626         send_notify += blk_send_response_one(ioreq);
627         ioreq_release(ioreq, true);
628     }
629     if (send_notify) {
630         xen_be_send_notify(&blkdev->xendev);
631     }
632 }
633
634 static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
635 {
636     switch (blkdev->protocol) {
637     case BLKIF_PROTOCOL_NATIVE:
638         memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
639                sizeof(ioreq->req));
640         break;
641     case BLKIF_PROTOCOL_X86_32:
642         blkif_get_x86_32_req(&ioreq->req,
643                              RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
644         break;
645     case BLKIF_PROTOCOL_X86_64:
646         blkif_get_x86_64_req(&ioreq->req,
647                              RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
648         break;
649     }
650     return 0;
651 }
652
653 static void blk_handle_requests(struct XenBlkDev *blkdev)
654 {
655     RING_IDX rc, rp;
656     struct ioreq *ioreq;
657
658     blkdev->more_work = 0;
659
660     rc = blkdev->rings.common.req_cons;
661     rp = blkdev->rings.common.sring->req_prod;
662     xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
663
664     blk_send_response_all(blkdev);
665     while (rc != rp) {
666         /* pull request from ring */
667         if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
668             break;
669         }
670         ioreq = ioreq_start(blkdev);
671         if (ioreq == NULL) {
672             blkdev->more_work++;
673             break;
674         }
675         blk_get_request(blkdev, ioreq, rc);
676         blkdev->rings.common.req_cons = ++rc;
677
678         /* parse them */
679         if (ioreq_parse(ioreq) != 0) {
680             if (blk_send_response_one(ioreq)) {
681                 xen_be_send_notify(&blkdev->xendev);
682             }
683             ioreq_release(ioreq, false);
684             continue;
685         }
686
687         ioreq_runio_qemu_aio(ioreq);
688     }
689
690     if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
691         qemu_bh_schedule(blkdev->bh);
692     }
693 }
694
695 /* ------------------------------------------------------------- */
696
697 static void blk_bh(void *opaque)
698 {
699     struct XenBlkDev *blkdev = opaque;
700     blk_handle_requests(blkdev);
701 }
702
703 /*
704  * We need to account for the grant allocations requiring contiguous
705  * chunks; the worst case number would be
706  *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
707  * but in order to keep things simple just use
708  *     2 * max_req * max_seg.
709  */
710 #define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
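/*
 * Worked example (illustration, assuming the usual BLKIF_MAX_SEGMENTS_PER_REQUEST
 * value of 11): with max_requests = 32 as set above, the exact worst case is
 * 32 * 11 + 31 * 10 + 1 = 663 grants, while the simplified bound reserves
 * 2 * 32 * 11 = 704.
 */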
711
712 static void blk_alloc(struct XenDevice *xendev)
713 {
714     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
715
716     QLIST_INIT(&blkdev->inflight);
717     QLIST_INIT(&blkdev->finished);
718     QLIST_INIT(&blkdev->freelist);
719     blkdev->bh = qemu_bh_new(blk_bh, blkdev);
720     if (xen_mode != XEN_EMULATE) {
721         batch_maps = 1;
722     }
723     if (xc_gnttab_set_max_grants(xendev->gnttabdev,
724             MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
725         xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
726                       strerror(errno));
727     }
728 }
729
730 static void blk_parse_discard(struct XenBlkDev *blkdev)
731 {
732     int enable;
733
734     blkdev->feature_discard = true;
735
736     if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
737         blkdev->feature_discard = !!enable;
738     }
739
740     if (blkdev->feature_discard) {
741         xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
742     }
743 }
744
745 static int blk_init(struct XenDevice *xendev)
746 {
747     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
748     int info = 0;
749     char *directiosafe = NULL;
750
751     /* read xenstore entries */
752     if (blkdev->params == NULL) {
753         char *h = NULL;
754         blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
755         if (blkdev->params != NULL) {
756             h = strchr(blkdev->params, ':');
757         }
758         if (h != NULL) {
759             blkdev->fileproto = blkdev->params;
760             blkdev->filename  = h+1;
761             *h = 0;
762         } else {
763             blkdev->fileproto = "<unset>";
764             blkdev->filename  = blkdev->params;
765         }
766     }
767     if (!strcmp("aio", blkdev->fileproto)) {
768         blkdev->fileproto = "raw";
769     }
770     if (blkdev->mode == NULL) {
771         blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
772     }
773     if (blkdev->type == NULL) {
774         blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
775     }
776     if (blkdev->dev == NULL) {
777         blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
778     }
779     if (blkdev->devtype == NULL) {
780         blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
781     }
782     directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
783     blkdev->directiosafe = (directiosafe && atoi(directiosafe));
784
785     /* do we have all we need? */
786     if (blkdev->params == NULL ||
787         blkdev->mode == NULL   ||
788         blkdev->type == NULL   ||
789         blkdev->dev == NULL) {
790         goto out_error;
791     }
792
793     /* read-only ? */
794     if (strcmp(blkdev->mode, "w")) {
795         info  |= VDISK_READONLY;
796     }
797
798     /* cdrom ? */
799     if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
800         info  |= VDISK_CDROM;
801     }
802
803     blkdev->file_blk  = BLOCK_SIZE;
804
805     /* fill info
806      * blk_connect supplies sector-size and sectors
807      */
808     xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
809     xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
810     xenstore_write_be_int(&blkdev->xendev, "info", info);
811
812     blk_parse_discard(blkdev);
813
814     g_free(directiosafe);
815     return 0;
816
817 out_error:
818     g_free(blkdev->params);
819     blkdev->params = NULL;
820     g_free(blkdev->mode);
821     blkdev->mode = NULL;
822     g_free(blkdev->type);
823     blkdev->type = NULL;
824     g_free(blkdev->dev);
825     blkdev->dev = NULL;
826     g_free(blkdev->devtype);
827     blkdev->devtype = NULL;
828     g_free(directiosafe);
829     blkdev->directiosafe = false;
830     return -1;
831 }
832
833 static int blk_connect(struct XenDevice *xendev)
834 {
835     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
836     int pers, index, qflags;
837     bool readonly = true;
838
839     /* read-only ? */
840     if (blkdev->directiosafe) {
841         qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
842     } else {
843         qflags = BDRV_O_CACHE_WB;
844     }
845     if (strcmp(blkdev->mode, "w") == 0) {
846         qflags |= BDRV_O_RDWR;
847         readonly = false;
848     }
849     if (blkdev->feature_discard) {
850         qflags |= BDRV_O_UNMAP;
851     }
852
853     /* init qemu block driver */
854     index = (blkdev->xendev.dev - 202 * 256) / 16;
855     blkdev->dinfo = drive_get(IF_XEN, 0, index);
856     if (!blkdev->dinfo) {
857         Error *local_err = NULL;
858         BlockBackend *blk;
859         BlockDriver *drv;
860
861         /* setup via xenbus -> create new block driver instance */
862         xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
863         blk = blk_new_with_bs(blkdev->dev, NULL);
864         if (!blk) {
865             return -1;
866         }
867         blkdev->bs = blk_bs(blk);
868
869         drv = bdrv_find_whitelisted_format(blkdev->fileproto, readonly);
870         if (bdrv_open(&blkdev->bs, blkdev->filename, NULL, NULL, qflags,
871                       drv, &local_err) != 0) {
872             xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
873                           error_get_pretty(local_err));
874             error_free(local_err);
875             bdrv_unref(blkdev->bs);
876             blk_unref(blk);
877             blkdev->bs = NULL;
878             return -1;
879         }
880     } else {
881         /* setup via qemu cmdline -> already setup for us */
882         xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
883         blkdev->bs = blkdev->dinfo->bdrv;
884         if (bdrv_is_read_only(blkdev->bs) && !readonly) {
885             xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
886             blkdev->bs = NULL;
887             return -1;
888         }
889         /* blkdev->bs was not created by us; take a reference here
890          * so we can bdrv_unref() it unconditionally */
891         bdrv_ref(blkdev->bs);
892     }
893     bdrv_attach_dev_nofail(blkdev->bs, blkdev);
894     blkdev->file_size = bdrv_getlength(blkdev->bs);
895     if (blkdev->file_size < 0) {
896         xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
897                       (int)blkdev->file_size, strerror(-blkdev->file_size),
898                       bdrv_get_format_name(blkdev->bs) ?: "-");
899         blkdev->file_size = 0;
900     }
901
902     xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
903                   " size %" PRId64 " (%" PRId64 " MB)\n",
904                   blkdev->type, blkdev->fileproto, blkdev->filename,
905                   blkdev->file_size, blkdev->file_size >> 20);
906
907     /* Fill in the sector size and the number of sectors */
908     xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
909     xenstore_write_be_int64(&blkdev->xendev, "sectors",
910                             blkdev->file_size / blkdev->file_blk);
911
912     if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
913         return -1;
914     }
915     if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
916                              &blkdev->xendev.remote_port) == -1) {
917         return -1;
918     }
919     if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
920         blkdev->feature_persistent = FALSE;
921     } else {
922         blkdev->feature_persistent = !!pers;
923     }
924
925     blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
926     if (blkdev->xendev.protocol) {
927         if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
928             blkdev->protocol = BLKIF_PROTOCOL_X86_32;
929         }
930         if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
931             blkdev->protocol = BLKIF_PROTOCOL_X86_64;
932         }
933     }
934
935     blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
936                                             blkdev->xendev.dom,
937                                             blkdev->ring_ref,
938                                             PROT_READ | PROT_WRITE);
939     if (!blkdev->sring) {
940         return -1;
941     }
942     blkdev->cnt_map++;
943
944     switch (blkdev->protocol) {
945     case BLKIF_PROTOCOL_NATIVE:
946     {
947         blkif_sring_t *sring_native = blkdev->sring;
948         BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
949         break;
950     }
951     case BLKIF_PROTOCOL_X86_32:
952     {
953         blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
954
955         BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
956         break;
957     }
958     case BLKIF_PROTOCOL_X86_64:
959     {
960         blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
961
962         BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
963         break;
964     }
965     }
966
967     if (blkdev->feature_persistent) {
968         /* Init persistent grants */
969         blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
970         blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
971                                              NULL, NULL,
972                                              (GDestroyNotify)destroy_grant);
973         blkdev->persistent_gnt_count = 0;
974     }
975
976     xen_be_bind_evtchn(&blkdev->xendev);
977
978     xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
979                   "remote port %d, local port %d\n",
980                   blkdev->xendev.protocol, blkdev->ring_ref,
981                   blkdev->xendev.remote_port, blkdev->xendev.local_port);
982     return 0;
983 }
984
985 static void blk_disconnect(struct XenDevice *xendev)
986 {
987     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
988
989     if (blkdev->bs) {
990         bdrv_detach_dev(blkdev->bs, blkdev);
991         bdrv_unref(blkdev->bs);
992         if (!blkdev->dinfo) {
993             blk_unref(blk_by_name(blkdev->dev));
994         }
995         blkdev->bs = NULL;
996     }
997     xen_be_unbind_evtchn(&blkdev->xendev);
998
999     if (blkdev->sring) {
1000         xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
1001         blkdev->cnt_map--;
1002         blkdev->sring = NULL;
1003     }
1004 }
1005
1006 static int blk_free(struct XenDevice *xendev)
1007 {
1008     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1009     struct ioreq *ioreq;
1010
1011     if (blkdev->bs || blkdev->sring) {
1012         blk_disconnect(xendev);
1013     }
1014
1015     /* Free persistent grants */
1016     if (blkdev->feature_persistent) {
1017         g_tree_destroy(blkdev->persistent_gnts);
1018     }
1019
1020     while (!QLIST_EMPTY(&blkdev->freelist)) {
1021         ioreq = QLIST_FIRST(&blkdev->freelist);
1022         QLIST_REMOVE(ioreq, list);
1023         qemu_iovec_destroy(&ioreq->v);
1024         g_free(ioreq);
1025     }
1026
1027     g_free(blkdev->params);
1028     g_free(blkdev->mode);
1029     g_free(blkdev->type);
1030     g_free(blkdev->dev);
1031     g_free(blkdev->devtype);
1032     qemu_bh_delete(blkdev->bh);
1033     return 0;
1034 }
1035
1036 static void blk_event(struct XenDevice *xendev)
1037 {
1038     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1039
1040     qemu_bh_schedule(blkdev->bh);
1041 }
1042
1043 struct XenDevOps xen_blkdev_ops = {
1044     .size       = sizeof(struct XenBlkDev),
1045     .flags      = DEVOPS_FLAG_NEED_GNTDEV,
1046     .alloc      = blk_alloc,
1047     .init       = blk_init,
1048     .initialise    = blk_connect,
1049     .disconnect = blk_disconnect,
1050     .event      = blk_event,
1051     .free       = blk_free,
1052 };