[qemu.git] / hw/block/xen_disk.c
1 /*
2  *  xen paravirt block device backend
3  *
4  *  (c) Gerd Hoffmann <[email protected]>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; under version 2 of the License.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  *  GNU General Public License for more details.
14  *
15  *  You should have received a copy of the GNU General Public License along
16  *  with this program; if not, see <http://www.gnu.org/licenses/>.
17  *
18  *  Contributions after 2012-01-13 are licensed under the terms of the
19  *  GNU GPL, version 2 or (at your option) any later version.
20  */
21
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <inttypes.h>
28 #include <time.h>
29 #include <fcntl.h>
30 #include <errno.h>
31 #include <sys/ioctl.h>
32 #include <sys/types.h>
33 #include <sys/stat.h>
34 #include <sys/mman.h>
35 #include <sys/uio.h>
36
37 #include "hw/hw.h"
38 #include "hw/xen/xen_backend.h"
39 #include "xen_blkif.h"
40 #include "sysemu/blockdev.h"
41 #include "sysemu/block-backend.h"
42 #include "qapi/qmp/qdict.h"
43 #include "qapi/qmp/qstring.h"
44
45 /* ------------------------------------------------------------- */
46
47 static int batch_maps   = 0;
48
49 static int max_requests = 32;
50
51 /* ------------------------------------------------------------- */
52
53 #define BLOCK_SIZE  512
54 #define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
55
56 struct PersistentGrant {
57     void *page;
58     struct XenBlkDev *blkdev;
59 };
60
61 typedef struct PersistentGrant PersistentGrant;
62
63 struct PersistentRegion {
64     void *addr;
65     int num;
66 };
67
68 typedef struct PersistentRegion PersistentRegion;
69
70 struct ioreq {
71     blkif_request_t     req;
72     int16_t             status;
73
74     /* parsed request */
75     off_t               start;
76     QEMUIOVector        v;
77     int                 presync;
78     uint8_t             mapped;
79
80     /* grant mapping */
81     uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
82     uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
83     int                 prot;
84     void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
85     void                *pages;
86     int                 num_unmap;
87
88     /* aio status */
89     int                 aio_inflight;
90     int                 aio_errors;
91
92     struct XenBlkDev    *blkdev;
93     QLIST_ENTRY(ioreq)   list;
94     BlockAcctCookie     acct;
95 };
96
97 struct XenBlkDev {
98     struct XenDevice    xendev;  /* must be first */
99     char                *params;
100     char                *mode;
101     char                *type;
102     char                *dev;
103     char                *devtype;
104     bool                directiosafe;
105     const char          *fileproto;
106     const char          *filename;
107     int                 ring_ref;
108     void                *sring;
109     int64_t             file_blk;
110     int64_t             file_size;
111     int                 protocol;
112     blkif_back_rings_t  rings;
113     int                 more_work;
114     int                 cnt_map;
115
116     /* request lists */
117     QLIST_HEAD(inflight_head, ioreq) inflight;
118     QLIST_HEAD(finished_head, ioreq) finished;
119     QLIST_HEAD(freelist_head, ioreq) freelist;
120     int                 requests_total;
121     int                 requests_inflight;
122     int                 requests_finished;
123
124     /* Persistent grants extension */
125     gboolean            feature_discard;
126     gboolean            feature_persistent;
127     GTree               *persistent_gnts;
128     GSList              *persistent_regions;
129     unsigned int        persistent_gnt_count;
130     unsigned int        max_grants;
131
132     /* qemu block driver */
133     DriveInfo           *dinfo;
134     BlockBackend        *blk;
135     QEMUBH              *bh;
136 };
137
138 /* ------------------------------------------------------------- */
139
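/* Scrub all per-request state so an ioreq can safely be reused from the freelist. */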
140 static void ioreq_reset(struct ioreq *ioreq)
141 {
142     memset(&ioreq->req, 0, sizeof(ioreq->req));
143     ioreq->status = 0;
144     ioreq->start = 0;
145     ioreq->presync = 0;
146     ioreq->mapped = 0;
147
148     memset(ioreq->domids, 0, sizeof(ioreq->domids));
149     memset(ioreq->refs, 0, sizeof(ioreq->refs));
150     ioreq->prot = 0;
151     memset(ioreq->page, 0, sizeof(ioreq->page));
152     ioreq->pages = NULL;
153
154     ioreq->aio_inflight = 0;
155     ioreq->aio_errors = 0;
156
157     ioreq->blkdev = NULL;
158     memset(&ioreq->list, 0, sizeof(ioreq->list));
159     memset(&ioreq->acct, 0, sizeof(ioreq->acct));
160
161     qemu_iovec_reset(&ioreq->v);
162 }
163
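/* GTree key comparison; grant references are stored as GUINT-encoded pointers. */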
164 static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
165 {
166     uint ua = GPOINTER_TO_UINT(a);
167     uint ub = GPOINTER_TO_UINT(b);
168     return (ua > ub) - (ua < ub);
169 }
170
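/* Tree value destructor (non-batch case): unmap a single persistent grant and free it. */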
171 static void destroy_grant(gpointer pgnt)
172 {
173     PersistentGrant *grant = pgnt;
174     XenGnttab gnt = grant->blkdev->xendev.gnttabdev;
175
176     if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
177         xen_be_printf(&grant->blkdev->xendev, 0,
178                       "xc_gnttab_munmap failed: %s\n",
179                       strerror(errno));
180     }
181     grant->blkdev->persistent_gnt_count--;
182     xen_be_printf(&grant->blkdev->xendev, 3,
183                   "unmapped grant %p\n", grant->page);
184     g_free(grant);
185 }
186
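/* g_slist_foreach() callback: unmap one batch-mapped region of persistently granted pages. */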
187 static void remove_persistent_region(gpointer data, gpointer dev)
188 {
189     PersistentRegion *region = data;
190     struct XenBlkDev *blkdev = dev;
191     XenGnttab gnt = blkdev->xendev.gnttabdev;
192
193     if (xc_gnttab_munmap(gnt, region->addr, region->num) != 0) {
194         xen_be_printf(&blkdev->xendev, 0,
195                       "xc_gnttab_munmap region %p failed: %s\n",
196                       region->addr, strerror(errno));
197     }
198     xen_be_printf(&blkdev->xendev, 3,
199                   "unmapped grant region %p with %d pages\n",
200                   region->addr, region->num);
201     g_free(region);
202 }
203
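/* Take an ioreq from the freelist, or allocate a new one while under max_requests;
 * returns NULL once the limit is reached. */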
204 static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
205 {
206     struct ioreq *ioreq = NULL;
207
208     if (QLIST_EMPTY(&blkdev->freelist)) {
209         if (blkdev->requests_total >= max_requests) {
210             goto out;
211         }
212         /* allocate new struct */
213         ioreq = g_malloc0(sizeof(*ioreq));
214         ioreq->blkdev = blkdev;
215         blkdev->requests_total++;
216         qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
217     } else {
218         /* get one from freelist */
219         ioreq = QLIST_FIRST(&blkdev->freelist);
220         QLIST_REMOVE(ioreq, list);
221     }
222     QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
223     blkdev->requests_inflight++;
224
225 out:
226     return ioreq;
227 }
228
229 static void ioreq_finish(struct ioreq *ioreq)
230 {
231     struct XenBlkDev *blkdev = ioreq->blkdev;
232
233     QLIST_REMOVE(ioreq, list);
234     QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
235     blkdev->requests_inflight--;
236     blkdev->requests_finished++;
237 }
238
239 static void ioreq_release(struct ioreq *ioreq, bool finish)
240 {
241     struct XenBlkDev *blkdev = ioreq->blkdev;
242
243     QLIST_REMOVE(ioreq, list);
244     ioreq_reset(ioreq);
245     ioreq->blkdev = blkdev;
246     QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
247     if (finish) {
248         blkdev->requests_finished--;
249     } else {
250         blkdev->requests_inflight--;
251     }
252 }
253
254 /*
255  * translate request into iovec + start offset
256  * do sanity checks along the way
257  */
258 static int ioreq_parse(struct ioreq *ioreq)
259 {
260     struct XenBlkDev *blkdev = ioreq->blkdev;
261     uintptr_t mem;
262     size_t len;
263     int i;
264
265     xen_be_printf(&blkdev->xendev, 3,
266                   "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
267                   ioreq->req.operation, ioreq->req.nr_segments,
268                   ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
269     switch (ioreq->req.operation) {
270     case BLKIF_OP_READ:
271         ioreq->prot = PROT_WRITE; /* to memory */
272         break;
273     case BLKIF_OP_FLUSH_DISKCACHE:
274         ioreq->presync = 1;
275         if (!ioreq->req.nr_segments) {
276             return 0;
277         }
278         /* fall through */
279     case BLKIF_OP_WRITE:
280         ioreq->prot = PROT_READ; /* from memory */
281         break;
282     case BLKIF_OP_DISCARD:
283         return 0;
284     default:
285         xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
286                       ioreq->req.operation);
287         goto err;
288     }
289
290     if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
291         xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
292         goto err;
293     }
294
295     ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
296     for (i = 0; i < ioreq->req.nr_segments; i++) {
297         if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
298             xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
299             goto err;
300         }
301         if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
302             xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
303             goto err;
304         }
305         if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
306             xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
307             goto err;
308         }
309
310         ioreq->domids[i] = blkdev->xendev.dom;
311         ioreq->refs[i]   = ioreq->req.seg[i].gref;
312
313         mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
314         len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
315         qemu_iovec_add(&ioreq->v, (void*)mem, len);
316     }
317     if (ioreq->start + ioreq->v.size > blkdev->file_size) {
318         xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
319         goto err;
320     }
321     return 0;
322
323 err:
324     ioreq->status = BLKIF_RSP_ERROR;
325     return -1;
326 }
327
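/* Unmap the num_unmap grants that ioreq_map() established and did not hand over
 * to the persistent grant cache. */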
328 static void ioreq_unmap(struct ioreq *ioreq)
329 {
330     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
331     int i;
332
333     if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
334         return;
335     }
336     if (batch_maps) {
337         if (!ioreq->pages) {
338             return;
339         }
340         if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
341             xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
342                           strerror(errno));
343         }
344         ioreq->blkdev->cnt_map -= ioreq->num_unmap;
345         ioreq->pages = NULL;
346     } else {
347         for (i = 0; i < ioreq->num_unmap; i++) {
348             if (!ioreq->page[i]) {
349                 continue;
350             }
351             if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
352                 xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
353                               strerror(errno));
354             }
355             ioreq->blkdev->cnt_map--;
356             ioreq->page[i] = NULL;
357         }
358     }
359     ioreq->mapped = 0;
360 }
361
362 static int ioreq_map(struct ioreq *ioreq)
363 {
364     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
365     uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
366     uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
367     void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
368     int i, j, new_maps = 0;
369     PersistentGrant *grant;
370     PersistentRegion *region;
371     /* domids and refs variables will contain the information necessary
372      * to map the grants that are needed to fulfill this request.
373      *
374      * After mapping the needed grants, the page array will contain the
375      * memory address of each granted page in the order specified in ioreq
376      * (disregarding if it's a persistent grant or not).
377      */
378
379     if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
380         return 0;
381     }
382     if (ioreq->blkdev->feature_persistent) {
383         for (i = 0; i < ioreq->v.niov; i++) {
384             grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
385                                     GUINT_TO_POINTER(ioreq->refs[i]));
386
387             if (grant != NULL) {
388                 page[i] = grant->page;
389                 xen_be_printf(&ioreq->blkdev->xendev, 3,
390                               "using persistent-grant %" PRIu32 "\n",
391                               ioreq->refs[i]);
392             } else {
393                 /* Add the grant to the list of grants that
394                  * should be mapped
395                  */
396                 domids[new_maps] = ioreq->domids[i];
397                 refs[new_maps] = ioreq->refs[i];
398                 page[i] = NULL;
399                 new_maps++;
400             }
401         }
402         /* Set the protection to RW, since grants may be reused later
403          * with a different protection than the one needed for this request
404          */
405         ioreq->prot = PROT_WRITE | PROT_READ;
406     } else {
407         /* All grants in the request should be mapped */
408         memcpy(refs, ioreq->refs, sizeof(refs));
409         memcpy(domids, ioreq->domids, sizeof(domids));
410         memset(page, 0, sizeof(page));
411         new_maps = ioreq->v.niov;
412     }
413
414     if (batch_maps && new_maps) {
415         ioreq->pages = xc_gnttab_map_grant_refs
416             (gnt, new_maps, domids, refs, ioreq->prot);
417         if (ioreq->pages == NULL) {
418             xen_be_printf(&ioreq->blkdev->xendev, 0,
419                           "can't map %d grant refs (%s, %d maps)\n",
420                           new_maps, strerror(errno), ioreq->blkdev->cnt_map);
421             return -1;
422         }
423         for (i = 0, j = 0; i < ioreq->v.niov; i++) {
424             if (page[i] == NULL) {
425                 page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
426             }
427         }
428         ioreq->blkdev->cnt_map += new_maps;
429     } else if (new_maps)  {
430         for (i = 0; i < new_maps; i++) {
431             ioreq->page[i] = xc_gnttab_map_grant_ref
432                 (gnt, domids[i], refs[i], ioreq->prot);
433             if (ioreq->page[i] == NULL) {
434                 xen_be_printf(&ioreq->blkdev->xendev, 0,
435                               "can't map grant ref %d (%s, %d maps)\n",
436                               refs[i], strerror(errno), ioreq->blkdev->cnt_map);
437                 ioreq->mapped = 1;
438                 ioreq_unmap(ioreq);
439                 return -1;
440             }
441             ioreq->blkdev->cnt_map++;
442         }
443         for (i = 0, j = 0; i < ioreq->v.niov; i++) {
444             if (page[i] == NULL) {
445                 page[i] = ioreq->page[j++];
446             }
447         }
448     }
449     if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
450         (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
451         ioreq->blkdev->max_grants))) {
452         /*
453          * If we are using persistent grants and batch mappings, only
454          * add the new maps to the list of persistent grants if the whole
455          * area can be persistently mapped.
456          */
457         if (batch_maps) {
458             region = g_malloc0(sizeof(*region));
459             region->addr = ioreq->pages;
460             region->num = new_maps;
461             ioreq->blkdev->persistent_regions = g_slist_append(
462                                             ioreq->blkdev->persistent_regions,
463                                             region);
464         }
465         while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
466               && new_maps) {
467             /* Go through the list of newly mapped grants and add as many
468              * as possible to the list of persistently mapped grants.
469              *
470              * Since we start at the end of ioreq->page(s), we only need
471              * to decrease new_maps to prevent these granted pages from
472              * being unmapped in ioreq_unmap.
473              */
474             grant = g_malloc0(sizeof(*grant));
475             new_maps--;
476             if (batch_maps) {
477                 grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
478             } else {
479                 grant->page = ioreq->page[new_maps];
480             }
481             grant->blkdev = ioreq->blkdev;
482             xen_be_printf(&ioreq->blkdev->xendev, 3,
483                           "adding grant %" PRIu32 " page: %p\n",
484                           refs[new_maps], grant->page);
485             g_tree_insert(ioreq->blkdev->persistent_gnts,
486                           GUINT_TO_POINTER(refs[new_maps]),
487                           grant);
488             ioreq->blkdev->persistent_gnt_count++;
489         }
490         assert(!batch_maps || new_maps == 0);
491     }
492     for (i = 0; i < ioreq->v.niov; i++) {
493         ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
494     }
495     ioreq->mapped = 1;
496     ioreq->num_unmap = new_maps;
497     return 0;
498 }
499
500 static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
501
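/*
 * AIO completion callback.  When the last outstanding operation of an ioreq
 * finishes, unmap its grants, account the request and schedule the bottom
 * half so the response gets pushed onto the ring.
 */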
502 static void qemu_aio_complete(void *opaque, int ret)
503 {
504     struct ioreq *ioreq = opaque;
505
506     if (ret != 0) {
507         xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
508                       ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
509         ioreq->aio_errors++;
510     }
511
512     ioreq->aio_inflight--;
513     if (ioreq->presync) {
514         ioreq->presync = 0;
515         ioreq_runio_qemu_aio(ioreq);
516         return;
517     }
518     if (ioreq->aio_inflight > 0) {
519         return;
520     }
521
522     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
523     ioreq_unmap(ioreq);
524     ioreq_finish(ioreq);
525     switch (ioreq->req.operation) {
526     case BLKIF_OP_WRITE:
527     case BLKIF_OP_FLUSH_DISKCACHE:
528         if (!ioreq->req.nr_segments) {
529             break;
530         }
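        /* fall through */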
531     case BLKIF_OP_READ:
532         if (ioreq->status == BLKIF_RSP_OKAY) {
533             block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
534         } else {
535             block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
536         }
537         break;
538     case BLKIF_OP_DISCARD:
539     default:
540         break;
541     }
542     qemu_bh_schedule(ioreq->blkdev->bh);
543 }
544
545 static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
546 {
547     struct XenBlkDev *blkdev = ioreq->blkdev;
548
549     if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
550         goto err_no_map;
551     }
552
553     ioreq->aio_inflight++;
554     if (ioreq->presync) {
555         blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
556         return 0;
557     }
558
559     switch (ioreq->req.operation) {
560     case BLKIF_OP_READ:
561         block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
562                          ioreq->v.size, BLOCK_ACCT_READ);
563         ioreq->aio_inflight++;
564         blk_aio_readv(blkdev->blk, ioreq->start / BLOCK_SIZE,
565                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
566                       qemu_aio_complete, ioreq);
567         break;
568     case BLKIF_OP_WRITE:
569     case BLKIF_OP_FLUSH_DISKCACHE:
570         if (!ioreq->req.nr_segments) {
571             break;
572         }
573
574         block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
575                          ioreq->v.size,
576                          ioreq->req.operation == BLKIF_OP_WRITE ?
577                          BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
578         ioreq->aio_inflight++;
579         blk_aio_writev(blkdev->blk, ioreq->start / BLOCK_SIZE,
580                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
581                        qemu_aio_complete, ioreq);
582         break;
583     case BLKIF_OP_DISCARD:
584     {
585         struct blkif_request_discard *discard_req = (void *)&ioreq->req;
586         ioreq->aio_inflight++;
587         blk_aio_discard(blkdev->blk,
588                         discard_req->sector_number, discard_req->nr_sectors,
589                         qemu_aio_complete, ioreq);
590         break;
591     }
592     default:
593         /* unknown operation (shouldn't happen -- parse catches this) */
594         goto err;
595     }
596
597     qemu_aio_complete(ioreq, 0);
598
599     return 0;
600
601 err:
602     ioreq_unmap(ioreq);
603 err_no_map:
604     ioreq_finish(ioreq);
605     ioreq->status = BLKIF_RSP_ERROR;
606     return -1;
607 }
608
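/* Put the response for one finished ioreq on the ring; returns nonzero if the
 * frontend must be notified. */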
609 static int blk_send_response_one(struct ioreq *ioreq)
610 {
611     struct XenBlkDev  *blkdev = ioreq->blkdev;
612     int               send_notify   = 0;
613     int               have_requests = 0;
614     blkif_response_t  resp;
615     void              *dst;
616
617     resp.id        = ioreq->req.id;
618     resp.operation = ioreq->req.operation;
619     resp.status    = ioreq->status;
620
621     /* Place on the response ring for the relevant domain. */
622     switch (blkdev->protocol) {
623     case BLKIF_PROTOCOL_NATIVE:
624         dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
625         break;
626     case BLKIF_PROTOCOL_X86_32:
627         dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
628                                 blkdev->rings.x86_32_part.rsp_prod_pvt);
629         break;
630     case BLKIF_PROTOCOL_X86_64:
631         dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
632                                 blkdev->rings.x86_64_part.rsp_prod_pvt);
633         break;
634     default:
635         dst = NULL;
636         return 0;
637     }
638     memcpy(dst, &resp, sizeof(resp));
639     blkdev->rings.common.rsp_prod_pvt++;
640
641     RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
642     if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
643         /*
644          * Tail check for pending requests. Allows frontend to avoid
645          * notifications if requests are already in flight (lower
646          * overheads and promotes batching).
647          */
648         RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
649     } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
650         have_requests = 1;
651     }
652
653     if (have_requests) {
654         blkdev->more_work++;
655     }
656     return send_notify;
657 }
658
659 /* walk finished list, send outstanding responses, free requests */
660 static void blk_send_response_all(struct XenBlkDev *blkdev)
661 {
662     struct ioreq *ioreq;
663     int send_notify = 0;
664
665     while (!QLIST_EMPTY(&blkdev->finished)) {
666         ioreq = QLIST_FIRST(&blkdev->finished);
667         send_notify += blk_send_response_one(ioreq);
668         ioreq_release(ioreq, true);
669     }
670     if (send_notify) {
671         xen_be_send_notify(&blkdev->xendev);
672     }
673 }
674
675 static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
676 {
677     switch (blkdev->protocol) {
678     case BLKIF_PROTOCOL_NATIVE:
679         memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
680                sizeof(ioreq->req));
681         break;
682     case BLKIF_PROTOCOL_X86_32:
683         blkif_get_x86_32_req(&ioreq->req,
684                              RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
685         break;
686     case BLKIF_PROTOCOL_X86_64:
687         blkif_get_x86_64_req(&ioreq->req,
688                              RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
689         break;
690     }
691     return 0;
692 }
693
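/* Bottom-half worker: flush finished responses, then consume new requests from
 * the ring and submit them as AIO. */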
694 static void blk_handle_requests(struct XenBlkDev *blkdev)
695 {
696     RING_IDX rc, rp;
697     struct ioreq *ioreq;
698
699     blkdev->more_work = 0;
700
701     rc = blkdev->rings.common.req_cons;
702     rp = blkdev->rings.common.sring->req_prod;
703     xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
704
705     blk_send_response_all(blkdev);
706     while (rc != rp) {
707         /* pull request from ring */
708         if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
709             break;
710         }
711         ioreq = ioreq_start(blkdev);
712         if (ioreq == NULL) {
713             blkdev->more_work++;
714             break;
715         }
716         blk_get_request(blkdev, ioreq, rc);
717         blkdev->rings.common.req_cons = ++rc;
718
719         /* parse them */
720         if (ioreq_parse(ioreq) != 0) {
721
722             switch (ioreq->req.operation) {
723             case BLKIF_OP_READ:
724                 block_acct_invalid(blk_get_stats(blkdev->blk),
725                                    BLOCK_ACCT_READ);
726                 break;
727             case BLKIF_OP_WRITE:
728                 block_acct_invalid(blk_get_stats(blkdev->blk),
729                                    BLOCK_ACCT_WRITE);
730                 break;
731             case BLKIF_OP_FLUSH_DISKCACHE:
732                 block_acct_invalid(blk_get_stats(blkdev->blk),
733                                    BLOCK_ACCT_FLUSH);
734             default:
735                 break;
736             }
737
738             if (blk_send_response_one(ioreq)) {
739                 xen_be_send_notify(&blkdev->xendev);
740             }
741             ioreq_release(ioreq, false);
742             continue;
743         }
744
745         ioreq_runio_qemu_aio(ioreq);
746     }
747
748     if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
749         qemu_bh_schedule(blkdev->bh);
750     }
751 }
752
753 /* ------------------------------------------------------------- */
754
755 static void blk_bh(void *opaque)
756 {
757     struct XenBlkDev *blkdev = opaque;
758     blk_handle_requests(blkdev);
759 }
760
761 /*
762  * We need to account for the grant allocations requiring contiguous
763  * chunks; the worst case number would be
764  *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
765  * but in order to keep things simple just use
766  *     2 * max_req * max_seg.
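 *     (with the defaults max_requests = 32 and BLKIF_MAX_SEGMENTS_PER_REQUEST = 11
 *     this comes to 704 grants).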
767  */
768 #define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
769
770 static void blk_alloc(struct XenDevice *xendev)
771 {
772     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
773
774     QLIST_INIT(&blkdev->inflight);
775     QLIST_INIT(&blkdev->finished);
776     QLIST_INIT(&blkdev->freelist);
777     blkdev->bh = qemu_bh_new(blk_bh, blkdev);
778     if (xen_mode != XEN_EMULATE) {
779         batch_maps = 1;
780     }
781     if (xc_gnttab_set_max_grants(xendev->gnttabdev,
782             MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
783         xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
784                       strerror(errno));
785     }
786 }
787
788 static void blk_parse_discard(struct XenBlkDev *blkdev)
789 {
790     int enable;
791
792     blkdev->feature_discard = true;
793
794     if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
795         blkdev->feature_discard = !!enable;
796     }
797
798     if (blkdev->feature_discard) {
799         xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
800     }
801 }
802
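/*
 * First setup step, driven from xenstore: read the backend configuration
 * keys and advertise the features we support.  The block backend itself is
 * opened later, in blk_connect().
 */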
803 static int blk_init(struct XenDevice *xendev)
804 {
805     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
806     int info = 0;
807     char *directiosafe = NULL;
808
809     /* read xenstore entries */
810     if (blkdev->params == NULL) {
811         char *h = NULL;
812         blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
813         if (blkdev->params != NULL) {
814             h = strchr(blkdev->params, ':');
815         }
816         if (h != NULL) {
817             blkdev->fileproto = blkdev->params;
818             blkdev->filename  = h+1;
819             *h = 0;
820         } else {
821             blkdev->fileproto = "<unset>";
822             blkdev->filename  = blkdev->params;
823         }
824     }
825     if (!strcmp("aio", blkdev->fileproto)) {
826         blkdev->fileproto = "raw";
827     }
828     if (blkdev->mode == NULL) {
829         blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
830     }
831     if (blkdev->type == NULL) {
832         blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
833     }
834     if (blkdev->dev == NULL) {
835         blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
836     }
837     if (blkdev->devtype == NULL) {
838         blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
839     }
840     directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
841     blkdev->directiosafe = (directiosafe && atoi(directiosafe));
842
843     /* do we have all we need? */
844     if (blkdev->params == NULL ||
845         blkdev->mode == NULL   ||
846         blkdev->type == NULL   ||
847         blkdev->dev == NULL) {
848         goto out_error;
849     }
850
851     /* read-only ? */
852     if (strcmp(blkdev->mode, "w")) {
853         info  |= VDISK_READONLY;
854     }
855
856     /* cdrom ? */
857     if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
858         info  |= VDISK_CDROM;
859     }
860
861     blkdev->file_blk  = BLOCK_SIZE;
862
863     /* fill info
864      * blk_connect supplies sector-size and sectors
865      */
866     xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
867     xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
868     xenstore_write_be_int(&blkdev->xendev, "info", info);
869
870     blk_parse_discard(blkdev);
871
872     g_free(directiosafe);
873     return 0;
874
875 out_error:
876     g_free(blkdev->params);
877     blkdev->params = NULL;
878     g_free(blkdev->mode);
879     blkdev->mode = NULL;
880     g_free(blkdev->type);
881     blkdev->type = NULL;
882     g_free(blkdev->dev);
883     blkdev->dev = NULL;
884     g_free(blkdev->devtype);
885     blkdev->devtype = NULL;
886     g_free(directiosafe);
887     blkdev->directiosafe = false;
888     return -1;
889 }
890
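/*
 * Second setup step: open (or look up) the BlockBackend, publish sector-size
 * and sectors to xenstore, map the shared ring and bind the event channel.
 */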
891 static int blk_connect(struct XenDevice *xendev)
892 {
893     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
894     int pers, index, qflags;
895     bool readonly = true;
896
897     /* choose open flags */
898     if (blkdev->directiosafe) {
899         qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
900     } else {
901         qflags = BDRV_O_CACHE_WB;
902     }
903     if (strcmp(blkdev->mode, "w") == 0) {
904         qflags |= BDRV_O_RDWR;
905         readonly = false;
906     }
907     if (blkdev->feature_discard) {
908         qflags |= BDRV_O_UNMAP;
909     }
910
911     /* init qemu block driver */
912     index = (blkdev->xendev.dev - 202 * 256) / 16;
913     blkdev->dinfo = drive_get(IF_XEN, 0, index);
914     if (!blkdev->dinfo) {
915         Error *local_err = NULL;
916         QDict *options = NULL;
917
918         if (strcmp(blkdev->fileproto, "<unset>")) {
919             options = qdict_new();
920             qdict_put(options, "driver", qstring_from_str(blkdev->fileproto));
921         }
922
923         /* setup via xenbus -> create new block driver instance */
924         xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
925         blkdev->blk = blk_new_open(blkdev->dev, blkdev->filename, NULL, options,
926                                    qflags, &local_err);
927         if (!blkdev->blk) {
928             xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
929                           error_get_pretty(local_err));
930             error_free(local_err);
931             return -1;
932         }
933     } else {
934         /* setup via qemu cmdline -> already setup for us */
935         xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
936         blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
937         if (blk_is_read_only(blkdev->blk) && !readonly) {
938             xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
939             blkdev->blk = NULL;
940             return -1;
941         }
942         /* blkdev->blk was not created by us; take a reference
943          * so we can blk_unref() unconditionally */
944         blk_ref(blkdev->blk);
945     }
946     blk_attach_dev_nofail(blkdev->blk, blkdev);
947     blkdev->file_size = blk_getlength(blkdev->blk);
948     if (blkdev->file_size < 0) {
949         BlockDriverState *bs = blk_bs(blkdev->blk);
950         const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
951         xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
952                       (int)blkdev->file_size, strerror(-blkdev->file_size),
953                       drv_name ?: "-");
954         blkdev->file_size = 0;
955     }
956
957     xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
958                   " size %" PRId64 " (%" PRId64 " MB)\n",
959                   blkdev->type, blkdev->fileproto, blkdev->filename,
960                   blkdev->file_size, blkdev->file_size >> 20);
961
962     /* Fill in the sector size and the number of sectors */
963     xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
964     xenstore_write_be_int64(&blkdev->xendev, "sectors",
965                             blkdev->file_size / blkdev->file_blk);
966
967     if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
968         return -1;
969     }
970     if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
971                              &blkdev->xendev.remote_port) == -1) {
972         return -1;
973     }
974     if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
975         blkdev->feature_persistent = FALSE;
976     } else {
977         blkdev->feature_persistent = !!pers;
978     }
979
980     blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
981     if (blkdev->xendev.protocol) {
982         if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
983             blkdev->protocol = BLKIF_PROTOCOL_X86_32;
984         }
985         if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
986             blkdev->protocol = BLKIF_PROTOCOL_X86_64;
987         }
988     }
989
990     blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
991                                             blkdev->xendev.dom,
992                                             blkdev->ring_ref,
993                                             PROT_READ | PROT_WRITE);
994     if (!blkdev->sring) {
995         return -1;
996     }
997     blkdev->cnt_map++;
998
999     switch (blkdev->protocol) {
1000     case BLKIF_PROTOCOL_NATIVE:
1001     {
1002         blkif_sring_t *sring_native = blkdev->sring;
1003         BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
1004         break;
1005     }
1006     case BLKIF_PROTOCOL_X86_32:
1007     {
1008         blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
1009
1010         BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
1011         break;
1012     }
1013     case BLKIF_PROTOCOL_X86_64:
1014     {
1015         blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
1016
1017         BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
1018         break;
1019     }
1020     }
1021
1022     if (blkdev->feature_persistent) {
1023         /* Init persistent grants */
1024         blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
1025         blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
1026                                              NULL, NULL,
1027                                              batch_maps ?
1028                                              (GDestroyNotify)g_free :
1029                                              (GDestroyNotify)destroy_grant);
1030         blkdev->persistent_regions = NULL;
1031         blkdev->persistent_gnt_count = 0;
1032     }
1033
1034     xen_be_bind_evtchn(&blkdev->xendev);
1035
1036     xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
1037                   "remote port %d, local port %d\n",
1038                   blkdev->xendev.protocol, blkdev->ring_ref,
1039                   blkdev->xendev.remote_port, blkdev->xendev.local_port);
1040     return 0;
1041 }
1042
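/* Tear the connection down: release the BlockBackend, unbind the event channel,
 * unmap the shared ring and drop any persistent grants. */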
1043 static void blk_disconnect(struct XenDevice *xendev)
1044 {
1045     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1046
1047     if (blkdev->blk) {
1048         blk_detach_dev(blkdev->blk, blkdev);
1049         blk_unref(blkdev->blk);
1050         blkdev->blk = NULL;
1051     }
1052     xen_be_unbind_evtchn(&blkdev->xendev);
1053
1054     if (blkdev->sring) {
1055         xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
1056         blkdev->cnt_map--;
1057         blkdev->sring = NULL;
1058     }
1059
1060     /*
1061      * Unmap persistent grants before switching to the closed state
1062      * so the frontend can free them.
1063      *
1064      * In the !batch_maps case g_tree_destroy will take care of unmapping
1065      * the grant, but in the batch_maps case we need to iterate over every
1066      * region in persistent_regions and unmap it.
1067      */
1068     if (blkdev->feature_persistent) {
1069         g_tree_destroy(blkdev->persistent_gnts);
1070         assert(batch_maps || blkdev->persistent_gnt_count == 0);
1071         if (batch_maps) {
1072             blkdev->persistent_gnt_count = 0;
1073             g_slist_foreach(blkdev->persistent_regions,
1074                             (GFunc)remove_persistent_region, blkdev);
1075             g_slist_free(blkdev->persistent_regions);
1076         }
1077         blkdev->feature_persistent = false;
1078     }
1079 }
1080
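/* Final teardown: disconnect if still connected, then free the ioreq pool and
 * the xenstore-derived strings. */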
1081 static int blk_free(struct XenDevice *xendev)
1082 {
1083     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1084     struct ioreq *ioreq;
1085
1086     if (blkdev->blk || blkdev->sring) {
1087         blk_disconnect(xendev);
1088     }
1089
1090     while (!QLIST_EMPTY(&blkdev->freelist)) {
1091         ioreq = QLIST_FIRST(&blkdev->freelist);
1092         QLIST_REMOVE(ioreq, list);
1093         qemu_iovec_destroy(&ioreq->v);
1094         g_free(ioreq);
1095     }
1096
1097     g_free(blkdev->params);
1098     g_free(blkdev->mode);
1099     g_free(blkdev->type);
1100     g_free(blkdev->dev);
1101     g_free(blkdev->devtype);
1102     qemu_bh_delete(blkdev->bh);
1103     return 0;
1104 }
1105
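/* Event-channel notification from the frontend: defer the work to the bottom half. */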
1106 static void blk_event(struct XenDevice *xendev)
1107 {
1108     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1109
1110     qemu_bh_schedule(blkdev->bh);
1111 }
1112
1113 struct XenDevOps xen_blkdev_ops = {
1114     .size       = sizeof(struct XenBlkDev),
1115     .flags      = DEVOPS_FLAG_NEED_GNTDEV,
1116     .alloc      = blk_alloc,
1117     .init       = blk_init,
1118     .initialise    = blk_connect,
1119     .disconnect = blk_disconnect,
1120     .event      = blk_event,
1121     .free       = blk_free,
1122 };