[qemu.git] / hw / block / xen_disk.c
1 /*
2  *  xen paravirt block device backend
3  *
4  *  (c) Gerd Hoffmann <[email protected]>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; under version 2 of the License.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  *  GNU General Public License for more details.
14  *
15  *  You should have received a copy of the GNU General Public License along
16  *  with this program; if not, see <http://www.gnu.org/licenses/>.
17  *
18  *  Contributions after 2012-01-13 are licensed under the terms of the
19  *  GNU GPL, version 2 or (at your option) any later version.
20  */
21
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <signal.h>
28 #include <inttypes.h>
29 #include <time.h>
30 #include <fcntl.h>
31 #include <errno.h>
32 #include <sys/ioctl.h>
33 #include <sys/types.h>
34 #include <sys/stat.h>
35 #include <sys/mman.h>
36 #include <sys/uio.h>
37
38 #include "hw/hw.h"
39 #include "hw/xen/xen_backend.h"
40 #include "xen_blkif.h"
41 #include "sysemu/blockdev.h"
42
43 /* ------------------------------------------------------------- */
44
45 static int batch_maps   = 0;
46
47 static int max_requests = 32;
48
49 /* ------------------------------------------------------------- */
50
51 #define BLOCK_SIZE  512
52 #define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
53
54 struct PersistentGrant {
55     void *page;
56     struct XenBlkDev *blkdev;
57 };
58
59 typedef struct PersistentGrant PersistentGrant;
60
61 struct ioreq {
62     blkif_request_t     req;
63     int16_t             status;
64
65     /* parsed request */
66     off_t               start;
67     QEMUIOVector        v;
68     int                 presync;
69     int                 postsync;
70     uint8_t             mapped;
71
72     /* grant mapping */
73     uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
74     uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
75     int                 prot;
76     void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
77     void                *pages;
78     int                 num_unmap;
79
80     /* aio status */
81     int                 aio_inflight;
82     int                 aio_errors;
83
84     struct XenBlkDev    *blkdev;
85     QLIST_ENTRY(ioreq)   list;
86     BlockAcctCookie     acct;
87 };
88
89 struct XenBlkDev {
90     struct XenDevice    xendev;  /* must be first */
91     char                *params;
92     char                *mode;
93     char                *type;
94     char                *dev;
95     char                *devtype;
96     bool                directiosafe;
97     const char          *fileproto;
98     const char          *filename;
99     int                 ring_ref;
100     void                *sring;
101     int64_t             file_blk;
102     int64_t             file_size;
103     int                 protocol;
104     blkif_back_rings_t  rings;
105     int                 more_work;
106     int                 cnt_map;
107
108     /* request lists */
109     QLIST_HEAD(inflight_head, ioreq) inflight;
110     QLIST_HEAD(finished_head, ioreq) finished;
111     QLIST_HEAD(freelist_head, ioreq) freelist;
112     int                 requests_total;
113     int                 requests_inflight;
114     int                 requests_finished;
115
116     /* Persistent grants extension */
117     gboolean            feature_persistent;
118     GTree               *persistent_gnts;
119     unsigned int        persistent_gnt_count;
120     unsigned int        max_grants;
121
122     /* qemu block driver */
123     DriveInfo           *dinfo;
124     BlockDriverState    *bs;
125     QEMUBH              *bh;
126 };
127
128 /* ------------------------------------------------------------- */
129
130 static void ioreq_reset(struct ioreq *ioreq)
131 {
132     memset(&ioreq->req, 0, sizeof(ioreq->req));
133     ioreq->status = 0;
134     ioreq->start = 0;
135     ioreq->presync = 0;
136     ioreq->postsync = 0;
137     ioreq->mapped = 0;
138
139     memset(ioreq->domids, 0, sizeof(ioreq->domids));
140     memset(ioreq->refs, 0, sizeof(ioreq->refs));
141     ioreq->prot = 0;
142     memset(ioreq->page, 0, sizeof(ioreq->page));
143     ioreq->pages = NULL;
144
145     ioreq->aio_inflight = 0;
146     ioreq->aio_errors = 0;
147
148     ioreq->blkdev = NULL;
149     memset(&ioreq->list, 0, sizeof(ioreq->list));
150     memset(&ioreq->acct, 0, sizeof(ioreq->acct));
151
152     qemu_iovec_reset(&ioreq->v);
153 }
154
155 static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
156 {
157     uint ua = GPOINTER_TO_UINT(a);
158     uint ub = GPOINTER_TO_UINT(b);
159     return (ua > ub) - (ua < ub);
160 }
161
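/*
 * GDestroyNotify callback for the persistent grant tree: unmaps the
 * cached page and drops it from the per-device accounting.
 */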
162 static void destroy_grant(gpointer pgnt)
163 {
164     PersistentGrant *grant = pgnt;
165     XenGnttab gnt = grant->blkdev->xendev.gnttabdev;
166
167     if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
168         xen_be_printf(&grant->blkdev->xendev, 0,
169                       "xc_gnttab_munmap failed: %s\n",
170                       strerror(errno));
171     }
172     grant->blkdev->persistent_gnt_count--;
173     xen_be_printf(&grant->blkdev->xendev, 3,
174                   "unmapped grant %p\n", grant->page);
175     g_free(grant);
176 }
177
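/*
 * Get a request slot: reuse one from the freelist, or allocate a new
 * one as long as fewer than max_requests exist.  Returns NULL when the
 * pool is exhausted; the caller then flags more_work and retries later.
 */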
178 static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
179 {
180     struct ioreq *ioreq = NULL;
181
182     if (QLIST_EMPTY(&blkdev->freelist)) {
183         if (blkdev->requests_total >= max_requests) {
184             goto out;
185         }
186         /* allocate new struct */
187         ioreq = g_malloc0(sizeof(*ioreq));
188         ioreq->blkdev = blkdev;
189         blkdev->requests_total++;
190         qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
191     } else {
192         /* get one from freelist */
193         ioreq = QLIST_FIRST(&blkdev->freelist);
194         QLIST_REMOVE(ioreq, list);
195     }
196     QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
197     blkdev->requests_inflight++;
198
199 out:
200     return ioreq;
201 }
202
203 static void ioreq_finish(struct ioreq *ioreq)
204 {
205     struct XenBlkDev *blkdev = ioreq->blkdev;
206
207     QLIST_REMOVE(ioreq, list);
208     QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
209     blkdev->requests_inflight--;
210     blkdev->requests_finished++;
211 }
212
213 static void ioreq_release(struct ioreq *ioreq, bool finish)
214 {
215     struct XenBlkDev *blkdev = ioreq->blkdev;
216
217     QLIST_REMOVE(ioreq, list);
218     ioreq_reset(ioreq);
219     ioreq->blkdev = blkdev;
220     QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
221     if (finish) {
222         blkdev->requests_finished--;
223     } else {
224         blkdev->requests_inflight--;
225     }
226 }
227
228 /*
229  * translate request into iovec + start offset
230  * do sanity checks along the way
231  */
232 static int ioreq_parse(struct ioreq *ioreq)
233 {
234     struct XenBlkDev *blkdev = ioreq->blkdev;
235     uintptr_t mem;
236     size_t len;
237     int i;
238
239     xen_be_printf(&blkdev->xendev, 3,
240                   "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
241                   ioreq->req.operation, ioreq->req.nr_segments,
242                   ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
243     switch (ioreq->req.operation) {
244     case BLKIF_OP_READ:
245         ioreq->prot = PROT_WRITE; /* to memory */
246         break;
247     case BLKIF_OP_FLUSH_DISKCACHE:
248         ioreq->presync = 1;
249         if (!ioreq->req.nr_segments) {
250             return 0;
251         }
252         /* fall through */
253     case BLKIF_OP_WRITE:
254         ioreq->prot = PROT_READ; /* from memory */
255         break;
256     default:
257         xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
258                       ioreq->req.operation);
259         goto err;
260     }
261
262     if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
263         xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
264         goto err;
265     }
266
267     ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
268     for (i = 0; i < ioreq->req.nr_segments; i++) {
269         if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
270             xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
271             goto err;
272         }
273         if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
274             xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
275             goto err;
276         }
277         if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
278             xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
279             goto err;
280         }
281
282         ioreq->domids[i] = blkdev->xendev.dom;
283         ioreq->refs[i]   = ioreq->req.seg[i].gref;
284
285         mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
286         len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
287         qemu_iovec_add(&ioreq->v, (void*)mem, len);
288     }
289     if (ioreq->start + ioreq->v.size > blkdev->file_size) {
290         xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
291         goto err;
292     }
293     return 0;
294
295 err:
296     ioreq->status = BLKIF_RSP_ERROR;
297     return -1;
298 }
299
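/*
 * Tear down the grant mappings of a completed request: one munmap for
 * the whole batch, or one per page otherwise.  Pages promoted to the
 * persistent cache are skipped because ioreq_map already reduced
 * num_unmap accordingly.
 */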
300 static void ioreq_unmap(struct ioreq *ioreq)
301 {
302     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
303     int i;
304
305     if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
306         return;
307     }
308     if (batch_maps) {
309         if (!ioreq->pages) {
310             return;
311         }
312         if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
313             xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
314                           strerror(errno));
315         }
316         ioreq->blkdev->cnt_map -= ioreq->num_unmap;
317         ioreq->pages = NULL;
318     } else {
319         for (i = 0; i < ioreq->num_unmap; i++) {
320             if (!ioreq->page[i]) {
321                 continue;
322             }
323             if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
324                 xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
325                               strerror(errno));
326             }
327             ioreq->blkdev->cnt_map--;
328             ioreq->page[i] = NULL;
329         }
330     }
331     ioreq->mapped = 0;
332 }
333
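/*
 * Map the grants of a request: segments already in the persistent
 * cache reuse their mapping, the rest are mapped (batched or one by
 * one) and, while persistent_gnt_count stays below max_grants, newly
 * mapped pages are promoted into the cache.
 */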
334 static int ioreq_map(struct ioreq *ioreq)
335 {
336     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
337     uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
338     uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
339     void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
340     int i, j, new_maps = 0;
341     PersistentGrant *grant;
342     /* The domids and refs arrays will contain the information necessary
343      * to map the grants that are needed to fulfill this request.
344      *
345      * After mapping the needed grants, the page array will contain the
346      * memory address of each granted page in the order specified in ioreq
347      * (disregarding if it's a persistent grant or not).
348      */
349
350     if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
351         return 0;
352     }
353     if (ioreq->blkdev->feature_persistent) {
354         for (i = 0; i < ioreq->v.niov; i++) {
355             grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
356                                     GUINT_TO_POINTER(ioreq->refs[i]));
357
358             if (grant != NULL) {
359                 page[i] = grant->page;
360                 xen_be_printf(&ioreq->blkdev->xendev, 3,
361                               "using persistent-grant %" PRIu32 "\n",
362                               ioreq->refs[i]);
363             } else {
364                     /* Add the grant to the list of grants that
365                      * should be mapped
366                      */
367                     domids[new_maps] = ioreq->domids[i];
368                     refs[new_maps] = ioreq->refs[i];
369                     page[i] = NULL;
370                     new_maps++;
371             }
372         }
373         /* Set the protection to RW, since grants may be reused later
374          * with a different protection than the one needed for this request
375          */
376         ioreq->prot = PROT_WRITE | PROT_READ;
377     } else {
378         /* All grants in the request should be mapped */
379         memcpy(refs, ioreq->refs, sizeof(refs));
380         memcpy(domids, ioreq->domids, sizeof(domids));
381         memset(page, 0, sizeof(page));
382         new_maps = ioreq->v.niov;
383     }
384
385     if (batch_maps && new_maps) {
386         ioreq->pages = xc_gnttab_map_grant_refs
387             (gnt, new_maps, domids, refs, ioreq->prot);
388         if (ioreq->pages == NULL) {
389             xen_be_printf(&ioreq->blkdev->xendev, 0,
390                           "can't map %d grant refs (%s, %d maps)\n",
391                           new_maps, strerror(errno), ioreq->blkdev->cnt_map);
392             return -1;
393         }
394         for (i = 0, j = 0; i < ioreq->v.niov; i++) {
395             if (page[i] == NULL) {
396                 page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
397             }
398         }
399         ioreq->blkdev->cnt_map += new_maps;
400     } else if (new_maps)  {
401         for (i = 0; i < new_maps; i++) {
402             ioreq->page[i] = xc_gnttab_map_grant_ref
403                 (gnt, domids[i], refs[i], ioreq->prot);
404             if (ioreq->page[i] == NULL) {
405                 xen_be_printf(&ioreq->blkdev->xendev, 0,
406                               "can't map grant ref %d (%s, %d maps)\n",
407                               refs[i], strerror(errno), ioreq->blkdev->cnt_map);
408                 ioreq->mapped = 1;
409                 ioreq_unmap(ioreq);
410                 return -1;
411             }
412             ioreq->blkdev->cnt_map++;
413         }
414         for (i = 0, j = 0; i < ioreq->v.niov; i++) {
415             if (page[i] == NULL) {
416                 page[i] = ioreq->page[j++];
417             }
418         }
419     }
420     if (ioreq->blkdev->feature_persistent) {
421         while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
422               && new_maps) {
423             /* Go through the list of newly mapped grants and add as many
424              * as possible to the list of persistently mapped grants.
425              *
426              * Since we start at the end of ioreq->page(s), we only need
427              * to decrease new_maps to prevent these granted pages from
428              * being unmapped in ioreq_unmap.
429              */
430             grant = g_malloc0(sizeof(*grant));
431             new_maps--;
432             if (batch_maps) {
433                 grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
434             } else {
435                 grant->page = ioreq->page[new_maps];
436             }
437             grant->blkdev = ioreq->blkdev;
438             xen_be_printf(&ioreq->blkdev->xendev, 3,
439                           "adding grant %" PRIu32 " page: %p\n",
440                           refs[new_maps], grant->page);
441             g_tree_insert(ioreq->blkdev->persistent_gnts,
442                           GUINT_TO_POINTER(refs[new_maps]),
443                           grant);
444             ioreq->blkdev->persistent_gnt_count++;
445         }
446     }
447     for (i = 0; i < ioreq->v.niov; i++) {
448         ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
449     }
450     ioreq->mapped = 1;
451     ioreq->num_unmap = new_maps;
452     return 0;
453 }
454
455 static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
456
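/*
 * AIO completion callback.  A presync flush completion re-submits the
 * actual I/O; once all in-flight AIO has drained, an optional postsync
 * flush is issued, then the grants are unmapped, the request is moved
 * to the finished list and the bottom half is kicked to send the
 * response.
 */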
457 static void qemu_aio_complete(void *opaque, int ret)
458 {
459     struct ioreq *ioreq = opaque;
460
461     if (ret != 0) {
462         xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
463                       ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
464         ioreq->aio_errors++;
465     }
466
467     ioreq->aio_inflight--;
468     if (ioreq->presync) {
469         ioreq->presync = 0;
470         ioreq_runio_qemu_aio(ioreq);
471         return;
472     }
473     if (ioreq->aio_inflight > 0) {
474         return;
475     }
476     if (ioreq->postsync) {
477         ioreq->postsync = 0;
478         ioreq->aio_inflight++;
479         bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
480         return;
481     }
482
483     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
484     ioreq_unmap(ioreq);
485     ioreq_finish(ioreq);
486     switch (ioreq->req.operation) {
487     case BLKIF_OP_WRITE:
488     case BLKIF_OP_FLUSH_DISKCACHE:
489         if (!ioreq->req.nr_segments) {
490             break;
491         }
492     case BLKIF_OP_READ:
493         bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
494         break;
495     default:
496         break;
497     }
498     qemu_bh_schedule(ioreq->blkdev->bh);
499 }
500
501 static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
502 {
503     struct XenBlkDev *blkdev = ioreq->blkdev;
504
505     if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
506         goto err_no_map;
507     }
508
509     ioreq->aio_inflight++;
510     if (ioreq->presync) {
511         bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
512         return 0;
513     }
514
515     switch (ioreq->req.operation) {
516     case BLKIF_OP_READ:
517         bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
518         ioreq->aio_inflight++;
519         bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
520                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
521                        qemu_aio_complete, ioreq);
522         break;
523     case BLKIF_OP_WRITE:
524     case BLKIF_OP_FLUSH_DISKCACHE:
525         if (!ioreq->req.nr_segments) {
526             break;
527         }
528
529         bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
530         ioreq->aio_inflight++;
531         bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
532                         &ioreq->v, ioreq->v.size / BLOCK_SIZE,
533                         qemu_aio_complete, ioreq);
534         break;
535     default:
536         /* unknown operation (shouldn't happen -- parse catches this) */
537         goto err;
538     }
539
540     qemu_aio_complete(ioreq, 0);
541
542     return 0;
543
544 err:
545     ioreq_unmap(ioreq);
546 err_no_map:
547     ioreq_finish(ioreq);
548     ioreq->status = BLKIF_RSP_ERROR;
549     return -1;
550 }
551
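/*
 * Put one response on the ring (in the guest's ABI layout) and report
 * whether the frontend needs to be notified; also flags more_work if
 * further requests are already pending.
 */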
552 static int blk_send_response_one(struct ioreq *ioreq)
553 {
554     struct XenBlkDev  *blkdev = ioreq->blkdev;
555     int               send_notify   = 0;
556     int               have_requests = 0;
557     blkif_response_t  resp;
558     void              *dst;
559
560     resp.id        = ioreq->req.id;
561     resp.operation = ioreq->req.operation;
562     resp.status    = ioreq->status;
563
564     /* Place on the response ring for the relevant domain. */
565     switch (blkdev->protocol) {
566     case BLKIF_PROTOCOL_NATIVE:
567         dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
568         break;
569     case BLKIF_PROTOCOL_X86_32:
570         dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
571                                 blkdev->rings.x86_32_part.rsp_prod_pvt);
572         break;
573     case BLKIF_PROTOCOL_X86_64:
574         dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
575                                 blkdev->rings.x86_64_part.rsp_prod_pvt);
576         break;
577     default:
578         dst = NULL;
579     }
580     memcpy(dst, &resp, sizeof(resp));
581     blkdev->rings.common.rsp_prod_pvt++;
582
583     RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
584     if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
585         /*
586          * Tail check for pending requests. Allows frontend to avoid
587          * notifications if requests are already in flight (lower
588          * overheads and promotes batching).
589          */
590         RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
591     } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
592         have_requests = 1;
593     }
594
595     if (have_requests) {
596         blkdev->more_work++;
597     }
598     return send_notify;
599 }
600
601 /* walk finished list, send outstanding responses, free requests */
602 static void blk_send_response_all(struct XenBlkDev *blkdev)
603 {
604     struct ioreq *ioreq;
605     int send_notify = 0;
606
607     while (!QLIST_EMPTY(&blkdev->finished)) {
608         ioreq = QLIST_FIRST(&blkdev->finished);
609         send_notify += blk_send_response_one(ioreq);
610         ioreq_release(ioreq, true);
611     }
612     if (send_notify) {
613         xen_be_send_notify(&blkdev->xendev);
614     }
615 }
616
617 static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
618 {
619     switch (blkdev->protocol) {
620     case BLKIF_PROTOCOL_NATIVE:
621         memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
622                sizeof(ioreq->req));
623         break;
624     case BLKIF_PROTOCOL_X86_32:
625         blkif_get_x86_32_req(&ioreq->req,
626                              RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
627         break;
628     case BLKIF_PROTOCOL_X86_64:
629         blkif_get_x86_64_req(&ioreq->req,
630                              RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
631         break;
632     }
633     return 0;
634 }
635
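/*
 * Main request loop, run from the bottom half: push out finished
 * responses, then consume new requests from the shared ring, parse
 * them and submit the resulting AIO.  Reschedules itself while work
 * remains and request slots are free.
 */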
636 static void blk_handle_requests(struct XenBlkDev *blkdev)
637 {
638     RING_IDX rc, rp;
639     struct ioreq *ioreq;
640
641     blkdev->more_work = 0;
642
643     rc = blkdev->rings.common.req_cons;
644     rp = blkdev->rings.common.sring->req_prod;
645     xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
646
647     blk_send_response_all(blkdev);
648     while (rc != rp) {
649         /* pull request from ring */
650         if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
651             break;
652         }
653         ioreq = ioreq_start(blkdev);
654         if (ioreq == NULL) {
655             blkdev->more_work++;
656             break;
657         }
658         blk_get_request(blkdev, ioreq, rc);
659         blkdev->rings.common.req_cons = ++rc;
660
661         /* parse them */
662         if (ioreq_parse(ioreq) != 0) {
663             if (blk_send_response_one(ioreq)) {
664                 xen_be_send_notify(&blkdev->xendev);
665             }
666             ioreq_release(ioreq, false);
667             continue;
668         }
669
670         ioreq_runio_qemu_aio(ioreq);
671     }
672
673     if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
674         qemu_bh_schedule(blkdev->bh);
675     }
676 }
677
678 /* ------------------------------------------------------------- */
679
680 static void blk_bh(void *opaque)
681 {
682     struct XenBlkDev *blkdev = opaque;
683     blk_handle_requests(blkdev);
684 }
685
686 /*
687  * We need to account for the grant allocations requiring contiguous
688  * chunks; the worst case number would be
689  *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
690  * but in order to keep things simple just use
691  *     2 * max_req * max_seg.
692  */
693 #define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
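/*
 * For example, assuming BLKIF_MAX_SEGMENTS_PER_REQUEST is 11 (the
 * classic blkif value) and max_requests is 32, the exact worst case is
 * 32 * 11 + 31 * 10 + 1 = 663 grants, while the simplified bound above
 * reserves 2 * 32 * 11 = 704.
 */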
694
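/*
 * Per-device allocation hook: set up the request lists and the bottom
 * half, enable batched grant mappings outside emulated mode, and raise
 * the gntdev mapping limit to cover the worst case computed above.
 */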
695 static void blk_alloc(struct XenDevice *xendev)
696 {
697     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
698
699     QLIST_INIT(&blkdev->inflight);
700     QLIST_INIT(&blkdev->finished);
701     QLIST_INIT(&blkdev->freelist);
702     blkdev->bh = qemu_bh_new(blk_bh, blkdev);
703     if (xen_mode != XEN_EMULATE) {
704         batch_maps = 1;
705     }
706     if (xc_gnttab_set_max_grants(xendev->gnttabdev,
707             MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
708         xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
709                       strerror(errno));
710     }
711 }
712
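/*
 * First xenstore pass: read the backend nodes (params, mode, type,
 * dev, device-type, direct-io-safe), split params into "proto:filename",
 * and advertise feature-flush-cache, feature-persistent and the info
 * flags.  Sector size and count are published later by blk_connect.
 */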
713 static int blk_init(struct XenDevice *xendev)
714 {
715     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
716     int info = 0;
717     char *directiosafe = NULL;
718
719     /* read xenstore entries */
720     if (blkdev->params == NULL) {
721         char *h = NULL;
722         blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
723         if (blkdev->params != NULL) {
724             h = strchr(blkdev->params, ':');
725         }
726         if (h != NULL) {
727             blkdev->fileproto = blkdev->params;
728             blkdev->filename  = h+1;
729             *h = 0;
730         } else {
731             blkdev->fileproto = "<unset>";
732             blkdev->filename  = blkdev->params;
733         }
734     }
735     if (!strcmp("aio", blkdev->fileproto)) {
736         blkdev->fileproto = "raw";
737     }
738     if (blkdev->mode == NULL) {
739         blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
740     }
741     if (blkdev->type == NULL) {
742         blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
743     }
744     if (blkdev->dev == NULL) {
745         blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
746     }
747     if (blkdev->devtype == NULL) {
748         blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
749     }
750     directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
751     blkdev->directiosafe = (directiosafe && atoi(directiosafe));
752
753     /* do we have all we need? */
754     if (blkdev->params == NULL ||
755         blkdev->mode == NULL   ||
756         blkdev->type == NULL   ||
757         blkdev->dev == NULL) {
758         goto out_error;
759     }
760
761     /* read-only ? */
762     if (strcmp(blkdev->mode, "w")) {
763         info  |= VDISK_READONLY;
764     }
765
766     /* cdrom ? */
767     if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
768         info  |= VDISK_CDROM;
769     }
770
771     blkdev->file_blk  = BLOCK_SIZE;
772
773     /* fill info
774      * blk_connect supplies sector-size and sectors
775      */
776     xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
777     xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
778     xenstore_write_be_int(&blkdev->xendev, "info", info);
779
780     g_free(directiosafe);
781     return 0;
782
783 out_error:
784     g_free(blkdev->params);
785     blkdev->params = NULL;
786     g_free(blkdev->mode);
787     blkdev->mode = NULL;
788     g_free(blkdev->type);
789     blkdev->type = NULL;
790     g_free(blkdev->dev);
791     blkdev->dev = NULL;
792     g_free(blkdev->devtype);
793     blkdev->devtype = NULL;
794     g_free(directiosafe);
795     blkdev->directiosafe = false;
796     return -1;
797 }
798
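/*
 * Second stage: open (or look up) the block device, publish
 * sector-size and sectors, read ring-ref, event-channel and
 * feature-persistent from the frontend, map the shared ring for the
 * negotiated ABI, set up the persistent grant tree and bind the event
 * channel.
 */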
799 static int blk_connect(struct XenDevice *xendev)
800 {
801     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
802     int pers, index, qflags;
803     bool readonly = true;
804
805     /* read-only ? */
806     if (blkdev->directiosafe) {
807         qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
808     } else {
809         qflags = BDRV_O_CACHE_WB;
810     }
811     if (strcmp(blkdev->mode, "w") == 0) {
812         qflags |= BDRV_O_RDWR;
813         readonly = false;
814     }
815
816     /* init qemu block driver */
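        /* xendev.dev encodes major/minor; Xen virtual disks use major 202
         * with 16 minors per disk, so this recovers the disk index. */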
817     index = (blkdev->xendev.dev - 202 * 256) / 16;
818     blkdev->dinfo = drive_get(IF_XEN, 0, index);
819     if (!blkdev->dinfo) {
820         /* setup via xenbus -> create new block driver instance */
821         xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
822         blkdev->bs = bdrv_new(blkdev->dev);
823         if (blkdev->bs) {
824             Error *local_err = NULL;
825             BlockDriver *drv = bdrv_find_whitelisted_format(blkdev->fileproto,
826                                                            readonly);
827             if (bdrv_open(blkdev->bs,
828                           blkdev->filename, NULL, qflags, drv, &local_err) != 0)
829             {
830                 xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
831                               error_get_pretty(local_err));
832                 error_free(local_err);
833                 bdrv_unref(blkdev->bs);
834                 blkdev->bs = NULL;
835             }
836         }
837         if (!blkdev->bs) {
838             return -1;
839         }
840     } else {
841         /* setup via qemu cmdline -> already set up for us */
842         xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
843         blkdev->bs = blkdev->dinfo->bdrv;
844         if (bdrv_is_read_only(blkdev->bs) && !readonly) {
845             xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
846             blkdev->bs = NULL;
847             return -1;
848         }
849         /* blkdev->bs is not created by us, we get a reference
850          * so we can bdrv_unref() unconditionally */
851         bdrv_ref(blkdev->bs);
852     }
853     bdrv_attach_dev_nofail(blkdev->bs, blkdev);
854     blkdev->file_size = bdrv_getlength(blkdev->bs);
855     if (blkdev->file_size < 0) {
856         xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
857                       (int)blkdev->file_size, strerror(-blkdev->file_size),
858                       bdrv_get_format_name(blkdev->bs) ?: "-");
859         blkdev->file_size = 0;
860     }
861
862     xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
863                   " size %" PRId64 " (%" PRId64 " MB)\n",
864                   blkdev->type, blkdev->fileproto, blkdev->filename,
865                   blkdev->file_size, blkdev->file_size >> 20);
866
867     /* Fill in sector size and number of sectors */
868     xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
869     xenstore_write_be_int64(&blkdev->xendev, "sectors",
870                             blkdev->file_size / blkdev->file_blk);
871
872     if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
873         return -1;
874     }
875     if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
876                              &blkdev->xendev.remote_port) == -1) {
877         return -1;
878     }
879     if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
880         blkdev->feature_persistent = FALSE;
881     } else {
882         blkdev->feature_persistent = !!pers;
883     }
884
885     blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
886     if (blkdev->xendev.protocol) {
887         if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
888             blkdev->protocol = BLKIF_PROTOCOL_X86_32;
889         }
890         if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
891             blkdev->protocol = BLKIF_PROTOCOL_X86_64;
892         }
893     }
894
895     blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
896                                             blkdev->xendev.dom,
897                                             blkdev->ring_ref,
898                                             PROT_READ | PROT_WRITE);
899     if (!blkdev->sring) {
900         return -1;
901     }
902     blkdev->cnt_map++;
903
904     switch (blkdev->protocol) {
905     case BLKIF_PROTOCOL_NATIVE:
906     {
907         blkif_sring_t *sring_native = blkdev->sring;
908         BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
909         break;
910     }
911     case BLKIF_PROTOCOL_X86_32:
912     {
913         blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
914
915         BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
916         break;
917     }
918     case BLKIF_PROTOCOL_X86_64:
919     {
920         blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
921
922         BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
923         break;
924     }
925     }
926
927     if (blkdev->feature_persistent) {
928         /* Init persistent grants */
929         blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
930         blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
931                                              NULL, NULL,
932                                              (GDestroyNotify)destroy_grant);
933         blkdev->persistent_gnt_count = 0;
934     }
935
936     xen_be_bind_evtchn(&blkdev->xendev);
937
938     xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
939                   "remote port %d, local port %d\n",
940                   blkdev->xendev.protocol, blkdev->ring_ref,
941                   blkdev->xendev.remote_port, blkdev->xendev.local_port);
942     return 0;
943 }
944
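/*
 * Drop the block device reference and unmap the shared ring; called on
 * disconnect and again from blk_free if the device is still connected.
 */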
945 static void blk_disconnect(struct XenDevice *xendev)
946 {
947     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
948
949     if (blkdev->bs) {
950         bdrv_detach_dev(blkdev->bs, blkdev);
951         bdrv_unref(blkdev->bs);
952         blkdev->bs = NULL;
953     }
954     xen_be_unbind_evtchn(&blkdev->xendev);
955
956     if (blkdev->sring) {
957         xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
958         blkdev->cnt_map--;
959         blkdev->sring = NULL;
960     }
961 }
962
963 static int blk_free(struct XenDevice *xendev)
964 {
965     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
966     struct ioreq *ioreq;
967
968     if (blkdev->bs || blkdev->sring) {
969         blk_disconnect(xendev);
970     }
971
972     /* Free persistent grants */
973     if (blkdev->feature_persistent) {
974         g_tree_destroy(blkdev->persistent_gnts);
975     }
976
977     while (!QLIST_EMPTY(&blkdev->freelist)) {
978         ioreq = QLIST_FIRST(&blkdev->freelist);
979         QLIST_REMOVE(ioreq, list);
980         qemu_iovec_destroy(&ioreq->v);
981         g_free(ioreq);
982     }
983
984     g_free(blkdev->params);
985     g_free(blkdev->mode);
986     g_free(blkdev->type);
987     g_free(blkdev->dev);
988     g_free(blkdev->devtype);
989     qemu_bh_delete(blkdev->bh);
990     return 0;
991 }
992
993 static void blk_event(struct XenDevice *xendev)
994 {
995     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
996
997     qemu_bh_schedule(blkdev->bh);
998 }
999
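/*
 * Backend callbacks handed to the generic Xen backend framework
 * (registration via xen_be_register() happens outside this file).
 */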
1000 struct XenDevOps xen_blkdev_ops = {
1001     .size       = sizeof(struct XenBlkDev),
1002     .flags      = DEVOPS_FLAG_NEED_GNTDEV,
1003     .alloc      = blk_alloc,
1004     .init       = blk_init,
1005     .initialise    = blk_connect,
1006     .disconnect = blk_disconnect,
1007     .event      = blk_event,
1008     .free       = blk_free,
1009 };