/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <[email protected]>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen_backend.h"
#include "hw/xen_blkif.h"
#include "sysemu/blockdev.h"

/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

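/*
 * A grant that is mapped once and then kept mapped for the lifetime
 * of the connection, as negotiated via the "feature-persistent"
 * xenstore nodes.
 */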
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

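/*
 * Tracks a single blkif request from the time it is taken off the
 * shared ring until the response has been put back.
 */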
struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

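/*
 * Scrub an ioreq before it goes back on the freelist; the iovec
 * allocation itself is kept for reuse.
 */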
static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

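/*
 * Comparator for the persistent grant tree; keys are grant
 * references stored via GUINT_TO_POINTER.
 */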
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

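/*
 * GDestroyNotify for the persistent grant tree: unmap the page and
 * free the tracking structure.
 */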
static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

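/*
 * Get an ioreq for a new request: reuse one from the freelist if
 * possible, otherwise allocate a fresh one as long as fewer than
 * max_requests exist.  Returns NULL once that limit is reached.
 */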
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

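/* Move a completed ioreq from the inflight list to the finished list. */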
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

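/*
 * Put an ioreq back on the freelist; 'finish' says whether it comes
 * from the finished list or straight from the inflight list.
 */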
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

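/*
 * Unmap the transient grant mappings of a request.  Pages promoted
 * to persistent grants are not touched here; ioreq_map trims
 * num_unmap so that they stay mapped.
 */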
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

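/*
 * Map the grants of a request, reusing persistently mapped pages
 * where possible and batching new mappings when batch_maps is set.
 * ioreq_parse leaves the in-page offset in each iov_base; the mapped
 * page address is added on top here.
 */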
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent) {
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

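/*
 * Completion callback for all AIO issued on behalf of an ioreq.
 * Resubmits the request after a presync flush, issues the postsync
 * flush once all other I/O has finished, and then sends the
 * response and releases the grant mappings.
 */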
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

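/*
 * Map the request's grants and submit it as asynchronous block I/O.
 * The aio_inflight reference taken here is dropped by the final
 * qemu_aio_complete() call, which also completes requests that
 * carry no data segments.
 */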
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

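/*
 * Put one response on the ring.  Returns nonzero when the frontend
 * needs to be notified, and flags more_work when further requests
 * are already pending.
 */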
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

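/*
 * Copy one request off the shared ring, converting the 32-bit and
 * 64-bit x86 layouts to the native one where necessary.
 */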
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

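/*
 * Main request loop: send pending responses, then pull requests off
 * the ring, parse them and submit the I/O.  Reschedules the bottom
 * half when work remains and request slots are free again.
 */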
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))

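/*
 * Called when the backend device is created: set up the request
 * lists and the bottom half, and raise the grant table limit to the
 * worst-case number of simultaneous mappings.
 */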
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

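/*
 * Read the backend configuration from xenstore, open (or look up)
 * the block device and publish size and features to the frontend.
 */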
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, info = 0;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    qflags = BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NATIVE_AIO;
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
    } else {
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info  |= VDISK_CDROM;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open(blkdev->bs, blkdev->filename, NULL, qflags,
                        bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            goto out_error;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info",            info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size",     blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    return -1;
}

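/*
 * Called once the frontend is ready: pick up the negotiated ring
 * reference, event channel, protocol and persistent-grant feature
 * from xenstore, map the shared ring and bind the event channel.
 */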
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers;

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

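/*
 * Tear down the connection: close the block device if we created it
 * ourselves, unbind the event channel and unmap the shared ring.
 */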
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourselves */
            bdrv_close(blkdev->bs);
            bdrv_detach_dev(blkdev->bs, blkdev);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

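/*
 * Final cleanup: drop any remaining connection state, destroy the
 * persistent grant tree (unmapping its pages via destroy_grant) and
 * free all per-device allocations.
 */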
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    /* Free persistent grants */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

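/* Event channel kick from the frontend: defer the work to the bottom half. */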
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};