/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"

/* ------------------------------------------------------------- */

static int batch_maps = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE 512
#define IOCB_COUNT (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct PersistentRegion {
    void *addr;
    int num;
};

typedef struct PersistentRegion PersistentRegion;

struct ioreq {
    blkif_request_t req;
    int16_t status;

    /* parsed request */
    off_t start;
    QEMUIOVector v;
    int presync;
    int postsync;
    uint8_t mapped;

    /* grant mapping */
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int prot;
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *pages;
    int num_unmap;

    /* aio status */
    int aio_inflight;
    int aio_errors;

    struct XenBlkDev *blkdev;
    QLIST_ENTRY(ioreq) list;
    BlockAcctCookie acct;
};

struct XenBlkDev {
    struct XenDevice xendev; /* must be first */
    char *params;
    char *mode;
    char *type;
    char *dev;
    char *devtype;
    bool directiosafe;
    const char *fileproto;
    const char *filename;
    int ring_ref;
    void *sring;
    int64_t file_blk;
    int64_t file_size;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    int cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int requests_total;
    int requests_inflight;
    int requests_finished;

    /* Persistent grants extension */
    gboolean feature_discard;
    gboolean feature_persistent;
    GTree *persistent_gnts;
    GSList *persistent_regions;
    unsigned int persistent_gnt_count;
    unsigned int max_grants;

    /* qemu block driver */
    DriveInfo *dinfo;
    BlockBackend *blk;
    QEMUBH *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

static void remove_persistent_region(gpointer data, gpointer dev)
{
    PersistentRegion *region = data;
    struct XenBlkDev *blkdev = dev;
    XenGnttab gnt = blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, region->addr, region->num) != 0) {
        xen_be_printf(&blkdev->xendev, 0,
                      "xc_gnttab_munmap region %p failed: %s\n",
                      region->addr, strerror(errno));
    }
    xen_be_printf(&blkdev->xendev, 3,
                  "unmapped grant region %p with %d pages\n",
                  region->addr, region->num);
    g_free(region);
}

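/*
 * Get a request slot: reuse one from the freelist or, while fewer than
 * max_requests have been allocated, create a new one. Returns NULL when
 * the limit is reached so the caller can retry later.
 */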
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i] = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

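/*
 * Drop the grant mappings set up for this request. Only the num_unmap
 * pages mapped by ioreq_map are unmapped here; persistently mapped
 * grants stay in place until the device disconnects.
 */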
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

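/*
 * Map the grants referenced by the request into this process. Grants that
 * are already persistently mapped are reused; the remaining ones are
 * mapped either in one batch (batch_maps) or one by one, and may then be
 * added to the persistent list, up to max_grants.
 */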
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    PersistentRegion *region;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
        ioreq->blkdev->max_grants))) {
        /*
         * If we are using persistent grants and batch mappings only
         * add the new maps to the list of persistent grants if the whole
         * area can be persistently mapped.
         */
        if (batch_maps) {
            region = g_malloc0(sizeof(*region));
            region->addr = ioreq->pages;
            region->num = new_maps;
            ioreq->blkdev->persistent_regions = g_slist_append(
                                            ioreq->blkdev->persistent_regions,
                                            region);
        }
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
        assert(!batch_maps || new_maps == 0);
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

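/*
 * Completion callback for all AIO submitted by ioreq_runio_qemu_aio.
 * Chains the pre- and post-flush for BLKIF_OP_FLUSH_DISKCACHE, and once
 * the last AIO for the request has completed, unmaps the grants, moves
 * the request to the finished list and schedules the response bottom half.
 */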
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
    case BLKIF_OP_READ:
        block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}

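/*
 * Submit the parsed request to the block layer: map the grants, issue a
 * pre-flush when requested, and start the read/write/discard AIO. The
 * extra aio_inflight reference taken here is dropped by the final
 * qemu_aio_complete() call at the end of this function.
 */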
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_readv(blkdev->blk, ioreq->start / BLOCK_SIZE,
                      &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                      qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_WRITE);
        ioreq->aio_inflight++;
        blk_aio_writev(blkdev->blk, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
        ioreq->aio_inflight++;
        blk_aio_discard(blkdev->blk,
                        discard_req->sector_number, discard_req->nr_sectors,
                        qemu_aio_complete, ioreq);
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

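/*
 * Write a single response onto the shared ring using the negotiated
 * protocol layout (native, x86_32 or x86_64) and return whether the
 * frontend needs to be notified via the event channel.
 */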
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

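/*
 * Main request loop, run from the bottom half: flush finished responses,
 * then pull new requests off the ring, parse them and hand them to the
 * block layer. Reschedules itself while work remains and request slots
 * are available.
 */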
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 * 2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}

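/*
 * First half of backend setup: read the backend configuration from
 * xenstore and advertise the supported features (flush, persistent
 * grants, discard, read-only/cdrom info). The block device itself is
 * opened later, in blk_connect.
 */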
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL ||
        blkdev->type == NULL ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

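/*
 * Second half of backend setup (the .initialise hook): open or reuse the
 * block backend, publish the disk geometry, read the frontend's ring
 * reference, event channel, protocol and persistent-grant setting, and
 * map the shared ring.
 */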
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        QDict *options = NULL;

        if (strcmp(blkdev->fileproto, "<unset>")) {
            options = qdict_new();
            qdict_put(options, "driver", qstring_from_str(blkdev->fileproto));
        }

        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->blk = blk_new_open(blkdev->dev, blkdev->filename, NULL, options,
                                   qflags, &local_err);
        if (!blkdev->blk) {
            xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk is not created by us, we get a reference
         * so we can blk_unref() unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_nofail(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blk_bs(blkdev->blk)) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  batch_maps ?
                                                  (GDestroyNotify)g_free :
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_regions = NULL;
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

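/*
 * Tear down the connection: detach and release the block backend, unbind
 * the event channel, unmap the shared ring and drop any persistently
 * mapped grants so the frontend can reclaim them.
 */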
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->blk) {
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }

    /*
     * Unmap persistent grants before switching to the closed state
     * so the frontend can free them.
     *
     * In the !batch_maps case g_tree_destroy will take care of unmapping
     * the grant, but in the batch_maps case we need to iterate over every
     * region in persistent_regions and unmap it.
     */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
        assert(batch_maps || blkdev->persistent_gnt_count == 0);
        if (batch_maps) {
            blkdev->persistent_gnt_count = 0;
            g_slist_foreach(blkdev->persistent_regions,
                            (GFunc)remove_persistent_region, blkdev);
            g_slist_free(blkdev->persistent_regions);
        }
        blkdev->feature_persistent = false;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->blk || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size = sizeof(struct XenBlkDev),
    .flags = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc = blk_alloc,
    .init = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event = blk_event,
    .free = blk_free,
};