/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"

/* ------------------------------------------------------------- */

static int batch_maps = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE 512
#define IOCB_COUNT (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

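/* A grant that stays mapped for the lifetime of the connection
 * (persistent-grants protocol extension). */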
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct PersistentRegion {
    void *addr;
    int num;
};

typedef struct PersistentRegion PersistentRegion;

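/* State of one request, from ring parsing through grant mapping
 * to AIO completion. */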
struct ioreq {
    blkif_request_t req;
    int16_t status;

    /* parsed request */
    off_t start;
    QEMUIOVector v;
    int presync;
    int postsync;
    uint8_t mapped;

    /* grant mapping */
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int prot;
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *pages;
    int num_unmap;

    /* aio status */
    int aio_inflight;
    int aio_errors;

    struct XenBlkDev *blkdev;
    QLIST_ENTRY(ioreq) list;
    BlockAcctCookie acct;
};

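/* Per-instance state of one Xen block backend device. */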
struct XenBlkDev {
    struct XenDevice xendev; /* must be first */
    char *params;
    char *mode;
    char *type;
    char *dev;
    char *devtype;
    bool directiosafe;
    const char *fileproto;
    const char *filename;
    int ring_ref;
    void *sring;
    int64_t file_blk;
    int64_t file_size;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    int cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int requests_total;
    int requests_inflight;
    int requests_finished;

    /* Persistent grants extension */
    gboolean feature_discard;
    gboolean feature_persistent;
    GTree *persistent_gnts;
    GSList *persistent_regions;
    unsigned int persistent_gnt_count;
    unsigned int max_grants;

    /* qemu block driver */
    DriveInfo *dinfo;
    BlockBackend *blk;
    QEMUBH *bh;
};

/* ------------------------------------------------------------- */

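/* Scrub an ioreq clean before it goes back on the freelist. */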
static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

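/* GTree comparator: grant references are stored as GUINT-encoded pointers. */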
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

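/* GDestroyNotify for persistent_gnts (non-batch mode): unmap one grant. */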
static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

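/* Batch-mode counterpart of destroy_grant: unmap a whole contiguous
 * region of persistently mapped grants. */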
static void remove_persistent_region(gpointer data, gpointer dev)
{
    PersistentRegion *region = data;
    struct XenBlkDev *blkdev = dev;
    XenGnttab gnt = blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, region->addr, region->num) != 0) {
        xen_be_printf(&blkdev->xendev, 0,
                      "xc_gnttab_munmap region %p failed: %s\n",
                      region->addr, strerror(errno));
    }
    xen_be_printf(&blkdev->xendev, 3,
                  "unmapped grant region %p with %d pages\n",
                  region->addr, region->num);
    g_free(region);
}

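/* Take an ioreq from the freelist, allocating a new one while fewer
 * than max_requests exist; returns NULL when the limit is reached. */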
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

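/* Move a completed request from the inflight list onto the finished list. */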
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

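/* Return an ioreq to the freelist; 'finish' selects whether it comes
 * off the finished or the inflight counter. */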
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i] = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

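/* Unmap only the grants that were freshly mapped for this request
 * (num_unmap); persistent mappings stay in place. */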
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

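/* Map the grants this request needs, reusing persistently mapped pages
 * where possible and promoting new mappings to persistent when there
 * is room below max_grants. */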
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    PersistentRegion *region;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
        ioreq->blkdev->max_grants))) {
        /*
         * If we are using persistent grants and batch mappings only
         * add the new maps to the list of persistent grants if the whole
         * area can be persistently mapped.
         */
        if (batch_maps) {
            region = g_malloc0(sizeof(*region));
            region->addr = ioreq->pages;
            region->num = new_maps;
            ioreq->blkdev->persistent_regions = g_slist_append(
                                       ioreq->blkdev->persistent_regions,
                                       region);
        }
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
               && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
        assert(!batch_maps || new_maps == 0);
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

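/* Completion callback for all AIO submitted below; chains the presync
 * and postsync flushes and sends the response once everything is done. */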
62d23efa
AL
503static void qemu_aio_complete(void *opaque, int ret)
504{
505 struct ioreq *ioreq = opaque;
506
507 if (ret != 0) {
508 xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
509 ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
510 ioreq->aio_errors++;
511 }
512
513 ioreq->aio_inflight--;
c6961b7d
SS
514 if (ioreq->presync) {
515 ioreq->presync = 0;
516 ioreq_runio_qemu_aio(ioreq);
517 return;
518 }
209cd7ab 519 if (ioreq->aio_inflight > 0) {
62d23efa 520 return;
209cd7ab 521 }
d56de074 522 if (ioreq->postsync) {
c6961b7d
SS
523 ioreq->postsync = 0;
524 ioreq->aio_inflight++;
4be74634 525 blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
c6961b7d 526 return;
d56de074 527 }
62d23efa
AL
528
529 ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
530 ioreq_unmap(ioreq);
531 ioreq_finish(ioreq);
58da5b1e
OH
532 switch (ioreq->req.operation) {
533 case BLKIF_OP_WRITE:
534 case BLKIF_OP_FLUSH_DISKCACHE:
535 if (!ioreq->req.nr_segments) {
536 break;
537 }
538 case BLKIF_OP_READ:
4be74634 539 block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
58da5b1e 540 break;
f3135204 541 case BLKIF_OP_DISCARD:
58da5b1e
OH
542 default:
543 break;
544 }
62d23efa
AL
545 qemu_bh_schedule(ioreq->blkdev->bh);
546}
547
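/* Submit the parsed request to the block layer as asynchronous I/O. */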
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_readv(blkdev->blk, ioreq->start / BLOCK_SIZE,
                      &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                      qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_WRITE);
        ioreq->aio_inflight++;
        blk_aio_writev(blkdev->blk, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
        ioreq->aio_inflight++;
        blk_aio_discard(blkdev->blk,
                        discard_req->sector_number, discard_req->nr_sectors,
                        qemu_aio_complete, ioreq);
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

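/* Place one response on the shared ring; returns nonzero if the
 * frontend should be notified. */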
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

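/* Copy one request off the ring, translating the 32/64-bit frontend
 * layouts to the native one. */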
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

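/* Main work loop: flush finished responses, then pull, parse and
 * submit new requests from the ring. */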
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

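/* Discard defaults to enabled; a "discard-enable" backend key (typically
 * written by the toolstack) can veto it, and support is advertised to
 * the frontend via "feature-discard". */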
static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}

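/* Init phase: read the backend configuration from xenstore and advertise
 * features; the image itself is opened later, in blk_connect. */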
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL ||
        blkdev->type == NULL ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

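/* Connect phase: open (or adopt) the block device, publish its geometry,
 * map the shared ring and bind the event channel. */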
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        BlockBackend *blk;
        BlockDriver *drv;
        BlockDriverState *bs;

        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blk = blk_new_with_bs(blkdev->dev, NULL);
        if (!blk) {
            return -1;
        }
        blkdev->blk = blk;

        bs = blk_bs(blk);
        drv = bdrv_find_whitelisted_format(blkdev->fileproto, readonly);
        if (bdrv_open(&bs, blkdev->filename, NULL, NULL, qflags,
                      drv, &local_err) != 0) {
            xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            blk_unref(blk);
            blkdev->blk = NULL;
            return -1;
        }
        assert(bs == blk_bs(blk));
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk was not created by us, so take a reference
         * so we can blk_unref() unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_nofail(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blk_bs(blkdev->blk)) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and the number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  batch_maps ?
                                                  (GDestroyNotify)g_free :
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_regions = NULL;
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->blk) {
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }

    /*
     * Unmap persistent grants before switching to the closed state
     * so the frontend can free them.
     *
     * In the !batch_maps case g_tree_destroy will take care of unmapping
     * the grant, but in the batch_maps case we need to iterate over every
     * region in persistent_regions and unmap it.
     */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
        assert(batch_maps || blkdev->persistent_gnt_count == 0);
        if (batch_maps) {
            blkdev->persistent_gnt_count = 0;
            g_slist_foreach(blkdev->persistent_regions,
                            (GFunc)remove_persistent_region, blkdev);
            g_slist_free(blkdev->persistent_regions);
        }
        blkdev->feature_persistent = false;
    }
}

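/* Final teardown: disconnect if still connected, then free all requests
 * and the per-device strings. */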
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->blk || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

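/* Entry points registered with the generic Xen backend infrastructure. */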
struct XenDevOps xen_blkdev_ops = {
    .size = sizeof(struct XenBlkDev),
    .flags = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc = blk_alloc,
    .init = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event = blk_event,
    .free = blk_free,
};