/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <xs.h>
#include <xenctrl.h>
#include <xen/io/xenbus.h>

#include "hw.h"
#include "block_int.h"
#include "qemu-char.h"
#include "xen_blkif.h"
#include "xen_backend.h"
#include "blockdev.h"

/* ------------------------------------------------------------- */

static int syncwrite = 0;
static int batch_maps = 0;

static int max_requests = 32;
static int use_aio = 1;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE 512
#define IOCB_COUNT (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

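/*
 * Per-request state.  A guest blkif_request_t is copied off the shared
 * ring, parsed into a start offset plus a QEMUIOVector, the referenced
 * grant pages are mapped, and the I/O is carried out either
 * synchronously or through the qemu aio layer.
 */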
struct ioreq {
    blkif_request_t req;
    int16_t status;

    /* parsed request */
    off_t start;
    QEMUIOVector v;
    int presync;
    int postsync;

    /* grant mapping */
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int prot;
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *pages;

    /* aio status */
    int aio_inflight;
    int aio_errors;

    struct XenBlkDev *blkdev;
    QLIST_ENTRY(ioreq) list;
};

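/*
 * Per-device state: xenstore configuration, the mapped shared ring in
 * the guest-selected protocol, the ioreq lists, and the qemu block
 * driver instance that backs the device.
 */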
struct XenBlkDev {
    struct XenDevice xendev;  /* must be first */
    char *params;
    char *mode;
    char *type;
    char *dev;
    char *devtype;
    const char *fileproto;
    const char *filename;
    int ring_ref;
    void *sring;
    int64_t file_blk;
    int64_t file_size;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    int cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int requests_total;
    int requests_inflight;
    int requests_finished;

    /* qemu block driver */
    DriveInfo *dinfo;
    BlockDriverState *bs;
    QEMUBH *bh;
};

/* ------------------------------------------------------------- */

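/*
 * Get a free ioreq: reuse one from the freelist if possible, otherwise
 * allocate a new one, up to the max_requests cap.  The request is moved
 * onto the inflight list; returns NULL when the cap is reached.
 */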
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = qemu_mallocz(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

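/*
 * ioreq_finish moves a completed request from the inflight list to the
 * finished list (so a response can be sent); ioreq_release wipes it and
 * returns it to the freelist for reuse.
 */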
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    blkdev->requests_finished--;
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            ioreq->presync = 1;
            return 0;
        }
        if (!syncwrite) {
            ioreq->presync = ioreq->postsync = 1;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        if (syncwrite) {
            ioreq->postsync = 1;
        }
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i] = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

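/*
 * Grant mapping.  After ioreq_parse, each iov_base holds only the byte
 * offset within its granted page.  ioreq_map maps the guest's grant
 * references into our address space, either with one batched call or
 * page by page, and rewrites iov_base to the mapped address plus that
 * offset.  ioreq_unmap undoes the mapping.
 */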
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
}

static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0) {
        return 0;
    }
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        }
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    return 0;
}

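/*
 * Synchronous I/O path: map the grants, flush if a barrier asks for it,
 * then issue blocking bdrv_read/bdrv_write calls segment by segment.
 */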
static int ioreq_runio_qemu_sync(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int i, rc;
    off_t pos;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }
    if (ioreq->presync) {
        bdrv_flush(blkdev->bs);
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE,
                           ioreq->v.iov[i].iov_base,
                           ioreq->v.iov[i].iov_len / BLOCK_SIZE);
            if (rc != 0) {
                xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n",
                              ioreq->v.iov[i].iov_base,
                              ioreq->v.iov[i].iov_len);
                goto err;
            }
            pos += ioreq->v.iov[i].iov_len;
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE,
                            ioreq->v.iov[i].iov_base,
                            ioreq->v.iov[i].iov_len / BLOCK_SIZE);
            if (rc != 0) {
                xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n",
                              ioreq->v.iov[i].iov_base,
                              ioreq->v.iov[i].iov_len);
                goto err;
            }
            pos += ioreq->v.iov[i].iov_len;
        }
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync) {
        bdrv_flush(blkdev->bs);
    }
    ioreq->status = BLKIF_RSP_OKAY;

    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

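/*
 * Completion callback for the aio path.  aio_inflight also counts a
 * reference held by the submission path itself, so the request is only
 * finalized once every aio operation and the submitter are done.
 */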
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->aio_inflight > 0) {
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

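/*
 * Asynchronous I/O path: submit the whole iovec with bdrv_aio_readv or
 * bdrv_aio_writev and let qemu_aio_complete() finish the request.  The
 * extra aio_inflight increment at the top keeps the request alive until
 * submission is complete; the final qemu_aio_complete(ioreq, 0) call
 * drops it.
 */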
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

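/*
 * Put one response on the shared ring, using the layout that matches
 * the frontend's ABI (native, 32-bit or 64-bit x86).  Returns nonzero
 * if the frontend needs an event-channel notification.
 */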
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

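/*
 * Copy one request off the ring, converting from the 32-bit or 64-bit
 * x86 wire format to the native layout when necessary.
 */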
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

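/*
 * Main work loop, run from the bottom half: drain the request ring,
 * parse and submit each request, and reschedule itself via more_work
 * when requests had to be deferred for lack of free ioreq slots.
 */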
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (use_aio) {
        blk_send_response_all(blkdev);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq);
            continue;
        }

        if (use_aio) {
            /* run i/o in aio mode */
            ioreq_runio_qemu_aio(ioreq);
        } else {
            /* run i/o in sync mode */
            ioreq_runio_qemu_sync(ioreq);
        }
    }
    if (!use_aio) {
        blk_send_response_all(blkdev);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
}

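/*
 * Device initialization: read the backend configuration from xenstore,
 * open (or adopt) the qemu block driver, then publish disk geometry and
 * features back to xenstore for the frontend.
 */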
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, have_barriers, info = 0;
    char *h;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        h = strchr(blkdev->params, ':');
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename = blkdev->params;
        }
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL ||
        blkdev->type == NULL ||
        blkdev->dev == NULL) {
        return -1;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags = BDRV_O_RDWR;
    } else {
        qflags = 0;
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                      bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
            bdrv_delete(blkdev->bs);
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    blkdev->file_blk = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      blkdev->bs->drv ? blkdev->bs->drv->format_name : "-");
        blkdev->file_size = 0;
    }
    have_barriers = blkdev->bs->drv && blkdev->bs->drv->bdrv_flush ? 1 : 0;

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", have_barriers);
    xenstore_write_be_int(&blkdev->xendev, "info", info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;
}

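/*
 * Frontend connect: read ring-ref and event channel from the frontend's
 * xenstore area, map the shared ring, initialize the back ring in the
 * negotiated protocol and bind the event channel.
 */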
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

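/*
 * Teardown: blk_disconnect closes the block driver (only if we opened
 * it ourselves) and unmaps the shared ring; blk_free releases the
 * cached ioreqs and the xenstore strings.
 */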
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourself */
            bdrv_close(blkdev->bs);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        qemu_free(ioreq);
    }

    qemu_free(blkdev->params);
    qemu_free(blkdev->mode);
    qemu_free(blkdev->type);
    qemu_free(blkdev->dev);
    qemu_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .connect    = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};