/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw.h"
#include "qemu-char.h"
#include "xen_backend.h"
#include "xen_blkif.h"
#include "blockdev.h"

/* ------------------------------------------------------------- */

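/*
 * Tunables.  batch_maps selects whether all grant pages of a request
 * are mapped with one batched hypercall instead of one call per page
 * (turned on in blk_alloc() unless running in emulation mode).
 * max_requests caps how many ioreq structs each device may have in
 * flight at once; see ioreq_start().
 */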
static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

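/*
 * Per-request state: the frontend's blkif_request_t plus the parsed
 * offset/iovec, grant-mapping bookkeeping and outstanding-aio counters.
 */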
struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)  list;
    BlockAcctCookie     acct;
};

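/*
 * Per-device state: xenstore-derived configuration, the shared ring,
 * the request lists and the qemu block driver instance backing the disk.
 */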
struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

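/*
 * Get a free ioreq: recycle one from the freelist if possible, otherwise
 * allocate a new one, up to the max_requests limit.  Returns NULL when
 * the limit is reached; the caller retries via the bottom half.
 */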
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

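/* Move a completed request from the inflight list to the finished list. */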
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

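/*
 * Reset a request and return it to the freelist; 'finish' says which
 * counter (finished vs. inflight) the request is accounted against.
 */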
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            ioreq->presync = 1;
            return 0;
        }
        ioreq->presync = ioreq->postsync = 1;
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

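/* Undo the grant mappings of ioreq_map(), batched or one page at a time. */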
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

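/*
 * Map the frontend's granted pages into our address space and patch the
 * iovec base addresses (which ioreq_parse() left as in-page offsets) so
 * they point into the mapped pages.
 */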
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        }
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    ioreq->mapped = 1;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

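/*
 * Completion callback for all aio submitted by ioreq_runio_qemu_aio():
 * runs the data phase after a presync flush, issues the postsync flush
 * once all data aio has drained, and finally unmaps the grants, finishes
 * the request and kicks the response bottom half.
 */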
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

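/*
 * Submit a parsed request as asynchronous block i/o.  aio_inflight is
 * held at one across submission (and dropped by the final
 * qemu_aio_complete(ioreq, 0) call below), so the completion path only
 * finalizes the request once all i/o has actually been issued.
 */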
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

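/*
 * Put one response on the ring, in the layout the frontend's protocol
 * expects.  Returns nonzero when the frontend needs an event-channel
 * notification, and bumps more_work if further requests are pending.
 */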
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

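/* Copy one request off the ring, converting 32/64-bit layouts if needed. */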
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

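/*
 * Main request loop: flush finished responses, then pull and submit new
 * requests until the ring is empty or we run out of ioreq structs, in
 * which case the bottom half is rescheduled to retry later.
 */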
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))

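/*
 * For illustration, assuming the usual BLKIF_MAX_SEGMENTS_PER_REQUEST
 * of 11: with max_requests = 32 the exact worst case is
 * 32 * 11 + 31 * 10 + 1 = 663 grants, comfortably below the
 * 2 * 32 * 11 = 704 reserved by MAX_GRANTS.
 *
 * blk_alloc() runs once per device at backend creation: it initializes
 * the request lists, creates the bottom half, and reserves grant slots.
 */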
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

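/*
 * Device initialization: read the backend configuration from xenstore,
 * open (or look up) the backing image, and publish disk geometry and
 * features back to xenstore for the frontend.  The drive index is
 * derived from the device number, assuming the xvd numbering scheme
 * (block major 202, 16 minors per disk).
 */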
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, info = 0;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    qflags = BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NATIVE_AIO;
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
    } else {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                          bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            goto out_error;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    return -1;
}

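/*
 * Frontend connect: read ring-ref and event-channel from the frontend's
 * xenstore area, map the shared ring in the negotiated protocol layout,
 * and bind the event channel.
 */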
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

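/*
 * Frontend disconnect: close the block device (only if we opened it
 * ourselves in blk_init), unbind the event channel and unmap the ring.
 */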
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourselves */
            bdrv_close(blkdev->bs);
            bdrv_detach_dev(blkdev->bs, blkdev);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

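/*
 * Final teardown: disconnect if still connected, then free all ioreq
 * structs, the xenstore strings and the bottom half.
 */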
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

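/* Event-channel notification from the frontend: defer work to the bottom half. */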
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

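/* Backend callbacks registered with the generic xen backend core. */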
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};