/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"

/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

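/*
 * One guest page kept mapped in this backend across requests (the
 * persistent-grants protocol extension); the grant ref is the key in
 * blkdev->persistent_gnts, this struct is the value.
 */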
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    bool                directiosafe;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

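/* Scrub an ioreq for reuse; keeps the QEMUIOVector allocation alive. */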
static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

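/* GTree comparator: orders persistent grants by grant reference. */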
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

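/* GDestroyNotify for the persistent grant tree: unmap the page and free. */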
static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

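/*
 * Take an ioreq off the freelist, or allocate a fresh one as long as
 * fewer than max_requests are outstanding; returns NULL when the
 * backend is saturated.
 */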
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

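/* Move a completed ioreq from the inflight list to the finished list. */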
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

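/* Return an ioreq to the freelist; 'finish' says which counter to drop. */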
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

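/* Undo the grant mappings of one request (only the non-persistent ones). */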
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

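/*
 * Map every segment of the request into our address space, reusing
 * persistently mapped grants where possible and promoting freshly
 * mapped ones to persistent while there is room.
 */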
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    /* The domids and refs arrays will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding whether it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent) {
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

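/*
 * Completion callback for all aio issued on behalf of one ioreq; the
 * response is only sent once aio_inflight drops to zero and any
 * pre/post flush has run.
 */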
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

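/* Map the request's grants and submit it to the block layer as aio. */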
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

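/*
 * Put one response on the shared ring, in the layout matching the
 * frontend's ABI; returns nonzero if the frontend needs an event
 * channel notification.
 */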
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

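/* Copy the request at ring index 'rc' out of the shared ring,
 * converting from the 32/64-bit frontend layouts where needed. */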
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

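/* Main loop: flush finished responses, then pull and start as many
 * new requests as the ring and the ioreq pool allow. */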
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))

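/* XenDevOps .alloc hook: set up lists, the bottom half, and grant limits. */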
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

700 | ||
701 | static int blk_init(struct XenDevice *xendev) | |
702 | { | |
703 | struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev); | |
86f425db | 704 | int info = 0; |
454ae734 | 705 | char *directiosafe = NULL; |
62d23efa AL |
706 | |
707 | /* read xenstore entries */ | |
708 | if (blkdev->params == NULL) { | |
5ea3c2b4 | 709 | char *h = NULL; |
209cd7ab | 710 | blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params"); |
5ea3c2b4 SS |
711 | if (blkdev->params != NULL) { |
712 | h = strchr(blkdev->params, ':'); | |
713 | } | |
209cd7ab AP |
714 | if (h != NULL) { |
715 | blkdev->fileproto = blkdev->params; | |
716 | blkdev->filename = h+1; | |
717 | *h = 0; | |
718 | } else { | |
719 | blkdev->fileproto = "<unset>"; | |
720 | blkdev->filename = blkdev->params; | |
721 | } | |
722 | } | |
7cef3f4f SS |
723 | if (!strcmp("aio", blkdev->fileproto)) { |
724 | blkdev->fileproto = "raw"; | |
725 | } | |
209cd7ab AP |
726 | if (blkdev->mode == NULL) { |
727 | blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode"); | |
728 | } | |
729 | if (blkdev->type == NULL) { | |
730 | blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type"); | |
731 | } | |
732 | if (blkdev->dev == NULL) { | |
733 | blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev"); | |
734 | } | |
735 | if (blkdev->devtype == NULL) { | |
736 | blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type"); | |
737 | } | |
454ae734 SS |
738 | directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe"); |
739 | blkdev->directiosafe = (directiosafe && atoi(directiosafe)); | |
62d23efa AL |
740 | |
741 | /* do we have all we need? */ | |
742 | if (blkdev->params == NULL || | |
209cd7ab AP |
743 | blkdev->mode == NULL || |
744 | blkdev->type == NULL || | |
745 | blkdev->dev == NULL) { | |
5ea3c2b4 | 746 | goto out_error; |
209cd7ab | 747 | } |
62d23efa AL |
748 | |
749 | /* read-only ? */ | |
86f425db | 750 | if (strcmp(blkdev->mode, "w")) { |
209cd7ab | 751 | info |= VDISK_READONLY; |
62d23efa AL |
752 | } |
753 | ||
754 | /* cdrom ? */ | |
209cd7ab AP |
755 | if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) { |
756 | info |= VDISK_CDROM; | |
757 | } | |
62d23efa | 758 | |
86f425db AB |
759 | blkdev->file_blk = BLOCK_SIZE; |
760 | ||
761 | /* fill info | |
762 | * blk_connect supplies sector-size and sectors | |
763 | */ | |
764 | xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1); | |
765 | xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1); | |
766 | xenstore_write_be_int(&blkdev->xendev, "info", info); | |
454ae734 SS |
767 | |
768 | g_free(directiosafe); | |
86f425db AB |
769 | return 0; |
770 | ||
771 | out_error: | |
772 | g_free(blkdev->params); | |
773 | blkdev->params = NULL; | |
774 | g_free(blkdev->mode); | |
775 | blkdev->mode = NULL; | |
776 | g_free(blkdev->type); | |
777 | blkdev->type = NULL; | |
778 | g_free(blkdev->dev); | |
779 | blkdev->dev = NULL; | |
780 | g_free(blkdev->devtype); | |
781 | blkdev->devtype = NULL; | |
454ae734 SS |
782 | g_free(directiosafe); |
783 | blkdev->directiosafe = false; | |
86f425db AB |
784 | return -1; |
785 | } | |
786 | ||
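/* XenDevOps .initialise hook: open (or adopt) the block device, map
 * the shared ring, and bind the event channel. */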
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            Error *local_err = NULL;
            BlockDriver *drv = bdrv_find_whitelisted_format(blkdev->fileproto,
                                                            readonly);
            if (bdrv_open(blkdev->bs,
                          blkdev->filename, NULL, qflags, drv, &local_err) != 0)
            {
                xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                              error_get_pretty(local_err));
                error_free(local_err);
                bdrv_unref(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
        /* blkdev->bs was not created by us, take a reference
         * so we can bdrv_unref() unconditionally */
        bdrv_ref(blkdev->bs);
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

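/* XenDevOps .disconnect hook: drop the block device, the event
 * channel, and the shared ring mapping. */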
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        bdrv_detach_dev(blkdev->bs, blkdev);
        bdrv_unref(blkdev->bs);
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

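/* XenDevOps .free hook: release everything the other hooks set up. */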
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    /* Free persistent grants */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

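/* XenDevOps .event hook: kick the bottom half on a notification. */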
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};