/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw.h"
#include "xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"

/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

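/*
 * A PersistentGrant tracks a guest page that stays mapped in the backend
 * across requests, so the grant does not have to be remapped and unmapped
 * for every I/O (see ioreq_map/destroy_grant below).
 */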
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

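/*
 * ioreq lifecycle: ioreq_start() takes a request structure from the
 * freelist (or allocates a new one, up to max_requests) and puts it on
 * the inflight list; ioreq_finish() moves it to the finished list once
 * its I/O has completed; ioreq_release() resets it and returns it to
 * the freelist.
 */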
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

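/*
 * Map the grant references of a request into our address space.  Grants
 * already held in the persistent-grant tree are reused directly; the
 * remaining ones are mapped (in one batch if batch_maps is set) and, while
 * there is room below max_grants, added to the persistent tree so later
 * requests can skip the map/unmap cycle.
 */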
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    /* The domids and refs arrays will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent) {
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
               && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

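/*
 * Completion callback shared by all aio submissions of a request: a pending
 * presync flush re-submits the actual I/O, a postsync flush is issued once
 * all data I/O has drained, and only then is the response status set, the
 * grants unmapped and the bottom half scheduled to send the reply.
 */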
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

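/*
 * Build the blkif response for one finished request and place it on the
 * shared ring, in the format negotiated with the frontend.  Returns nonzero
 * if the frontend should be notified via the event channel.
 */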
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

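/*
 * Main request loop, run from the bottom half: flush any finished responses,
 * then consume new requests from the shared ring, parse them and submit the
 * resulting aio operations.  Re-schedules itself if requests remain pending.
 */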
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, info = 0;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    qflags = BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NATIVE_AIO;
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
    } else {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                          bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            goto out_error;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    return -1;
}

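/*
 * Called once the frontend is ready: read ring-ref, event channel and the
 * persistent-grant feature from xenstore, map the shared ring using the
 * negotiated protocol and bind the event channel.
 */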
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers;

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourself */
            bdrv_close(blkdev->bs);
            bdrv_detach_dev(blkdev->bs, blkdev);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    /* Free persistent grants */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};