/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"

/* ------------------------------------------------------------- */

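/*
 * Tunables: batch_maps (set in blk_alloc when not running in Xen emulation
 * mode) makes the backend map all grant pages of a request with a single
 * xengnttab_map_grant_refs() call instead of one call per page;
 * max_requests caps how many struct ioreq are kept per device
 * (see ioreq_start).
 */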
static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct PersistentRegion {
    void *addr;
    int num;
};

typedef struct PersistentRegion PersistentRegion;

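/*
 * One struct ioreq describes a single request taken from the shared ring:
 * the raw blkif request, the parsed offset and iovec, the grant mappings
 * backing the data pages, and the state of the asynchronous I/O submitted
 * for it.
 */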
struct ioreq {
    blkif_request_t req;
    int16_t status;

    /* parsed request */
    off_t start;
    QEMUIOVector v;
    int presync;
    uint8_t mapped;

    /* grant mapping */
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int prot;
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *pages;
    int num_unmap;

    /* aio status */
    int aio_inflight;
    int aio_errors;

    struct XenBlkDev *blkdev;
    QLIST_ENTRY(ioreq) list;
    BlockAcctCookie acct;
};

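/*
 * Per-device backend state: xenstore configuration, the mapped shared ring,
 * the three ioreq lists (inflight, finished, freelist), persistent-grant
 * bookkeeping, and the BlockBackend used for the actual I/O.
 */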
struct XenBlkDev {
    struct XenDevice xendev;  /* must be first */
    char *params;
    char *mode;
    char *type;
    char *dev;
    char *devtype;
    bool directiosafe;
    const char *fileproto;
    const char *filename;
    int ring_ref;
    void *sring;
    int64_t file_blk;
    int64_t file_size;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    int cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int requests_total;
    int requests_inflight;
    int requests_finished;

    /* Persistent grants extension */
    gboolean feature_discard;
    gboolean feature_persistent;
    GTree *persistent_gnts;
    GSList *persistent_regions;
    unsigned int persistent_gnt_count;
    unsigned int max_grants;

    /* qemu block driver */
    DriveInfo *dinfo;
    BlockBackend *blk;
    QEMUBH *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

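/*
 * destroy_grant() is installed as the GDestroyNotify of the persistent_gnts
 * tree when batch_maps is off (see blk_connect), so destroying the tree in
 * blk_disconnect() unmaps every individually mapped persistent grant page.
 * remove_persistent_region() below does the same for batch-mapped regions.
 */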
static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xengnttab_unmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

static void remove_persistent_region(gpointer data, gpointer dev)
{
    PersistentRegion *region = data;
    struct XenBlkDev *blkdev = dev;
    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
        xen_be_printf(&blkdev->xendev, 0,
                      "xengnttab_unmap region %p failed: %s\n",
                      region->addr, strerror(errno));
    }
    xen_be_printf(&blkdev->xendev, 3,
                  "unmapped grant region %p with %d pages\n",
                  region->addr, region->num);
    g_free(region);
}

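/*
 * ioreq lifecycle: ioreq_start() takes a request from the freelist (or
 * allocates a new one, up to max_requests) and puts it on the inflight
 * list, ioreq_finish() moves it to the finished list once its aio has
 * completed, and ioreq_release() resets it and returns it to the freelist
 * after the response has been sent.
 */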
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i] = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static void ioreq_unmap(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "xengnttab_unmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "xengnttab_unmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

static int ioreq_map(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    PersistentRegion *region;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xengnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xengnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
        ioreq->blkdev->max_grants))) {
        /*
         * If we are using persistent grants and batch mappings only
         * add the new maps to the list of persistent grants if the whole
         * area can be persistently mapped.
         */
        if (batch_maps) {
            region = g_malloc0(sizeof(*region));
            region->addr = ioreq->pages;
            region->num = new_maps;
            ioreq->blkdev->persistent_regions = g_slist_append(
                                            ioreq->blkdev->persistent_regions,
                                            region);
        }
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
        assert(!batch_maps || new_maps == 0);
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

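/*
 * Completion callback for all aio submitted by ioreq_runio_qemu_aio():
 * re-submits the request once a presync flush has completed, waits until
 * every piece of the request is done, updates the I/O accounting and
 * schedules the bottom half so the response is pushed onto the ring.
 */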
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (ioreq->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        } else {
            block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}

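/*
 * Map the request's grant pages and submit it to the block layer as
 * asynchronous I/O (read, write, flush or discard); qemu_aio_complete()
 * runs when each piece finishes.
 */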
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_readv(blkdev->blk, ioreq->start / BLOCK_SIZE,
                      &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                      qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_writev(blkdev->blk, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
        ioreq->aio_inflight++;
        blk_aio_discard(blkdev->blk,
                        discard_req->sector_number, discard_req->nr_sectors,
                        qemu_aio_complete, ioreq);
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

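/*
 * Build the blkif response for one finished request and place it on the
 * response ring matching the negotiated protocol.  Returns nonzero when
 * the frontend needs to be notified via the event channel.
 */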
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t resp;
    void *dst;

    resp.id = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

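/*
 * Main request loop, run from the bottom half: send pending responses,
 * then pull, parse and submit requests from the ring until it is empty or
 * max_requests is reached; reschedules itself if work is left over.
 */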
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {

            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                /* fall through */
            default:
                break;
            };

            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xengnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}

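/*
 * First stage of backend setup (the ->init hook): read the backend
 * configuration from xenstore (params, mode, type, dev, device-type,
 * direct-io-safe) and advertise the supported features.  The block device
 * itself is opened later, in blk_connect().
 */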
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (!strcmp("vhd", blkdev->fileproto)) {
        blkdev->fileproto = "vpc";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL ||
        blkdev->type == NULL ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

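/*
 * Second stage of backend setup (the ->initialise hook), called once the
 * frontend is ready: open or reference the BlockBackend, publish sector
 * size and sector count, read the frontend's ring-ref, event channel,
 * protocol and persistent-grant support, map the shared ring and bind the
 * event channel.
 */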
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        QDict *options = NULL;

        if (strcmp(blkdev->fileproto, "<unset>")) {
            options = qdict_new();
            qdict_put(options, "driver", qstring_from_str(blkdev->fileproto));
        }

        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
                                   qflags, &local_err);
        if (!blkdev->blk) {
            xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk is not created by us, we get a reference
         * so we can blk_unref() unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_nofail(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        BlockDriverState *bs = blk_bs(blkdev->blk);
        const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
        xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      drv_name ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xengnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  batch_maps ?
                                                  (GDestroyNotify)g_free :
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_regions = NULL;
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->blk) {
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }

    /*
     * Unmap persistent grants before switching to the closed state
     * so the frontend can free them.
     *
     * In the !batch_maps case g_tree_destroy will take care of unmapping
     * the grant, but in the batch_maps case we need to iterate over every
     * region in persistent_regions and unmap it.
     */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
        assert(batch_maps || blkdev->persistent_gnt_count == 0);
        if (batch_maps) {
            blkdev->persistent_gnt_count = 0;
            g_slist_foreach(blkdev->persistent_regions,
                            (GFunc)remove_persistent_region, blkdev);
            g_slist_free(blkdev->persistent_regions);
        }
        blkdev->feature_persistent = false;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->blk || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

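/*
 * Operations table hooked into the generic Xen backend core.  The
 * DEVOPS_FLAG_NEED_GNTDEV flag asks the core for a grant-table handle
 * (xendev.gnttabdev), which the mapping code throughout this file relies on.
 */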
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};