1 | /* | |
2 | * GlusterFS backend for QEMU | |
3 | * | |
4 | * Copyright (C) 2012 Bharata B Rao <[email protected]> | |
5 | * | |
6 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | |
7 | * See the COPYING file in the top-level directory. | |
8 | * | |
9 | */ | |
10 | #include <glusterfs/api/glfs.h> | |
11 | #include "block/block_int.h" | |
12 | #include "qemu/uri.h" | |
13 | ||
/* Per-request state for asynchronous gluster I/O, shared between the
 * submitting coroutine and the gluster completion callback. */
typedef struct GlusterAIOCB {
    int64_t size;            /* expected transfer length in bytes */
    int ret;                 /* 0 on success, negative errno on failure */
    QEMUBH *bh;              /* BH used to re-enter the waiting coroutine */
    Coroutine *coroutine;    /* coroutine parked until completion */
    AioContext *aio_context; /* context the completion BH must run in */
} GlusterAIOCB;

/* Per-BlockDriverState gluster connection and open image handle */
typedef struct BDRVGlusterState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterState;

/* Result of parsing a gluster URI (see qemu_gluster_parseuri()) */
typedef struct GlusterConf {
    char *server;    /* hostname/IP, or socket path for the unix transport */
    int port;        /* 0 makes gluster use its default port */
    char *volname;   /* gluster volume name */
    char *image;     /* image path within the volume */
    char *transport; /* "tcp", "unix" or "rdma" */
} GlusterConf;
34 | ||
35 | static void qemu_gluster_gconf_free(GlusterConf *gconf) | |
36 | { | |
37 | if (gconf) { | |
38 | g_free(gconf->server); | |
39 | g_free(gconf->volname); | |
40 | g_free(gconf->image); | |
41 | g_free(gconf->transport); | |
42 | g_free(gconf); | |
43 | } | |
44 | } | |
45 | ||
46 | static int parse_volume_options(GlusterConf *gconf, char *path) | |
47 | { | |
48 | char *p, *q; | |
49 | ||
50 | if (!path) { | |
51 | return -EINVAL; | |
52 | } | |
53 | ||
54 | /* volume */ | |
55 | p = q = path + strspn(path, "/"); | |
56 | p += strcspn(p, "/"); | |
57 | if (*p == '\0') { | |
58 | return -EINVAL; | |
59 | } | |
60 | gconf->volname = g_strndup(q, p - q); | |
61 | ||
62 | /* image */ | |
63 | p += strspn(p, "/"); | |
64 | if (*p == '\0') { | |
65 | return -EINVAL; | |
66 | } | |
67 | gconf->image = g_strdup(p); | |
68 | return 0; | |
69 | } | |
70 | ||
71 | /* | |
72 | * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...] | |
73 | * | |
74 | * 'gluster' is the protocol. | |
75 | * | |
76 | * 'transport' specifies the transport type used to connect to gluster | |
77 | * management daemon (glusterd). Valid transport types are | |
78 | * tcp, unix and rdma. If a transport type isn't specified, then tcp | |
79 | * type is assumed. | |
80 | * | |
81 | * 'server' specifies the server where the volume file specification for | |
82 | * the given volume resides. This can be either hostname, ipv4 address | |
83 | * or ipv6 address. ipv6 address needs to be within square brackets [ ]. | |
84 | * If transport type is 'unix', then 'server' field should not be specified. | |
85 | * The 'socket' field needs to be populated with the path to unix domain | |
86 | * socket. | |
87 | * | |
88 | * 'port' is the port number on which glusterd is listening. This is optional | |
89 | * and if not specified, QEMU will send 0 which will make gluster to use the | |
90 | * default port. If the transport type is unix, then 'port' should not be | |
91 | * specified. | |
92 | * | |
93 | * 'volname' is the name of the gluster volume which contains the VM image. | |
94 | * | |
95 | * 'image' is the path to the actual VM image that resides on gluster volume. | |
96 | * | |
97 | * Examples: | |
98 | * | |
99 | * file=gluster://1.2.3.4/testvol/a.img | |
100 | * file=gluster+tcp://1.2.3.4/testvol/a.img | |
101 | * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img | |
102 | * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img | |
103 | * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img | |
104 | * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img | |
105 | * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket | |
106 | * file=gluster+rdma://1.2.3.4:24007/testvol/a.img | |
107 | */ | |
/*
 * Fill in *gconf from a gluster[+transport]:// URI (syntax documented in the
 * comment block above).  Returns 0 on success or -EINVAL for malformed
 * input; strings allocated into gconf are freed later by
 * qemu_gluster_gconf_free().
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport: the scheme suffix selects tcp (default), unix or rdma */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    /* The only accepted query parameter is socket=..., unix transport only */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* unix transport: no host/port allowed; server holds the socket path */
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server ? uri->server : "localhost");
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
168 | ||
169 | static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename, | |
170 | Error **errp) | |
171 | { | |
172 | struct glfs *glfs = NULL; | |
173 | int ret; | |
174 | int old_errno; | |
175 | ||
176 | ret = qemu_gluster_parseuri(gconf, filename); | |
177 | if (ret < 0) { | |
178 | error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/" | |
179 | "volname/image[?socket=...]"); | |
180 | errno = -ret; | |
181 | goto out; | |
182 | } | |
183 | ||
184 | glfs = glfs_new(gconf->volname); | |
185 | if (!glfs) { | |
186 | goto out; | |
187 | } | |
188 | ||
189 | ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server, | |
190 | gconf->port); | |
191 | if (ret < 0) { | |
192 | goto out; | |
193 | } | |
194 | ||
195 | /* | |
196 | * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when | |
197 | * GlusterFS makes GF_LOG_* macros available to libgfapi users. | |
198 | */ | |
199 | ret = glfs_set_logging(glfs, "-", 4); | |
200 | if (ret < 0) { | |
201 | goto out; | |
202 | } | |
203 | ||
204 | ret = glfs_init(glfs); | |
205 | if (ret) { | |
206 | error_setg_errno(errp, errno, | |
207 | "Gluster connection failed for server=%s port=%d " | |
208 | "volume=%s image=%s transport=%s", gconf->server, | |
209 | gconf->port, gconf->volname, gconf->image, | |
210 | gconf->transport); | |
211 | ||
212 | /* glfs_init sometimes doesn't set errno although docs suggest that */ | |
213 | if (errno == 0) | |
214 | errno = EINVAL; | |
215 | ||
216 | goto out; | |
217 | } | |
218 | return glfs; | |
219 | ||
220 | out: | |
221 | if (glfs) { | |
222 | old_errno = errno; | |
223 | glfs_fini(glfs); | |
224 | errno = old_errno; | |
225 | } | |
226 | return NULL; | |
227 | } | |
228 | ||
/* BH handler: runs in the request's AioContext thread; dispose of the BH
 * and resume the coroutine parked in the submitting request function. */
static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}
237 | ||
238 | /* | |
239 | * AIO callback routine called from GlusterFS thread. | |
240 | */ | |
241 | static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg) | |
242 | { | |
243 | GlusterAIOCB *acb = (GlusterAIOCB *)arg; | |
244 | ||
245 | if (!ret || ret == acb->size) { | |
246 | acb->ret = 0; /* Success */ | |
247 | } else if (ret < 0) { | |
248 | acb->ret = ret; /* Read/Write failed */ | |
249 | } else { | |
250 | acb->ret = -EIO; /* Partial read/write - fail it */ | |
251 | } | |
252 | ||
253 | acb->bh = aio_bh_new(acb->aio_context, qemu_gluster_complete_aio, acb); | |
254 | qemu_bh_schedule(acb->bh); | |
255 | } | |
256 | ||
/* TODO Convert to fine grained options */
/* Runtime options accepted by qemu_gluster_open(): only the gluster URI */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};
270 | ||
271 | static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags) | |
272 | { | |
273 | assert(open_flags != NULL); | |
274 | ||
275 | *open_flags |= O_BINARY; | |
276 | ||
277 | if (bdrv_flags & BDRV_O_RDWR) { | |
278 | *open_flags |= O_RDWR; | |
279 | } else { | |
280 | *open_flags |= O_RDONLY; | |
281 | } | |
282 | ||
283 | if ((bdrv_flags & BDRV_O_NOCACHE)) { | |
284 | *open_flags |= O_DIRECT; | |
285 | } | |
286 | } | |
287 | ||
288 | static int qemu_gluster_open(BlockDriverState *bs, QDict *options, | |
289 | int bdrv_flags, Error **errp) | |
290 | { | |
291 | BDRVGlusterState *s = bs->opaque; | |
292 | int open_flags = 0; | |
293 | int ret = 0; | |
294 | GlusterConf *gconf = g_new0(GlusterConf, 1); | |
295 | QemuOpts *opts; | |
296 | Error *local_err = NULL; | |
297 | const char *filename; | |
298 | ||
299 | opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); | |
300 | qemu_opts_absorb_qdict(opts, options, &local_err); | |
301 | if (local_err) { | |
302 | error_propagate(errp, local_err); | |
303 | ret = -EINVAL; | |
304 | goto out; | |
305 | } | |
306 | ||
307 | filename = qemu_opt_get(opts, "filename"); | |
308 | ||
309 | s->glfs = qemu_gluster_init(gconf, filename, errp); | |
310 | if (!s->glfs) { | |
311 | ret = -errno; | |
312 | goto out; | |
313 | } | |
314 | ||
315 | qemu_gluster_parse_flags(bdrv_flags, &open_flags); | |
316 | ||
317 | s->fd = glfs_open(s->glfs, gconf->image, open_flags); | |
318 | if (!s->fd) { | |
319 | ret = -errno; | |
320 | } | |
321 | ||
322 | out: | |
323 | qemu_opts_del(opts); | |
324 | qemu_gluster_gconf_free(gconf); | |
325 | if (!ret) { | |
326 | return ret; | |
327 | } | |
328 | if (s->fd) { | |
329 | glfs_close(s->fd); | |
330 | } | |
331 | if (s->glfs) { | |
332 | glfs_fini(s->glfs); | |
333 | } | |
334 | return ret; | |
335 | } | |
336 | ||
/* New connection/fd built by reopen_prepare; adopted by reopen_commit or
 * torn down by reopen_abort. */
typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterReopenState;
341 | ||
342 | ||
/*
 * Prepare a reopen: build a brand-new gluster connection and open a new fd
 * with the new flags, leaving the current connection untouched.  The new
 * handles live in state->opaque until _commit adopts them; on failure they
 * are released by _abort.  Returns 0 or a negative errno value.
 */
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
                                       BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterReopenState *reop_s;
    GlusterConf *gconf = NULL;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    state->opaque = g_new0(BDRVGlusterReopenState, 1);
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    gconf = g_new0(GlusterConf, 1);

    /* Re-parse the original filename to get volume/image for the new fd */
    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

    reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags);
    if (reop_s->fd == NULL) {
        /* reops->glfs will be cleaned up in _abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* state->opaque will be freed in either the _abort or _commit */
    qemu_gluster_gconf_free(gconf);
    return ret;
}
379 | ||
380 | static void qemu_gluster_reopen_commit(BDRVReopenState *state) | |
381 | { | |
382 | BDRVGlusterReopenState *reop_s = state->opaque; | |
383 | BDRVGlusterState *s = state->bs->opaque; | |
384 | ||
385 | ||
386 | /* close the old */ | |
387 | if (s->fd) { | |
388 | glfs_close(s->fd); | |
389 | } | |
390 | if (s->glfs) { | |
391 | glfs_fini(s->glfs); | |
392 | } | |
393 | ||
394 | /* use the newly opened image / connection */ | |
395 | s->fd = reop_s->fd; | |
396 | s->glfs = reop_s->glfs; | |
397 | ||
398 | g_free(state->opaque); | |
399 | state->opaque = NULL; | |
400 | ||
401 | return; | |
402 | } | |
403 | ||
404 | ||
405 | static void qemu_gluster_reopen_abort(BDRVReopenState *state) | |
406 | { | |
407 | BDRVGlusterReopenState *reop_s = state->opaque; | |
408 | ||
409 | if (reop_s == NULL) { | |
410 | return; | |
411 | } | |
412 | ||
413 | if (reop_s->fd) { | |
414 | glfs_close(reop_s->fd); | |
415 | } | |
416 | ||
417 | if (reop_s->glfs) { | |
418 | glfs_fini(reop_s->glfs); | |
419 | } | |
420 | ||
421 | g_free(state->opaque); | |
422 | state->opaque = NULL; | |
423 | ||
424 | return; | |
425 | } | |
426 | ||
#ifdef CONFIG_GLUSTERFS_ZEROFILL
/*
 * bdrv_co_write_zeroes handler: zero @nb_sectors starting at @sector_num via
 * gluster's asynchronous zerofill API, then yield until
 * gluster_finish_aiocb() reschedules the coroutine with the result.
 * Returns 0 or a negative errno value.
 */
static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        /* submission failed; no completion callback will run */
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

/* Whether the installed libgfapi provides glfs_zerofill() */
static inline bool gluster_supports_zerofill(void)
{
    return true;    /* was "return 1" -- use bool literals for a bool */
}

/* Synchronous zerofill used for full preallocation at image-create time */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}

#else
static inline bool gluster_supports_zerofill(void)
{
    return false;   /* was "return 0" -- use bool literals for a bool */
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return 0;
}
#endif
479 | ||
480 | static int qemu_gluster_create(const char *filename, | |
481 | QemuOpts *opts, Error **errp) | |
482 | { | |
483 | struct glfs *glfs; | |
484 | struct glfs_fd *fd; | |
485 | int ret = 0; | |
486 | int prealloc = 0; | |
487 | int64_t total_size = 0; | |
488 | char *tmp = NULL; | |
489 | GlusterConf *gconf = g_new0(GlusterConf, 1); | |
490 | ||
491 | glfs = qemu_gluster_init(gconf, filename, errp); | |
492 | if (!glfs) { | |
493 | ret = -errno; | |
494 | goto out; | |
495 | } | |
496 | ||
497 | total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), | |
498 | BDRV_SECTOR_SIZE); | |
499 | ||
500 | tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); | |
501 | if (!tmp || !strcmp(tmp, "off")) { | |
502 | prealloc = 0; | |
503 | } else if (!strcmp(tmp, "full") && | |
504 | gluster_supports_zerofill()) { | |
505 | prealloc = 1; | |
506 | } else { | |
507 | error_setg(errp, "Invalid preallocation mode: '%s'" | |
508 | " or GlusterFS doesn't support zerofill API", | |
509 | tmp); | |
510 | ret = -EINVAL; | |
511 | goto out; | |
512 | } | |
513 | ||
514 | fd = glfs_creat(glfs, gconf->image, | |
515 | O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR); | |
516 | if (!fd) { | |
517 | ret = -errno; | |
518 | } else { | |
519 | if (!glfs_ftruncate(fd, total_size)) { | |
520 | if (prealloc && qemu_gluster_zerofill(fd, 0, total_size)) { | |
521 | ret = -errno; | |
522 | } | |
523 | } else { | |
524 | ret = -errno; | |
525 | } | |
526 | ||
527 | if (glfs_close(fd) != 0) { | |
528 | ret = -errno; | |
529 | } | |
530 | } | |
531 | out: | |
532 | g_free(tmp); | |
533 | qemu_gluster_gconf_free(gconf); | |
534 | if (glfs) { | |
535 | glfs_fini(glfs); | |
536 | } | |
537 | return ret; | |
538 | } | |
539 | ||
/*
 * Common coroutine read/write path: submit an asynchronous preadv/pwritev
 * to gluster, then yield until gluster_finish_aiocb() reschedules us with
 * the result in acb->ret (0, or a negative errno value).
 */
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;   /* compared against the transfer count on completion */
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        /* submission failed; no completion callback will run */
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
574 | ||
575 | static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset) | |
576 | { | |
577 | int ret; | |
578 | BDRVGlusterState *s = bs->opaque; | |
579 | ||
580 | ret = glfs_ftruncate(s->fd, offset); | |
581 | if (ret < 0) { | |
582 | return -errno; | |
583 | } | |
584 | ||
585 | return 0; | |
586 | } | |
587 | ||
/* bdrv_co_readv handler: delegate to the common r/w path in read mode */
static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}
593 | ||
/* bdrv_co_writev handler: delegate to the common r/w path in write mode */
static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}
599 | ||
/*
 * Flush handler: issue an asynchronous fsync on the gluster fd and yield
 * until the completion callback resumes us.  acb->size is 0, so a
 * zero-return from gluster counts as success in gluster_finish_aiocb().
 */
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        /* submission failed; no completion callback will run */
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
624 | ||
#ifdef CONFIG_GLUSTERFS_DISCARD
/*
 * Discard handler: submit an asynchronous discard for the sector range and
 * yield until the completion callback resumes us.  acb->size is 0 so
 * gluster's zero return maps to success in gluster_finish_aiocb().
 */
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        /* submission failed; no completion callback will run */
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
#endif
654 | ||
655 | static int64_t qemu_gluster_getlength(BlockDriverState *bs) | |
656 | { | |
657 | BDRVGlusterState *s = bs->opaque; | |
658 | int64_t ret; | |
659 | ||
660 | ret = glfs_lseek(s->fd, 0, SEEK_END); | |
661 | if (ret < 0) { | |
662 | return -errno; | |
663 | } else { | |
664 | return ret; | |
665 | } | |
666 | } | |
667 | ||
668 | static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs) | |
669 | { | |
670 | BDRVGlusterState *s = bs->opaque; | |
671 | struct stat st; | |
672 | int ret; | |
673 | ||
674 | ret = glfs_fstat(s->fd, &st); | |
675 | if (ret < 0) { | |
676 | return -errno; | |
677 | } else { | |
678 | return st.st_blocks * 512; | |
679 | } | |
680 | } | |
681 | ||
682 | static void qemu_gluster_close(BlockDriverState *bs) | |
683 | { | |
684 | BDRVGlusterState *s = bs->opaque; | |
685 | ||
686 | if (s->fd) { | |
687 | glfs_close(s->fd); | |
688 | s->fd = NULL; | |
689 | } | |
690 | glfs_fini(s->glfs); | |
691 | } | |
692 | ||
/* A GlusterFS volume could be backed by a block device, so we cannot
 * promise that a newly created image reads back as zeroes. */
static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    return 0;
}
698 | ||
/* Options accepted by qemu_gluster_create() */
static QemuOptsList qemu_gluster_create_opts = {
    .name = "qemu-gluster-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, full)"
        },
        { /* end of list */ }
    }
};
716 | ||
/* Driver for the plain "gluster" scheme (tcp transport by default) */
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
743 | ||
/* Driver for the explicit "gluster+tcp" scheme; same callbacks as above */
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
770 | ||
/* Driver for the "gluster+unix" scheme (unix-domain socket transport) */
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
797 | ||
/* Driver for the "gluster+rdma" scheme (RDMA transport) */
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_opts                  = &qemu_gluster_create_opts,
};
824 | ||
/* Register one BlockDriver per supported transport scheme at startup */
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);