2 * GlusterFS backend for QEMU
6 * Pipe handling mechanism in AIO implementation is derived from
12 * This work is licensed under the terms of the GNU GPL, version 2. See
13 * the COPYING file in the top-level directory.
15 * Contributions after 2012-01-13 are licensed under the terms of the
16 * GNU GPL, version 2 or (at your option) any later version.
18 #include <glusterfs/api/glfs.h>
19 #include "block/block_int.h"
20 #include "qemu/sockets.h"
/*
 * Per-request AIO control block. Embeds the generic BlockDriverAIOCB
 * header ("common") so it can be handed to the generic AIO layer.
 * NOTE(review): the remaining fields (size, ret, finished, ... as used by
 * qemu_gluster_complete_aio below) fall outside this elided view.
 */
23 typedef struct GlusterAIOCB {
24 BlockDriverAIOCB common;
/*
 * Per-BlockDriverState driver state: holds the glfs connection handle,
 * the open glfs fd, the notification pipe (fds[]) and the AIOCB pointer
 * currently being read from the pipe (event_acb / event_reader_pos).
 */
31 typedef struct BDRVGlusterState {
36 GlusterAIOCB *event_acb;
/*
 * Indices into BDRVGlusterState.fds: the read end is consumed by
 * qemu_gluster_aio_event_reader() in the QEMU event loop; the write end
 * is written by gluster_finish_aiocb() from the gluster callback thread.
 */
39 #define GLUSTER_FD_READ 0
40 #define GLUSTER_FD_WRITE 1
/*
 * Parsed components of a gluster URI: server, port, volname, image and
 * transport (all heap-allocated strings, freed by qemu_gluster_gconf_free).
 */
42 typedef struct GlusterConf {
/*
 * Release a GlusterConf and the strings it owns.
 * NOTE(review): the g_free of gconf->image and of gconf itself are in
 * lines elided from this view — confirm all fields are freed.
 */
50 static void qemu_gluster_gconf_free(GlusterConf *gconf)
52 g_free(gconf->server);
53 g_free(gconf->volname);
55 g_free(gconf->transport);
/*
 * Split a URI path of the form /volname/image into gconf->volname and
 * gconf->image. Leading '/' characters are skipped with strspn; the
 * first path component becomes the volume name, the remainder the image
 * path. Error checks on empty components are in elided lines.
 */
59 static int parse_volume_options(GlusterConf *gconf, char *path)
68 p = q = path + strspn(path, "/");
73 gconf->volname = g_strndup(q, p - q);
80 gconf->image = g_strdup(p);
85 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
87 * 'gluster' is the protocol.
89 * 'transport' specifies the transport type used to connect to gluster
90 * management daemon (glusterd). Valid transport types are
91 * tcp, unix and rdma. If a transport type isn't specified, then tcp
94 * 'server' specifies the server where the volume file specification for
95 * the given volume resides. This can be either hostname, ipv4 address
96 * or ipv6 address. ipv6 address needs to be within square brackets [ ].
97 * If transport type is 'unix', then 'server' field should not be specified.
98 * The 'socket' field needs to be populated with the path to unix domain
101 * 'port' is the port number on which glusterd is listening. This is optional
102 * and if not specified, QEMU will send 0, which makes gluster use the
103 * default port. If the transport type is unix, then 'port' should not be
106 * 'volname' is the name of the gluster volume which contains the VM image.
108 * 'image' is the path to the actual VM image that resides on gluster volume.
112 * file=gluster://1.2.3.4/testvol/a.img
113 * file=gluster+tcp://1.2.3.4/testvol/a.img
114 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
115 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
116 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
117 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
118 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
119 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
/*
 * Parse a gluster[+transport]://... URI (format documented above) into
 * *gconf. Returns 0 on success, negative on malformed input.
 */
121 static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
124 QueryParams *qp = NULL;
125 bool is_unix = false;
128 uri = uri_parse(filename);
/* Map the URI scheme to a transport name; bare "gluster" defaults to tcp. */
134 if (!strcmp(uri->scheme, "gluster")) {
135 gconf->transport = g_strdup("tcp");
136 } else if (!strcmp(uri->scheme, "gluster+tcp")) {
137 gconf->transport = g_strdup("tcp");
138 } else if (!strcmp(uri->scheme, "gluster+unix")) {
139 gconf->transport = g_strdup("unix");
141 } else if (!strcmp(uri->scheme, "gluster+rdma")) {
142 gconf->transport = g_strdup("rdma");
148 ret = parse_volume_options(gconf, uri->path);
/*
 * Exactly one query parameter ("socket") is required for unix transport
 * and none is allowed otherwise.
 */
153 qp = query_params_parse(uri->query);
154 if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
/* unix transport: server/port come from the socket= parameter, not the URI. */
160 if (uri->server || uri->port) {
164 if (strcmp(qp->p[0].name, "socket")) {
168 gconf->server = g_strdup(qp->p[0].value);
170 gconf->server = g_strdup(uri->server);
171 gconf->port = uri->port;
176 query_params_free(qp);
/*
 * Parse @filename into @gconf, then create and initialize a libgfapi
 * connection: glfs_new -> glfs_set_volfile_server -> glfs_set_logging ->
 * glfs_init. Returns the glfs handle, or NULL on failure (cleanup paths
 * are in elided lines).
 */
182 static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename)
184 struct glfs *glfs = NULL;
188 ret = qemu_gluster_parseuri(gconf, filename);
190 error_report("Usage: file=gluster[+transport]://[server[:port]]/"
191 "volname/image[?socket=...]");
196 glfs = glfs_new(gconf->volname);
201 ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
208 * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
209 * GlusterFS makes GF_LOG_* macros available to libgfapi users.
211 ret = glfs_set_logging(glfs, "-", 4);
216 ret = glfs_init(glfs);
218 error_report("Gluster connection failed for server=%s port=%d "
219 "volume=%s image=%s transport=%s", gconf->server, gconf->port,
220 gconf->volname, gconf->image, gconf->transport);
/*
 * Complete one AIO request in the QEMU thread. Maps the raw gluster
 * return value to the block-layer convention: full-size transfer (or 0)
 * -> success, negative -> pass the errno through, short transfer -> -EIO.
 * cb/opaque/finished are copied out before the acb is released.
 */
234 static void qemu_gluster_complete_aio(GlusterAIOCB *acb, BDRVGlusterState *s)
237 bool *finished = acb->finished;
238 BlockDriverCompletionFunc *cb = acb->common.cb;
239 void *opaque = acb->common.opaque;
241 if (!acb->ret || acb->ret == acb->size) {
242 ret = 0; /* Success */
243 } else if (acb->ret < 0) {
244 ret = acb->ret; /* Read/Write failed */
246 ret = -EIO; /* Partial read/write - fail it */
249 qemu_aio_release(acb);
/*
 * Event-loop read handler for the notification pipe. A GlusterAIOCB
 * pointer may arrive in pieces, so event_reader_pos tracks partial
 * reads; once sizeof(event_acb) bytes have accumulated, the request is
 * completed. The loop retries reads interrupted by EINTR.
 */
256 static void qemu_gluster_aio_event_reader(void *opaque)
258 BDRVGlusterState *s = opaque;
/* Reassemble the pointer value byte-by-byte into s->event_acb. */
262 char *p = (char *)&s->event_acb;
264 ret = read(s->fds[GLUSTER_FD_READ], p + s->event_reader_pos,
265 sizeof(s->event_acb) - s->event_reader_pos);
267 s->event_reader_pos += ret;
268 if (s->event_reader_pos == sizeof(s->event_acb)) {
269 s->event_reader_pos = 0;
270 qemu_gluster_complete_aio(s->event_acb, s);
273 } while (ret < 0 && errno == EINTR);
276 /* TODO Convert to fine grained options */
/* Runtime options accepted by qemu_gluster_open: a single "filename" URL. */
277 static QemuOptsList runtime_opts = {
279 .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
283 .type = QEMU_OPT_STRING,
284 .help = "URL to the gluster image",
286 { /* end of list */ }
/*
 * Open a gluster image: absorb the "filename" option, establish the
 * libgfapi connection, translate QEMU open flags to POSIX flags, open
 * the image, and create the non-blocking notification pipe whose read
 * end is wired into the AIO event loop. gconf is freed on exit.
 */
290 static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
293 BDRVGlusterState *s = bs->opaque;
294 int open_flags = O_BINARY;
296 GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
298 Error *local_err = NULL;
299 const char *filename;
301 opts = qemu_opts_create_nofail(&runtime_opts);
302 qemu_opts_absorb_qdict(opts, options, &local_err);
303 if (error_is_set(&local_err)) {
304 qerror_report_err(local_err);
305 error_free(local_err);
310 filename = qemu_opt_get(opts, "filename");
313 s->glfs = qemu_gluster_init(gconf, filename);
/* Translate QEMU block-layer flags to POSIX open flags. */
319 if (bdrv_flags & BDRV_O_RDWR) {
320 open_flags |= O_RDWR;
322 open_flags |= O_RDONLY;
325 if ((bdrv_flags & BDRV_O_NOCACHE)) {
326 open_flags |= O_DIRECT;
329 s->fd = glfs_open(s->glfs, gconf->image, open_flags);
/* Pipe used by the gluster callback thread to wake the QEMU event loop. */
335 ret = qemu_pipe(s->fds);
340 fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK);
341 qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ],
342 qemu_gluster_aio_event_reader, NULL, s);
346 qemu_gluster_gconf_free(gconf);
/*
 * Create a new image on a gluster volume: connect, read the "size"
 * creation option (stored internally in 512-byte sectors), create the
 * file and truncate it to the requested length.
 */
359 static int qemu_gluster_create(const char *filename,
360 QEMUOptionParameter *options)
365 int64_t total_size = 0;
366 GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
368 glfs = qemu_gluster_init(gconf, filename);
/* Scan creation options; only BLOCK_OPT_SIZE is honoured here. */
374 while (options && options->name) {
375 if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
376 total_size = options->value.n / BDRV_SECTOR_SIZE;
381 fd = glfs_creat(glfs, gconf->image,
382 O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
386 if (glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE) != 0) {
389 if (glfs_close(fd) != 0) {
394 qemu_gluster_gconf_free(gconf);
/*
 * Cancel callback: gluster requests cannot be aborted, so point
 * acb->finished at a local flag and (in elided lines, presumably) wait
 * for completion — TODO confirm the wait loop in the full source.
 */
401 static void qemu_gluster_aio_cancel(BlockDriverAIOCB *blockacb)
403 GlusterAIOCB *acb = (GlusterAIOCB *)blockacb;
404 bool finished = false;
406 acb->finished = &finished;
/* AIOCB allocation/cancel descriptor for GlusterAIOCB requests. */
412 static const AIOCBInfo gluster_aiocb_info = {
413 .aiocb_size = sizeof(GlusterAIOCB),
414 .cancel = qemu_gluster_aio_cancel,
/*
 * libgfapi completion callback — runs in a gluster thread, NOT the QEMU
 * event loop. Notifies the QEMU thread by writing the acb pointer to
 * the pipe; qemu_gluster_aio_event_reader() picks it up on the other end.
 */
417 static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
419 GlusterAIOCB *acb = (GlusterAIOCB *)arg;
420 BlockDriverState *bs = acb->common.bs;
421 BDRVGlusterState *s = bs->opaque;
425 retval = qemu_write_full(s->fds[GLUSTER_FD_WRITE], &acb, sizeof(acb));
426 if (retval != sizeof(acb)) {
428 * Gluster AIO callback thread failed to notify the waiting
429 * QEMU thread about IO completion.
431 * Complete this IO request and make the disk inaccessible for
432 * subsequent reads and writes.
434 error_report("Gluster failed to notify QEMU about IO completion");
436 qemu_mutex_lock_iothread(); /* We are in gluster thread context */
437 acb->common.cb(acb->common.opaque, -EIO);
438 qemu_aio_release(acb);
439 close(s->fds[GLUSTER_FD_READ]);
440 close(s->fds[GLUSTER_FD_WRITE]);
/*
 * NOTE(review): the fd handler is unregistered AFTER the fd has been
 * closed; unregistering before close() would avoid acting on a stale
 * (and potentially reused) descriptor.
 */
441 qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL);
442 bs->drv = NULL; /* Make the disk inaccessible */
443 qemu_mutex_unlock_iothread();
/*
 * Common async read/write path. Converts sector units to bytes, fills a
 * GlusterAIOCB and submits glfs_pwritev_async or glfs_preadv_async
 * depending on @write; gluster_finish_aiocb fires on completion. On
 * submission failure the acb is released (error path partly elided).
 */
447 static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
448 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
449 BlockDriverCompletionFunc *cb, void *opaque, int write)
453 BDRVGlusterState *s = bs->opaque;
457 offset = sector_num * BDRV_SECTOR_SIZE;
458 size = nb_sectors * BDRV_SECTOR_SIZE;
460 acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
463 acb->finished = NULL;
466 ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
467 &gluster_finish_aiocb, acb);
469 ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
470 &gluster_finish_aiocb, acb);
479 qemu_aio_release(acb);
/* Synchronous truncate: delegate directly to glfs_ftruncate on the open fd. */
483 static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
486 BDRVGlusterState *s = bs->opaque;
488 ret = glfs_ftruncate(s->fd, offset);
/* Async read: thin wrapper over qemu_gluster_aio_rw with write=0. */
496 static BlockDriverAIOCB *qemu_gluster_aio_readv(BlockDriverState *bs,
497 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
498 BlockDriverCompletionFunc *cb, void *opaque)
500 return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
/* Async write: thin wrapper over qemu_gluster_aio_rw with write=1. */
503 static BlockDriverAIOCB *qemu_gluster_aio_writev(BlockDriverState *bs,
504 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
505 BlockDriverCompletionFunc *cb, void *opaque)
507 return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
/*
 * Async flush: submit glfs_fsync_async with the shared completion
 * callback; the acb is released if submission fails.
 */
510 static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs,
511 BlockDriverCompletionFunc *cb, void *opaque)
515 BDRVGlusterState *s = bs->opaque;
517 acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
520 acb->finished = NULL;
522 ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
529 qemu_aio_release(acb);
533 #ifdef CONFIG_GLUSTERFS_DISCARD
/*
 * Async discard (only when libgfapi provides glfs_discard_async):
 * convert sectors to bytes and submit with the shared completion
 * callback; release the acb if submission fails.
 */
534 static BlockDriverAIOCB *qemu_gluster_aio_discard(BlockDriverState *bs,
535 int64_t sector_num, int nb_sectors, BlockDriverCompletionFunc *cb,
540 BDRVGlusterState *s = bs->opaque;
544 offset = sector_num * BDRV_SECTOR_SIZE;
545 size = nb_sectors * BDRV_SECTOR_SIZE;
547 acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
550 acb->finished = NULL;
552 ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
559 qemu_aio_release(acb);
/* Image length in bytes, obtained by seeking to the end of the glfs fd. */
564 static int64_t qemu_gluster_getlength(BlockDriverState *bs)
566 BDRVGlusterState *s = bs->opaque;
569 ret = glfs_lseek(s->fd, 0, SEEK_END);
/*
 * Actually-allocated size in bytes: st_blocks counts 512-byte units
 * regardless of the filesystem block size, hence the fixed * 512.
 */
577 static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
579 BDRVGlusterState *s = bs->opaque;
583 ret = glfs_fstat(s->fd, &st);
587 return st.st_blocks * 512;
/*
 * Tear down the driver state: close the notification pipe and remove
 * its event-loop handler (glfs fd / connection teardown is in elided
 * lines). NOTE(review): as in gluster_finish_aiocb, the handler is
 * unregistered after close(); unregister-then-close would be safer.
 */
591 static void qemu_gluster_close(BlockDriverState *bs)
593 BDRVGlusterState *s = bs->opaque;
595 close(s->fds[GLUSTER_FD_READ]);
596 close(s->fds[GLUSTER_FD_WRITE]);
597 qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL);
/*
 * Report whether newly created images read back as zeroes. A gluster
 * volume may be backed by a block device, so this cannot be guaranteed
 * (the return value itself is in an elided line).
 */
606 static int qemu_gluster_has_zero_init(BlockDriverState *bs)
608 /* GlusterFS volume could be backed by a block device */
/* Creation options advertised by all four gluster drivers: size only. */
612 static QEMUOptionParameter qemu_gluster_create_options[] = {
614 .name = BLOCK_OPT_SIZE,
616 .help = "Virtual disk size"
/*
 * Driver for the bare "gluster" protocol prefix (transport defaults to
 * tcp in qemu_gluster_parseuri). The four bdrv_gluster* tables differ
 * only in .protocol_name.
 */
621 static BlockDriver bdrv_gluster = {
622 .format_name = "gluster",
623 .protocol_name = "gluster",
624 .instance_size = sizeof(BDRVGlusterState),
625 .bdrv_file_open = qemu_gluster_open,
626 .bdrv_close = qemu_gluster_close,
627 .bdrv_create = qemu_gluster_create,
628 .bdrv_getlength = qemu_gluster_getlength,
629 .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
630 .bdrv_truncate = qemu_gluster_truncate,
631 .bdrv_aio_readv = qemu_gluster_aio_readv,
632 .bdrv_aio_writev = qemu_gluster_aio_writev,
633 .bdrv_aio_flush = qemu_gluster_aio_flush,
634 .bdrv_has_zero_init = qemu_gluster_has_zero_init,
635 #ifdef CONFIG_GLUSTERFS_DISCARD
636 .bdrv_aio_discard = qemu_gluster_aio_discard,
638 .create_options = qemu_gluster_create_options,
/* Driver for the explicit "gluster+tcp" protocol prefix. */
641 static BlockDriver bdrv_gluster_tcp = {
642 .format_name = "gluster",
643 .protocol_name = "gluster+tcp",
644 .instance_size = sizeof(BDRVGlusterState),
645 .bdrv_file_open = qemu_gluster_open,
646 .bdrv_close = qemu_gluster_close,
647 .bdrv_create = qemu_gluster_create,
648 .bdrv_getlength = qemu_gluster_getlength,
649 .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
650 .bdrv_truncate = qemu_gluster_truncate,
651 .bdrv_aio_readv = qemu_gluster_aio_readv,
652 .bdrv_aio_writev = qemu_gluster_aio_writev,
653 .bdrv_aio_flush = qemu_gluster_aio_flush,
654 .bdrv_has_zero_init = qemu_gluster_has_zero_init,
655 #ifdef CONFIG_GLUSTERFS_DISCARD
656 .bdrv_aio_discard = qemu_gluster_aio_discard,
658 .create_options = qemu_gluster_create_options,
/* Driver for the "gluster+unix" protocol prefix (unix-domain socket). */
661 static BlockDriver bdrv_gluster_unix = {
662 .format_name = "gluster",
663 .protocol_name = "gluster+unix",
664 .instance_size = sizeof(BDRVGlusterState),
665 .bdrv_file_open = qemu_gluster_open,
666 .bdrv_close = qemu_gluster_close,
667 .bdrv_create = qemu_gluster_create,
668 .bdrv_getlength = qemu_gluster_getlength,
669 .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
670 .bdrv_truncate = qemu_gluster_truncate,
671 .bdrv_aio_readv = qemu_gluster_aio_readv,
672 .bdrv_aio_writev = qemu_gluster_aio_writev,
673 .bdrv_aio_flush = qemu_gluster_aio_flush,
674 .bdrv_has_zero_init = qemu_gluster_has_zero_init,
675 #ifdef CONFIG_GLUSTERFS_DISCARD
676 .bdrv_aio_discard = qemu_gluster_aio_discard,
678 .create_options = qemu_gluster_create_options,
/* Driver for the "gluster+rdma" protocol prefix. */
681 static BlockDriver bdrv_gluster_rdma = {
682 .format_name = "gluster",
683 .protocol_name = "gluster+rdma",
684 .instance_size = sizeof(BDRVGlusterState),
685 .bdrv_file_open = qemu_gluster_open,
686 .bdrv_close = qemu_gluster_close,
687 .bdrv_create = qemu_gluster_create,
688 .bdrv_getlength = qemu_gluster_getlength,
689 .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
690 .bdrv_truncate = qemu_gluster_truncate,
691 .bdrv_aio_readv = qemu_gluster_aio_readv,
692 .bdrv_aio_writev = qemu_gluster_aio_writev,
693 .bdrv_aio_flush = qemu_gluster_aio_flush,
694 .bdrv_has_zero_init = qemu_gluster_has_zero_init,
695 #ifdef CONFIG_GLUSTERFS_DISCARD
696 .bdrv_aio_discard = qemu_gluster_aio_discard,
698 .create_options = qemu_gluster_create_options,
/* Register all four protocol variants with the block layer at startup. */
701 static void bdrv_gluster_init(void)
703 bdrv_register(&bdrv_gluster_rdma);
704 bdrv_register(&bdrv_gluster_unix);
705 bdrv_register(&bdrv_gluster_tcp);
706 bdrv_register(&bdrv_gluster);
709 block_init(bdrv_gluster_init);