/*
 * QEMU Block driver for RADOS (Ceph)
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu-common.h"
#include "qemu-error.h"
#include "block_int.h"

#include <rbd/librbd.h>
/*
 * When specifying the image filename use:
 *
 * rbd:poolname/devicename[@snapshotname][:option1=value1[:option2=value2...]]
 *
 * poolname must be the name of an existing rados pool.
 *
 * devicename is the name of the rbd image.
 *
 * Each option given is used to configure rados, and may be any valid
 * Ceph option, "id", or "conf".
 *
 * The "id" option indicates what user we should authenticate as to
 * the Ceph cluster. If it is excluded we will use the Ceph default
 * (normally 'admin').
 *
 * The "conf" option specifies a Ceph configuration file to read. If
 * it is not specified, we will read from the default Ceph locations
 * (e.g., /etc/ceph/ceph.conf). To avoid reading _any_ configuration
 * file, specify conf=/dev/null.
 *
 * Configuration values containing :, @, or = can be escaped with a
 * leading '\'.
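 *
 * For illustration only (the pool, image, and snapshot names below are
 * made up), a filename such as:
 *
 *   rbd:rbd/myimage@snap1:id=admin:conf=/etc/ceph/ceph.conf
 *
 * opens snapshot "snap1" of the image "myimage" in the pool "rbd",
 * authenticating as user "admin" and reading the given configuration file.
 */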
/* rbd_aio_discard added in 0.1.2 */
#if LIBRBD_VERSION_CODE >= LIBRBD_VERSION(0, 1, 2)
#define LIBRBD_SUPPORTS_DISCARD
#else
#undef LIBRBD_SUPPORTS_DISCARD
#endif
#define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER)

#define RBD_MAX_CONF_NAME_SIZE 128
#define RBD_MAX_CONF_VAL_SIZE 512
#define RBD_MAX_CONF_SIZE 1024
#define RBD_MAX_POOL_NAME_SIZE 128
#define RBD_MAX_SNAP_NAME_SIZE 128
#define RBD_MAX_SNAPS 100
typedef struct RBDAIOCB {
    BlockDriverAIOCB common;
    struct BDRVRBDState *s;

typedef struct RADOSCB {
    struct BDRVRBDState *s;

#define RBD_FD_WRITE 1

typedef struct BDRVRBDState {
    char name[RBD_MAX_IMAGE_NAME_SIZE];
    int event_reader_pos;
static void rbd_aio_bh_cb(void *opaque);

static int qemu_rbd_next_tok(char *dst, int dst_len,
                             char *src, char delim,
    for (end = src; *end; ++end) {
        if (*end == '\\' && end[1] != '\0') {
        error_report("%s too long", name);
        error_report("%s too short", name);
    pstrcpy(dst, dst_len, src);
static void qemu_rbd_unescape(char *src)
    for (p = src; *src; ++src, ++p) {
        if (*src == '\\' && src[1] != '\0') {
static int qemu_rbd_parsename(const char *filename,
                              char *pool, int pool_len,
                              char *snap, int snap_len,
                              char *name, int name_len,
                              char *conf, int conf_len)
    if (!strstart(filename, "rbd:", &start)) {

    buf = g_strdup(start);

    ret = qemu_rbd_next_tok(pool, pool_len, p, '/', "pool name", &p);
    qemu_rbd_unescape(pool);

    if (strchr(p, '@')) {
        ret = qemu_rbd_next_tok(name, name_len, p, '@', "object name", &p);
        ret = qemu_rbd_next_tok(snap, snap_len, p, ':', "snap name", &p);
        qemu_rbd_unescape(snap);
    } else {
        ret = qemu_rbd_next_tok(name, name_len, p, ':', "object name", &p);
    }
    qemu_rbd_unescape(name);

    ret = qemu_rbd_next_tok(conf, conf_len, p, '\0', "configuration", &p);
static char *qemu_rbd_parse_clientname(const char *conf, char *clientname)
    const char *p = conf;
        const char *end = strchr(p, ':');
        if (strncmp(p, "id=", 3) == 0) {
            strncpy(clientname, p + 3, len);
            clientname[len] = '\0';
static int qemu_rbd_set_conf(rados_t cluster, const char *conf)
    char name[RBD_MAX_CONF_NAME_SIZE];
    char value[RBD_MAX_CONF_VAL_SIZE];

    buf = g_strdup(conf);

        ret = qemu_rbd_next_tok(name, sizeof(name), p,
                                '=', "conf option name", &p);
        qemu_rbd_unescape(name);

            error_report("conf option %s has no value", name);

        ret = qemu_rbd_next_tok(value, sizeof(value), p,
                                ':', "conf option value", &p);
        qemu_rbd_unescape(value);
        if (strcmp(name, "conf") == 0) {
            ret = rados_conf_read_file(cluster, value);
                error_report("error reading conf file %s", value);
        } else if (strcmp(name, "id") == 0) {
            /* ignore, this is parsed by qemu_rbd_parse_clientname() */
        } else {
            ret = rados_conf_set(cluster, name, value);
                error_report("invalid conf option %s", name);
static int qemu_rbd_create(const char *filename, QEMUOptionParameter *options)
    char pool[RBD_MAX_POOL_NAME_SIZE];
    char name[RBD_MAX_IMAGE_NAME_SIZE];
    char snap_buf[RBD_MAX_SNAP_NAME_SIZE];
    char conf[RBD_MAX_CONF_SIZE];
    char clientname_buf[RBD_MAX_CONF_SIZE];
    rados_ioctx_t io_ctx;

    if (qemu_rbd_parsename(filename, pool, sizeof(pool),
                           snap_buf, sizeof(snap_buf),
                           conf, sizeof(conf)) < 0) {
    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            bytes = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                objsize = options->value.n;
                if ((objsize - 1) & objsize) { /* not a power of 2? */
                    error_report("obj size needs to be a power of 2");
                if (objsize < 4096) {
                    error_report("obj size too small");
                obj_order = ffs(objsize) - 1;
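                /*
                 * Worked example with assumed values (not from the
                 * original code): objsize = 4 MiB = 1 << 22, so
                 * ffs(objsize) returns 23 and obj_order becomes 22.
                 */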
    clientname = qemu_rbd_parse_clientname(conf, clientname_buf);
    if (rados_create(&cluster, clientname) < 0) {
        error_report("error initializing");

    if (strstr(conf, "conf=") == NULL) {
        /* try default location, but ignore failure */
        rados_conf_read_file(cluster, NULL);
    }

    if (conf[0] != '\0' &&
        qemu_rbd_set_conf(cluster, conf) < 0) {
        error_report("error setting config options");
        rados_shutdown(cluster);

    if (rados_connect(cluster) < 0) {
        error_report("error connecting");
        rados_shutdown(cluster);

    if (rados_ioctx_create(cluster, pool, &io_ctx) < 0) {
        error_report("error opening pool %s", pool);
        rados_shutdown(cluster);

    ret = rbd_create(io_ctx, name, bytes, &obj_order);
    rados_ioctx_destroy(io_ctx);
    rados_shutdown(cluster);
/*
 * This aio completion is being called from qemu_rbd_aio_event_reader()
 * and runs in qemu context. It schedules a bh, unless the aio was
 * cancelled earlier, in which case it only frees the request's resources.
 */
static void qemu_rbd_complete_aio(RADOSCB *rcb)
    RBDAIOCB *acb = rcb->acb;

    if (acb->cancelled) {
        qemu_vfree(acb->bounce);
        qemu_aio_release(acb);

    if (acb->cmd == RBD_AIO_WRITE ||
        acb->cmd == RBD_AIO_DISCARD) {
        } else if (!acb->error) {
            acb->ret = rcb->size;
            memset(rcb->buf, 0, rcb->size);
        } else if (r < rcb->size) {
            memset(rcb->buf + r, 0, rcb->size - r);
                acb->ret = rcb->size;
        } else if (!acb->error) {
    /* Note that acb->bh can be NULL if the aio was cancelled */
    acb->bh = qemu_bh_new(rbd_aio_bh_cb, acb);
    qemu_bh_schedule(acb->bh);
/*
 * aio fd read handler. It runs in the qemu context and invokes the
 * completion handling for finished rados aio operations.
 */
static void qemu_rbd_aio_event_reader(void *opaque)
    BDRVRBDState *s = opaque;

        char *p = (char *)&s->event_rcb;
        /* now read the rcb pointer that was sent from a non-qemu thread */
        ret = read(s->fds[RBD_FD_READ], p + s->event_reader_pos,
                   sizeof(s->event_rcb) - s->event_reader_pos);
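        /*
         * The pointer may arrive in pieces: accumulate whatever has been
         * read so far via event_reader_pos until a complete RADOSCB
         * pointer has been received, then hand it to
         * qemu_rbd_complete_aio() below.
         */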
        s->event_reader_pos += ret;
        if (s->event_reader_pos == sizeof(s->event_rcb)) {
            s->event_reader_pos = 0;
            qemu_rbd_complete_aio(s->event_rcb);
        }
    } while (ret < 0 && errno == EINTR);
static int qemu_rbd_aio_flush_cb(void *opaque)
    BDRVRBDState *s = opaque;

    return (s->qemu_aio_count > 0);
static int qemu_rbd_open(BlockDriverState *bs, const char *filename, int flags)
    BDRVRBDState *s = bs->opaque;
    char pool[RBD_MAX_POOL_NAME_SIZE];
    char snap_buf[RBD_MAX_SNAP_NAME_SIZE];
    char conf[RBD_MAX_CONF_SIZE];
    char clientname_buf[RBD_MAX_CONF_SIZE];

    if (qemu_rbd_parsename(filename, pool, sizeof(pool),
                           snap_buf, sizeof(snap_buf),
                           s->name, sizeof(s->name),
                           conf, sizeof(conf)) < 0) {
    clientname = qemu_rbd_parse_clientname(conf, clientname_buf);
    r = rados_create(&s->cluster, clientname);
        error_report("error initializing");

    if (snap_buf[0] != '\0') {
        s->snap = g_strdup(snap_buf);

    if (strstr(conf, "conf=") == NULL) {
        /* try default location, but ignore failure */
        rados_conf_read_file(s->cluster, NULL);
    }

    if (conf[0] != '\0') {
        r = qemu_rbd_set_conf(s->cluster, conf);
            error_report("error setting config options");
            goto failed_shutdown;

    r = rados_connect(s->cluster);
        error_report("error connecting");
        goto failed_shutdown;

    r = rados_ioctx_create(s->cluster, pool, &s->io_ctx);
        error_report("error opening pool %s", pool);
        goto failed_shutdown;

    r = rbd_open(s->io_ctx, s->name, &s->image, s->snap);
        error_report("error reading header from %s", s->name);

    bs->read_only = (s->snap != NULL);

    s->event_reader_pos = 0;
    r = qemu_pipe(s->fds);
        error_report("error creating pipe");
    fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
    fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
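    /*
     * Both pipe ends are non-blocking: the read end is polled from the
     * qemu aio loop via the handler registered below, while the write
     * end is used from librbd callback threads, which must not block
     * here (see qemu_rbd_send_pipe()).
     */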
    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
                            NULL, qemu_rbd_aio_flush_cb, s);
    rados_ioctx_destroy(s->io_ctx);
    rados_shutdown(s->cluster);
static void qemu_rbd_close(BlockDriverState *bs)
    BDRVRBDState *s = bs->opaque;

    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL);

    rados_ioctx_destroy(s->io_ctx);
    rados_shutdown(s->cluster);
/*
 * Cancel aio. Since we don't reference acb in any non-qemu threads,
 * it is safe to access it here.
 */
static void qemu_rbd_aio_cancel(BlockDriverAIOCB *blockacb)
    RBDAIOCB *acb = (RBDAIOCB *) blockacb;

static AIOPool rbd_aio_pool = {
    .aiocb_size = sizeof(RBDAIOCB),
    .cancel = qemu_rbd_aio_cancel,
};
static int qemu_rbd_send_pipe(BDRVRBDState *s, RADOSCB *rcb)
    int fd = s->fds[RBD_FD_WRITE];
    /* send the op pointer to the qemu thread that is responsible
     * for the aio/op completion; the completion must run in a qemu
     * thread context */
    ret = write(fd, (void *)&rcb, sizeof(rcb));
        if (errno == EINTR) {
        if (errno != EAGAIN) {

        do {
            ret = select(fd + 1, NULL, &wfd, NULL, NULL);
        } while (ret < 0 && errno == EINTR);
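        /*
         * The write end of the pipe is non-blocking, so on EAGAIN we
         * wait with select() for it to become writable and then write
         * again; a full pipe therefore throttles the librbd callback
         * thread instead of dropping a completion notification.
         */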
/*
 * This is the callback function for rbd_aio_read and _write
 *
 * Note: this function is being called from a non-qemu thread so
 * we need to be careful about what we do here. Generally we only
 * write to the block notification pipe, and do the rest of the
 * io completion handling from qemu_rbd_aio_event_reader() which
 * runs in a qemu context.
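 *
 * The completion path implemented in this file is, roughly:
 *
 *   rbd_finish_aiocb()           librbd callback thread: stores the return
 *                                value and writes the RADOSCB pointer to
 *                                s->fds[RBD_FD_WRITE]
 *   qemu_rbd_aio_event_reader()  qemu aio context: reads the pointer back
 *                                and calls qemu_rbd_complete_aio()
 *   qemu_rbd_complete_aio()      records the result and schedules a bh
 *   rbd_aio_bh_cb()              copies the bounce buffer for reads and
 *                                invokes the caller's completion callback
 */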
static void rbd_finish_aiocb(rbd_completion_t c, RADOSCB *rcb)
    rcb->ret = rbd_aio_get_return_value(c);

    ret = qemu_rbd_send_pipe(rcb->s, rcb);
        error_report("failed writing to acb->s->fds");
/* Callback when all queued rbd_aio requests are complete */

static void rbd_aio_bh_cb(void *opaque)
    RBDAIOCB *acb = opaque;

    if (acb->cmd == RBD_AIO_READ) {
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
    qemu_bh_delete(acb->bh);

    qemu_aio_release(acb);
static int rbd_aio_discard_wrapper(rbd_image_t image,
                                   uint64_t off,
                                   uint64_t len,
                                   rbd_completion_t comp)
#ifdef LIBRBD_SUPPORTS_DISCARD
    return rbd_aio_discard(image, off, len, comp);
static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs,
                                       BlockDriverCompletionFunc *cb,
    BDRVRBDState *s = bs->opaque;
    acb = qemu_aio_get(&rbd_aio_pool, bs, cb, opaque);
    if (cmd == RBD_AIO_DISCARD) {
        acb->bounce = qemu_blockalign(bs, qiov->size);

    if (cmd == RBD_AIO_WRITE) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
    }

    off = sector_num * BDRV_SECTOR_SIZE;
    size = nb_sectors * BDRV_SECTOR_SIZE;
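    /*
     * Illustrative values (assumed for this comment, not taken from the
     * original code): sector_num = 2048 and nb_sectors = 8 give
     * off = 2048 * 512 = 1 MiB and size = 8 * 512 = 4 KiB.
     */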
    s->qemu_aio_count++; /* counts every in-flight RADOSCB */
    rcb = g_malloc(sizeof(RADOSCB));
    r = rbd_aio_create_completion(rcb, (rbd_callback_t) rbd_finish_aiocb, &c);

        r = rbd_aio_write(s->image, off, size, buf, c);
        r = rbd_aio_read(s->image, off, size, buf, c);
    case RBD_AIO_DISCARD:
        r = rbd_aio_discard_wrapper(s->image, off, size, c);

    qemu_aio_release(acb);
static BlockDriverAIOCB *qemu_rbd_aio_readv(BlockDriverState *bs,
                                            BlockDriverCompletionFunc *cb,
    return rbd_start_aio(bs, sector_num, qiov, nb_sectors, cb, opaque,
                         RBD_AIO_READ);

static BlockDriverAIOCB *qemu_rbd_aio_writev(BlockDriverState *bs,
                                             BlockDriverCompletionFunc *cb,
    return rbd_start_aio(bs, sector_num, qiov, nb_sectors, cb, opaque,
                         RBD_AIO_WRITE);
static int qemu_rbd_co_flush(BlockDriverState *bs)
#if LIBRBD_VERSION_CODE >= LIBRBD_VERSION(0, 1, 1)
    /* rbd_flush added in 0.1.1 */
    BDRVRBDState *s = bs->opaque;
    return rbd_flush(s->image);
static int qemu_rbd_getinfo(BlockDriverState *bs, BlockDriverInfo *bdi)
    BDRVRBDState *s = bs->opaque;
    rbd_image_info_t info;

    r = rbd_stat(s->image, &info, sizeof(info));

    bdi->cluster_size = info.obj_size;
static int64_t qemu_rbd_getlength(BlockDriverState *bs)
    BDRVRBDState *s = bs->opaque;
    rbd_image_info_t info;

    r = rbd_stat(s->image, &info, sizeof(info));

static int qemu_rbd_truncate(BlockDriverState *bs, int64_t offset)
    BDRVRBDState *s = bs->opaque;

    r = rbd_resize(s->image, offset);
static int qemu_rbd_snap_create(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info)
    BDRVRBDState *s = bs->opaque;

    if (sn_info->name[0] == '\0') {
        return -EINVAL; /* we need a name for rbd snapshots */
    /*
     * rbd snapshots use the name as the user-controlled unique identifier;
     * we can't use the rbd snapid for that purpose, as it can't be set.
     */
    if (sn_info->id_str[0] != '\0' &&
        strcmp(sn_info->id_str, sn_info->name) != 0) {

    if (strlen(sn_info->name) >= sizeof(sn_info->id_str)) {
    r = rbd_snap_create(s->image, sn_info->name);
        error_report("failed to create snap: %s", strerror(-r));
static int qemu_rbd_snap_remove(BlockDriverState *bs,
                                const char *snapshot_name)
    BDRVRBDState *s = bs->opaque;

    r = rbd_snap_remove(s->image, snapshot_name);

static int qemu_rbd_snap_rollback(BlockDriverState *bs,
                                  const char *snapshot_name)
    BDRVRBDState *s = bs->opaque;

    r = rbd_snap_rollback(s->image, snapshot_name);
static int qemu_rbd_snap_list(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_tab)
    BDRVRBDState *s = bs->opaque;
    QEMUSnapshotInfo *sn_info, *sn_tab = NULL;
    rbd_snap_info_t *snaps;
    int max_snaps = RBD_MAX_SNAPS;

    do {
        snaps = g_malloc(sizeof(*snaps) * max_snaps);
        snap_count = rbd_snap_list(s->image, snaps, &max_snaps);
        if (snap_count < 0) {
    } while (snap_count == -ERANGE);
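    /*
     * The retry relies on rbd_snap_list() being expected to update
     * max_snaps when it returns -ERANGE, so the next iteration can
     * allocate a large enough array.
     */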
    if (snap_count <= 0) {

    sn_tab = g_malloc0(snap_count * sizeof(QEMUSnapshotInfo));

    for (i = 0; i < snap_count; i++) {
        const char *snap_name = snaps[i].name;

        sn_info = sn_tab + i;
        pstrcpy(sn_info->id_str, sizeof(sn_info->id_str), snap_name);
        pstrcpy(sn_info->name, sizeof(sn_info->name), snap_name);

        sn_info->vm_state_size = snaps[i].size;
        sn_info->date_sec = 0;
        sn_info->date_nsec = 0;
        sn_info->vm_clock_nsec = 0;
    rbd_snap_list_end(snaps);
#ifdef LIBRBD_SUPPORTS_DISCARD
static BlockDriverAIOCB *qemu_rbd_aio_discard(BlockDriverState *bs,
                                              BlockDriverCompletionFunc *cb,
    return rbd_start_aio(bs, sector_num, NULL, nb_sectors, cb, opaque,
                         RBD_AIO_DISCARD);
static QEMUOptionParameter qemu_rbd_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .help = "RBD object size"
    },
};
static BlockDriver bdrv_rbd = {
    .format_name            = "rbd",
    .instance_size          = sizeof(BDRVRBDState),
    .bdrv_file_open         = qemu_rbd_open,
    .bdrv_close             = qemu_rbd_close,
    .bdrv_create            = qemu_rbd_create,
    .bdrv_get_info          = qemu_rbd_getinfo,
    .create_options         = qemu_rbd_create_options,
    .bdrv_getlength         = qemu_rbd_getlength,
    .bdrv_truncate          = qemu_rbd_truncate,
    .protocol_name          = "rbd",

    .bdrv_aio_readv         = qemu_rbd_aio_readv,
    .bdrv_aio_writev        = qemu_rbd_aio_writev,
    .bdrv_co_flush_to_disk  = qemu_rbd_co_flush,

#ifdef LIBRBD_SUPPORTS_DISCARD
    .bdrv_aio_discard       = qemu_rbd_aio_discard,
#endif

    .bdrv_snapshot_create   = qemu_rbd_snap_create,
    .bdrv_snapshot_delete   = qemu_rbd_snap_remove,
    .bdrv_snapshot_list     = qemu_rbd_snap_list,
    .bdrv_snapshot_goto     = qemu_rbd_snap_rollback,
};
static void bdrv_rbd_init(void)
    bdrv_register(&bdrv_rbd);

block_init(bdrv_rbd_init);