// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) 2007-2008 Samuel Thibault.
 * (C) Copyright 2020 EPAM Systems Inc.
 */
#define LOG_CATEGORY UCLASS_PVBLOCK

#include <dm/device-internal.h>
#include <asm/armv8/mmu.h>
#include <asm/global_data.h>
#include <asm/xen/system.h>
#include <linux/bug.h>
#include <linux/compat.h>
#include <xen/events.h>
#include <xen/gnttab.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>
#define DRV_NAME	"pvblock"
#define DRV_NAME_BLK	"pvblock_blk"

#define WAIT_RING_TO_MS	10
struct blkfront_info {
	unsigned int sector_size;
 * struct blkfront_dev - Struct representing blkfront device
 * @ring: Front ring structure
 * @ring_ref: The grant reference, allowing us to grant access
 *	      to the ring to the other end/domain
 * @evtchn: Event channel used to signal ring events
 * @handle: Block device handle passed to the backend in ring requests
 * @nodename: Device XenStore path in format "device/vbd/" + @devid
 * @backend: Backend XenStore path
	struct blkif_front_ring ring;
	struct blkfront_info info;

struct blkfront_plat {
 * struct blkfront_aiocb - AIO control block
 * @aio_dev: Blkfront device
 * @aio_buf: Memory buffer, which must be sector-aligned for @aio_dev
 * @aio_nbytes: Size of AIO, which must be a multiple of the @aio_dev
 *		sector size
 * @aio_offset: Offset, which must be at an @aio_dev sector-aligned
 *		location
 * @data: Data used to receive the response from the ring
 * @gref: Array of grant references
 * @n: Number of segments
 * @aio_cb: Callback invoked when the I/O request completes
struct blkfront_aiocb {
	struct blkfront_dev *aio_dev;
	grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	void (*aio_cb)(struct blkfront_aiocb *aiocbp, int ret);

static void blkfront_sync(struct blkfront_dev *dev);
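/*
 * Illustrative sketch, not part of the driver: how a caller might fill a
 * blkfront_aiocb for a synchronous single-sector read via blkfront_io(),
 * assuming "dev" is an initialized blkfront_dev and "buf" is a
 * sector-aligned buffer of at least dev->info.sector_size bytes:
 *
 *	struct blkfront_aiocb aiocb = {
 *		.aio_dev = dev,
 *		.aio_buf = buf,
 *		.aio_nbytes = dev->info.sector_size,
 *		.aio_offset = 0,
 *	};
 *
 *	blkfront_io(&aiocb, 0);		(0 == read, 1 == write)
 */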
static void free_blkfront(struct blkfront_dev *dev)
	mask_evtchn(dev->evtchn);
	gnttab_end_access(dev->ring_ref);
	free(dev->ring.sring);
	unbind_evtchn(dev->evtchn);
	free(dev->bounce_buffer);
static int init_blkfront(unsigned int devid, struct blkfront_dev *dev)
	xenbus_transaction_t xbt;
	char *message = NULL;
	struct blkif_sring *s;
	char path[ARRAY_SIZE(nodename) + strlen("/backend-id") + 1];

	sprintf(nodename, "device/vbd/%d", devid);
	memset(dev, 0, sizeof(*dev));
	dev->nodename = strdup(nodename);
	snprintf(path, sizeof(path), "%s/backend-id", nodename);
	dev->dom = xenbus_read_integer(path);
	evtchn_alloc_unbound(dev->dom, NULL, dev, &dev->evtchn);

	s = (struct blkif_sring *)memalign(PAGE_SIZE, PAGE_SIZE);
		printf("Failed to allocate shared ring\n");

	FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE);
	dev->ring_ref = gnttab_grant_access(dev->dom, virt_to_pfn(s), 0);
	err = xenbus_transaction_start(&xbt);
		printf("starting transaction\n");

	err = xenbus_printf(xbt, nodename, "ring-ref", "%u", dev->ring_ref);
		message = "writing ring-ref";
		goto abort_transaction;

	err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn);
		message = "writing event-channel";
		goto abort_transaction;

	err = xenbus_printf(xbt, nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
		message = "writing protocol";
		goto abort_transaction;

	snprintf(path, sizeof(path), "%s/state", nodename);
	err = xenbus_switch_state(xbt, path, XenbusStateConnected);
		message = "switching state";
		goto abort_transaction;

	err = xenbus_transaction_end(xbt, 0, &retry);
		printf("completing transaction\n");

abort_transaction:
	err = xenbus_transaction_end(xbt, 1, &retry);
	printf("Abort transaction %s\n", message);
	snprintf(path, sizeof(path), "%s/backend", nodename);
	msg = xenbus_read(XBT_NIL, path, &dev->backend);
		printf("Error %s when reading the backend path %s\n",
		       msg, path);

	dev->handle = strtoul(strrchr(nodename, '/') + 1, NULL, 0);
		char path[strlen(dev->backend) +
			  strlen("/feature-flush-cache") + 1];

		snprintf(path, sizeof(path), "%s/mode", dev->backend);
		msg = xenbus_read(XBT_NIL, path, &c);
			printf("Error %s when reading the mode\n", msg);
			dev->info.mode = O_RDWR;
			dev->info.mode = O_RDONLY;

		snprintf(path, sizeof(path), "%s/state", dev->backend);
		state = xenbus_read_integer(path);
		while (!msg && state < XenbusStateConnected)
			msg = xenbus_wait_for_state_change(path, &state);
		if (msg || state != XenbusStateConnected) {
			printf("backend not available, state=%d\n", state);
		snprintf(path, sizeof(path), "%s/info", dev->backend);
		dev->info.info = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/sectors", dev->backend);
		/*
		 * FIXME: read_integer returns an int, so disk size
		 * limited to 1TB for now
		 */
		dev->info.sectors = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/sector-size", dev->backend);
		dev->info.sector_size = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/feature-barrier",
			 dev->backend);
		dev->info.barrier = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/feature-flush-cache",
			 dev->backend);
		dev->info.flush = xenbus_read_integer(path);
	unmask_evtchn(dev->evtchn);

	dev->bounce_buffer = memalign(dev->info.sector_size,
				      dev->info.sector_size);
	if (!dev->bounce_buffer) {
		printf("Failed to allocate bounce buffer\n");

	debug("%llu sectors of %u bytes, bounce buffer at %p\n",
	      dev->info.sectors, dev->info.sector_size,
	      dev->bounce_buffer);
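/*
 * Illustrative sketch (assumed values), not part of the driver: after the
 * transaction in init_blkfront() the frontend's XenStore subtree for a
 * hypothetical devid 51712 could look roughly like:
 *
 *	device/vbd/51712/backend-id    = "0"
 *	device/vbd/51712/ring-ref      = "8"
 *	device/vbd/51712/event-channel = "3"
 *	device/vbd/51712/protocol      = XEN_IO_PROTO_ABI_NATIVE
 *	device/vbd/51712/state         = "4"	(XenbusStateConnected)
 *
 * The backend path read above ("%s/backend") then points at the matching
 * backend subtree, from which mode, sectors, sector-size and the optional
 * feature-barrier/feature-flush-cache keys are picked up.
 */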
static void shutdown_blkfront(struct blkfront_dev *dev)
	char *err = NULL, *err2;
	char path[strlen(dev->backend) + strlen("/state") + 1];
	char nodename[strlen(dev->nodename) + strlen("/event-channel") + 1];

	debug("Close " DRV_NAME ", device ID %d\n", dev->devid);

	snprintf(path, sizeof(path), "%s/state", dev->backend);
	snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing);
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosing, err);

	state = xenbus_read_integer(path);
	while (!err && state < XenbusStateClosing)
		err = xenbus_wait_for_state_change(path, &state);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed);
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosed, err);

	state = xenbus_read_integer(path);
	while (state < XenbusStateClosed) {
		err = xenbus_wait_for_state_change(path, &state);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateInitialising);
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateInitialising, err);

	state = xenbus_read_integer(path);
	while (!err &&
	       (state < XenbusStateInitWait || state >= XenbusStateClosed))
		err = xenbus_wait_for_state_change(path, &state);

	snprintf(nodename, sizeof(nodename), "%s/ring-ref", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	snprintf(nodename, sizeof(nodename), "%s/event-channel", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
 * blkfront_aio_poll() - AIO polling function.
 * @dev: Blkfront device
 *
 * Here we receive responses from the ring and check their status. This
 * continues until all pending responses, from the consumed pointer up to the
 * produced pointer, have been processed; the consumed pointer is then
 * advanced to mark the data as read.
 *
 * Return: Number of consumed responses.
static int blkfront_aio_poll(struct blkfront_dev *dev)
	struct blkif_response *rsp;

	rp = dev->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */
	cons = dev->ring.rsp_cons;

	while (cons != rp) {
		struct blkfront_aiocb *aiocbp;

		rsp = RING_GET_RESPONSE(&dev->ring, cons);

		aiocbp = (void *)(uintptr_t)rsp->id;
		status = rsp->status;

		switch (rsp->operation) {
			if (status != BLKIF_RSP_OKAY)
				printf("%s error %d on %s at offset %llu, num bytes %llu\n",
				       rsp->operation == BLKIF_OP_READ ?
				       "read" : "write",
				       status, aiocbp->aio_dev->nodename,
				       (unsigned long long)aiocbp->aio_offset,
				       (unsigned long long)aiocbp->aio_nbytes);

			for (j = 0; j < aiocbp->n; j++)
				gnttab_end_access(aiocbp->gref[j]);

		case BLKIF_OP_WRITE_BARRIER:
			if (status != BLKIF_RSP_OKAY)
				printf("write barrier error %d\n", status);
		case BLKIF_OP_FLUSH_DISKCACHE:
			if (status != BLKIF_RSP_OKAY)
				printf("flush error %d\n", status);
			printf("unrecognized block operation %d response (status %d)\n",
			       rsp->operation, status);

		dev->ring.rsp_cons = ++cons;
		/* Note: the callback frees aiocbp itself */
		if (aiocbp && aiocbp->aio_cb)
			aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
		if (dev->ring.rsp_cons != cons)
			/* We reentered, we must not continue here */

	RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
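/*
 * Worked example (hypothetical index values), not part of the driver: if the
 * backend has produced responses up to rsp_prod == 7 while our last consumed
 * index rsp_cons is 4, the loop above handles ring slots 4, 5 and 6 in turn,
 * runs each request's completion callback, and leaves rsp_cons == 7 so those
 * slots can be reused for new requests. RING_FINAL_CHECK_FOR_RESPONSES()
 * then tells us whether more responses arrived while we were processing.
 */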
static void blkfront_wait_slot(struct blkfront_dev *dev)
	/* Wait for a slot */
	if (RING_FULL(&dev->ring)) {
			blkfront_aio_poll(dev);
			if (!RING_FULL(&dev->ring))
			wait_event_timeout(NULL, !RING_FULL(&dev->ring),
					   WAIT_RING_TO_MS);
 * blkfront_aio() - Issue an aio.
 * @aiocbp: AIO control block structure
 * @write: Describes whether it is a read or a write operation
 *
 * We check whether the AIO parameters meet the requirements of the device.
 * Then we get a request slot from the ring and fill in its arguments. After
 * this we grant the backend access to the data pages. The last step is
 * notifying the backend about the AIO via the event channel.
static void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
	struct blkfront_dev *dev = aiocbp->aio_dev;
	struct blkif_request *req;
	uintptr_t start, end;

	/* Can't do I/O at a non-sector-aligned location */
	BUG_ON(aiocbp->aio_offset & (dev->info.sector_size - 1));
	/* Can't do I/O in non-sector-sized amounts */
	BUG_ON(aiocbp->aio_nbytes & (dev->info.sector_size - 1));
	/* Can't do I/O from a non-sector-aligned buffer */
	BUG_ON(((uintptr_t)aiocbp->aio_buf & (dev->info.sector_size - 1)));

	start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
	end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes +
	       PAGE_SIZE - 1) & PAGE_MASK;
	n = (end - start) / PAGE_SIZE;

	BUG_ON(n > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	blkfront_wait_slot(dev);
	i = dev->ring.req_prod_pvt;
	req = RING_GET_REQUEST(&dev->ring, i);

	req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
	req->nr_segments = n;
	req->handle = dev->handle;
	req->id = (uintptr_t)aiocbp;
	req->sector_number = aiocbp->aio_offset / dev->info.sector_size;

	for (j = 0; j < n; j++) {
		req->seg[j].first_sect = 0;
		req->seg[j].last_sect = PAGE_SIZE / dev->info.sector_size - 1;

	req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) /
				 dev->info.sector_size;
	req->seg[n - 1].last_sect = (((uintptr_t)aiocbp->aio_buf +
		aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / dev->info.sector_size;

	for (j = 0; j < n; j++) {
		uintptr_t data = start + j * PAGE_SIZE;

			/* Trigger CoW if needed */
			*(char *)(data + (req->seg[j].first_sect *
					  dev->info.sector_size)) = 0;

		req->seg[j].gref = gnttab_grant_access(dev->dom,
						       virt_to_pfn((void *)data),
		aiocbp->gref[j] = req->seg[j].gref;

	dev->ring.req_prod_pvt = i + 1;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

		notify_remote_via_evtchn(dev->evtchn);
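/*
 * Worked example (assumes a 4 KiB PAGE_SIZE and 512-byte sectors), not part
 * of the driver: a 4096-byte request whose sector-aligned buffer starts at
 * byte offset 1024 within its page spans two pages, so n == 2 above. The
 * last byte lands at in-page offset 1023 of the second page, so the segment
 * bounds come out as:
 *
 *	seg[0].first_sect = 1024 / 512 = 2,	seg[0].last_sect = 7
 *	seg[1].first_sect = 0,			seg[1].last_sect = 1023 / 512 = 1
 *
 * i.e. 6 sectors (3072 bytes) of the first page plus 2 sectors (1024 bytes)
 * of the second page, 4096 bytes in total.
 */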
static void blkfront_aio_cb(struct blkfront_aiocb *aiocbp, int ret)
	aiocbp->data = (void *)1;
	aiocbp->aio_cb = NULL;

static void blkfront_io(struct blkfront_aiocb *aiocbp, int write)
	aiocbp->aio_cb = blkfront_aio_cb;
	blkfront_aio(aiocbp, write);
		blkfront_aio_poll(aiocbp->aio_dev);
static void blkfront_push_operation(struct blkfront_dev *dev, u8 op,
	struct blkif_request *req;

	blkfront_wait_slot(dev);
	i = dev->ring.req_prod_pvt;
	req = RING_GET_REQUEST(&dev->ring, i);

	req->nr_segments = 0;
	req->handle = dev->handle;
	req->sector_number = 0;
	dev->ring.req_prod_pvt = i + 1;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

		notify_remote_via_evtchn(dev->evtchn);
static void blkfront_sync(struct blkfront_dev *dev)
	if (dev->info.mode == O_RDWR) {
		if (dev->info.barrier == 1)
			blkfront_push_operation(dev,
						BLKIF_OP_WRITE_BARRIER, 0);

		if (dev->info.flush == 1)
			blkfront_push_operation(dev,
						BLKIF_OP_FLUSH_DISKCACHE, 0);

		blkfront_aio_poll(dev);
		if (RING_FREE_REQUESTS(&dev->ring) == RING_SIZE(&dev->ring))
 * pvblock_iop() - Issue an aio.
 * @udev: Pvblock device
 * @blknr: Block number to read from / write to
 * @blkcnt: Amount of blocks to read / write
 * @buffer: Memory buffer with data to be read / written
 * @write: Describes whether it is a read or a write operation
 *
 * Depending on the operation, data is either read from the device sectors
 * starting at @blknr into @buffer, or written from @buffer to those sectors.
static ulong pvblock_iop(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, int write)
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	struct blkfront_aiocb aiocb;
	lbaint_t blocks_todo;

	if ((blknr + blkcnt) > desc->lba) {
		printf(DRV_NAME ": block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       blknr + blkcnt, desc->lba);

	unaligned = (uintptr_t)buffer & (blk_dev->info.sector_size - 1);

	aiocb.aio_dev = blk_dev;
	aiocb.aio_offset = blknr * desc->blksz;

	blocks_todo = blkcnt;
		aiocb.aio_buf = unaligned ? blk_dev->bounce_buffer : buffer;

		if (write && unaligned)
			memcpy(blk_dev->bounce_buffer, buffer, desc->blksz);

		aiocb.aio_nbytes = unaligned ? desc->blksz :
			min((size_t)((BLKIF_MAX_SEGMENTS_PER_REQUEST - 1)
				     * PAGE_SIZE),
			    (size_t)(blocks_todo * desc->blksz));

		blkfront_io(&aiocb, write);

		if (!write && unaligned)
			memcpy(buffer, blk_dev->bounce_buffer, desc->blksz);

		aiocb.aio_offset += aiocb.aio_nbytes;
		buffer += aiocb.aio_nbytes;
		blocks_todo -= aiocb.aio_nbytes / desc->blksz;
	} while (blocks_todo > 0);
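/*
 * Illustrative numbers (assume BLKIF_MAX_SEGMENTS_PER_REQUEST == 11, a 4 KiB
 * PAGE_SIZE and 512-byte blocks), not part of the driver: with an aligned
 * buffer each loop iteration above transfers at most
 * (11 - 1) * 4096 = 40960 bytes, so a 128 KiB read completes in four
 * iterations (3 x 40960 bytes + 8192 bytes). With an unaligned buffer every
 * iteration goes through the single-sector bounce buffer instead, one block
 * at a time.
 */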
ulong pvblock_blk_read(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
		       void *buffer)
	return pvblock_iop(udev, blknr, blkcnt, buffer, 0);

ulong pvblock_blk_write(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
			const void *buffer)
	return pvblock_iop(udev, blknr, blkcnt, (void *)buffer, 1);
static int pvblock_blk_bind(struct udevice *udev)
	struct blk_desc *desc = dev_get_uclass_plat(udev);

	desc->uclass_id = UCLASS_PVBLOCK;
	/*
	 * Initialize the devnum to -ENODEV. This is to make sure that
	 * blk_next_free_devnum() works as expected, since the default
	 * value 0 is a valid devnum.
	 */
	desc->devnum = -ENODEV;
	devnum = blk_next_free_devnum(UCLASS_PVBLOCK);
	desc->devnum = devnum;
	desc->part_type = PART_TYPE_UNKNOWN;

	strncpy(desc->vendor, "Xen", sizeof(desc->vendor));
	strncpy(desc->revision, "1", sizeof(desc->revision));
	strncpy(desc->product, "Virtual disk", sizeof(desc->product));
static int pvblock_blk_probe(struct udevice *udev)
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blkfront_plat *plat = dev_get_plat(udev);
	struct blk_desc *desc = dev_get_uclass_plat(udev);

	ret = init_blkfront(devid, blk_dev);

	desc->blksz = blk_dev->info.sector_size;
	desc->lba = blk_dev->info.sectors;
	desc->log2blksz = LOG2(blk_dev->info.sector_size);

static int pvblock_blk_remove(struct udevice *udev)
	struct blkfront_dev *blk_dev = dev_get_priv(udev);

	shutdown_blkfront(blk_dev);
static const struct blk_ops pvblock_blk_ops = {
	.read = pvblock_blk_read,
	.write = pvblock_blk_write,

U_BOOT_DRIVER(pvblock_blk) = {
	.name = DRV_NAME_BLK,
	.ops = &pvblock_blk_ops,
	.bind = pvblock_blk_bind,
	.probe = pvblock_blk_probe,
	.remove = pvblock_blk_remove,
	.priv_auto = sizeof(struct blkfront_dev),
	.flags = DM_FLAG_OS_PREPARE,
/*******************************************************************************
 * Para-virtual block device class
 *******************************************************************************/

typedef int (*enum_vbd_callback)(struct udevice *parent, unsigned int devid);
static int on_new_vbd(struct udevice *parent, unsigned int devid)
	struct driver_info info;
	struct udevice *udev;
	struct blkfront_plat *plat;

	debug("New " DRV_NAME_BLK ", device ID %d\n", devid);

	plat = malloc(sizeof(struct blkfront_plat));
		printf("Failed to allocate platform data\n");

	info.name = DRV_NAME_BLK;

	ret = device_bind_by_name(parent, false, &info, &udev);
		printf("Failed to bind " DRV_NAME_BLK " to device with ID %d, ret: %d\n",
static int xenbus_enumerate_vbd(struct udevice *udev, enum_vbd_callback clb)
	msg = xenbus_ls(XBT_NIL, "device/vbd", &dirs);
		printf("Failed to read device/vbd directory: %s\n", msg);

	for (i = 0; dirs[i]; i++) {
		sscanf(dirs[i], "%d", &devid);
		ret = clb(udev, devid);
static void print_pvblock_devices(void)
	struct udevice *udev;
	const char *class_name;

	class_name = uclass_get_name(UCLASS_PVBLOCK);
	for (blk_first_device(UCLASS_PVBLOCK, &udev); udev;
	     blk_next_device(&udev), first = false) {
		struct blk_desc *desc = dev_get_uclass_plat(udev);

		printf("%s: %d", class_name, desc->devnum);
void pvblock_init(void)
	struct driver_info info;

	/*
	 * At this point Xen drivers have already initialized,
	 * so we can instantiate the class driver and enumerate
	 * virtual block devices.
	 */
	info.name = DRV_NAME;
	ret = device_bind_by_name(gd->dm_root, false, &info, NULL);
		printf("Failed to bind " DRV_NAME ", ret: %d\n", ret);

	/* Bootstrap virtual block devices class driver */
	uclass_probe_all(UCLASS_PVBLOCK);

	print_pvblock_devices();
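/*
 * Illustrative note (assumed numbering), not part of the driver: on a guest
 * with a single backend disk the scan above typically binds one DRV_NAME_BLK
 * device, and print_pvblock_devices() reports something like "pvblock: 0",
 * i.e. the uclass name followed by the devnum allocated via
 * blk_next_free_devnum().
 */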
static int pvblock_probe(struct udevice *udev)
	if (xenbus_enumerate_vbd(udev, on_new_vbd) < 0)

	ret = uclass_get(UCLASS_BLK, &uc);
	uclass_foreach_dev_probe(UCLASS_BLK, udev);
U_BOOT_DRIVER(pvblock_drv) = {
	.id = UCLASS_PVBLOCK,
	.probe = pvblock_probe,

UCLASS_DRIVER(pvblock) = {
	.id = UCLASS_PVBLOCK,