/* sunvdc.c: Sun LDOM Virtual Disk Client. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#define DRV_MODULE_NAME "sunvdc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.2"
#define DRV_MODULE_RELDATE "November 24, 2014"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define VDC_TX_RING_SIZE 512

#define WAITING_FOR_LINK_UP 0x01
#define WAITING_FOR_TX_SPACE 0x02
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1
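/* Editor's note: these WAITING_FOR_* values are the tokens stored in
 * vio->cmp->waiting_for.  vdc_finish() below completes the pending
 * vio_completion only when its waiting_for token matches the event that
 * just finished, or when WAITING_FOR_ANY (-1) is passed, so a link-up
 * waiter is not woken by a generic-command ACK and vice versa.
 */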
static struct workqueue_struct *sunvdc_wq;

struct vdc_req_entry {
	struct request *req;
};

struct vdc_port {
	struct vio_driver_state vio;
	struct vdc_completion *cmp;
	struct vdc_req_entry rq_arr[VDC_TX_RING_SIZE];
	unsigned long ring_cookies;

	struct timer_list ldc_reset_timer;
	struct work_struct ldc_reset_work;
	/* The server fills these in for us in the disk attribute
	 * handshake.
	 */
static void vdc_ldc_reset(struct vdc_port *port);
static void vdc_ldc_reset_work(struct work_struct *work);
static void vdc_ldc_reset_timer(unsigned long _arg);

static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
	return container_of(vio, struct vdc_port, vio);

/* Ordered from largest major to lowest */
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 1 },
	{ .major = 1, .minor = 0 },
static inline int vdc_version_supported(struct vdc_port *port,
					u16 major, u16 minor)
	return port->vio.ver.major == major && port->vio.ver.minor >= minor;

#define VDCBLK_NAME "vdisk"
static int vdc_major;
#define PARTITION_SHIFT 3
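/* Editor's note: PARTITION_SHIFT of 3 gives each virtual disk
 * 1 << 3 = 8 minor numbers (the whole disk plus up to 7 partitions);
 * probe_disk() below derives first_minor as dev_no << PARTITION_SHIFT,
 * e.g. dev_no 2 maps to first_minor 16.
 */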
static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);

static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
	struct gendisk *disk = bdev->bd_disk;
	sector_t nsect = get_capacity(disk);
	sector_t cylinders = nsect;

	sector_div(cylinders, geo->heads * geo->sectors);
	geo->cylinders = cylinders;
	if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
		geo->cylinders = 0xffff;
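/* Editor's note: vdc_getgeo() above fabricates a CHS geometry from the
 * capacity: cylinders = capacity / (heads * sectors), clamped to 0xffff
 * when even that cannot cover the whole disk.  Illustrative arithmetic
 * (values assumed, not taken from this excerpt): with heads = 255,
 * sectors = 63 and a 16 GB disk (33554432 512-byte sectors),
 * cylinders = 33554432 / (255 * 63) = 2088.
 */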
/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
 * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
 * Needed to be able to install inside an ldom from an iso image.
 */
static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned command, unsigned long argument)
	struct gendisk *disk;

	case CDROMMULTISESSION:
		pr_debug(PFX "Multisession CDs not supported\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))

	case CDROM_GET_CAPABILITY:
		disk = bdev->bd_disk;

		if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))

		pr_debug(PFX "ioctl %08x not supported\n", command);

static const struct block_device_operations vdc_fops = {
	.owner = THIS_MODULE,
	.getgeo = vdc_getgeo,

static void vdc_blk_queue_start(struct vdc_port *port)
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	/* restart blk queue when ring is half emptied. also called after
	 * handshake completes, so check for initial handshake before we've
	 * allocated a disk.
	 */
	if (port->disk && blk_queue_stopped(port->disk->queue) &&
	    vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) {
		blk_start_queue(port->disk->queue);
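/* Editor's note: the queue above is only restarted once at least half of
 * the TX ring is free again (avail * 100 / VDC_TX_RING_SIZE >= 50, i.e.
 * at least 256 of the 512 descriptors), rather than on every completion.
 * The port->disk check covers the very first handshake, which finishes
 * before the gendisk has been allocated.
 */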
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
	    (waiting_for == -1 ||
	     vio->cmp->waiting_for == waiting_for)) {
		complete(&vio->cmp->com);

static void vdc_handshake_complete(struct vio_driver_state *vio)
	struct vdc_port *port = to_vdc_port(vio);

	del_timer(&port->ldc_reset_timer);
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
	vdc_blk_queue_start(port);

static int vdc_handle_unknown(struct vdc_port *port, void *arg)
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

static int vdc_send_attr(struct vio_driver_state *vio)
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);

	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.vdisk_block_size = port->vdisk_block_size;
	pkt.max_xfer_size = port->max_xfer_size;

	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);

	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
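/* Editor's note: vdc_send_attr() above is the client half of the VIO
 * attribute exchange; it advertises the transfer mode, block size and
 * maximum transfer size.  The server's reply is handled by
 * vdc_handle_attr() below, which on ACK records the disk type, size,
 * media type (protocol 1.1+) and any reduced max_xfer_size, and rejects
 * the exchange on NACK.
 */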
static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info *pkt = arg;

	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
	       "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt->tag.stype, pkt->operations,
	       pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
	       pkt->xfer_mode, pkt->vdisk_block_size, pkt->max_xfer_size);
	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
		switch (pkt->vdisk_type) {
		case VD_DISK_TYPE_DISK:
		case VD_DISK_TYPE_SLICE:
			break;

		default:
			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
			       vio->name, pkt->vdisk_type);

		if (pkt->vdisk_block_size > port->vdisk_block_size) {
			printk(KERN_ERR PFX "%s: BLOCK size increased "
			       "%u --> %u\n",
			       vio->name,
			       port->vdisk_block_size, pkt->vdisk_block_size);
		port->operations = pkt->operations;
		port->vdisk_type = pkt->vdisk_type;
		if (vdc_version_supported(port, 1, 1)) {
			port->vdisk_size = pkt->vdisk_size;
			port->vdisk_mtype = pkt->vdisk_mtype;

		if (pkt->max_xfer_size < port->max_xfer_size)
			port->max_xfer_size = pkt->max_xfer_size;
		port->vdisk_block_size = pkt->vdisk_block_size;
	} else {
		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);

static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];

	if (unlikely(desc->hdr.state != VIO_DESC_DONE))

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = vio_dring_next(dr, index);

		vdc_end_special(port, desc);

	__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);

	vdc_blk_queue_start(port);

static int vdc_ack(struct vdc_port *port, void *msgbuf)
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;

	if (unlikely(pkt->dring_ident != dr->ident ||
		     pkt->start_idx != pkt->end_idx ||
		     pkt->start_idx >= VDC_TX_RING_SIZE))

	vdc_end_one(port, dr, pkt->start_idx);

static int vdc_nack(struct vdc_port *port, void *msgbuf)
	/* XXX Implement me XXX */

static void vdc_event(void *arg, int event)
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET)) {
		vio_link_state_change(vio, event);
		queue_work(sunvdc_wq, &port->ldc_reset_work);

	if (unlikely(event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		pr_warn(PFX "Unexpected LDC event %d\n", event);

			struct vio_msg_tag tag;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)

		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.stype_env,

		err = vio_validate_sid(vio, &msgbuf.tag);

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
			err = vdc_handle_unknown(port, &msgbuf);

		vdc_finish(&port->vio, err, WAITING_FOR_ANY);

	spin_unlock_irqrestore(&vio->lock, flags);

static int __vdc_tx_trigger(struct vdc_port *port)
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		.dring_ident = dr->ident,
		.start_idx = dr->prod,

	hdr.seq = dr->snd_nxt;

		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));

		if ((delay <<= 1) > 128)
	} while (err == -EAGAIN);

	if (err == -ENOTCONN)
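/* Editor's note: __vdc_tx_trigger() above retries vio_ldc_send() for as
 * long as it returns -EAGAIN, doubling a small backoff between attempts
 * (128 being the apparent ceiling), while -ENOTCONN is treated as a dead
 * LDC channel; the reset path taken in that case is elided here.
 */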
static int __send_request(struct request *req)
	struct vdc_port *port = req->rq_disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct scatterlist sg[port->ring_cookies];
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;

		map_perm |= LDC_MAP_R;

	sg_init_table(sg, port->ring_cookies);
	nsg = blk_rq_map_sg(req->q, req, sg);

	for (i = 0; i < nsg; i++)

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,

		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);

	rqe = &port->rq_arr[dr->prod];

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK) {

	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;

	desc->ncookies = err;
	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
		dr->prod = vio_dring_next(dr, dr->prod);
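/* Editor's note: __send_request() above issues one request by filling the
 * current TX descriptor (cookies from ldc_map_sg(), req_id, operation,
 * offset), publishing it with the write barrier and VIO_DESC_READY, and
 * then kicking the server via __vdc_tx_trigger(); dr->prod is only
 * advanced on success.  The offset conversion
 * (blk_rq_pos(req) << 9) / port->vdisk_block_size turns a 512-byte sector
 * number into a device-block number, a no-op while vdisk_block_size is 512.
 */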
static void do_vdc_request(struct request_queue *rq)

	while ((req = blk_peek_request(rq)) != NULL) {
		struct vdc_port *port;
		struct vio_dring_state *dr;

		port = req->rq_disk->private_data;
		dr = &port->vio.drings[VIO_DRIVER_TX_RING];
		if (unlikely(vdc_tx_dring_avail(dr) < 1))

		blk_start_request(req);

		if (__send_request(req) < 0) {
			blk_requeue_request(rq, req);

			/* Avoid pointless unplugs. */
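/* Editor's note: do_vdc_request() above is the legacy request_fn model:
 * it peeks requests off the queue, skips dispatch while no TX descriptor
 * is free, and on a send failure requeues the request and leaves the
 * queue stopped; vdc_blk_queue_start() restarts it once completions have
 * drained the ring.
 */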
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
	struct vio_dring_state *dr;
	struct vio_completion comp;
	struct vio_disk_desc *desc;
	unsigned int map_perm;

	if (!(((u64)1 << (u64)op) & port->operations))

		op_len = sizeof(u32);
		map_perm = LDC_MAP_W;

		op_len = sizeof(u32);
		map_perm = LDC_MAP_R;

		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_W;

		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_R;

	case VD_OP_GET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_W;

	case VD_OP_SET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_R;

		map_perm = LDC_MAP_RW;

	case VD_OP_GET_DEVID:
		op_len = sizeof(struct vio_disk_devid);
		map_perm = LDC_MAP_W;

	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	op_len = (op_len + 7) & ~7;
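	/* Editor's note: the rounding above pads the payload length up to
	 * the next multiple of 8 bytes, e.g. (13 + 7) & ~7 = 16, so the
	 * shared buffer handed to ldc_map_single() has an 8-byte-aligned
	 * size.
	 */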
	req_buf = kzalloc(op_len, GFP_KERNEL);

	if (map_perm & LDC_MAP_R)
		memcpy(req_buf, buf, len);

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	/* XXX If we want to use this code generically we have to
	 * XXX handle TX ring exhaustion etc.
	 */
	desc = vio_dring_cur(dr);

	err = ldc_map_single(port->vio.lp, req_buf, op_len,
			     desc->cookies, port->ring_cookies,
		spin_unlock_irqrestore(&port->vio.lock, flags);

	init_completion(&comp.com);
	comp.waiting_for = WAITING_FOR_GEN_CMD;
	port->vio.cmp = &comp;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;

	desc->ncookies = err;
	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);

		dr->prod = vio_dring_next(dr, dr->prod);
		spin_unlock_irqrestore(&port->vio.lock, flags);

		wait_for_completion(&comp.com);

		port->vio.cmp = NULL;
		spin_unlock_irqrestore(&port->vio.lock, flags);

	if (map_perm & LDC_MAP_W)
		memcpy(buf, req_buf, len);

static int vdc_alloc_tx_ring(struct vdc_port *port)
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	unsigned long len, entry_size;

	entry_size = sizeof(struct vio_disk_desc) +
		     (sizeof(struct ldc_trans_cookie) * port->ring_cookies);
	len = (VDC_TX_RING_SIZE * entry_size);

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,

		return PTR_ERR(dring);

	dr->entry_size = entry_size;
	dr->num_entries = VDC_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VDC_TX_RING_SIZE;
	dr->ncookies = ncookies;

static void vdc_free_tx_ring(struct vdc_port *port)
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);

static int vdc_port_up(struct vdc_port *port)
	struct vio_completion comp;

	init_completion(&comp.com);
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);
	wait_for_completion(&comp.com);

static void vdc_port_down(struct vdc_port *port)
	ldc_disconnect(port->vio.lp);
	ldc_unbind(port->vio.lp);
	vdc_free_tx_ring(port);
	vio_ldc_free(&port->vio);

static int probe_disk(struct vdc_port *port)
	struct request_queue *q;

	err = vdc_port_up(port);

	if (vdc_version_supported(port, 1, 1)) {
		/* vdisk_size should be set during the handshake, if it wasn't
		 * then the underlying disk is reserved by another system
		 */
		if (port->vdisk_size == -1)
	} else {
		struct vio_disk_geom geom;

		err = generic_request(port, VD_OP_GET_DISKGEOM,
				      &geom, sizeof(geom));
			printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
			       "error %d\n", err);

		port->vdisk_size = ((u64)geom.num_cyl *
				    (u64)geom.num_hd *
				    (u64)geom.num_sec);
	q = blk_init_queue(do_vdc_request, &port->vio.lock);
		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
		       port->vio.name);
	g = alloc_disk(1 << PARTITION_SHIFT);
		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
		       port->vio.name);
		blk_cleanup_queue(q);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	blk_queue_max_segment_size(q, PAGE_SIZE);

	blk_queue_max_segments(q, port->ring_cookies);
	blk_queue_max_hw_sectors(q, port->max_xfer_size);
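	/* Editor's note: the limits above mirror what one LDC mapping can
	 * describe: each segment stays within a single aligned page
	 * (boundary PAGE_SIZE - 1, max segment size PAGE_SIZE), at most
	 * ring_cookies segments fit the per-descriptor cookie array, and a
	 * request never exceeds max_xfer_size sectors.
	 */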
	g->major = vdc_major;
	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
	strcpy(g->disk_name, port->disk_name);

	g->private_data = port;

	set_capacity(g, port->vdisk_size);

	if (vdc_version_supported(port, 1, 1)) {
		switch (port->vdisk_mtype) {
		case VD_MEDIA_TYPE_CD:
			pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
			g->flags |= GENHD_FL_CD;
			g->flags |= GENHD_FL_REMOVABLE;

		case VD_MEDIA_TYPE_DVD:
			pr_info(PFX "Virtual DVD %s\n", port->disk_name);
			g->flags |= GENHD_FL_CD;
			g->flags |= GENHD_FL_REMOVABLE;

		case VD_MEDIA_TYPE_FIXED:
			pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
	pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
		g->disk_name,
		port->vdisk_size, (port->vdisk_size >> (20 - 9)),
		port->vio.ver.major, port->vio.ver.minor);

	device_add_disk(&port->vio.vdev->dev, g);
static struct ldc_channel_config vdc_ldc_cfg = {
	.mode = LDC_MODE_UNRELIABLE,

static struct vio_driver_ops vdc_vio_ops = {
	.send_attr = vdc_send_attr,
	.handle_attr = vdc_handle_attr,
	.handshake_complete = vdc_handshake_complete,

static void print_version(void)
	static int version_printed;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
	struct mdesc_handle *hp;
	struct vdc_port *port;
	const u64 *ldc_timeout;

	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
		printk(KERN_ERR PFX "Port id [%llu] too large.\n",
		       vdev->dev_no);
		goto err_out_release_mdesc;
	port = kzalloc(sizeof(*port), GFP_KERNEL);
		printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
		goto err_out_release_mdesc;

	if (vdev->dev_no >= 26)
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c%c",
			 'a' + ((int)vdev->dev_no / 26) - 1,
			 'a' + ((int)vdev->dev_no % 26));
	else
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
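	/* Editor's note: the naming above follows the usual base-26 scheme:
	 * dev_no 0..25 map to vdiska..vdiskz, and from 26 onwards a second
	 * letter is appended, e.g. dev_no 26 -> "vdiskaa", 27 -> "vdiskab".
	 */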
	port->vdisk_size = -1;
	/* Actual wall time may be double due to do_generic_file_read() doing
	 * a readahead I/O first, and once that fails it will try to read a
	 * single page.
	 */
	ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
	port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
	setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
		    (unsigned long)port);
	INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);

	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
			      vdc_versions, ARRAY_SIZE(vdc_versions),
			      &vdc_vio_ops, port->disk_name);
		goto err_out_free_port;

	port->vdisk_block_size = 512;
	port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
	port->ring_cookies = ((port->max_xfer_size *
			       port->vdisk_block_size) / PAGE_SIZE) + 2;
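	/* Editor's note: with the 512-byte block size above, max_xfer_size
	 * is 128 KB / 512 = 256 blocks, so ring_cookies works out to
	 * (256 * 512) / PAGE_SIZE + 2 cookies per request -- e.g. 18 with
	 * the 8 KB pages typical of sparc64; the extra 2 give headroom for
	 * buffers that are not page-aligned.
	 */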
	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
		goto err_out_free_port;

	err = vdc_alloc_tx_ring(port);
		goto err_out_free_ldc;

	err = probe_disk(port);
		goto err_out_free_tx_ring;

	dev_set_drvdata(&vdev->dev, port);

err_out_free_tx_ring:
	vdc_free_tx_ring(port);

	vio_ldc_free(&port->vio);

err_out_release_mdesc:

static int vdc_port_remove(struct vio_dev *vdev)
	struct vdc_port *port = dev_get_drvdata(&vdev->dev);

		spin_lock_irqsave(&port->vio.lock, flags);
		blk_stop_queue(port->disk->queue);
		spin_unlock_irqrestore(&port->vio.lock, flags);

		flush_work(&port->ldc_reset_work);
		del_timer_sync(&port->ldc_reset_timer);
		del_timer_sync(&port->vio.timer);

		del_gendisk(port->disk);
		blk_cleanup_queue(port->disk->queue);
		put_disk(port->disk);

		vdc_free_tx_ring(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

static void vdc_requeue_inflight(struct vdc_port *port)
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
		struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
		struct vdc_req_entry *rqe = &port->rq_arr[idx];

		ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
		desc->hdr.state = VIO_DESC_FREE;
		dr->cons = vio_dring_next(dr, idx);

			vdc_end_special(port, desc);

		blk_requeue_request(port->disk->queue, req);

static void vdc_queue_drain(struct vdc_port *port)
	struct request *req;

	while ((req = blk_fetch_request(port->disk->queue)) != NULL)
		__blk_end_request_all(req, -EIO);

static void vdc_ldc_reset_timer(unsigned long _arg)
	struct vdc_port *port = (struct vdc_port *) _arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;

	spin_lock_irqsave(&vio->lock, flags);
	if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
		pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
			port->disk_name, port->ldc_timeout);
		vdc_queue_drain(port);
		vdc_blk_queue_start(port);
	spin_unlock_irqrestore(&vio->lock, flags);

static void vdc_ldc_reset_work(struct work_struct *work)
	struct vdc_port *port;
	struct vio_driver_state *vio;
	unsigned long flags;

	port = container_of(work, struct vdc_port, ldc_reset_work);

	spin_lock_irqsave(&vio->lock, flags);
	vdc_ldc_reset(port);
	spin_unlock_irqrestore(&vio->lock, flags);

static void vdc_ldc_reset(struct vdc_port *port)
	assert_spin_locked(&port->vio.lock);

	pr_warn(PFX "%s ldc link reset\n", port->disk_name);
	blk_stop_queue(port->disk->queue);
	vdc_requeue_inflight(port);
	vdc_port_down(port);

	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
		pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);

	err = vdc_alloc_tx_ring(port);
		pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);

	if (port->ldc_timeout)
		mod_timer(&port->ldc_reset_timer,
			  round_jiffies(jiffies + HZ * port->ldc_timeout));
	mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));

	vio_ldc_free(&port->vio);

static const struct vio_device_id vdc_port_match[] = {
MODULE_DEVICE_TABLE(vio, vdc_port_match);

static struct vio_driver vdc_port_driver = {
	.id_table = vdc_port_match,
	.probe = vdc_port_probe,
	.remove = vdc_port_remove,

static int __init vdc_init(void)
	sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);

	err = register_blkdev(0, VDCBLK_NAME);

	err = vio_register_driver(&vdc_port_driver);
		goto out_unregister_blkdev;

out_unregister_blkdev:
	unregister_blkdev(vdc_major, VDCBLK_NAME);

	destroy_workqueue(sunvdc_wq);

static void __exit vdc_exit(void)
	vio_unregister_driver(&vdc_port_driver);
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	destroy_workqueue(sunvdc_wq);

module_init(vdc_init);
module_exit(vdc_exit);