/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kvm_para.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
/*
 * virtio related functions
 */
struct vq_config_block {
        __u16 index;
        __u16 num;
} __packed;
#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */
struct virtio_ccw_device {
        struct virtio_device vdev;
        __u8 *status;
        __u8 config[VIRTIO_CCW_CONFIG_SIZE];
        struct ccw_device *cdev;
        __u32 curr_io;
        int err;
        wait_queue_head_t wait_q;
        spinlock_t lock;
        struct list_head virtqueues;
        unsigned long indicators;
        unsigned long indicators2;
        struct vq_config_block *config_block;
};
struct vq_info_block {
        __u64 queue;
        __u32 align;
        __u16 index;
        __u16 num;
} __packed;
struct virtio_feature_desc {
        __u32 features;
        __u8 index;
} __packed;
struct virtio_ccw_vq_info {
        struct virtqueue *vq;
        int num;
        void *queue;
        struct vq_info_block *info_block;
        struct list_head node;
        long cookie;
};
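/*
 * Channel commands issued on the virtio subchannel; the host interprets
 * them as virtio transport operations.
 */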
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32

#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
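/*
 * The interrupt parameter of a channel command encodes the operation in
 * flight in its upper halfword; SET_VQ additionally carries the queue
 * index in the lower halfword.
 */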
static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
        return container_of(vdev, struct virtio_ccw_device, vdev);
}
static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
        unsigned long flags;
        __u32 ret;

        spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
        if (vcdev->err)
                ret = 0;
        else
                ret = vcdev->curr_io & flag;
        spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
        return ret;
}
static int ccw_io_helper(struct virtio_ccw_device *vcdev,
                         struct ccw1 *ccw, __u32 intparm)
{
        int ret;
        unsigned long flags;
        int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

        do {
                spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
                ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
                if (!ret) {
                        if (!vcdev->curr_io)
                                vcdev->err = 0;
                        vcdev->curr_io |= flag;
                }
                spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
                cpu_relax();
        } while (ret == -EBUSY);
        wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
        return ret ? ret : vcdev->err;
}
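/*
 * Queue notifications are passed to the host via the diagnose 0x500 KVM
 * hypercall: function code in %r1, subchannel id in %r2, queue index in
 * %r3 and the cookie from the previous notification in %r4; the host
 * returns a new cookie in %r2.
 */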
static inline long do_kvm_notify(struct subchannel_id schid,
                                 unsigned long queue_index,
                                 long cookie)
{
        register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
        register struct subchannel_id __schid asm("2") = schid;
        register unsigned long __index asm("3") = queue_index;
        register long __rc asm("2");
        register long __cookie asm("4") = cookie;

        asm volatile ("diag 2,4,0x500\n"
                      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
                      "d"(__cookie)
                      : "memory", "cc");
        return __rc;
}
static void virtio_ccw_kvm_notify(struct virtqueue *vq)
{
        struct virtio_ccw_vq_info *info = vq->priv;
        struct virtio_ccw_device *vcdev;
        struct subchannel_id schid;

        vcdev = to_vc_device(info->vq->vdev);
        ccw_device_get_schid(vcdev->cdev, &schid);
        info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
}
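/* Ask the host for the ring size of the virtqueue with the given index. */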
static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
                                   struct ccw1 *ccw, int index)
{
        vcdev->config_block->index = index;
        ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
        ccw->flags = 0;
        ccw->count = sizeof(struct vq_config_block);
        ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
        return vcdev->config_block->num;
}
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
        struct virtio_ccw_vq_info *info = vq->priv;
        unsigned long flags;
        unsigned long size;
        int ret;
        unsigned int index = vq->index;

        /* Remove from our list. */
        spin_lock_irqsave(&vcdev->lock, flags);
        list_del(&info->node);
        spin_unlock_irqrestore(&vcdev->lock, flags);

        /* Release from host. */
        info->info_block->queue = 0;
        info->info_block->align = 0;
        info->info_block->index = index;
        info->info_block->num = 0;
        ccw->cmd_code = CCW_CMD_SET_VQ;
        ccw->flags = 0;
        ccw->count = sizeof(*info->info_block);
        ccw->cda = (__u32)(unsigned long)(info->info_block);
        ret = ccw_io_helper(vcdev, ccw,
                            VIRTIO_CCW_DOING_SET_VQ | index);
        /*
         * -ENODEV isn't considered an error: The device is gone anyway.
         * This may happen on device detach.
         */
        if (ret && (ret != -ENODEV))
                dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
                         ret, index);

        vring_del_virtqueue(vq);
        size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
        free_pages_exact(info->queue, size);
        kfree(info->info_block);
        kfree(info);
}
static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
        struct virtqueue *vq, *n;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        list_for_each_entry_safe(vq, n, &vdev->vqs, list)
                virtio_ccw_del_vq(vq, ccw);

        kfree(ccw);
}
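/*
 * Set up a single virtqueue: query the ring size via READ_VQ_CONF,
 * allocate and wrap the ring, then register its location with the host
 * via SET_VQ.
 */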
static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
                                             int i, vq_callback_t *callback,
                                             const char *name,
                                             struct ccw1 *ccw)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        int err;
        struct virtqueue *vq = NULL;
        struct virtio_ccw_vq_info *info;
        unsigned long size = 0; /* silence the compiler */
        unsigned long flags;

        /* Allocate queue. */
        info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
        if (!info) {
                dev_warn(&vcdev->cdev->dev, "no info\n");
                err = -ENOMEM;
                goto out_err;
        }
        info->info_block = kzalloc(sizeof(*info->info_block),
                                   GFP_DMA | GFP_KERNEL);
        if (!info->info_block) {
                dev_warn(&vcdev->cdev->dev, "no info block\n");
                err = -ENOMEM;
                goto out_err;
        }
        info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
        size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
        info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
        if (info->queue == NULL) {
                dev_warn(&vcdev->cdev->dev, "no queue\n");
                err = -ENOMEM;
                goto out_err;
        }

        vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
                                 true, info->queue, virtio_ccw_kvm_notify,
                                 callback, name);
        if (!vq) {
                /* For now, we fail if we can't get the requested size. */
                dev_warn(&vcdev->cdev->dev, "no vq\n");
                err = -ENOMEM;
                goto out_err;
        }

        /* Register it with the host. */
        info->info_block->queue = (__u64)info->queue;
        info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN;
        info->info_block->index = i;
        info->info_block->num = info->num;
        ccw->cmd_code = CCW_CMD_SET_VQ;
        ccw->flags = 0;
        ccw->count = sizeof(*info->info_block);
        ccw->cda = (__u32)(unsigned long)(info->info_block);
        err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
        if (err) {
                dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
                goto out_err;
        }

        info->vq = vq;
        vq->priv = info;

        /* Save it to our list. */
        spin_lock_irqsave(&vcdev->lock, flags);
        list_add(&info->node, &vcdev->virtqueues);
        spin_unlock_irqrestore(&vcdev->lock, flags);

        return vq;

out_err:
        if (vq)
                vring_del_virtqueue(vq);
        if (info) {
                if (info->queue)
                        free_pages_exact(info->queue, size);
                kfree(info->info_block);
        }
        kfree(info);
        return ERR_PTR(err);
}
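/*
 * A single channel command word is reused for all transport operations;
 * the indicator address handed to the host must be a data area below 2G,
 * hence the GFP_DMA allocation.
 */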
static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                               struct virtqueue *vqs[],
                               vq_callback_t *callbacks[],
                               const char *names[])
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        unsigned long *indicatorp = NULL;
        int ret, i;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return -ENOMEM;

        for (i = 0; i < nvqs; ++i) {
                vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
                                             ccw);
                if (IS_ERR(vqs[i])) {
                        ret = PTR_ERR(vqs[i]);
                        vqs[i] = NULL;
                        goto out;
                }
        }
        ret = -ENOMEM;
        /* We need a data area under 2G to communicate. */
        indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
        if (!indicatorp)
                goto out;
        *indicatorp = (unsigned long) &vcdev->indicators;
        /* Register queue indicators with host. */
        vcdev->indicators = 0;
        ccw->cmd_code = CCW_CMD_SET_IND;
        ccw->flags = 0;
        ccw->count = sizeof(vcdev->indicators);
        ccw->cda = (__u32)(unsigned long) indicatorp;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
        if (ret)
                goto out;
        /* Register indicators2 with host for config changes */
        *indicatorp = (unsigned long) &vcdev->indicators2;
        vcdev->indicators2 = 0;
        ccw->cmd_code = CCW_CMD_SET_CONF_IND;
        ccw->flags = 0;
        ccw->count = sizeof(vcdev->indicators2);
        ccw->cda = (__u32)(unsigned long) indicatorp;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
        if (ret)
                goto out;

        kfree(indicatorp);
        kfree(ccw);
        return 0;
out:
        kfree(indicatorp);
        kfree(ccw);
        virtio_ccw_del_vqs(vdev);
        return ret;
}
static void virtio_ccw_reset(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        /* Zero status bits. */
        *vcdev->status = 0;

        /* Send a reset ccw on device. */
        ccw->cmd_code = CCW_CMD_VDEV_RESET;
        ccw->flags = 0;
        ccw->count = 0;
        ccw->cda = 0;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
        kfree(ccw);
}
static u32 virtio_ccw_get_features(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct virtio_feature_desc *features;
        int ret, rc;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return 0;

        features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
        if (!features) {
                rc = 0;
                goto out_free;
        }
        /* Read the feature bits from the host. */
        /* TODO: Features > 32 bits */
        features->index = 0;
        ccw->cmd_code = CCW_CMD_READ_FEAT;
        ccw->flags = 0;
        ccw->count = sizeof(*features);
        ccw->cda = (__u32)(unsigned long)features;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
        if (ret) {
                rc = 0;
                goto out_free;
        }

        rc = le32_to_cpu(features->features);
out_free:
        kfree(features);
        kfree(ccw);
        return rc;
}
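/*
 * Feature bits are negotiated in 32-bit chunks; each chunk is written to
 * the host together with its index in the feature descriptor.
 */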
static void virtio_ccw_finalize_features(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct virtio_feature_desc *features;
        int i;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
        if (!features)
                goto out_free;

        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);

        for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features);
             i++) {
                int highbits = i % 2 ? 32 : 0;
                features->index = i;
                features->features = cpu_to_le32(vdev->features[i / 2]
                                                 >> highbits);
                /* Write the feature bits to the host. */
                ccw->cmd_code = CCW_CMD_WRITE_FEAT;
                ccw->flags = 0;
                ccw->count = sizeof(*features);
                ccw->cda = (__u32)(unsigned long)features;
                ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
        }
out_free:
        kfree(features);
        kfree(ccw);
}
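/*
 * Config space accesses always transfer the area from byte 0 up to
 * offset + len; the most recently read contents are cached in
 * vcdev->config.
 */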
static void virtio_ccw_get_config(struct virtio_device *vdev,
                                  unsigned int offset, void *buf, unsigned len)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        int ret;
        struct ccw1 *ccw;
        void *config_area;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
        if (!config_area)
                goto out_free;

        /* Read the config area from the host. */
        ccw->cmd_code = CCW_CMD_READ_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
        ccw->cda = (__u32)(unsigned long)config_area;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
        if (ret)
                goto out_free;

        memcpy(vcdev->config, config_area, sizeof(vcdev->config));
        memcpy(buf, &vcdev->config[offset], len);

out_free:
        kfree(config_area);
        kfree(ccw);
}
static void virtio_ccw_set_config(struct virtio_device *vdev,
                                  unsigned int offset, const void *buf,
                                  unsigned len)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;
        void *config_area;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
        if (!config_area)
                goto out_free;

        memcpy(&vcdev->config[offset], buf, len);
        /* Write the config area to the host. */
        memcpy(config_area, vcdev->config, sizeof(vcdev->config));
        ccw->cmd_code = CCW_CMD_WRITE_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
        ccw->cda = (__u32)(unsigned long)config_area;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
        kfree(config_area);
        kfree(ccw);
}
static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);

        return *vcdev->status;
}
static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        /* Write the status to the host. */
        *vcdev->status = status;
        ccw->cmd_code = CCW_CMD_WRITE_STATUS;
        ccw->flags = 0;
        ccw->count = sizeof(status);
        ccw->cda = (__u32)(unsigned long)vcdev->status;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
        kfree(ccw);
}
static struct virtio_config_ops virtio_ccw_config_ops = {
        .get_features = virtio_ccw_get_features,
        .finalize_features = virtio_ccw_finalize_features,
        .get = virtio_ccw_get_config,
        .set = virtio_ccw_set_config,
        .get_status = virtio_ccw_get_status,
        .set_status = virtio_ccw_set_status,
        .reset = virtio_ccw_reset,
        .find_vqs = virtio_ccw_find_vqs,
        .del_vqs = virtio_ccw_del_vqs,
};
/*
 * ccw bus driver related functions
 */
static void virtio_ccw_release_dev(struct device *_d)
{
        struct virtio_device *dev = container_of(_d, struct virtio_device,
                                                 dev);
        struct virtio_ccw_device *vcdev = to_vc_device(dev);

        kfree(vcdev->status);
        kfree(vcdev->config_block);
        kfree(vcdev);
}
static int irb_is_error(struct irb *irb)
{
        if (scsw_cstat(&irb->scsw) != 0)
                return 1;
        if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
                return 1;
        if (scsw_cc(&irb->scsw) != 0)
                return 1;
        return 0;
}
static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
                                              int index)
{
        struct virtio_ccw_vq_info *info;
        unsigned long flags;
        struct virtqueue *vq;

        vq = NULL;
        spin_lock_irqsave(&vcdev->lock, flags);
        list_for_each_entry(info, &vcdev->virtqueues, node) {
                if (info->vq->index == index) {
                        vq = info->vq;
                        break;
                }
        }
        spin_unlock_irqrestore(&vcdev->lock, flags);
        return vq;
}
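/*
 * Interrupt handler for the virtio subchannel: completes the transport
 * command in flight (matched via the intparm flags), kicks virtqueues
 * whose bit is set in the classic indicators and forwards config changes
 * signalled via indicators2.
 */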
static void virtio_ccw_int_handler(struct ccw_device *cdev,
                                   unsigned long intparm,
                                   struct irb *irb)
{
        __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
        int i;
        struct virtqueue *vq;
        struct virtio_driver *drv;

        /* Check if it's a notification from the host. */
        if ((intparm == 0) &&
            (scsw_stctl(&irb->scsw) ==
             (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
                /* OK */
        }
        if (irb_is_error(irb))
                vcdev->err = -EIO; /* XXX - use real error */
        if (vcdev->curr_io & activity) {
                switch (activity) {
                case VIRTIO_CCW_DOING_READ_FEAT:
                case VIRTIO_CCW_DOING_WRITE_FEAT:
                case VIRTIO_CCW_DOING_READ_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_STATUS:
                case VIRTIO_CCW_DOING_SET_VQ:
                case VIRTIO_CCW_DOING_SET_IND:
                case VIRTIO_CCW_DOING_SET_CONF_IND:
                case VIRTIO_CCW_DOING_RESET:
                case VIRTIO_CCW_DOING_READ_VQ_CONF:
                        vcdev->curr_io &= ~activity;
                        wake_up(&vcdev->wait_q);
                        break;
                default:
                        /* don't know what to do... */
                        dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
                                 activity);
                        WARN_ON(1);
                        break;
                }
        }
        for_each_set_bit(i, &vcdev->indicators,
                         sizeof(vcdev->indicators) * BITS_PER_BYTE) {
                /* The bit clear must happen before the vring kick. */
                clear_bit(i, &vcdev->indicators);
                barrier();
                vq = virtio_ccw_vq_by_ind(vcdev, i);
                vring_interrupt(0, vq);
        }
        if (test_bit(0, &vcdev->indicators2)) {
                drv = container_of(vcdev->vdev.dev.driver,
                                   struct virtio_driver, driver);

                if (drv && drv->config_changed)
                        drv->config_changed(&vcdev->vdev);
                clear_bit(0, &vcdev->indicators2);
        }
}
/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
                     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];
static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
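/*
 * A no_auto entry is a ccw bus id (cssid.ssid.devno) or a '-'-separated
 * range of bus ids, e.g. 0.0.1234 or 0.0.1000-0.0.10ff; matching devices
 * are not automatically set online.
 */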
static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
        struct ccw_dev_id id;

        ccw_device_get_id(cdev, &id);
        if (test_bit(id.devno, devs_no_auto[id.ssid]))
                return 0;
        return 1;
}
static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
        struct ccw_device *cdev = data;
        int ret;

        ret = ccw_device_set_online(cdev);
        if (ret)
                dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}
static int virtio_ccw_probe(struct ccw_device *cdev)
{
        cdev->handler = virtio_ccw_int_handler;

        if (virtio_ccw_check_autoonline(cdev))
                async_schedule(virtio_ccw_auto_online, cdev);
        return 0;
}
static void virtio_ccw_remove(struct ccw_device *cdev)
{
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

        if (cdev->online) {
                unregister_virtio_device(&vcdev->vdev);
                dev_set_drvdata(&cdev->dev, NULL);
        }
        cdev->handler = NULL;
}
static int virtio_ccw_offline(struct ccw_device *cdev)
{
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

        unregister_virtio_device(&vcdev->vdev);
        dev_set_drvdata(&cdev->dev, NULL);
        return 0;
}
static int virtio_ccw_online(struct ccw_device *cdev)
{
        int ret;
        struct virtio_ccw_device *vcdev;

        vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
        if (!vcdev) {
                dev_warn(&cdev->dev, "Could not get memory for virtio\n");
                ret = -ENOMEM;
                goto out_free;
        }
        vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
                                      GFP_DMA | GFP_KERNEL);
        if (!vcdev->config_block) {
                ret = -ENOMEM;
                goto out_free;
        }
        vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
        if (!vcdev->status) {
                ret = -ENOMEM;
                goto out_free;
        }

        vcdev->vdev.dev.parent = &cdev->dev;
        vcdev->vdev.dev.release = virtio_ccw_release_dev;
        vcdev->vdev.config = &virtio_ccw_config_ops;
        vcdev->cdev = cdev;
        init_waitqueue_head(&vcdev->wait_q);
        INIT_LIST_HEAD(&vcdev->virtqueues);
        spin_lock_init(&vcdev->lock);

        dev_set_drvdata(&cdev->dev, vcdev);
        vcdev->vdev.id.vendor = cdev->id.cu_type;
        vcdev->vdev.id.device = cdev->id.cu_model;
        ret = register_virtio_device(&vcdev->vdev);
        if (ret) {
                dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
                         ret);
                goto out_put;
        }
        return 0;
out_put:
        dev_set_drvdata(&cdev->dev, NULL);
        put_device(&vcdev->vdev.dev);
        return ret;
out_free:
        if (vcdev) {
                kfree(vcdev->status);
                kfree(vcdev->config_block);
        }
        kfree(vcdev);
        return ret;
}
static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
        /* TODO: Check whether we need special handling here. */
        return 0;
}
static struct ccw_device_id virtio_ids[] = {
        { CCW_DEVICE(0x3832, 0) },
        {},
};
MODULE_DEVICE_TABLE(ccw, virtio_ids);
static struct ccw_driver virtio_ccw_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "virtio_ccw",
        },
        .ids = virtio_ids,
        .probe = virtio_ccw_probe,
        .remove = virtio_ccw_remove,
        .set_offline = virtio_ccw_offline,
        .set_online = virtio_ccw_online,
        .notify = virtio_ccw_cio_notify,
        .int_class = IRQIO_VIR,
};
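/* Parse up to max_digit hex digits from *cp into *val, advancing *cp. */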
static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
                           int max_digit, int max_val)
{
        int diff;

        diff = 0;
        *val = 0;

        while (diff <= max_digit) {
                int value = hex_to_bin(**cp);

                if (value < 0)
                        break;
                *val = *val * 16 + value;
                (*cp)++;
                diff++;
        }

        if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
                return 1;

        return 0;
}
static int __init parse_busid(char *str, unsigned int *cssid,
                              unsigned int *ssid, unsigned int *devno)
{
        char *str_work;
        int rc, ret;

        rc = 1;

        if (*str == '\0')
                goto out;

        str_work = str;
        ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
        if (ret || (str_work[0] != '.'))
                goto out;
        str_work++;
        ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
        if (ret || (str_work[0] != '.'))
                goto out;
        str_work++;
        ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
        if (ret || (str_work[0] != '\0'))
                goto out;

        rc = 0;
out:
        return rc;
}
static void __init no_auto_parse(void)
{
        unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
        char *parm, *str;
        int rc;

        str = no_auto;
        while ((parm = strsep(&str, ","))) {
                rc = parse_busid(strsep(&parm, "-"), &from_cssid,
                                 &from_ssid, &from);
                if (rc)
                        continue;
                if (parm != NULL) {
                        rc = parse_busid(parm, &to_cssid,
                                         &to_ssid, &to);
                        if ((from_ssid > to_ssid) ||
                            ((from_ssid == to_ssid) && (from > to)))
                                rc = -EINVAL;
                } else {
                        to_cssid = from_cssid;
                        to_ssid = from_ssid;
                        to = from;
                }
                if (rc)
                        continue;
                while ((from_ssid < to_ssid) ||
                       ((from_ssid == to_ssid) && (from <= to))) {
                        set_bit(from, devs_no_auto[from_ssid]);
                        from++;
                        if (from > __MAX_SUBCHANNEL) {
                                from_ssid++;
                                from = 0;
                        }
                }
        }
}
static int __init virtio_ccw_init(void)
{
        /* parse no_auto string before we do anything further */
        no_auto_parse();
        return ccw_driver_register(&virtio_ccw_driver);
}
module_init(virtio_ccw_init);
static void __exit virtio_ccw_exit(void)
{
        ccw_driver_unregister(&virtio_ccw_driver);
}
module_exit(virtio_ccw_exit);