// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"
int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};
static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}
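/*
 * Usage sketch for the staged iterator (illustrative only, not part of the
 * driver; the callback names are hypothetical): a caller that wants to visit
 * every subchannel, registered or not, passes one callback for each case.
 *
 *	static int count_known(struct subchannel *sch, void *data)
 *	{
 *		(*(unsigned long *)data)++;
 *		return 0;
 *	}
 *
 *	static int count_unknown(struct subchannel_id schid, void *data)
 *	{
 *		(*(unsigned long *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *
 *	for_each_subchannel_staged(count_known, count_unknown, &count);
 */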
static void css_sch_todo(struct work_struct *work);
static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}
static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch->lock);
	kfree(sch);
}
static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}
struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	device_initialize(&sch->dev);
	/*
	 * The physical addresses of some the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
	sch->dma_mask = DMA_BIT_MASK(64);
	sch->dev.dma_mask = &sch->dma_mask;
	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}
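/*
 * Illustration of the two masks set up above (a sketch, not driver code):
 * coherent allocations against the subchannel device honor the 31-bit
 * coherent_dma_mask and are therefore usable for CCWs, e.g.
 *
 *	dma_addr_t dma;
 *	void *ccw_area = dma_alloc_coherent(&sch->dev, size, &dma,
 *					    GFP_KERNEL);
 *
 * yields a DMA address below 2G, while streaming mappings through
 * sch->dev.dma_mask may use the full 64-bit range.
 */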
static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}
/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = sch->driver_override;
	if (strlen(driver_override)) {
		sch->driver_override = driver_override;
	} else {
		kfree(driver_override);
		sch->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);
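/*
 * Userspace example (paths and device id hypothetical): pin a subchannel to
 * one css driver via the attribute above, then ask the bus to reprobe it.
 * The driver name must match the candidate driver's drv.name, which
 * css_bus_match() compares against driver_override.
 *
 *	echo io_subchannel > /sys/bus/css/devices/0.0.0004/driver_override
 *	echo 0.0.0004 > /sys/bus/css/drivers_probe
 */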
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};
static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);
static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);
static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};
int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}
static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}
static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}
/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
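/*
 * Caller sketch (illustrative only): as documented above, the subchannel
 * lock must be held around the scheduling request, e.g. to queue an
 * unregistration:
 *
 *	spin_lock_irq(sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_UNREG);
 *	spin_unlock_irq(sch->lock);
 */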
static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;
static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take long time for platforms with lots of
		 * known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while. */
		cond_resched();
	}
	return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}
void css_schedule_eval_all_unreg(unsigned long delay)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}
void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
		return;
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}
static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}
static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);
static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}
static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);
static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}
static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};
static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	css->device.coherent_dma_mask = DMA_BIT_MASK(64);
	css->device.dma_mask = &css->device.coherent_dma_mask;

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};
#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;
/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}
static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			  (void *) chunk->start_addr,
			  (dma_addr_t) chunk->phys_addr);
}
void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}
static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;
	return 0;
}
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	dma_addr_t dma_addr;
	unsigned long addr;
	size_t chunk_size;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_alloc(gp_dma, size);
	while (!addr) {
		chunk_size = round_up(size, PAGE_SIZE);
		addr = (unsigned long) dma_alloc_coherent(dma_dev,
			chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
		addr = gen_pool_alloc(gp_dma, size);
	}
	return (void *) addr;
}
void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}
/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}
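/*
 * Usage sketch for the css global DMA pool (illustrative only; the size is
 * arbitrary): allocate a zeroed, DMA-capable buffer and release it again.
 *
 *	void *buf = cio_dma_zalloc(64);
 *
 *	if (buf) {
 *		// ... use buf for channel I/O ...
 *		cio_dma_free(buf, 64);
 *	}
 */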
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_rn;
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];

		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}
static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);
static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}
int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}
static const struct proc_ops cio_settle_proc_ops = {
	.proc_open	= nonseekable_open,
	.proc_write	= cio_settle_write,
	.proc_lseek	= no_llseek,
};
static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
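/*
 * Userspace example for the settle interface above (invocation illustrative
 * only): any write to /proc/cio_settle blocks until pending CRWs have been
 * handled and subchannel evaluation has quiesced, e.g.
 *
 *	echo 1 > /proc/cio_settle
 */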
int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}
static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}
static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}
static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}
static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}
static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
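/*
 * Registration sketch (illustrative only; the driver name and id table
 * contents are hypothetical): a css driver declares which subchannel types
 * it handles via an id table, which css_bus_match() walks on probe.
 *
 *	static struct css_device_id example_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ /\* end of list *\/ },
 *	};
 *
 *	static struct css_driver example_driver = {
 *		.drv = {
 *			.name  = "example",
 *			.owner = THIS_MODULE,
 *		},
 *		.subchannel_type = example_ids,
 *	};
 *
 *	ret = css_driver_register(&example_driver);
 */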
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);