// SPDX-License-Identifier: GPL-2.0
/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2012
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pci.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>
#include <asm/isc.h>
#include <asm/ebcdic.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
	case 0x0106:		/* "Wrong Channel Parm" for the op 0x003d */
		return -EOPNOTSUPP;
	case 0x000b:
	case 0x0107:		/* "Channel busy" for the op 0x003d */
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);

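/*
 * Typical calling pattern (a sketch, mirroring the helpers below): issue
 * the CHSC instruction, map a nonzero condition code to an error, then
 * let chsc_error_from_response() translate the response code:
 *
 *	ccode = chsc(area);
 *	if (ccode > 0)
 *		return (ccode == 3) ? -ENODEV : -EBUSY;
 *	return chsc_error_from_response(area->response.code);
 */
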
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __packed __aligned(PAGE_SIZE);

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	unsigned long flags;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

/**
 * chsc_ssqd() - store subchannel QDIO data (SSQD)
 * @schid: id of the subchannel on which SSQD is performed
 * @ssqd: request and response block for SSQD
 *
 * Returns 0 on success.
 */
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
{
	memset(ssqd, 0, sizeof(*ssqd));
	ssqd->request.length = 0x0010;
	ssqd->request.code = 0x0024;
	ssqd->first_sch = schid.sch_no;
	ssqd->last_sch = schid.sch_no;
	ssqd->ssid = schid.ssid;

	if (chsc(ssqd))
		return -EIO;

	return chsc_error_from_response(ssqd->response.code);
}
EXPORT_SYMBOL_GPL(chsc_ssqd);

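/*
 * Usage sketch (hypothetical caller): @ssqd must be a page-aligned,
 * caller-owned request/response block, e.g.:
 *
 *	struct chsc_ssqd_area *ssqd =
 *		(void *)get_zeroed_page(GFP_KERNEL);
 *
 *	if (ssqd && !chsc_ssqd(schid, ssqd))
 *		... evaluate the returned subchannel QDIO data ...
 *
 * The qdio layer is the main in-tree user of this interface.
 */
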
/**
 * chsc_sadc() - set adapter device controls (SADC)
 * @schid: id of the subchannel on which SADC is performed
 * @scssc: request and response block for SADC
 * @summary_indicator_addr: summary indicator address
 * @subchannel_indicator_addr: subchannel indicator address
 * @isc: Interruption Subclass for this subchannel
 *
 * Returns 0 on success.
 */
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
	      u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc)
{
	memset(scssc, 0, sizeof(*scssc));
	scssc->request.length = 0x0fe0;
	scssc->request.code = 0x0021;
	scssc->operation_code = 0;

	scssc->summary_indicator_addr = summary_indicator_addr;
	scssc->subchannel_indicator_addr = subchannel_indicator_addr;

	scssc->ks = PAGE_DEFAULT_KEY >> 4;
	scssc->kc = PAGE_DEFAULT_KEY >> 4;
	scssc->isc = isc;
	scssc->schid = schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc->word_with_d_bit = 0x10000000;

	if (chsc(scssc))
		return -EIO;

	return chsc_error_from_response(scssc->response.code);
}
EXPORT_SYMBOL_GPL(chsc_sadc);

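/*
 * Usage sketch (assumptions marked): qdio establishes its adapter
 * interruption indicators roughly like this, with @scssc coming from a
 * zeroed, caller-owned page:
 *
 *	rc = chsc_sadc(schid, scssc, summary_addr, subchannel_addr,
 *		       QDIO_AIRQ_ISC);
 *
 * Passing zero for both indicator addresses resets the controls again.
 */
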
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	struct channel_path *chp = chpid_to_chp(chpid);
	struct chp_link link;
	char dbf_txt[15];

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();

	mutex_lock(&chp->lock);
	chp_update_desc(chp);
	mutex_unlock(&chp->lock);

	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
	css_schedule_reprobe();
}

struct chsc_sei_nt0_area {
	u8  flags;
	u8  vf;				/* validity flags */
	u8  rs;				/* reporting source */
	u8  cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	u32 reserved1;
	u32 reserved2;
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
} __packed;

struct chsc_sei_nt2_area {
	u8  flags;			/* p and v bit */
	u8  reserved1;
	u8  reserved2;
	u8  cc;				/* content code */
	u32 reserved3[13];
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
} __packed;

#define CHSC_SEI_NT0	(1ULL << 63)
#define CHSC_SEI_NT2	(1ULL << 61)

struct chsc_sei {
	struct chsc_header request;
	u32 reserved1;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
	u32 :24;
	u8 nt;
	union {
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
	} u;
} __packed __aligned(PAGE_SIZE);

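/*
 * The ntsm (notification-type mask) in the request selects which event
 * classes the caller is interested in; for example, the CRW handler
 * further below asks for both supported types:
 *
 *	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
 */
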
/*
 * Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
 */

#define LIR_IQ_CLASS_INFO		0
#define LIR_IQ_CLASS_DEGRADED		1
#define LIR_IQ_CLASS_NOT_OPERATIONAL	2

struct lir {
	struct {
		u32 null:1;
		u32 reserved:3;
		u32 class:2;
		u32 reserved2:2;
	} __packed iq;
	u32 ic:8;
	u32 reserved:16;
	struct node_descriptor incident_node;
	struct node_descriptor attached_node;
	u8 reserved2[32];
} __packed;

#define PARAMS_LEN	10	/* PARAMS=xx,xxxxxx */
#define NODEID_LEN	35	/* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */

/* Copy EBCDIC text, convert to ASCII and optionally add delimiter. */
static char *store_ebcdic(char *dest, const char *src, unsigned long len,
			  char delim)
{
	memcpy(dest, src, len);
	EBCASC(dest, len);

	if (delim)
		dest[len++] = delim;

	return dest + len;
}

/* Format node ID and parameters for output in LIR log message. */
static void format_node_data(char *params, char *id, struct node_descriptor *nd)
{
	memset(params, 0, PARAMS_LEN);
	memset(id, 0, NODEID_LEN);

	if (nd->validity != ND_VALIDITY_VALID) {
		strncpy(params, "n/a", PARAMS_LEN - 1);
		strncpy(id, "n/a", NODEID_LEN - 1);
		return;
	}

	/* PARAMS=xx,xxxxxx */
	snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params);
	/* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
	id = store_ebcdic(id, nd->type, sizeof(nd->type), '/');
	id = store_ebcdic(id, nd->model, sizeof(nd->model), ',');
	id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.');
	id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0);
	id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ',');
	sprintf(id, "%04X", nd->tag);
}

static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
	struct lir *lir = (struct lir *) &sei_area->ccdf;
	char iuparams[PARAMS_LEN], iunodeid[NODEID_LEN], auparams[PARAMS_LEN],
	     aunodeid[NODEID_LEN];

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n",
		      sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);

	/* Ignore NULL Link Incident Records. */
	if (lir->iq.null)
		return;

	/* Inform user that a link requires maintenance actions because it has
	 * become degraded or not operational. Note that this log message is
	 * the primary intention behind a Link Incident Record. */

	format_node_data(iuparams, iunodeid, &lir->incident_node);
	format_node_data(auparams, aunodeid, &lir->attached_node);

	switch (lir->iq.class) {
	case LIR_IQ_CLASS_DEGRADED:
		pr_warn("Link degraded: RS=%02x RSID=%04x IC=%02x "
			"IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
			sei_area->rs, sei_area->rsid, lir->ic, iuparams,
			iunodeid, auparams, aunodeid);
		break;
	case LIR_IQ_CLASS_NOT_OPERATIONAL:
		pr_err("Link stopped: RS=%02x RSID=%04x IC=%02x "
		       "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
		       sei_area->rs, sei_area->rsid, lir->ic, iuparams,
		       iunodeid, auparams, aunodeid);
		break;
	default:
		break;
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (!status)
		return;

	if (status < 0) {
		chp_new(chpid);
	} else {
		chp = chpid_to_chp(chpid);
		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);
	}
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);
	}
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm available information\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_process_availability_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: process availability information"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
{
	CIO_CRW_EVENT(3, "chsc: ap config changed\n");
	if (sei_area->rs != 5)
		return;

	ap_bus_cfg_chg();
}

static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
	switch (sei_area->cc) {
	case 1:
		zpci_event_error(sei_area->ccdf);
		break;
	case 2:
		zpci_event_availability(sei_area->ccdf);
		break;
	default:
		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}
}

static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 3: /* ap config changed */
		chsc_process_sei_ap_cfg_chg(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	case 14: /* scm available notification */
		chsc_process_sei_scm_avail(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
}

static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
	static int ntsm_unsupported;

	while (true) {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		if (!ntsm_unsupported)
			sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code != 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
				      sei->response.code, sei->ntsm);

			if (sei->response.code == 3 && sei->ntsm) {
				/* Fallback for old firmware. */
				ntsm_unsupported = 1;
				continue;
			}
			break;
		}

		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
		switch (sei->nt) {
		case 0:
			chsc_process_sei_nt0(&sei->u.nt0_area);
			break;
		case 2:
			chsc_process_sei_nt2(&sei->u.nt2_area);
			break;
		default:
			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
			break;
		}

		/* The p bit indicates that more event information is pending. */
		if (!(sei->u.nt0_area.flags & 0x80))
			break;
	}
}

/*
 * Handle channel subsystem related CRWs.
 * Use store event information to find out what's going on.
 *
 * Note: Access to sei_page is serialized through machine check handler
 * thread, so no need for locking.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei *sei = sei_page;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);

	CIO_TRACE_EVENT(2, "prcss");
	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}

void chsc_chp_online(struct chp_id chpid)
{
	struct channel_path *chp = chpid_to_chp(chpid);
	struct chp_link link;
	char dbf_txt[15];

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();

		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);

		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
		css_schedule_reprobe();
	}
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on) {
		/* Try to update the channel path description. */
		chp_update_desc(chp);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   NULL, &chpid);
		css_schedule_reprobe();
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __packed *secm_area;
	unsigned long flags;
	int ret, ccode;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}

int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1 || rfmt == 0) && c == 1 &&
	    !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;
	if ((rfmt == 3) && !css_general_characteristics.util_str)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

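/*
 * Callers pass a zeroed page for @page; a minimal sketch for a fmt-0
 * description (this is essentially what the generated wrappers below do,
 * under chsc_page_lock):
 *
 *	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, page);
 *	if (!ret)
 *		memcpy(&desc, ((struct chsc_scpd *)page)->data,
 *		       sizeof(desc));
 */
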
#define chsc_det_chp_desc(FMT, c)					\
int chsc_determine_fmt##FMT##_channel_path_desc(			\
	struct chp_id chpid, struct channel_path_desc_fmt##FMT *desc)	\
{									\
	struct chsc_scpd *scpd_area;					\
	unsigned long flags;						\
	int ret;							\
									\
	spin_lock_irqsave(&chsc_page_lock, flags);			\
	scpd_area = chsc_page;						\
	ret = chsc_determine_channel_path_desc(chpid, 0, FMT, c, 0,	\
					       scpd_area);		\
	if (ret)							\
		goto out;						\
									\
	memcpy(desc, scpd_area->data, sizeof(*desc));			\
out:									\
	spin_unlock_irqrestore(&chsc_page_lock, flags);			\
	return ret;							\
}

chsc_det_chp_desc(0, 0)
chsc_det_chp_desc(1, 1)
chsc_det_chp_desc(3, 0)

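/*
 * The macro above expands to chsc_determine_fmt0_channel_path_desc(),
 * chsc_determine_fmt1_channel_path_desc() and
 * chsc_determine_fmt3_channel_path_desc(), which differ only in the
 * requested response format and the c bit.
 */
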
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	int i, mask;

	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			chp->cmg_chars.values[i] = chars->values[i];
		else
			chp->cmg_chars.values[i] = 0;
	}
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	unsigned long flags;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __packed *scmc_area;

	chp->shared = -1;
	chp->cmg = -1;

	if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
		return -EINVAL;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid)
		goto out;

	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
{
	int ret;

	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
out:
	return ret;
}

int chsc_enable_facility(int operation_code)
{
	struct chsc_sda_area *sda_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;

	ret = __chsc_enable_facility(sda_area, operation_code);
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);

	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int __init chsc_get_cssid(int idx)
{
	struct {
		struct chsc_header request;
		u8 atype;
		u32 : 24;
		u32 reserved1[6];
		struct chsc_header response;
		u32 reserved2[3];
		struct {
			u8 cssid;
			u32 : 24;
		} list[0];
	} __packed *sdcal_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	sdcal_area = chsc_page;
	sdcal_area->request.length = 0x0020;
	sdcal_area->request.code = 0x0034;
	sdcal_area->atype = 4;

	ret = chsc(sdcal_area);
	if (ret) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	ret = chsc_error_from_response(sdcal_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: sdcal failed (rc=%04x)\n",
			      sdcal_area->response.code);
		goto exit;
	}

	if ((addr_t) &sdcal_area->list[idx] <
	    (addr_t) &sdcal_area->response + sdcal_area->response.length)
		ret = sdcal_area->list[idx].cssid;
	else
		ret = -ENODEV;

exit:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	unsigned long flags;
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __packed *scsc_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[3];
		u64 clock_delta;
		unsigned int rsvd4[2];
	} __packed *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	if (clock_delta)
		*clock_delta = rr->clock_delta;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[];
	} __packed *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __packed *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);

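/*
 * Example (sketch): device drivers trigger a log at the subchannel's
 * endpoint roughly like this; the ccw layer wraps it for its devices
 * via ccw_device_siosl():
 *
 *	if (chsc_siosl(sch->schid) == 0)
 *		... logging was initiated at the endpoint ...
 */
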
/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);

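/*
 * Usage sketch: @scm_area is a zeroed, caller-owned page; @token chains
 * multi-block responses. A first call passes token 0, follow-up calls
 * pass the response token returned in the previous reply (the scm bus
 * code is the in-tree consumer of this interface).
 */
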
/**
 * chsc_pnso() - Perform Network-Subchannel Operation
 * @schid:		id of the subchannel on which PNSO is performed
 * @pnso_area:		request and response block for the operation
 * @resume_token:	resume token for multiblock response
 * @cnc:		Boolean change-notification control
 *
 * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
 *
 * Returns 0 on success.
 */
int chsc_pnso(struct subchannel_id schid,
	      struct chsc_pnso_area *pnso_area,
	      struct chsc_pnso_resume_token resume_token,
	      int cnc)
{
	memset(pnso_area, 0, sizeof(*pnso_area));
	pnso_area->request.length = 0x0030;
	pnso_area->request.code = 0x003d; /* network-subchannel operation */
	pnso_area->m	 = schid.m;
	pnso_area->ssid	 = schid.ssid;
	pnso_area->sch	 = schid.sch_no;
	pnso_area->cssid = schid.cssid;
	pnso_area->oc	 = 0; /* Store-network-bridging-information list */
	pnso_area->resume_token = resume_token;
	pnso_area->n	 = (cnc != 0);
	if (chsc(pnso_area))
		return -EIO;
	return chsc_error_from_response(pnso_area->response.code);
}

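/*
 * Usage sketch (loop shape illustrative, termination test depends on the
 * response layout): a multiblock response is collected by resuming with
 * the token from the previous reply:
 *
 *	struct chsc_pnso_resume_token token = {};
 *	int rc;
 *
 *	do {
 *		rc = chsc_pnso(schid, pnso_area, token, 0);
 *		token = pnso_area->resume_token;
 *		... consume the entries in this block ...
 *	} while (rc == 0 && ...more blocks indicated in the response...);
 */
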
int chsc_sgib(u32 origin)
{
	struct {
		struct chsc_header request;
		u16 op;
		u8  reserved01[2];
		u8  reserved02 : 4;
		u8  fmt : 4;
		u8  reserved03[7];
		/* operation data area begin */
		u8  reserved04[4];
		u32 gib_origin;
		u8  reserved05[10];
		u8  aix;
		u8  reserved06[4029];
		struct chsc_header response;
		u8  reserved07[4];
	} __packed *sgib_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	sgib_area = chsc_page;
	sgib_area->request.length = 0x0fe0;
	sgib_area->request.code = 0x0021;
	sgib_area->op = 0x1;
	sgib_area->gib_origin = origin;

	ret = chsc(sgib_area);
	if (ret == 0)
		ret = chsc_error_from_response(sgib_area->response.code);
	spin_unlock_irq(&chsc_page_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(chsc_sgib);

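/*
 * chsc_sgib() registers a guest information block (GIB) with the channel
 * subsystem and is used by the KVM interrupt code; a minimal sketch,
 * assuming @gib points to a kernel page holding the GIB:
 *
 *	ret = chsc_sgib((u32)(unsigned long)gib);
 */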