/*
 * IUCV base infrastructure.
 *
 * Copyright IBM Corp. 2001, 2009
 *
 * Author(s):
 *    Original source:
 *	Alan Altmark ([email protected]) Sept. 2000
 *	Xenia Tkatschow ([email protected])
 *    2Gb awareness and general cleanup:
 *	Fritz Elfert ([email protected], [email protected])
 *    Rewritten for af_iucv:
 *	Martin Schwidefsky <[email protected]>
 *    PM functions:
 *	Ursula Braun ([email protected])
 *
 * Documentation used:
 *    The original source
 *    CP Programming Service, IBM document # SC24-5760
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define KMSG_COMPONENT "iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <net/iucv/iucv.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/smp.h>

/*
 * FLAGS:
 * All flags are defined in the field IPFLAGS1 of each function
 * and can be found in CP Programming Services.
 * IPSRCCLS - Indicates you have specified a source class.
 * IPTRGCLS - Indicates you have specified a target class.
 * IPFGPID  - Indicates you have specified a pathid.
 * IPFGMID  - Indicates you have specified a message ID.
 * IPNORPY  - Indicates a one-way message. No reply expected.
 * IPALL    - Indicates that all paths are affected.
 */
#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80

static int iucv_bus_match(struct device *dev, struct device_driver *drv)
{
	return 0;
}

enum iucv_pm_states {
	IUCV_PM_INITIAL = 0,
	IUCV_PM_FREEZING = 1,
	IUCV_PM_THAWING = 2,
	IUCV_PM_RESTORING = 3,
};
static enum iucv_pm_states iucv_pm_state;

static int iucv_pm_prepare(struct device *);
static void iucv_pm_complete(struct device *);
static int iucv_pm_freeze(struct device *);
static int iucv_pm_thaw(struct device *);
static int iucv_pm_restore(struct device *);

static const struct dev_pm_ops iucv_pm_ops = {
	.prepare = iucv_pm_prepare,
	.complete = iucv_pm_complete,
	.freeze = iucv_pm_freeze,
	.thaw = iucv_pm_thaw,
	.restore = iucv_pm_restore,
};

struct bus_type iucv_bus = {
	.name = "iucv",
	.match = iucv_bus_match,
	.pm = &iucv_pm_ops,
};
EXPORT_SYMBOL(iucv_bus);

struct device *iucv_root;
EXPORT_SYMBOL(iucv_root);

static int iucv_available;

/* General IUCV interrupt structure */
struct iucv_irq_data {
	u16 ippathid;
	u8 ipflags1;
	u8 iptype;
	u32 res2[8];
};

struct iucv_irq_list {
	struct list_head list;
	struct iucv_irq_data data;
};

static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };

/*
 * Queue of interrupt buffers for delivery via the tasklet
 * (fast but can't call smp_call_function).
 */
static LIST_HEAD(iucv_task_queue);

/*
 * The tasklet for fast delivery of iucv interrupts.
 */
static void iucv_tasklet_fn(unsigned long);
static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn, 0);

/*
 * Queue of interrupt buffers for delivery via a work queue
 * (slower but can call smp_call_function).
 */
static LIST_HEAD(iucv_work_queue);

/*
 * The work element to deliver path pending interrupts.
 */
static void iucv_work_fn(struct work_struct *work);
static DECLARE_WORK(iucv_work, iucv_work_fn);

/*
 * Spinlock protecting task and work queue.
 */
static DEFINE_SPINLOCK(iucv_queue_lock);

enum iucv_command_codes {
	IUCV_QUERY = 0,
	IUCV_RETRIEVE_BUFFER = 2,
	IUCV_SEND = 4,
	IUCV_RECEIVE = 5,
	IUCV_REPLY = 6,
	IUCV_REJECT = 8,
	IUCV_PURGE = 9,
	IUCV_ACCEPT = 10,
	IUCV_CONNECT = 11,
	IUCV_DECLARE_BUFFER = 12,
	IUCV_QUIESCE = 13,
	IUCV_RESUME = 14,
	IUCV_SEVER = 15,
	IUCV_SETMASK = 16,
	IUCV_SETCONTROLMASK = 17,
};

/*
 * Error messages that are used with the iucv_sever function. They get
 * converted to EBCDIC.
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";

/*
 * iucv_handler_list: List of registered handlers.
 */
static LIST_HEAD(iucv_handler_list);

/*
 * iucv_path_table: an array of iucv_path structures.
 */
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;

/*
 * iucv_table_lock: spinlock protecting iucv_handler_list and iucv_path_table
 */
static DEFINE_SPINLOCK(iucv_table_lock);

/*
 * iucv_active_cpu: contains the number of the cpu executing the tasklet
 * or the work handler. Needed for iucv_path_sever called from tasklet.
 */
static int iucv_active_cpu = -1;

/*
 * Mutex and wait queue for iucv_register/iucv_unregister.
 */
static DEFINE_MUTEX(iucv_register_mutex);

/*
 * Counter for number of non-smp capable handlers.
 */
static int iucv_nonsmp_handler;

/*
 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
 * iucv_path_quiesce and iucv_path_sever.
 */
struct iucv_cmd_control {
	u16 ippathid;
	u8 ipflags1;
	u8 iprcode;
	u16 ipmsglim;
	u16 res1;
	u8 ipvmid[8];
	u8 ipuser[16];
	u8 iptarget[8];
} __attribute__ ((packed,aligned(8)));

/*
 * Data in parameter list iucv structure. Used by iucv_message_send,
 * iucv_message_send2way and iucv_message_reply.
 */
struct iucv_cmd_dpl {
	u16 ippathid;
	u8 ipflags1;
	u8 iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u8 iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Data in buffer iucv structure. Used by iucv_message_receive,
 * iucv_message_reject, iucv_message_send, iucv_message_send2way
 * and iucv_declare_cpu.
 */
struct iucv_cmd_db {
	u16 ippathid;
	u8 ipflags1;
	u8 iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u32 ipbfadr1;
	u32 ipbfln1f;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Purge message iucv structure. Used by iucv_message_purge.
 */
struct iucv_cmd_purge {
	u16 ippathid;
	u8 ipflags1;
	u8 iprcode;
	u32 ipmsgid;
	u8 ipaudit[3];
	u8 res1[5];
	u32 res2;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res3[3];
} __attribute__ ((packed,aligned(8)));

/*
 * Set mask iucv structure. Used by iucv_enable_cpu.
 */
struct iucv_cmd_set_mask {
	u8 ipmask;
	u8 res1[2];
	u8 iprcode;
	u32 res2[9];
} __attribute__ ((packed,aligned(8)));

union iucv_param {
	struct iucv_cmd_control ctrl;
	struct iucv_cmd_dpl dpl;
	struct iucv_cmd_db db;
	struct iucv_cmd_purge purge;
	struct iucv_cmd_set_mask set_mask;
};

/*
 * Anchor for per-cpu IUCV command parameter block.
 */
static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];

/**
 * iucv_call_b2f0
 * @command: identifier of IUCV call to CP.
 * @parm: pointer to a union iucv_param block
 *
 * Calls CP to execute IUCV commands.
 *
 * Returns the result of the CP IUCV call.
 */
static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	int ccode;

	reg0 = command;
	reg1 = virt_to_phys(parm);
	asm volatile(
		"	.long 0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
		: "m" (*parm) : "cc");
	return (ccode == 1) ? parm->ctrl.iprcode : ccode;
}

/**
 * iucv_query_maxconn
 *
 * Determines the maximum number of connections that may be established.
 *
 * Returns the maximum number of connections or -EPERM if IUCV is not
 * available.
 */
static int iucv_query_maxconn(void)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	void *param;
	int ccode;

	param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA);
	if (!param)
		return -ENOMEM;
	reg0 = IUCV_QUERY;
	reg1 = (unsigned long) param;
	asm volatile (
		"	.long	0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
	if (ccode == 0)
		iucv_max_pathid = reg1;
	kfree(param);
	return ccode ? -EPERM : 0;
}

/**
 * iucv_allow_cpu
 * @data: unused
 *
 * Allow iucv interrupts on this cpu.
 */
static void iucv_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/*
	 * Enable all iucv interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow nonpriority message pending interrupts
	 *	0x40 - Flag to allow priority message pending interrupts
	 *	0x20 - Flag to allow nonpriority message completion interrupts
	 *	0x10 - Flag to allow priority message completion interrupts
	 *	0x08 - Flag to allow IUCV control interrupts
	 */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/*
	 * Enable all iucv control interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow pending connections interrupts
	 *	0x40 - Flag to allow connection complete interrupts
	 *	0x20 - Flag to allow connection severed interrupts
	 *	0x10 - Flag to allow connection quiesced interrupts
	 *	0x08 - Flag to allow connection resumed interrupts
	 */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
	/* Set indication that iucv interrupts are allowed for this cpu. */
	cpumask_set_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_block_cpu
 * @data: unused
 *
 * Block iucv interrupts on this cpu.
 */
static void iucv_block_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Disable all iucv interrupts. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_block_cpu_almost
 * @data: unused
 *
 * Allow connection-severed interrupts only on this cpu.
 */
static void iucv_block_cpu_almost(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Allow iucv control interrupts only */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x08;
	iucv_call_b2f0(IUCV_SETMASK, parm);
	/* Allow iucv-severed interrupt only */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x20;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_declare_cpu
 * @data: unused
 *
 * Declare an interrupt buffer on this cpu.
 */
static void iucv_declare_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;
	int rc;

	if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Declare interrupt buffer. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
	rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
	if (rc) {
		char *err = "Unknown";
		switch (rc) {
		case 0x03:
			err = "Directory error";
			break;
		case 0x0a:
			err = "Invalid length";
			break;
		case 0x13:
			err = "Buffer already exists";
			break;
		case 0x3e:
			err = "Buffer overlap";
			break;
		case 0x5c:
			err = "Paging or storage error";
			break;
		}
		pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n",
			cpu, rc, err);
		return;
	}

	/* Set indication that an iucv buffer exists for this cpu. */
	cpumask_set_cpu(cpu, &iucv_buffer_cpumask);

	if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
		/* Enable iucv interrupts on this cpu. */
		iucv_allow_cpu(NULL);
	else
		/* Disable iucv interrupts on this cpu. */
		iucv_block_cpu(NULL);
}

/**
 * iucv_retrieve_cpu
 * @data: unused
 *
 * Retrieve interrupt buffer on this cpu.
 */
static void iucv_retrieve_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Block iucv interrupts. */
	iucv_block_cpu(NULL);

	/* Retrieve interrupt buffer. */
	parm = iucv_param_irq[cpu];
	iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);

	/* Clear indication that an iucv buffer exists for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
}

/**
 * iucv_setmask_mp
 *
 * Allow iucv interrupts on all cpus.
 */
static void iucv_setmask_mp(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		/* Enable all cpus with a declared buffer. */
		if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
		    !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
			smp_call_function_single(cpu, iucv_allow_cpu,
						 NULL, 1);
	put_online_cpus();
}

/**
 * iucv_setmask_up
 *
 * Allow iucv interrupts on a single cpu.
 */
static void iucv_setmask_up(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all cpus but the first in iucv_irq_cpumask. */
	cpumask_copy(&cpumask, &iucv_irq_cpumask);
	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
	for_each_cpu(cpu, &cpumask)
		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}

/**
 * iucv_enable
 *
 * This function makes iucv ready for use. It allocates the pathid
 * table, declares an iucv interrupt buffer and enables the iucv
 * interrupts. Called when the first user has registered an iucv
 * handler.
 */
static int iucv_enable(void)
{
	size_t alloc_size;
	int cpu, rc;

	get_online_cpus();
	rc = -ENOMEM;
	alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
	iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
	if (!iucv_path_table)
		goto out;
	/* Declare per cpu buffers. */
	rc = -EIO;
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
	if (cpumask_empty(&iucv_buffer_cpumask))
		/* No cpu could declare an iucv buffer. */
		goto out;
	put_online_cpus();
	return 0;
out:
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
	return rc;
}

/**
 * iucv_disable
 *
 * This function shuts down iucv. It disables iucv interrupts, retrieves
 * the iucv interrupt buffer and frees the pathid table. Called after the
 * last user has unregistered its iucv handler.
 */
static void iucv_disable(void)
{
	get_online_cpus();
	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
}

static void free_iucv_data(int cpu)
{
	kfree(iucv_param_irq[cpu]);
	iucv_param_irq[cpu] = NULL;
	kfree(iucv_param[cpu]);
	iucv_param[cpu] = NULL;
	kfree(iucv_irq_data[cpu]);
	iucv_irq_data[cpu] = NULL;
}

static int alloc_iucv_data(int cpu)
{
	/* Note: GFP_DMA used to get memory below 2G */
	iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
					  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_irq_data[cpu])
		goto out_free;

	/* Allocate parameter blocks. */
	iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
				       GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_param[cpu])
		goto out_free;

	iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
					   GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_param_irq[cpu])
		goto out_free;

	return 0;

out_free:
	free_iucv_data(cpu);
	return -ENOMEM;
}

static int iucv_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *hcpu)
{
	cpumask_t cpumask;
	long cpu = (long) hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (alloc_iucv_data(cpu))
			return notifier_from_errno(-ENOMEM);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_iucv_data(cpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!iucv_path_table)
			break;
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (!iucv_path_table)
			break;
		cpumask_copy(&cpumask, &iucv_buffer_cpumask);
		cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			/* Can't offline last IUCV enabled cpu. */
			return notifier_from_errno(-EINVAL);
		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
		if (cpumask_empty(&iucv_irq_cpumask))
			smp_call_function_single(
				cpumask_first(&iucv_buffer_cpumask),
				iucv_allow_cpu, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata iucv_cpu_notifier = {
	.notifier_call = iucv_cpu_notify,
};

/**
 * iucv_sever_pathid
 * @pathid: path identification number.
 * @userdata: 16-bytes of user data.
 *
 * Sever an iucv path to free up the pathid. Used internally.
 */
static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
{
	union iucv_param *parm;

	parm = iucv_param_irq[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = pathid;
	return iucv_call_b2f0(IUCV_SEVER, parm);
}

/**
 * __iucv_cleanup_queue
 * @dummy: unused dummy argument
 *
 * Nop function called via smp_call_function to force work items from
 * pending external iucv interrupts to the work queue.
 */
static void __iucv_cleanup_queue(void *dummy)
{
}

/**
 * iucv_cleanup_queue
 *
 * Function called after a path has been severed to find all remaining
 * work items for the now stale pathid. The caller needs to hold the
 * iucv_table_lock.
 */
static void iucv_cleanup_queue(void)
{
	struct iucv_irq_list *p, *n;

	/*
	 * When a path is severed, the pathid can be reused immediately
	 * on an iucv connect or a connection pending interrupt. Remove
	 * all entries from the task queue that refer to a stale pathid
	 * (iucv_path_table[ix] == NULL). Only then do the iucv connect
	 * or deliver the connection pending interrupt. To get all the
	 * pending interrupts force them to the work queue by calling
	 * an empty function on all cpus.
	 */
	smp_call_function(__iucv_cleanup_queue, NULL, 1);
	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
		/* Remove stale work items from the task queue. */
		if (iucv_path_table[p->data.ippathid] == NULL) {
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock_irq(&iucv_queue_lock);
}

/**
 * iucv_register:
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Registers a driver with IUCV.
 *
 * Returns 0 on success, -ENOMEM if the memory allocation for the pathid
 * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
 */
int iucv_register(struct iucv_handler *handler, int smp)
{
	int rc;

	if (!iucv_available)
		return -ENOSYS;
	mutex_lock(&iucv_register_mutex);
	if (!smp)
		iucv_nonsmp_handler++;
	if (list_empty(&iucv_handler_list)) {
		rc = iucv_enable();
		if (rc)
			goto out_mutex;
	} else if (!smp && iucv_nonsmp_handler == 1)
		iucv_setmask_up();
	INIT_LIST_HEAD(&handler->paths);

	spin_lock_bh(&iucv_table_lock);
	list_add_tail(&handler->list, &iucv_handler_list);
	spin_unlock_bh(&iucv_table_lock);
	rc = 0;
out_mutex:
	mutex_unlock(&iucv_register_mutex);
	return rc;
}
EXPORT_SYMBOL(iucv_register);

/**
 * iucv_unregister
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Unregister driver from IUCV.
 */
void iucv_unregister(struct iucv_handler *handler, int smp)
{
	struct iucv_path *p, *n;

	mutex_lock(&iucv_register_mutex);
	spin_lock_bh(&iucv_table_lock);
	/* Remove handler from the iucv_handler_list. */
	list_del_init(&handler->list);
	/* Sever all pathids still referring to the handler. */
	list_for_each_entry_safe(p, n, &handler->paths, list) {
		iucv_sever_pathid(p->pathid, NULL);
		iucv_path_table[p->pathid] = NULL;
		list_del(&p->list);
		iucv_path_free(p);
	}
	spin_unlock_bh(&iucv_table_lock);
	if (!smp)
		iucv_nonsmp_handler--;
	if (list_empty(&iucv_handler_list))
		iucv_disable();
	else if (!smp && iucv_nonsmp_handler == 0)
		iucv_setmask_mp();
	mutex_unlock(&iucv_register_mutex);
}
EXPORT_SYMBOL(iucv_unregister);
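
/*
 * Usage sketch (illustrative only; all example_* names are hypothetical):
 * a client module registers its iucv_handler on load and unregisters it
 * on unload.  The callback members shown are the ones invoked by this
 * file (path_pending, path_complete, path_severed).
 *
 *	static struct iucv_handler example_handler = {
 *		.path_pending	= example_path_pending,
 *		.path_complete	= example_path_complete,
 *		.path_severed	= example_path_severed,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return iucv_register(&example_handler, 0);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		iucv_unregister(&example_handler, 0);
 *	}
 */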

static int iucv_reboot_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	int i;

	if (cpumask_empty(&iucv_irq_cpumask))
		return NOTIFY_DONE;

	get_online_cpus();
	on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
	preempt_disable();
	for (i = 0; i < iucv_max_pathid; i++) {
		if (iucv_path_table[i])
			iucv_sever_pathid(i, NULL);
	}
	preempt_enable();
	put_online_cpus();
	iucv_disable();
	return NOTIFY_DONE;
}

static struct notifier_block iucv_reboot_notifier = {
	.notifier_call = iucv_reboot_event,
};

/**
 * iucv_path_accept
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function is issued after the user received a connection pending
 * external interrupt and now wishes to complete the IUCV communication path.
 *
 * Returns the result of the CP IUCV call.
 */
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
		     u8 userdata[16], void *private)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/* Prepare parameter block. */
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ippathid = path->pathid;
	parm->ctrl.ipmsglim = path->msglim;
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ipflags1 = path->flags;

	rc = iucv_call_b2f0(IUCV_ACCEPT, parm);
	if (!rc) {
		path->private = private;
		path->msglim = parm->ctrl.ipmsglim;
		path->flags = parm->ctrl.ipflags1;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_accept);
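
/*
 * Usage sketch (illustrative only): completing an incoming connection from
 * a handler's path_pending callback.  Returning non-zero instead declines
 * the path, and the base layer offers it to the next handler or severs it.
 * The example_* names are hypothetical.
 *
 *	static int example_path_pending(struct iucv_path *path,
 *					u8 *ipvmid, u8 *ipuser)
 *	{
 *		return iucv_path_accept(path, &example_handler, NULL, NULL);
 *	}
 */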

/**
 * iucv_path_connect
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userid: 8-byte user identification
 * @system: 8-byte target system identification
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function establishes an IUCV path. Although the connect may complete
 * successfully, you are not able to use the path until you receive an IUCV
 * Connection Complete external interrupt.
 *
 * Returns the result of the CP IUCV call.
 */
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
		      u8 userid[8], u8 system[8], u8 userdata[16],
		      void *private)
{
	union iucv_param *parm;
	int rc;

	spin_lock_bh(&iucv_table_lock);
	iucv_cleanup_queue();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ipmsglim = path->msglim;
	parm->ctrl.ipflags1 = path->flags;
	if (userid) {
		memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
		ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
		EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
	}
	if (system) {
		memcpy(parm->ctrl.iptarget, system,
		       sizeof(parm->ctrl.iptarget));
		ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
		EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
	}
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));

	rc = iucv_call_b2f0(IUCV_CONNECT, parm);
	if (!rc) {
		if (parm->ctrl.ippathid < iucv_max_pathid) {
			path->pathid = parm->ctrl.ippathid;
			path->msglim = parm->ctrl.ipmsglim;
			path->flags = parm->ctrl.ipflags1;
			path->handler = handler;
			path->private = private;
			list_add_tail(&path->list, &handler->paths);
			iucv_path_table[path->pathid] = path;
		} else {
			iucv_sever_pathid(parm->ctrl.ippathid,
					  iucv_error_pathid);
			rc = -EIO;
		}
	}
out:
	spin_unlock_bh(&iucv_table_lock);
	return rc;
}
EXPORT_SYMBOL(iucv_path_connect);
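
/*
 * Usage sketch (illustrative only): connecting to a peer z/VM guest.  The
 * peer name, message limit and flags are hypothetical; iucv_path_alloc()
 * and iucv_path_free() come from <net/iucv/iucv.h>.  The path becomes
 * usable once the handler's path_complete callback has run.
 *
 *	struct iucv_path *path;
 *	int rc;
 *
 *	path = iucv_path_alloc(16, 0, GFP_KERNEL);
 *	if (!path)
 *		return -ENOMEM;
 *	rc = iucv_path_connect(path, &example_handler,
 *			       (u8 *) "LNXPEER ", NULL, NULL, NULL);
 *	if (rc)
 *		iucv_path_free(path);
 */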

/**
 * iucv_path_quiesce:
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function temporarily suspends incoming messages on an IUCV path.
 * You can later reactivate the path by invoking the iucv_resume function.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_quiesce);

/**
 * iucv_path_resume:
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function resumes incoming messages on an IUCV path that has
 * been stopped with iucv_path_quiesce.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_RESUME, parm);
out:
	local_bh_enable();
	return rc;
}

1040 | ||
1041 | /** | |
1042 | * iucv_path_sever | |
1043 | * @path: address of iucv path structure | |
1044 | * @userdata: 16 bytes of data reflected to the communication partner | |
1045 | * | |
1046 | * This function terminates an IUCV path. | |
1047 | * | |
1048 | * Returns the result from the CP IUCV call. | |
1049 | */ | |
1050 | int iucv_path_sever(struct iucv_path *path, u8 userdata[16]) | |
1051 | { | |
1052 | int rc; | |
1053 | ||
2356f4cb | 1054 | preempt_disable(); |
f2019030 | 1055 | if (cpumask_empty(&iucv_buffer_cpumask)) { |
6c005961 UB |
1056 | rc = -EIO; |
1057 | goto out; | |
1058 | } | |
04b090d5 | 1059 | if (iucv_active_cpu != smp_processor_id()) |
2356f4cb MS |
1060 | spin_lock_bh(&iucv_table_lock); |
1061 | rc = iucv_sever_pathid(path->pathid, userdata); | |
42e1b4c2 UB |
1062 | iucv_path_table[path->pathid] = NULL; |
1063 | list_del_init(&path->list); | |
04b090d5 | 1064 | if (iucv_active_cpu != smp_processor_id()) |
2356f4cb | 1065 | spin_unlock_bh(&iucv_table_lock); |
6c005961 | 1066 | out: |
2356f4cb MS |
1067 | preempt_enable(); |
1068 | return rc; | |
1069 | } | |
da99f056 | 1070 | EXPORT_SYMBOL(iucv_path_sever); |
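
/*
 * Usage sketch (illustrative only): tearing a connection down, e.g. on
 * module unload or from a path_severed callback.  The optional 16 bytes
 * of user data may be NULL.
 *
 *	iucv_path_sever(path, NULL);
 *	iucv_path_free(path);
 */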

/**
 * iucv_message_purge
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @srccls: source class of message
 *
 * Cancels a message you have sent.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
		       u32 srccls)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->purge.ippathid = path->pathid;
	parm->purge.ipmsgid = msg->id;
	parm->purge.ipsrccls = srccls;
	parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID;
	rc = iucv_call_b2f0(IUCV_PURGE, parm);
	if (!rc) {
		msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
		msg->tag = parm->purge.ipmsgtag;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_purge);

/**
 * iucv_message_receive_iprmdata
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual:
 *
 * Internal function used by iucv_message_receive and __iucv_message_receive
 * to receive RMDATA data stored in struct iucv_message.
 */
static int iucv_message_receive_iprmdata(struct iucv_path *path,
					 struct iucv_message *msg,
					 u8 flags, void *buffer,
					 size_t size, size_t *residual)
{
	struct iucv_array *array;
	u8 *rmmsg;
	size_t copy;

	/*
	 * Message is 8 bytes long and has been stored to the
	 * message descriptor itself.
	 */
	if (residual)
		*residual = abs(size - 8);
	rmmsg = msg->rmmsg;
	if (flags & IUCV_IPBUFLST) {
		/* Copy to struct iucv_array. */
		size = (size < 8) ? size : 8;
		for (array = buffer; size > 0; array++) {
			copy = min_t(size_t, size, array->length);
			memcpy((u8 *)(addr_t) array->address,
			       rmmsg, copy);
			rmmsg += copy;
			size -= copy;
		}
	} else {
		/* Copy to direct buffer. */
		memcpy(buffer, rmmsg, min_t(size_t, size, 8));
	}
	return 0;
}

/**
 * __iucv_message_receive
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual:
 *
 * This function receives messages that are being sent to you over
 * established paths. This function will deal with RMDATA messages
 * embedded in struct iucv_message as well.
 *
 * Locking:	no locking
 *
 * Returns the result from the CP IUCV call.
 */
int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			   u8 flags, void *buffer, size_t size, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = (u32)(addr_t) buffer;
	parm->db.ipbfln1f = (u32) size;
	parm->db.ipmsgid = msg->id;
	parm->db.ippathid = path->pathid;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (flags | IUCV_IPFGPID |
			     IUCV_IPFGMID | IUCV_IPTRGCLS);
	rc = iucv_call_b2f0(IUCV_RECEIVE, parm);
	if (!rc || rc == 5) {
		msg->flags = parm->db.ipflags1;
		if (residual)
			*residual = parm->db.ipbfln1f;
	}
out:
	return rc;
}
EXPORT_SYMBOL(__iucv_message_receive);

/**
 * iucv_message_receive
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual:
 *
 * This function receives messages that are being sent to you over
 * established paths. This function will deal with RMDATA messages
 * embedded in struct iucv_message as well.
 *
 * Locking:	local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			 u8 flags, void *buffer, size_t size, size_t *residual)
{
	int rc;

	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	local_bh_disable();
	rc = __iucv_message_receive(path, msg, flags, buffer, size, residual);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_receive);
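
/*
 * Usage sketch (illustrative only): copying message data out of IUCV from
 * a handler's message_pending callback.  The callback signature and the
 * msg->length field are assumed from <net/iucv/iucv.h>; the buffer size
 * and function name are hypothetical.
 *
 *	static void example_message_pending(struct iucv_path *path,
 *					    struct iucv_message *msg)
 *	{
 *		u8 buf[256];
 *		size_t residual;
 *		int rc;
 *
 *		rc = iucv_message_receive(path, msg, 0, buf,
 *					  min_t(u32, msg->length, sizeof(buf)),
 *					  &residual);
 *		if (rc)
 *			pr_warn("message receive failed, rc=%d\n", rc);
 *	}
 */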

/**
 * iucv_message_reject
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 *
 * The reject function refuses a specified message. Between the time you
 * are notified of a message and the time that you complete the message,
 * the message may be rejected.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ippathid = path->pathid;
	parm->db.ipmsgid = msg->id;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
	rc = iucv_call_b2f0(IUCV_REJECT, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reject);

/**
 * iucv_message_reply
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @reply: address of reply data buffer or address of struct iucv_array
 * @size: length of reply data buffer
 *
 * This function responds to the two-way messages that you receive. You
 * must identify completely the message to which you wish to reply, i.e.
 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
 * the parameter list.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
		       u8 flags, void *reply, size_t size)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags;
		parm->dpl.ipmsgid = msg->id;
		parm->dpl.iptrgcls = msg->class;
		memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
	} else {
		parm->db.ipbfadr1 = (u32)(addr_t) reply;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags;
		parm->db.ipmsgid = msg->id;
		parm->db.iptrgcls = msg->class;
	}
	rc = iucv_call_b2f0(IUCV_REPLY, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reply);
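
/*
 * Usage sketch (illustrative only): answering a two-way message.  With
 * IUCV_IPRMDATA a reply of up to 8 bytes travels in the parameter list
 * itself; the payload below is hypothetical.
 *
 *	static char ack[8] = "OK";
 *	int rc;
 *
 *	rc = iucv_message_reply(path, msg, IUCV_IPRMDATA, ack, sizeof(ack));
 */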

/**
 * __iucv_message_send
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer and this is a one-way message and the
 * receiver will not reply to the message.
 *
 * Locking:	no locking
 *
 * Returns the result from the CP IUCV call.
 */
int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
			u8 flags, u32 srccls, void *buffer, size_t size)
{
	union iucv_param *parm;
	int rc;

	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		/* Message of 8 bytes can be placed into the parameter list. */
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags | IUCV_IPNORPY;
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		memcpy(parm->dpl.iprmmsg, buffer, 8);
	} else {
		parm->db.ipbfadr1 = (u32)(addr_t) buffer;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags | IUCV_IPNORPY;
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	if (!rc)
		msg->id = parm->db.ipmsgid;
out:
	return rc;
}
EXPORT_SYMBOL(__iucv_message_send);

/**
 * iucv_message_send
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer and this is a one-way message and the
 * receiver will not reply to the message.
 *
 * Locking:	local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
		      u8 flags, u32 srccls, void *buffer, size_t size)
{
	int rc;

	local_bh_disable();
	rc = __iucv_message_send(path, msg, flags, srccls, buffer, size);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send);
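
/*
 * Usage sketch (illustrative only): sending a one-way message over an
 * established path.  Tag and payload are hypothetical; on success IUCV
 * fills in msg.id.
 *
 *	struct iucv_message msg;
 *	char data[] = "hello from Linux";
 *	int rc;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.class = 0;
 *	msg.tag = 42;
 *	rc = iucv_message_send(path, &msg, 0, 0, data, strlen(data));
 */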

/**
 * iucv_message_send2way
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent and the reply is received
 *	   (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 * @answer: address of answer buffer or address of struct iucv_array
 * @asize: size of reply buffer
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer. The receiver of the send is expected to
 * reply to the message and a buffer is provided into which IUCV moves
 * the reply to this message.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
			  u8 flags, u32 srccls, void *buffer, size_t size,
			  void *answer, size_t asize, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = path->flags;	/* priority message */
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
		parm->dpl.ipbfln2f = (u32) asize;
		memcpy(parm->dpl.iprmmsg, buffer, 8);
	} else {
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = path->flags;	/* priority message */
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
		parm->db.ipbfadr1 = (u32)(addr_t) buffer;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ipbfadr2 = (u32)(addr_t) answer;
		parm->db.ipbfln2f = (u32) asize;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	if (!rc)
		msg->id = parm->db.ipmsgid;
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send2way);
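
/*
 * Usage sketch (illustrative only): a two-way request where IUCV moves the
 * partner's reply into a caller-supplied answer buffer.  Sizes and tag are
 * hypothetical.
 *
 *	struct iucv_message msg;
 *	char request[64], reply[256];
 *	int rc;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.tag = 7;
 *	rc = iucv_message_send2way(path, &msg, 0, 0, request, sizeof(request),
 *				   reply, sizeof(reply), NULL);
 */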
2356f4cb MS |
1464 | |
1465 | /** | |
1466 | * iucv_path_pending | |
1467 | * @data: Pointer to external interrupt buffer | |
1468 | * | |
1469 | * Process connection pending work item. Called from tasklet while holding | |
1470 | * iucv_table_lock. | |
1471 | */ | |
1472 | struct iucv_path_pending { | |
1473 | u16 ippathid; | |
1474 | u8 ipflags1; | |
1475 | u8 iptype; | |
1476 | u16 ipmsglim; | |
1477 | u16 res1; | |
1478 | u8 ipvmid[8]; | |
1479 | u8 ipuser[16]; | |
1480 | u32 res3; | |
1481 | u8 ippollfg; | |
1482 | u8 res4[3]; | |
bc10502d | 1483 | } __packed; |
2356f4cb MS |
1484 | |
1485 | static void iucv_path_pending(struct iucv_irq_data *data) | |
1486 | { | |
1487 | struct iucv_path_pending *ipp = (void *) data; | |
1488 | struct iucv_handler *handler; | |
1489 | struct iucv_path *path; | |
1490 | char *error; | |
1491 | ||
1492 | BUG_ON(iucv_path_table[ipp->ippathid]); | |
1493 | /* New pathid, handler found. Create a new path struct. */ | |
1494 | error = iucv_error_no_memory; | |
1495 | path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC); | |
1496 | if (!path) | |
1497 | goto out_sever; | |
1498 | path->pathid = ipp->ippathid; | |
1499 | iucv_path_table[path->pathid] = path; | |
1500 | EBCASC(ipp->ipvmid, 8); | |
1501 | ||
1502 | /* Call registered handler until one is found that wants the path. */ | |
1503 | list_for_each_entry(handler, &iucv_handler_list, list) { | |
1504 | if (!handler->path_pending) | |
1505 | continue; | |
1506 | /* | |
1507 | * Add path to handler to allow a call to iucv_path_sever | |
1508 | * inside the path_pending function. If the handler returns | |
1509 | * an error remove the path from the handler again. | |
1510 | */ | |
1511 | list_add(&path->list, &handler->paths); | |
1512 | path->handler = handler; | |
1513 | if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser)) | |
1514 | return; | |
1515 | list_del(&path->list); | |
1516 | path->handler = NULL; | |
1517 | } | |
1518 | /* No handler wanted the path. */ | |
1519 | iucv_path_table[path->pathid] = NULL; | |
1520 | iucv_path_free(path); | |
1521 | error = iucv_error_no_listener; | |
1522 | out_sever: | |
1523 | iucv_sever_pathid(ipp->ippathid, error); | |
1524 | } | |
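/*
 * Hypothetical handler-side counterpart (assumed driver code, not part of
 * the base layer): a path_pending callback such as a driver would register
 * via iucv_register(&example_handler, 0).  Returning 0 claims the path and
 * stops the loop above; any non-zero return makes the base code unlink the
 * path from this handler and offer it to the next one.  The "example_*"
 * names and EXAMPLE_MSGLIM are placeholders.
 */
#define EXAMPLE_MSGLIM	10

static int example_path_pending(struct iucv_path *path,
				u8 ipvmid[8], u8 ipuser[16]);

static struct iucv_handler example_handler = {
	.path_pending	= example_path_pending,
};

static int example_path_pending(struct iucv_path *path,
				u8 ipvmid[8], u8 ipuser[16])
{
	/* Optionally lower the message limit before accepting. */
	if (path->msglim > EXAMPLE_MSGLIM)
		path->msglim = EXAMPLE_MSGLIM;
	/* Accept the connection; ipuser is handed back to the peer. */
	return iucv_path_accept(path, &example_handler, ipuser, NULL);
}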
1525 | ||
1526 | /** | |
1527 | * iucv_path_complete | |
1528 | * @data: Pointer to external interrupt buffer | |
1529 | * | |
1530 | * Process connection complete work item. Called from tasklet while holding | |
1531 | * iucv_table_lock. | |
1532 | */ | |
1533 | struct iucv_path_complete { | |
1534 | u16 ippathid; | |
1535 | u8 ipflags1; | |
1536 | u8 iptype; | |
1537 | u16 ipmsglim; | |
1538 | u16 res1; | |
1539 | u8 res2[8]; | |
1540 | u8 ipuser[16]; | |
1541 | u32 res3; | |
1542 | u8 ippollfg; | |
1543 | u8 res4[3]; | |
bc10502d | 1544 | } __packed; |
2356f4cb MS |
1545 | |
1546 | static void iucv_path_complete(struct iucv_irq_data *data) | |
1547 | { | |
1548 | struct iucv_path_complete *ipc = (void *) data; | |
1549 | struct iucv_path *path = iucv_path_table[ipc->ippathid]; | |
1550 | ||
b8942e3b HB |
1551 | if (path) |
1552 | path->flags = ipc->ipflags1; | |
04b090d5 | 1553 | if (path && path->handler && path->handler->path_complete) |
2356f4cb MS |
1554 | path->handler->path_complete(path, ipc->ipuser); |
1555 | } | |
1556 | ||
1557 | /** | |
1558 | * iucv_path_severed | |
1559 | * @data: Pointer to external interrupt buffer | |
1560 | * | |
1561 | * Process connection severed work item. Called from tasklet while holding | |
1562 | * iucv_table_lock. | |
1563 | */ | |
1564 | struct iucv_path_severed { | |
1565 | u16 ippathid; | |
1566 | u8 res1; | |
1567 | u8 iptype; | |
1568 | u32 res2; | |
1569 | u8 res3[8]; | |
1570 | u8 ipuser[16]; | |
1571 | u32 res4; | |
1572 | u8 ippollfg; | |
1573 | u8 res5[3]; | |
bc10502d | 1574 | } __packed; |
2356f4cb MS |
1575 | |
1576 | static void iucv_path_severed(struct iucv_irq_data *data) | |
1577 | { | |
1578 | struct iucv_path_severed *ips = (void *) data; | |
1579 | struct iucv_path *path = iucv_path_table[ips->ippathid]; | |
1580 | ||
04b090d5 MS |
1581 | if (!path || !path->handler) /* Already severed */ |
1582 | return; | |
2356f4cb MS |
1583 | if (path->handler->path_severed) |
1584 | path->handler->path_severed(path, ips->ipuser); | |
1585 | else { | |
1586 | iucv_sever_pathid(path->pathid, NULL); | |
1587 | iucv_path_table[path->pathid] = NULL; | |
42e1b4c2 | 1588 | list_del(&path->list); |
2356f4cb MS |
1589 | iucv_path_free(path); |
1590 | } | |
1591 | } | |
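/*
 * Hypothetical driver-side path_severed callback (assumed code): when a
 * handler supplies this callback, the base layer above does not tear the
 * path down itself, so the driver is expected to sever and free the path
 * once it has cleaned up its own state, mirroring the else-branch above.
 */
static void example_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	/* drop driver references to the path here, then release it */
	iucv_path_sever(path, NULL);
	iucv_path_free(path);
}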
1592 | ||
1593 | /** | |
1594 | * iucv_path_quiesced | |
1595 | * @data: Pointer to external interrupt buffer | |
1596 | * | |
1597 | * Process connection quiesced work item. Called from tasklet while holding | |
1598 | * iucv_table_lock. | |
1599 | */ | |
1600 | struct iucv_path_quiesced { | |
1601 | u16 ippathid; | |
1602 | u8 res1; | |
1603 | u8 iptype; | |
1604 | u32 res2; | |
1605 | u8 res3[8]; | |
1606 | u8 ipuser[16]; | |
1607 | u32 res4; | |
1608 | u8 ippollfg; | |
1609 | u8 res5[3]; | |
bc10502d | 1610 | } __packed; |
2356f4cb MS |
1611 | |
1612 | static void iucv_path_quiesced(struct iucv_irq_data *data) | |
1613 | { | |
1614 | struct iucv_path_quiesced *ipq = (void *) data; | |
1615 | struct iucv_path *path = iucv_path_table[ipq->ippathid]; | |
1616 | ||
04b090d5 | 1617 | if (path && path->handler && path->handler->path_quiesced) |
2356f4cb MS |
1618 | path->handler->path_quiesced(path, ipq->ipuser); |
1619 | } | |
1620 | ||
1621 | /** | |
1622 | * iucv_path_resumed | |
1623 | * @data: Pointer to external interrupt buffer | |
1624 | * | |
1625 | * Process connection resumed work item. Called from tasklet while holding | |
1626 | * iucv_table_lock. | |
1627 | */ | |
1628 | struct iucv_path_resumed { | |
1629 | u16 ippathid; | |
1630 | u8 res1; | |
1631 | u8 iptype; | |
1632 | u32 res2; | |
1633 | u8 res3[8]; | |
1634 | u8 ipuser[16]; | |
1635 | u32 res4; | |
1636 | u8 ippollfg; | |
1637 | u8 res5[3]; | |
bc10502d | 1638 | } __packed; |
2356f4cb MS |
1639 | |
1640 | static void iucv_path_resumed(struct iucv_irq_data *data) | |
1641 | { | |
1642 | struct iucv_path_resumed *ipr = (void *) data; | |
1643 | struct iucv_path *path = iucv_path_table[ipr->ippathid]; | |
1644 | ||
04b090d5 | 1645 | if (path && path->handler && path->handler->path_resumed) |
2356f4cb MS |
1646 | path->handler->path_resumed(path, ipr->ipuser); |
1647 | } | |
1648 | ||
1649 | /** | |
1650 | * iucv_message_complete | |
1651 | * @data: Pointer to external interrupt buffer | |
1652 | * | |
1653 | * Process message complete work item. Called from tasklet while holding | |
1654 | * iucv_table_lock. | |
1655 | */ | |
1656 | struct iucv_message_complete { | |
1657 | u16 ippathid; | |
1658 | u8 ipflags1; | |
1659 | u8 iptype; | |
1660 | u32 ipmsgid; | |
1661 | u32 ipaudit; | |
1662 | u8 iprmmsg[8]; | |
1663 | u32 ipsrccls; | |
1664 | u32 ipmsgtag; | |
1665 | u32 res; | |
1666 | u32 ipbfln2f; | |
1667 | u8 ippollfg; | |
1668 | u8 res2[3]; | |
bc10502d | 1669 | } __packed; |
2356f4cb MS |
1670 | |
1671 | static void iucv_message_complete(struct iucv_irq_data *data) | |
1672 | { | |
1673 | struct iucv_message_complete *imc = (void *) data; | |
1674 | struct iucv_path *path = iucv_path_table[imc->ippathid]; | |
1675 | struct iucv_message msg; | |
1676 | ||
04b090d5 | 1677 | if (path && path->handler && path->handler->message_complete) { |
2356f4cb MS |
1678 | msg.flags = imc->ipflags1; |
1679 | msg.id = imc->ipmsgid; | |
1680 | msg.audit = imc->ipaudit; | |
1681 | memcpy(msg.rmmsg, imc->iprmmsg, 8); | |
1682 | msg.class = imc->ipsrccls; | |
1683 | msg.tag = imc->ipmsgtag; | |
1684 | msg.length = imc->ipbfln2f; | |
1685 | path->handler->message_complete(path, &msg); | |
1686 | } | |
1687 | } | |
1688 | ||
1689 | /** | |
1690 | * iucv_message_pending | |
1691 | * @data: Pointer to external interrupt buffer | |
1692 | * | |
1693 | * Process message pending work item. Called from tasklet while holding | |
1694 | * iucv_table_lock. | |
1695 | */ | |
1696 | struct iucv_message_pending { | |
1697 | u16 ippathid; | |
1698 | u8 ipflags1; | |
1699 | u8 iptype; | |
1700 | u32 ipmsgid; | |
1701 | u32 iptrgcls; | |
1702 | union { | |
1703 | u32 iprmmsg1_u32; | |
1704 | u8 iprmmsg1[4]; | |
1705 | } ln1msg1; | |
1706 | union { | |
1707 | u32 ipbfln1f; | |
1708 | u8 iprmmsg2[4]; | |
1709 | } ln1msg2; | |
1710 | u32 res1[3]; | |
1711 | u32 ipbfln2f; | |
1712 | u8 ippollfg; | |
1713 | u8 res2[3]; | |
bc10502d | 1714 | } __packed; |
2356f4cb MS |
1715 | |
1716 | static void iucv_message_pending(struct iucv_irq_data *data) | |
1717 | { | |
1718 | struct iucv_message_pending *imp = (void *) data; | |
1719 | struct iucv_path *path = iucv_path_table[imp->ippathid]; | |
1720 | struct iucv_message msg; | |
1721 | ||
04b090d5 | 1722 | if (path && path->handler && path->handler->message_pending) { |
2356f4cb MS |
1723 | msg.flags = imp->ipflags1; |
1724 | msg.id = imp->ipmsgid; | |
1725 | msg.class = imp->iptrgcls; | |
1726 | if (imp->ipflags1 & IUCV_IPRMDATA) { | |
1727 | memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8); | |
1728 | msg.length = 8; | |
1729 | } else | |
1730 | msg.length = imp->ln1msg2.ipbfln1f; | |
1731 | msg.reply_size = imp->ipbfln2f; | |
1732 | path->handler->message_pending(path, &msg); | |
1733 | } | |
1734 | } | |
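/*
 * Hypothetical receive-side callback (assumed driver code): a
 * message_pending handler wired into the driver's struct iucv_handler.
 * For in-line (IUCV_IPRMDATA) messages the payload is already in
 * msg->rmmsg; for buffered messages iucv_message_receive() moves it into
 * a driver buffer.  Buffer management is deliberately simplified here.
 */
static void example_message_pending(struct iucv_path *path,
				    struct iucv_message *msg)
{
	u8 buf[64];	/* placeholder receive buffer */
	int rc;

	if (msg->flags & IUCV_IPRMDATA) {
		/* Up to 8 bytes were delivered in the parameter list. */
		memcpy(buf, msg->rmmsg, 8);
		return;
	}
	if (msg->length > sizeof(buf)) {
		iucv_message_reject(path, msg);
		return;
	}
	rc = iucv_message_receive(path, msg, 0, buf, msg->length, NULL);
	if (rc)
		pr_debug("example receive failed, rc %d\n", rc);
}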
1735 | ||
1736 | /** | |
04b090d5 | 1737 | * iucv_tasklet_fn: |
2356f4cb MS |
1738 | * |
1739 | * This tasklet loops over the queue of irq buffers created by | |
1740 | * iucv_external_interrupt, calls the appropriate action handler | |
1741 | * and then frees the buffer. | |
1742 | */ | |
04b090d5 | 1743 | static void iucv_tasklet_fn(unsigned long ignored) |
2356f4cb MS |
1744 | { |
1745 | typedef void iucv_irq_fn(struct iucv_irq_data *); | |
1746 | static iucv_irq_fn *irq_fn[] = { | |
2356f4cb MS |
1747 | [0x02] = iucv_path_complete, |
1748 | [0x03] = iucv_path_severed, | |
1749 | [0x04] = iucv_path_quiesced, | |
1750 | [0x05] = iucv_path_resumed, | |
1751 | [0x06] = iucv_message_complete, | |
1752 | [0x07] = iucv_message_complete, | |
1753 | [0x08] = iucv_message_pending, | |
1754 | [0x09] = iucv_message_pending, | |
1755 | }; | |
b5e78337 | 1756 | LIST_HEAD(task_queue); |
04b090d5 | 1757 | struct iucv_irq_list *p, *n; |
2356f4cb MS |
1758 | |
1759 | /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ | |
13fdc9a7 UB |
1760 | if (!spin_trylock(&iucv_table_lock)) { |
1761 | tasklet_schedule(&iucv_tasklet); | |
1762 | return; | |
1763 | } | |
04b090d5 | 1764 | iucv_active_cpu = smp_processor_id(); |
2356f4cb | 1765 | |
04b090d5 MS |
1766 | spin_lock_irq(&iucv_queue_lock); |
1767 | list_splice_init(&iucv_task_queue, &task_queue); | |
1768 | spin_unlock_irq(&iucv_queue_lock); | |
1769 | ||
1770 | list_for_each_entry_safe(p, n, &task_queue, list) { | |
2356f4cb | 1771 | list_del_init(&p->list); |
2356f4cb MS |
1772 | irq_fn[p->data.iptype](&p->data); |
1773 | kfree(p); | |
2356f4cb | 1774 | } |
2356f4cb | 1775 | |
04b090d5 | 1776 | iucv_active_cpu = -1; |
2356f4cb MS |
1777 | spin_unlock(&iucv_table_lock); |
1778 | } | |
1779 | ||
04b090d5 MS |
1780 | /** |
1781 | * iucv_work_fn: | |
1782 | * | |
1783 | * This work function loops over the queue of path pending irq blocks | |
1784 | * created by iucv_external_interrupt, calls the appropriate action | |
1785 | * handler and then frees the buffer. | |
1786 | */ | |
1787 | static void iucv_work_fn(struct work_struct *work) | |
1788 | { | |
b5e78337 | 1789 | LIST_HEAD(work_queue); |
04b090d5 MS |
1790 | struct iucv_irq_list *p, *n; |
1791 | ||
1792 | /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ | |
1793 | spin_lock_bh(&iucv_table_lock); | |
1794 | iucv_active_cpu = smp_processor_id(); | |
1795 | ||
1796 | spin_lock_irq(&iucv_queue_lock); | |
1797 | list_splice_init(&iucv_work_queue, &work_queue); | |
1798 | spin_unlock_irq(&iucv_queue_lock); | |
1799 | ||
1800 | iucv_cleanup_queue(); | |
1801 | list_for_each_entry_safe(p, n, &work_queue, list) { | |
1802 | list_del_init(&p->list); | |
1803 | iucv_path_pending(&p->data); | |
1804 | kfree(p); | |
1805 | } | |
1806 | ||
1807 | iucv_active_cpu = -1; | |
1808 | spin_unlock_bh(&iucv_table_lock); | |
1809 | } | |
1810 | ||
2356f4cb MS |
1811 | /** |
1812 | * iucv_external_interrupt | |
1813 | * @code: irq code | |
1814 | * | |
1815 | * Handles external interrupts coming in from CP. | |
04b090d5 | 1816 | * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn(). |
2356f4cb | 1817 | */ |
fde15c3a | 1818 | static void iucv_external_interrupt(struct ext_code ext_code, |
f6649a7e | 1819 | unsigned int param32, unsigned long param64) |
2356f4cb MS |
1820 | { |
1821 | struct iucv_irq_data *p; | |
04b090d5 | 1822 | struct iucv_irq_list *work; |
2356f4cb | 1823 | |
420f42ec | 1824 | inc_irq_stat(IRQEXT_IUC); |
70cf5035 | 1825 | p = iucv_irq_data[smp_processor_id()]; |
2356f4cb | 1826 | if (p->ippathid >= iucv_max_pathid) { |
c2b4afd2 | 1827 | WARN_ON(p->ippathid >= iucv_max_pathid); |
2356f4cb MS |
1828 | iucv_sever_pathid(p->ippathid, iucv_error_no_listener); |
1829 | return; | |
1830 | } | |
c2b4afd2 | 1831 | BUG_ON(p->iptype < 0x01 || p->iptype > 0x09); |
04b090d5 | 1832 | work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); |
2356f4cb | 1833 | if (!work) { |
47c4cfc3 | 1834 | pr_warn("iucv_external_interrupt: out of memory\n"); |
2356f4cb MS |
1835 | return; |
1836 | } | |
1837 | memcpy(&work->data, p, sizeof(work->data)); | |
04b090d5 MS |
1838 | spin_lock(&iucv_queue_lock); |
1839 | if (p->iptype == 0x01) { | |
1840 | /* Path pending interrupt. */ | |
1841 | list_add_tail(&work->list, &iucv_work_queue); | |
1842 | schedule_work(&iucv_work); | |
1843 | } else { | |
1844 | /* The other interrupts. */ | |
1845 | list_add_tail(&work->list, &iucv_task_queue); | |
1846 | tasklet_schedule(&iucv_tasklet); | |
1847 | } | |
1848 | spin_unlock(&iucv_queue_lock); | |
2356f4cb MS |
1849 | } |
1850 | ||
672e405b UB |
1851 | static int iucv_pm_prepare(struct device *dev) |
1852 | { | |
1853 | int rc = 0; | |
1854 | ||
1855 | #ifdef CONFIG_PM_DEBUG | |
1856 | printk(KERN_INFO "iucv_pm_prepare\n"); | |
1857 | #endif | |
1858 | if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) | |
1859 | rc = dev->driver->pm->prepare(dev); | |
1860 | return rc; | |
1861 | } | |
1862 | ||
1863 | static void iucv_pm_complete(struct device *dev) | |
1864 | { | |
1865 | #ifdef CONFIG_PM_DEBUG | |
1866 | printk(KERN_INFO "iucv_pm_complete\n"); | |
1867 | #endif | |
1868 | if (dev->driver && dev->driver->pm && dev->driver->pm->complete) | |
1869 | dev->driver->pm->complete(dev); | |
1870 | } | |
1871 | ||
1872 | /** | |
1873 | * iucv_path_table_empty() - determine if iucv path table is empty | |
1874 | * | |
1875 | * Returns 0 if there are still iucv paths defined | |
1876 | * 1 if there are no iucv paths defined | |
1877 | */ | |
1878 | int iucv_path_table_empty(void) | |
1879 | { | |
1880 | int i; | |
1881 | ||
1882 | for (i = 0; i < iucv_max_pathid; i++) { | |
1883 | if (iucv_path_table[i]) | |
1884 | return 0; | |
1885 | } | |
1886 | return 1; | |
1887 | } | |
1888 | ||
1889 | /** | |
1890 | * iucv_pm_freeze() - Freeze PM callback | |
1891 | * @dev: iucv-based device | |
1892 | * | |
1893 | * disable iucv interrupts | |
1894 | * invoke callback function of the iucv-based driver | |
1895 | * shut down iucv if no iucv paths are established anymore | |
1896 | */ | |
1897 | static int iucv_pm_freeze(struct device *dev) | |
1898 | { | |
1899 | int cpu; | |
b7c2aecc | 1900 | struct iucv_irq_list *p, *n; |
672e405b UB |
1901 | int rc = 0; |
1902 | ||
1903 | #ifdef CONFIG_PM_DEBUG | |
1904 | printk(KERN_WARNING "iucv_pm_freeze\n"); | |
1905 | #endif | |
b7c2aecc | 1906 | if (iucv_pm_state != IUCV_PM_FREEZING) { |
f2019030 | 1907 | for_each_cpu(cpu, &iucv_irq_cpumask) |
b7c2aecc UB |
1908 | smp_call_function_single(cpu, iucv_block_cpu_almost, |
1909 | NULL, 1); | |
1910 | cancel_work_sync(&iucv_work); | |
1911 | list_for_each_entry_safe(p, n, &iucv_work_queue, list) { | |
1912 | list_del_init(&p->list); | |
1913 | iucv_sever_pathid(p->data.ippathid, | |
1914 | iucv_error_no_listener); | |
1915 | kfree(p); | |
1916 | } | |
1917 | } | |
4c89d86b | 1918 | iucv_pm_state = IUCV_PM_FREEZING; |
672e405b UB |
1919 | if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) |
1920 | rc = dev->driver->pm->freeze(dev); | |
1921 | if (iucv_path_table_empty()) | |
1922 | iucv_disable(); | |
1923 | return rc; | |
1924 | } | |
1925 | ||
1926 | /** | |
1927 | * iucv_pm_thaw() - Thaw PM callback | |
1928 | * @dev: iucv-based device | |
1929 | * | |
1930 | * make iucv ready for use again: allocate path table, declare interrupt buffers | |
1931 | * and enable iucv interrupts | |
1932 | * invoke callback function of the iucv-based driver | |
1933 | */ | |
1934 | static int iucv_pm_thaw(struct device *dev) | |
1935 | { | |
1936 | int rc = 0; | |
1937 | ||
1938 | #ifdef CONFIG_PM_DEBUG | |
1939 | printk(KERN_WARNING "iucv_pm_thaw\n"); | |
1940 | #endif | |
4c89d86b | 1941 | iucv_pm_state = IUCV_PM_THAWING; |
672e405b UB |
1942 | if (!iucv_path_table) { |
1943 | rc = iucv_enable(); | |
1944 | if (rc) | |
1945 | goto out; | |
1946 | } | |
f2019030 | 1947 | if (cpumask_empty(&iucv_irq_cpumask)) { |
672e405b UB |
1948 | if (iucv_nonsmp_handler) |
1949 | /* enable interrupts on one cpu */ | |
1950 | iucv_allow_cpu(NULL); | |
1951 | else | |
1952 | /* enable interrupts on all cpus */ | |
1953 | iucv_setmask_mp(); | |
1954 | } | |
1955 | if (dev->driver && dev->driver->pm && dev->driver->pm->thaw) | |
1956 | rc = dev->driver->pm->thaw(dev); | |
1957 | out: | |
1958 | return rc; | |
1959 | } | |
1960 | ||
1961 | /** | |
1962 | * iucv_pm_restore() - Restore PM callback | |
1963 | * @dev: iucv-based device | |
1964 | * | |
1965 | * make iucv ready for use again: allocate path table, declare interrupt buffers | |
1966 | * and enable iucv interrupts | |
1967 | * invoke callback function of the iucv-based driver | |
1968 | */ | |
1969 | static int iucv_pm_restore(struct device *dev) | |
1970 | { | |
1971 | int rc = 0; | |
1972 | ||
1973 | #ifdef CONFIG_PM_DEBUG | |
1974 | printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table); | |
1975 | #endif | |
4c89d86b | 1976 | if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table) |
47c4cfc3 | 1977 | pr_warn("Suspending Linux did not completely close all IUCV connections\n"); |
4c89d86b | 1978 | iucv_pm_state = IUCV_PM_RESTORING; |
f2019030 | 1979 | if (cpumask_empty(&iucv_irq_cpumask)) { |
672e405b UB |
1980 | rc = iucv_query_maxconn(); |
1981 | rc = iucv_enable(); | |
1982 | if (rc) | |
1983 | goto out; | |
1984 | } | |
1985 | if (dev->driver && dev->driver->pm && dev->driver->pm->restore) | |
1986 | rc = dev->driver->pm->restore(dev); | |
1987 | out: | |
1988 | return rc; | |
1989 | } | |
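/*
 * Hypothetical driver-side hookup (assumed code): the PM callbacks above
 * reach an iucv-based driver through dev->driver->pm, so such a driver
 * provides a struct dev_pm_ops and registers a driver on iucv_bus.  The
 * "example_*" names are placeholders; a real driver would sever or
 * re-establish its own paths inside these callbacks.
 */
static int example_drv_pm_freeze(struct device *dev)
{
	/* quiesce or sever this driver's IUCV paths */
	return 0;
}

static int example_drv_pm_thaw(struct device *dev)
{
	/* re-establish paths once the IUCV base layer is usable again */
	return 0;
}

static const struct dev_pm_ops example_drv_pm_ops = {
	.freeze	 = example_drv_pm_freeze,
	.thaw	 = example_drv_pm_thaw,
	.restore = example_drv_pm_thaw,
};

static struct device_driver example_iucv_driver = {
	.owner	= THIS_MODULE,
	.name	= "example_iucv",
	.bus	= &iucv_bus,
	.pm	= &example_drv_pm_ops,
};

/* registered from the driver's init code with driver_register(&example_iucv_driver) */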
1990 | ||
96d042a6 FB |
1991 | struct iucv_interface iucv_if = { |
1992 | .message_receive = iucv_message_receive, | |
1993 | .__message_receive = __iucv_message_receive, | |
1994 | .message_reply = iucv_message_reply, | |
1995 | .message_reject = iucv_message_reject, | |
1996 | .message_send = iucv_message_send, | |
1997 | .__message_send = __iucv_message_send, | |
1998 | .message_send2way = iucv_message_send2way, | |
1999 | .message_purge = iucv_message_purge, | |
2000 | .path_accept = iucv_path_accept, | |
2001 | .path_connect = iucv_path_connect, | |
2002 | .path_quiesce = iucv_path_quiesce, | |
2003 | .path_resume = iucv_path_resume, | |
2004 | .path_sever = iucv_path_sever, | |
2005 | .iucv_register = iucv_register, | |
2006 | .iucv_unregister = iucv_unregister, | |
2007 | .bus = NULL, | |
2008 | .root = NULL, | |
2009 | }; | |
2010 | EXPORT_SYMBOL(iucv_if); | |
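/*
 * Hypothetical consumer of iucv_if (assumed code, modeled on how a module
 * that must not link against iucv directly can pick the interface up at
 * runtime): the symbol is resolved with symbol_get() and all calls go
 * through the returned function table.  "my_handler" is a placeholder
 * struct iucv_handler owned by that module.
 */
static struct iucv_interface *pr_iucv;

static int example_attach_to_iucv(struct iucv_handler *my_handler)
{
	pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
	if (!pr_iucv)
		return -ENODEV;
	return pr_iucv->iucv_register(my_handler, 0);
}

static void example_detach_from_iucv(struct iucv_handler *my_handler)
{
	pr_iucv->iucv_unregister(my_handler, 0);
	symbol_put(iucv_if);
}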
2011 | ||
2356f4cb MS |
2012 | /** |
2013 | * iucv_init | |
2014 | * | |
2015 | * Allocates and initializes various data structures. | |
2016 | */ | |
da99f056 | 2017 | static int __init iucv_init(void) |
2356f4cb MS |
2018 | { |
2019 | int rc; | |
70cf5035 | 2020 | int cpu; |
2356f4cb MS |
2021 | |
2022 | if (!MACHINE_IS_VM) { | |
2023 | rc = -EPROTONOSUPPORT; | |
2024 | goto out; | |
2025 | } | |
5beab991 | 2026 | ctl_set_bit(0, 1); |
2356f4cb MS |
2027 | rc = iucv_query_maxconn(); |
2028 | if (rc) | |
5beab991 | 2029 | goto out_ctl; |
1dad093b | 2030 | rc = register_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt); |
2356f4cb | 2031 | if (rc) |
5beab991 | 2032 | goto out_ctl; |
035da16f | 2033 | iucv_root = root_device_register("iucv"); |
2356f4cb MS |
2034 | if (IS_ERR(iucv_root)) { |
2035 | rc = PTR_ERR(iucv_root); | |
2d7bf367 | 2036 | goto out_int; |
2356f4cb | 2037 | } |
70cf5035 | 2038 | |
a0e247a8 | 2039 | cpu_notifier_register_begin(); |
70cf5035 | 2040 | |
a0e247a8 SB |
2041 | for_each_online_cpu(cpu) { |
2042 | if (alloc_iucv_data(cpu)) { | |
42e1b4c2 UB |
2043 | rc = -ENOMEM; |
2044 | goto out_free; | |
2045 | } | |
2356f4cb | 2046 | } |
a0e247a8 | 2047 | rc = __register_hotcpu_notifier(&iucv_cpu_notifier); |
2d7bf367 CH |
2048 | if (rc) |
2049 | goto out_free; | |
a0e247a8 SB |
2050 | |
2051 | cpu_notifier_register_done(); | |
2052 | ||
6c005961 UB |
2053 | rc = register_reboot_notifier(&iucv_reboot_notifier); |
2054 | if (rc) | |
2055 | goto out_cpu; | |
2356f4cb MS |
2056 | ASCEBC(iucv_error_no_listener, 16); |
2057 | ASCEBC(iucv_error_no_memory, 16); | |
2058 | ASCEBC(iucv_error_pathid, 16); | |
2059 | iucv_available = 1; | |
2d7bf367 CH |
2060 | rc = bus_register(&iucv_bus); |
2061 | if (rc) | |
6c005961 | 2062 | goto out_reboot; |
96d042a6 FB |
2063 | iucv_if.root = iucv_root; |
2064 | iucv_if.bus = &iucv_bus; | |
2356f4cb MS |
2065 | return 0; |
2066 | ||
6c005961 UB |
2067 | out_reboot: |
2068 | unregister_reboot_notifier(&iucv_reboot_notifier); | |
2d7bf367 | 2069 | out_cpu: |
a0e247a8 SB |
2070 | cpu_notifier_register_begin(); |
2071 | __unregister_hotcpu_notifier(&iucv_cpu_notifier); | |
70cf5035 | 2072 | out_free: |
a0e247a8 SB |
2073 | for_each_possible_cpu(cpu) |
2074 | free_iucv_data(cpu); | |
2075 | ||
2076 | cpu_notifier_register_done(); | |
2077 | ||
035da16f | 2078 | root_device_unregister(iucv_root); |
2356f4cb | 2079 | out_int: |
1dad093b | 2080 | unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt); |
5beab991 MS |
2081 | out_ctl: |
2082 | ctl_clear_bit(0, 1); | |
2356f4cb MS |
2083 | out: |
2084 | return rc; | |
2085 | } | |
2086 | ||
2087 | /** | |
2088 | * iucv_exit | |
2089 | * | |
2090 | * Frees everything allocated from iucv_init. | |
2091 | */ | |
da99f056 | 2092 | static void __exit iucv_exit(void) |
2356f4cb | 2093 | { |
04b090d5 | 2094 | struct iucv_irq_list *p, *n; |
70cf5035 | 2095 | int cpu; |
2356f4cb | 2096 | |
04b090d5 MS |
2097 | spin_lock_irq(&iucv_queue_lock); |
2098 | list_for_each_entry_safe(p, n, &iucv_task_queue, list) | |
2099 | kfree(p); | |
2356f4cb MS |
2100 | list_for_each_entry_safe(p, n, &iucv_work_queue, list) |
2101 | kfree(p); | |
04b090d5 | 2102 | spin_unlock_irq(&iucv_queue_lock); |
6c005961 | 2103 | unregister_reboot_notifier(&iucv_reboot_notifier); |
a0e247a8 SB |
2104 | cpu_notifier_register_begin(); |
2105 | __unregister_hotcpu_notifier(&iucv_cpu_notifier); | |
2106 | for_each_possible_cpu(cpu) | |
2107 | free_iucv_data(cpu); | |
2108 | cpu_notifier_register_done(); | |
035da16f | 2109 | root_device_unregister(iucv_root); |
2356f4cb | 2110 | bus_unregister(&iucv_bus); |
1dad093b | 2111 | unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt); |
2356f4cb MS |
2112 | } |
2113 | ||
2114 | subsys_initcall(iucv_init); | |
2115 | module_exit(iucv_exit); | |
2116 | ||
2356f4cb MS |
2117 | MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert ([email protected])"); |
2118 | MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver"); | |
2119 | MODULE_LICENSE("GPL"); |