linux.git: arch/s390/kvm/interrupt.c
// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <[email protected]>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/nospec.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include <asm/airq.h>
#include <asm/tpi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
#include "pci.h"

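/*
 * "CPU address" values stored into __LC_EXT_CPU_ADDR to tag the
 * EXT_IRQ_CP_SERVICE external interruptions delivered further below.
 */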
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

static struct kvm_s390_gib *gib;

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
        int c, scn;

        if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
                return 0;

        BUG_ON(!kvm_s390_use_sca_entries());
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (src_id)
                *src_id = scn;

        return c;
}

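/*
 * Try to post an external call in this VCPU's SIGP control block. The
 * cmpxchg() only succeeds while no other external call is pending
 * (old_val.c forced to 0), so a concurrent sender gets -EBUSY.
 */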
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
        int expect, rc;

        BUG_ON(!kvm_s390_use_sca_entries());
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (rc != expect) {
                /* another external call is pending */
                return -EBUSY;
        }
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
        return 0;
}

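/*
 * Drop any pending external call from the SIGP control block and clear
 * CPUSTAT_ECALL_PEND; warns if the entry changed underneath us.
 */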
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
        int rc, expect;

        if (!kvm_s390_use_sca_entries())
                return;
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);
        WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        return psw_extint_disabled(vcpu) &&
               psw_ioint_disabled(vcpu) &&
               psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        if (psw_extint_disabled(vcpu) ||
            !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
                return 0;
        if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
                /* No timer interrupts when single stepping */
                return 0;
        return 1;
}

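/*
 * A clock comparator interrupt is pending once the TOD clock has passed
 * the CKC value; with the CR0 sign control set the comparison is signed.
 */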
static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
        const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
        const u64 ckc = vcpu->arch.sie_block->ckc;

        if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
                if ((s64)ckc >= (s64)now)
                        return 0;
        } else if (ckc >= now) {
                return 0;
        }
        return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        return !psw_extint_disabled(vcpu) &&
               (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
        if (!cpu_timer_interrupts_enabled(vcpu))
                return 0;
        return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

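/*
 * ISC <-> bit conversions. CR6 carries one enablement bit per I/O
 * interruption subclass, e.g. isc_to_isc_bits(5) == 0x04000000.
 * isc_to_int_word() builds the matching interruption word, e.g.
 * isc_to_int_word(5) == 0xa8000000, and int_word_to_isc() inverts it.
 */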
static uint64_t isc_to_isc_bits(int isc)
{
        return (0x80 >> isc) << 24;
}

static inline u32 isc_to_int_word(u8 isc)
{
        return ((u32)isc << 27) | 0x80000000;
}

static inline u8 int_word_to_isc(u32 int_word)
{
        return (int_word & 0x38000000) >> 27;
}

/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fixup the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)

/**
 * gisa_set_iam - change the GISA interruption alert mask
 *
 * @gisa: gisa to operate on
 * @iam: new IAM value to use
 *
 * Change the IAM atomically with the next alert address and the IPM
 * of the GISA if the GISA is not part of the GIB alert list. All three
 * fields are located in the first long word of the GISA.
 *
 * Returns: 0 on success
 *          -EBUSY in case the gisa is part of the alert list
 */
static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
{
        u64 word, _word;

        do {
                word = READ_ONCE(gisa->u64.word[0]);
                if ((u64)gisa != word >> 32)
                        return -EBUSY;
                _word = (word & ~0xffUL) | iam;
        } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);

        return 0;
}

/**
 * gisa_clear_ipm - clear the GISA interruption pending mask
 *
 * @gisa: gisa to operate on
 *
 * Clear the IPM atomically with the next alert address and the IAM
 * of the GISA unconditionally. All three fields are located in the
 * first long word of the GISA.
 */
static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
{
        u64 word, _word;

        do {
                word = READ_ONCE(gisa->u64.word[0]);
                _word = word & ~(0xffUL << 24);
        } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
}

/**
 * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
 *
 * @gi: gisa interrupt struct to work on
 *
 * Atomically restores the interruption alert mask if none of the
 * relevant ISCs are pending and returns the IPM.
 *
 * Returns: the relevant pending ISCs
 */
static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
{
        u8 pending_mask, alert_mask;
        u64 word, _word;

        do {
                word = READ_ONCE(gi->origin->u64.word[0]);
                alert_mask = READ_ONCE(gi->alert.mask);
                pending_mask = (u8)(word >> 24) & alert_mask;
                if (pending_mask)
                        return pending_mask;
                _word = (word & ~0xffUL) | alert_mask;
        } while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);

        return 0;
}

static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
{
        return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa;
}

static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
        return READ_ONCE(gisa->ipm);
}

static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

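/*
 * Combined pending mask of floating and local interrupts, with the
 * currently masked floating interrupts removed; GISA-resident I/O
 * interrupts are folded in by pending_irqs() below.
 */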
static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
        unsigned long pending = vcpu->kvm->arch.float_int.pending_irqs |
                                vcpu->arch.local_int.pending_irqs;

        pending &= ~vcpu->kvm->arch.float_int.masked_irqs;
        return pending;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
        unsigned long pending_mask;

        pending_mask = pending_irqs_no_gisa(vcpu);
        if (gi->origin)
                pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
        return pending_mask;
}

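/*
 * The IRQ_PEND_IO_ISC_* bits are laid out in reverse ISC order, so the
 * same subtraction converts in both directions.
 */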
static inline int isc_to_irq_type(unsigned long isc)
{
        return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
        return IRQ_PEND_IO_ISC_0 - irq_type;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
                                  unsigned long active_mask)
{
        int i;

        for (i = 0; i <= MAX_ISC; i++)
                if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
                        active_mask &= ~(1UL << (isc_to_irq_type(i)));

        return active_mask;
}

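/*
 * Reduce the pending mask to the interrupts that the current PSW and
 * control register state actually allow to be delivered right now.
 */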
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
        unsigned long active_mask;

        active_mask = pending_irqs(vcpu);
        if (!active_mask)
                return 0;

        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
        if (psw_ioint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_IO_MASK;
        else
                active_mask = disable_iscs(vcpu, active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) {
                __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
                __clear_bit(IRQ_PEND_EXT_SERVICE_EV, &active_mask);
        }
        if (psw_mchk_disabled(vcpu))
                active_mask &= ~IRQ_PEND_MCHK_MASK;
        /* PV guest cpus can have a single interruption injected at a time. */
        if (kvm_s390_pv_cpu_get_handle(vcpu) &&
            vcpu->arch.sie_block->iictl != IICTL_CODE_NONE)
                active_mask &= ~(IRQ_PEND_EXT_II_MASK |
                                 IRQ_PEND_IO_MASK |
                                 IRQ_PEND_MCHK_MASK);
        /*
         * Check the cr14 of both the floating and the local interrupt,
         * because bit IRQ_PEND_MCHK_REP could be set in both cases.
         */
        if (!(vcpu->arch.sie_block->gcr[14] &
           (vcpu->kvm->arch.float_int.mchk.cr14 |
           vcpu->arch.local_int.irq.mchk.cr14)))
                __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

        /*
         * STOP irqs will never be actively delivered. They are triggered via
         * intercept requests and cleared when the stop intercept is performed.
         */
        __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

        return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
        set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
        clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
                                      CPUSTAT_STOP_INT);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

        if (guestdbg_enabled(vcpu)) {
                vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
                return;
        if (psw_ioint_disabled(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
                return;
        if (psw_extint_disabled(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
                return;
        if (psw_mchk_disabled(vcpu))
                vcpu->arch.sie_block->ictl |= ICTL_LPSW;
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
        if (kvm_s390_is_stop_irq_pending(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
        set_intercept_indicators_io(vcpu);
        set_intercept_indicators_ext(vcpu);
        set_intercept_indicators_mchk(vcpu);
        set_intercept_indicators_stop(vcpu);
}

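/*
 * Most of the __deliver_* helpers below follow the same pattern for
 * non-protected guests: store the interruption code into the lowcore,
 * save the current PSW as the interruption old PSW and load the new
 * PSW from the lowcore. For protected (PV) guests the interruption
 * payload is handed to the ultravisor via the SIE block instead.
 */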
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc = 0;

        vcpu->stat.deliver_cputm++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                         0, 0);
        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
                vcpu->arch.sie_block->eic = EXT_IRQ_CPU_TIMER;
        } else {
                rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
                                   (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        }
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc = 0;

        vcpu->stat.deliver_ckc++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                         0, 0);
        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
                vcpu->arch.sie_block->eic = EXT_IRQ_CLK_COMP;
        } else {
                rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
                                   (u16 __user *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        }
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_ext_info ext;
        int rc;

        spin_lock(&li->lock);
        ext = li->irq.ext;
        clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        li->irq.ext.ext_params2 = 0;
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
                   ext.ext_params2);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_INIT,
                                         0, ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __write_machine_check(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_mchk_info *mchk)
{
        unsigned long ext_sa_addr;
        unsigned long lc;
        freg_t fprs[NUM_FPRS];
        union mci mci;
        int rc;

        /*
         * All other possible payload for a machine check (e.g. the register
         * contents in the save area) will be handled by the ultravisor, as
         * the hypervisor does not have the needed information for
         * protected guests.
         */
        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_MCHK;
                vcpu->arch.sie_block->mcic = mchk->mcic;
                vcpu->arch.sie_block->faddr = mchk->failing_storage_address;
                vcpu->arch.sie_block->edc = mchk->ext_damage_code;
                return 0;
        }

        mci.val = mchk->mcic;
        /* take care of lazy register loading */
        save_fpu_regs();
        save_access_regs(vcpu->run->s.regs.acrs);
        if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
                save_gs_cb(current->thread.gs_cb);

        /* Extended save area */
        rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
                           sizeof(unsigned long));
        /* Only bits 0 through 63-LC are used for address formation */
        lc = ext_sa_addr & MCESA_LC_MASK;
        if (test_kvm_facility(vcpu->kvm, 133)) {
                switch (lc) {
                case 0:
                case 10:
                        ext_sa_addr &= ~0x3ffUL;
                        break;
                case 11:
                        ext_sa_addr &= ~0x7ffUL;
                        break;
                case 12:
                        ext_sa_addr &= ~0xfffUL;
                        break;
                default:
                        ext_sa_addr = 0;
                        break;
                }
        } else {
                ext_sa_addr &= ~0x3ffUL;
        }

        if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
                if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
                                    512))
                        mci.vr = 0;
        } else {
                mci.vr = 0;
        }
        if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
            && (lc == 11 || lc == 12)) {
                if (write_guest_abs(vcpu, ext_sa_addr + 1024,
                                    &vcpu->run->s.regs.gscb, 32))
                        mci.gs = 0;
        } else {
                mci.gs = 0;
        }

        /* General interruption information */
        rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

        /* Register-save areas */
        if (MACHINE_HAS_VX) {
                convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
                rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
        } else {
                rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
                                     vcpu->run->s.regs.fprs, 128);
        }
        rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
                             vcpu->run->s.regs.gprs, 128);
        rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
                           (u32 __user *) __LC_FP_CREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
                           (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
                           (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
                           (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
        rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
                             &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
                             &vcpu->arch.sie_block->gcr, 128);

        /* Extended interruption information */
        rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
                           (u32 __user *) __LC_EXT_DAMAGE_CODE);
        rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
                           (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
                             sizeof(mchk->fixed_logout));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info mchk = {};
        int deliver = 0;
        int rc = 0;

        spin_lock(&fi->lock);
        spin_lock(&li->lock);
        if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
            test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
                /*
                 * If there was an exigent machine check pending, then any
                 * repressible machine checks that might have been pending
                 * are indicated along with it, so always clear bits for
                 * repressible and exigent interrupts.
                 */
                mchk = li->irq.mchk;
                clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
                clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
                memset(&li->irq.mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        /*
         * We indicate floating repressible conditions along with
         * other pending conditions. Channel Report Pending and Channel
         * Subsystem damage are the only two and are indicated by
         * bits in mcic and masked in cr14.
         */
        if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
                mchk.mcic |= fi->mchk.mcic;
                mchk.cr14 |= fi->mchk.cr14;
                memset(&fi->mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        spin_unlock(&li->lock);
        spin_unlock(&fi->lock);

        if (deliver) {
                VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
                           mchk.mcic);
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_MCHK,
                                                 mchk.cr14, mchk.mcic);
                vcpu->stat.deliver_machine_check++;
                rc = __write_machine_check(vcpu, &mchk);
        }
        return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc = 0;

        VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_RESTART;
        } else {
                rc  = write_guest_lc(vcpu,
                                     offsetof(struct lowcore, restart_old_psw),
                                     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        }
        clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info prefix;

        spin_lock(&li->lock);
        prefix = li->irq.prefix;
        li->irq.prefix.address = 0;
        clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        spin_unlock(&li->lock);

        vcpu->stat.deliver_prefix_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_SIGP_SET_PREFIX,
                                         prefix.address, 0);

        kvm_s390_set_prefix(vcpu, prefix.address);
        return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;
        int cpu_addr;

        spin_lock(&li->lock);
        cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        clear_bit(cpu_addr, li->sigp_emerg_pending);
        if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
                clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                         cpu_addr, 0);
        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
                vcpu->arch.sie_block->eic = EXT_IRQ_EMERGENCY_SIG;
                vcpu->arch.sie_block->extcpuaddr = cpu_addr;
                return 0;
        }

        rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info extcall;
        int rc;

        spin_lock(&li->lock);
        extcall = li->irq.extcall;
        li->irq.extcall.code = 0;
        clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
        vcpu->stat.deliver_external_call++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_EXTERNAL_CALL,
                                         extcall.code, 0);
        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
                vcpu->arch.sie_block->eic = EXT_IRQ_EXTERNAL_CALL;
                vcpu->arch.sie_block->extcpuaddr = extcall.code;
                return 0;
        }

        rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

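/*
 * Program interruption injection for protected guests: only the
 * specification and operand exception codes can be forwarded via the
 * SIE block; anything else is rejected with -EINVAL.
 */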
static int __deliver_prog_pv(struct kvm_vcpu *vcpu, u16 code)
{
        switch (code) {
        case PGM_SPECIFICATION:
                vcpu->arch.sie_block->iictl = IICTL_CODE_SPECIFICATION;
                break;
        case PGM_OPERAND:
                vcpu->arch.sie_block->iictl = IICTL_CODE_OPERAND;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0, nullifying = false;
        u16 ilen;

        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);

        ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
        VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
                   pgm_info.code, ilen);
        vcpu->stat.deliver_program++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);

        /* PER is handled by the ultravisor */
        if (kvm_s390_pv_cpu_is_protected(vcpu))
                return __deliver_prog_pv(vcpu, pgm_info.code & ~PGM_PER);

        switch (pgm_info.code & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
                nullifying = true;
                fallthrough;
        case PGM_SPACE_SWITCH:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                                  (u8 *)__LC_EXC_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                                   (u8 *)__LC_OP_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_MONITOR:
                rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
                                  (u16 *)__LC_MON_CLASS_NR);
                rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                                   (u64 *)__LC_MON_CODE);
                break;
        case PGM_VECTOR_PROCESSING:
        case PGM_DATA:
                rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                                  (u32 *)__LC_DATA_EXC_CODE);
                break;
        case PGM_PROTECTION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                break;
        case PGM_STACK_FULL:
        case PGM_STACK_EMPTY:
        case PGM_STACK_SPECIFICATION:
        case PGM_STACK_TYPE:
        case PGM_STACK_OPERATION:
        case PGM_TRACE_TABEL:
        case PGM_CRYPTO_OPERATION:
                nullifying = true;
                break;
        }

        if (pgm_info.code & PGM_PER) {
                rc |= put_guest_lc(vcpu, pgm_info.per_code,
                                   (u8 *) __LC_PER_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                                   (u8 *)__LC_PER_ATMID);
                rc |= put_guest_lc(vcpu, pgm_info.per_address,
                                   (u64 *) __LC_PER_ADDRESS);
                rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                                   (u8 *) __LC_PER_ACCESS_ID);
        }

        if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
                kvm_s390_rewind_psw(vcpu, ilen);

        /* bit 1+2 of the target are the ilc, so we can directly use ilen */
        rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
                           (u64 *) __LC_PGM_LAST_BREAK);
        rc |= put_guest_lc(vcpu, pgm_info.code,
                           (u16 *)__LC_PGM_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

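/*
 * Common tail for service-signal delivery: forward the parameter to the
 * ultravisor for protected guests, otherwise write it to the lowcore
 * together with the external interruption code and swap the PSWs.
 */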
static int write_sclp(struct kvm_vcpu *vcpu, u32 parm)
{
        int rc;

        if (kvm_s390_pv_cpu_get_handle(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
                vcpu->arch.sie_block->eic = EXT_IRQ_SERVICE_SIG;
                vcpu->arch.sie_block->eiparams = parm;
                return 0;
        }

        rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, parm,
                           (u32 *)__LC_EXT_PARAMS);

        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_ext_info ext;

        spin_lock(&fi->lock);
        if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs) ||
            !(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
                spin_unlock(&fi->lock);
                return 0;
        }
        ext = fi->srv_signal;
        memset(&fi->srv_signal, 0, sizeof(ext));
        clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
        clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
        if (kvm_s390_pv_cpu_is_protected(vcpu))
                set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
                   ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
                                         ext.ext_params, 0);

        return write_sclp(vcpu, ext.ext_params);
}

static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_ext_info ext;

        spin_lock(&fi->lock);
        if (!(test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs))) {
                spin_unlock(&fi->lock);
                return 0;
        }
        ext = fi->srv_signal;
        /* only clear the event bit */
        fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING;
        clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sclp parameter event");
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
                                         ext.ext_params, 0);

        return write_sclp(vcpu, SCCB_EVENT_PENDING);
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_PFAULT] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
                clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_INT_PFAULT_DONE, 0,
                                                 inti->ext.ext_params2);
                VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
                           inti->ext.ext_params2);

                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, PFAULT_DONE,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                VCPU_EVENT(vcpu, 4,
                           "deliver: virtio parm: 0x%x,parm64: 0x%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                inti->ext.ext_params,
                                inti->ext.ext_params2);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
                clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                                (u32 *)__LC_EXT_PARAMS);
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

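/*
 * Write one I/O interruption to the guest: via the SIE block for
 * protected guests, via the lowcore I/O fields and a PSW swap otherwise.
 */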
static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
        int rc;

        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_IO;
                vcpu->arch.sie_block->subchannel_id = io->subchannel_id;
                vcpu->arch.sie_block->subchannel_nr = io->subchannel_nr;
                vcpu->arch.sie_block->io_int_parm = io->io_int_parm;
                vcpu->arch.sie_block->io_int_word = io->io_int_word;
                return 0;
        }

        rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
        rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
        rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
        rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
        rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw,
                             sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
                                     unsigned long irq_type)
{
        struct list_head *isc_list;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
        struct kvm_s390_interrupt_info *inti = NULL;
        struct kvm_s390_io_info io;
        u32 isc;
        int rc = 0;

        fi = &vcpu->kvm->arch.float_int;

        spin_lock(&fi->lock);
        isc = irq_type_to_isc(irq_type);
        isc_list = &fi->lists[isc];
        inti = list_first_entry_or_null(isc_list,
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                if (inti->type & KVM_S390_INT_IO_AI_MASK)
                        VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
                else
                        VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
                        inti->io.subchannel_id >> 8,
                        inti->io.subchannel_id >> 1 & 0x3,
                        inti->io.subchannel_nr);

                vcpu->stat.deliver_io++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                ((__u32)inti->io.subchannel_id << 16) |
                                inti->io.subchannel_nr,
                                ((__u64)inti->io.io_int_parm << 32) |
                                inti->io.io_int_word);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_IO] -= 1;
        }
        if (list_empty(isc_list))
                clear_bit(irq_type, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc = __do_deliver_io(vcpu, &(inti->io));
                kfree(inti);
                goto out;
        }

        if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
                /*
                 * in case an adapter interrupt was not delivered
                 * in SIE context KVM will handle the delivery
                 */
                VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
                memset(&io, 0, sizeof(io));
                io.io_int_word = isc_to_int_word(isc);
                vcpu->stat.deliver_io++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                        KVM_S390_INT_IO(1, 0, 0, 0),
                        ((__u32)io.subchannel_id << 16) |
                        io.subchannel_nr,
                        ((__u64)io.io_int_parm << 32) |
                        io.io_int_word);
                rc = __do_deliver_io(vcpu, &io);
        }
out:
        return rc;
}

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        if (!sclp.has_sigpif)
                return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

        return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
        if (deliverable_irqs(vcpu))
                return 1;

        if (kvm_cpu_has_pending_timer(vcpu))
                return 1;

        /* external call pending and deliverable */
        if (kvm_s390_ext_call_pending(vcpu) &&
            !psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
                return 1;

        if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
                return 1;
        return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

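/*
 * Compute how long the VCPU may sleep: the minimum of the time until
 * the clock comparator fires and the remaining CPU timer, considering
 * only the timers that are currently enabled; 0 means a timer has
 * already expired (or none is armed).
 */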
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
        const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
        const u64 ckc = vcpu->arch.sie_block->ckc;
        u64 cputm, sltime = 0;

        if (ckc_interrupts_enabled(vcpu)) {
                if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
                        if ((s64)now < (s64)ckc)
                                sltime = tod_to_ns((s64)ckc - (s64)now);
                } else if (now < ckc) {
                        sltime = tod_to_ns(ckc - now);
                }
                /* already expired */
                if (!sltime)
                        return 0;
                if (cpu_timer_interrupts_enabled(vcpu)) {
                        cputm = kvm_s390_get_cpu_timer(vcpu);
                        /* already expired? */
                        if (cputm >> 63)
                                return 0;
                        return min_t(u64, sltime, tod_to_ns(cputm));
                }
        } else if (cpu_timer_interrupts_enabled(vcpu)) {
                sltime = kvm_s390_get_cpu_timer(vcpu);
                /* already expired? */
                if (sltime >> 63)
                        return 0;
                /* convert the remaining CPU timer value to ns like above */
                sltime = tod_to_ns(sltime);
        }
        return sltime;
}

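/*
 * Enabled-wait handling: block the VCPU until an interrupt becomes
 * pending or the earliest enabled timer (clock comparator or CPU timer)
 * is due; a disabled wait is reported with -EOPNOTSUPP instead.
 */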
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
        u64 sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (gi->origin &&
            (gisa_get_ipm_or_restore_iam(gi) &
             vcpu->arch.sie_block->gcr[6] >> 24))
                return 0;

        if (!ckc_interrupts_enabled(vcpu) &&
            !cpu_timer_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                __set_cpu_idle(vcpu);
                goto no_timer;
        }

        sltime = __calculate_sltime(vcpu);
        if (!sltime)
                return 0;

        __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
        kvm_vcpu_srcu_read_unlock(vcpu);
        kvm_vcpu_halt(vcpu);
        vcpu->valid_wakeup = false;
        __unset_cpu_idle(vcpu);
        kvm_vcpu_srcu_read_lock(vcpu);

        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
        vcpu->valid_wakeup = true;
        kvm_vcpu_wake_up(vcpu);

        /*
         * The VCPU might not be sleeping but rather executing VSIE. Let's
         * kick it, so it leaves the SIE to process the request.
         */
        kvm_s390_vsie_kick(vcpu);
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;
        u64 sltime;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        sltime = __calculate_sltime(vcpu);

        /*
         * If the monotonic clock runs faster than the tod clock we might be
         * woken up too early and have to go back to sleep to avoid deadlocks.
         */
        if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
                return HRTIMER_RESTART;
        kvm_s390_vcpu_wakeup(vcpu);
        return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        spin_lock(&li->lock);
        li->pending_irqs = 0;
        bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        memset(&li->irq, 0, sizeof(li->irq));
        spin_unlock(&li->lock);

        sca_clear_ext_call(vcpu);
}

1387 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
1388 {
1389         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1390         int rc = 0;
1391         unsigned long irq_type;
1392         unsigned long irqs;
1393
1394         __reset_intercept_indicators(vcpu);
1395
1396         /* pending ckc conditions might have been invalidated */
1397         clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1398         if (ckc_irq_pending(vcpu))
1399                 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1400
1401         /* pending cpu timer conditions might have been invalidated */
1402         clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1403         if (cpu_timer_irq_pending(vcpu))
1404                 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1405
1406         while ((irqs = deliverable_irqs(vcpu)) && !rc) {
1407                 /* bits are in the reverse order of interrupt priority */
1408                 irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
1409                 switch (irq_type) {
1410                 case IRQ_PEND_IO_ISC_0:
1411                 case IRQ_PEND_IO_ISC_1:
1412                 case IRQ_PEND_IO_ISC_2:
1413                 case IRQ_PEND_IO_ISC_3:
1414                 case IRQ_PEND_IO_ISC_4:
1415                 case IRQ_PEND_IO_ISC_5:
1416                 case IRQ_PEND_IO_ISC_6:
1417                 case IRQ_PEND_IO_ISC_7:
1418                         rc = __deliver_io(vcpu, irq_type);
1419                         break;
1420                 case IRQ_PEND_MCHK_EX:
1421                 case IRQ_PEND_MCHK_REP:
1422                         rc = __deliver_machine_check(vcpu);
1423                         break;
1424                 case IRQ_PEND_PROG:
1425                         rc = __deliver_prog(vcpu);
1426                         break;
1427                 case IRQ_PEND_EXT_EMERGENCY:
1428                         rc = __deliver_emergency_signal(vcpu);
1429                         break;
1430                 case IRQ_PEND_EXT_EXTERNAL:
1431                         rc = __deliver_external_call(vcpu);
1432                         break;
1433                 case IRQ_PEND_EXT_CLOCK_COMP:
1434                         rc = __deliver_ckc(vcpu);
1435                         break;
1436                 case IRQ_PEND_EXT_CPU_TIMER:
1437                         rc = __deliver_cpu_timer(vcpu);
1438                         break;
1439                 case IRQ_PEND_RESTART:
1440                         rc = __deliver_restart(vcpu);
1441                         break;
1442                 case IRQ_PEND_SET_PREFIX:
1443                         rc = __deliver_set_prefix(vcpu);
1444                         break;
1445                 case IRQ_PEND_PFAULT_INIT:
1446                         rc = __deliver_pfault_init(vcpu);
1447                         break;
1448                 case IRQ_PEND_EXT_SERVICE:
1449                         rc = __deliver_service(vcpu);
1450                         break;
1451                 case IRQ_PEND_EXT_SERVICE_EV:
1452                         rc = __deliver_service_ev(vcpu);
1453                         break;
1454                 case IRQ_PEND_PFAULT_DONE:
1455                         rc = __deliver_pfault_done(vcpu);
1456                         break;
1457                 case IRQ_PEND_VIRTIO:
1458                         rc = __deliver_virtio(vcpu);
1459                         break;
1460                 default:
1461                         WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
1462                         clear_bit(irq_type, &li->pending_irqs);
1463                 }
1464         }
1465
1466         set_intercept_indicators(vcpu);
1467
1468         return rc;
1469 }
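/*
 * Illustrative sketch, not from this file: the delivery loop above depends
 * on the IRQ_PEND_* bit numbers growing with priority, so the highest set
 * bit is always the next interrupt to deliver.  A minimal stand-in for
 * find_last_bit() on a single word (hypothetical helper name):
 */
#include <stdio.h>

/* index of the most significant set bit, or size if no bit is set */
static unsigned long my_find_last_bit(unsigned long word, unsigned long size)
{
        unsigned long i;

        for (i = size; i-- > 0;)
                if (word & (1UL << i))
                        return i;
        return size;
}

int main(void)
{
        /* bits 3 and 10 pending: bit 10 (higher priority) is picked first */
        unsigned long irqs = (1UL << 3) | (1UL << 10);

        printf("%lu\n", my_find_last_bit(irqs, 64));    /* prints 10 */
        return 0;
}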
1470
1471 static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1472 {
1473         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1474
1475         vcpu->stat.inject_program++;
1476         VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
1477         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1478                                    irq->u.pgm.code, 0);
1479
1480         if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
1481                 /* auto-detect the ILC if no valid ILC was given */
1482                 irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
1483                 irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
1484                 irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
1485         }
1486
1487         if (irq->u.pgm.code == PGM_PER) {
1488                 li->irq.pgm.code |= PGM_PER;
1489                 li->irq.pgm.flags = irq->u.pgm.flags;
1490                 /* only modify PER related information */
1491                 li->irq.pgm.per_address = irq->u.pgm.per_address;
1492                 li->irq.pgm.per_code = irq->u.pgm.per_code;
1493                 li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
1494                 li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
1495         } else if (!(irq->u.pgm.code & PGM_PER)) {
1496                 li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
1497                                    irq->u.pgm.code;
1498                 li->irq.pgm.flags = irq->u.pgm.flags;
1499                 /* only modify non-PER information */
1500                 li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
1501                 li->irq.pgm.mon_code = irq->u.pgm.mon_code;
1502                 li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
1503                 li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
1504                 li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
1505                 li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
1506         } else {
1507                 li->irq.pgm = irq->u.pgm;
1508         }
1509         set_bit(IRQ_PEND_PROG, &li->pending_irqs);
1510         return 0;
1511 }
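/*
 * Illustrative sketch, not from this file: the branches above collapse all
 * pending program interrupts into one slot that may carry a PER and a
 * non-PER condition at the same time.  Assuming PGM_PER is the single flag
 * bit 0x80 in the interruption code, the non-PER merge works like this:
 */
#include <stdio.h>

#define MY_PGM_PER 0x80                 /* stand-in for PGM_PER */

int main(void)
{
        unsigned int pending = MY_PGM_PER;      /* a PER event is queued */
        unsigned int new_code = 0x05;           /* e.g. addressing exception */

        /* injecting a non-PER irq preserves the pending PER bit */
        pending = (pending & MY_PGM_PER) | new_code;
        printf("0x%x\n", pending);              /* prints 0x85 */
        return 0;
}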
1512
1513 static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1514 {
1515         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1516
1517         vcpu->stat.inject_pfault_init++;
1518         VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
1519                    irq->u.ext.ext_params2);
1520         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
1521                                    irq->u.ext.ext_params,
1522                                    irq->u.ext.ext_params2);
1523
1524         li->irq.ext = irq->u.ext;
1525         set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
1526         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1527         return 0;
1528 }
1529
1530 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1531 {
1532         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1533         struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
1534         uint16_t src_id = irq->u.extcall.code;
1535
1536         vcpu->stat.inject_external_call++;
1537         VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
1538                    src_id);
1539         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
1540                                    src_id, 0);
1541
1542         /* sending vcpu invalid */
1543         if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
1544                 return -EINVAL;
1545
1546         if (sclp.has_sigpif && !kvm_s390_pv_cpu_get_handle(vcpu))
1547                 return sca_inject_ext_call(vcpu, src_id);
1548
1549         if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
1550                 return -EBUSY;
1551         *extcall = irq->u.extcall;
1552         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1553         return 0;
1554 }
1555
1556 static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1557 {
1558         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1559         struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
1560
1561         vcpu->stat.inject_set_prefix++;
1562         VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
1563                    irq->u.prefix.address);
1564         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
1565                                    irq->u.prefix.address, 0);
1566
1567         if (!is_vcpu_stopped(vcpu))
1568                 return -EBUSY;
1569
1570         *prefix = irq->u.prefix;
1571         set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
1572         return 0;
1573 }
1574
1575 #define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
1576 static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1577 {
1578         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1579         struct kvm_s390_stop_info *stop = &li->irq.stop;
1580         int rc = 0;
1581
1582         vcpu->stat.inject_stop_signal++;
1583         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
1584
1585         if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
1586                 return -EINVAL;
1587
1588         if (is_vcpu_stopped(vcpu)) {
1589                 if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
1590                         rc = kvm_s390_store_status_unloaded(vcpu,
1591                                                 KVM_S390_STORE_STATUS_NOADDR);
1592                 return rc;
1593         }
1594
1595         if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
1596                 return -EBUSY;
1597         stop->flags = irq->u.stop.flags;
1598         kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
1599         return 0;
1600 }
1601
1602 static int __inject_sigp_restart(struct kvm_vcpu *vcpu)
1603 {
1604         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1605
1606         vcpu->stat.inject_restart++;
1607         VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
1608         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
1609
1610         set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
1611         return 0;
1612 }
1613
1614 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
1615                                    struct kvm_s390_irq *irq)
1616 {
1617         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1618
1619         vcpu->stat.inject_emergency_signal++;
1620         VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
1621                    irq->u.emerg.code);
1622         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
1623                                    irq->u.emerg.code, 0);
1624
1625         /* sending vcpu invalid */
1626         if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
1627                 return -EINVAL;
1628
1629         set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1630         set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
1631         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1632         return 0;
1633 }
1634
1635 static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1636 {
1637         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1638         struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
1639
1640         vcpu->stat.inject_mchk++;
1641         VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
1642                    irq->u.mchk.mcic);
1643         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
1644                                    irq->u.mchk.mcic);
1645
1646         /*
1647          * Because repressible machine checks can be indicated along with
1648          * exigent machine checks (PoP, Chapter 11, Interruption action)
1649          * we need to combine cr14, mcic and external damage code.
1650          * Failing storage address and the logout area should not be or'ed
1651          * together; we just indicate the last occurrence of the corresponding
1652          * machine check.
1653          */
1654         mchk->cr14 |= irq->u.mchk.cr14;
1655         mchk->mcic |= irq->u.mchk.mcic;
1656         mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
1657         mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
1658         memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
1659                sizeof(mchk->fixed_logout));
1660         if (mchk->mcic & MCHK_EX_MASK)
1661                 set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
1662         else if (mchk->mcic & MCHK_REP_MASK)
1663                 set_bit(IRQ_PEND_MCHK_REP,  &li->pending_irqs);
1664         return 0;
1665 }
1666
1667 static int __inject_ckc(struct kvm_vcpu *vcpu)
1668 {
1669         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1670
1671         vcpu->stat.inject_ckc++;
1672         VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
1673         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
1674                                    0, 0);
1675
1676         set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1677         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1678         return 0;
1679 }
1680
1681 static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
1682 {
1683         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1684
1685         vcpu->stat.inject_cputm++;
1686         VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
1687         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
1688                                    0, 0);
1689
1690         set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1691         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1692         return 0;
1693 }
1694
1695 static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
1696                                                   int isc, u32 schid)
1697 {
1698         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1699         struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1700         struct kvm_s390_interrupt_info *iter;
1701         u16 id = (schid & 0xffff0000U) >> 16;
1702         u16 nr = schid & 0x0000ffffU;
1703
1704         spin_lock(&fi->lock);
1705         list_for_each_entry(iter, isc_list, list) {
1706                 if (schid && (id != iter->io.subchannel_id ||
1707                               nr != iter->io.subchannel_nr))
1708                         continue;
1709                 /* found an appropriate entry */
1710                 list_del_init(&iter->list);
1711                 fi->counters[FIRQ_CNTR_IO] -= 1;
1712                 if (list_empty(isc_list))
1713                         clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1714                 spin_unlock(&fi->lock);
1715                 return iter;
1716         }
1717         spin_unlock(&fi->lock);
1718         return NULL;
1719 }
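/*
 * Illustrative sketch, not from this file: the schid argument packs the two
 * 16-bit subchannel identifiers into one u32, matching the id/nr split done
 * above (the values below are made up):
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t id = 0x0001, nr = 0x4711;      /* hypothetical subchannel */
        uint32_t schid = ((uint32_t)id << 16) | nr;

        printf("id=%04x nr=%04x\n",
               (schid & 0xffff0000u) >> 16, schid & 0x0000ffffu);
        return 0;
}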
1720
1721 static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
1722                                                       u64 isc_mask, u32 schid)
1723 {
1724         struct kvm_s390_interrupt_info *inti = NULL;
1725         int isc;
1726
1727         for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
1728                 if (isc_mask & isc_to_isc_bits(isc))
1729                         inti = get_io_int(kvm, isc, schid);
1730         }
1731         return inti;
1732 }
1733
1734 static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
1735 {
1736         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1737         unsigned long active_mask;
1738         int isc;
1739
1740         if (schid)
1741                 goto out;
1742         if (!gi->origin)
1743                 goto out;
1744
1745         active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
1746         while (active_mask) {
1747                 isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
1748                 if (gisa_tac_ipm_gisc(gi->origin, isc))
1749                         return isc;
1750                 clear_bit_inv(isc, &active_mask);
1751         }
1752 out:
1753         return -EINVAL;
1754 }
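/*
 * Illustrative sketch, not from this file: GISA bits are numbered in the
 * s390 MSB-0 convention (bit 0 is the most significant), while __fls()
 * counts from the least significant end, so the XOR with BITS_PER_LONG - 1
 * above converts between the two numbering schemes:
 */
#include <stdio.h>

#define MY_BITS_PER_LONG 64

/* stand-in for __fls(): index of the most significant set bit, LSB-0 */
static unsigned long my_fls(unsigned long word)
{
        return (MY_BITS_PER_LONG - 1) - __builtin_clzl(word);
}

int main(void)
{
        /* ISC 2 pending: MSB-0 bit 2 is LSB-0 bit 61 */
        unsigned long active_mask = 1UL << 61;

        printf("isc=%lu\n", my_fls(active_mask) ^ (MY_BITS_PER_LONG - 1));
        /* prints isc=2 */
        return 0;
}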
1755
1756 /*
1757  * Dequeue and return an I/O interrupt matching any of the interruption
1758  * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
1759  * Take into account the interrupts pending in the interrupt list and in GISA.
1760  *
1761  * Note that for a guest that does not enable I/O interrupts
1762  * but relies on TPI, a flood of classic interrupts may starve
1763  * out adapter interrupts on the same isc. Linux does not do
1764  * that, and it is possible to work around the issue by configuring
1765  * different iscs for classic and adapter interrupts in the guest,
1766  * but we may want to revisit this in the future.
1767  */
1768 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
1769                                                     u64 isc_mask, u32 schid)
1770 {
1771         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1772         struct kvm_s390_interrupt_info *inti, *tmp_inti;
1773         int isc;
1774
1775         inti = get_top_io_int(kvm, isc_mask, schid);
1776
1777         isc = get_top_gisa_isc(kvm, isc_mask, schid);
1778         if (isc < 0)
1779                 /* no AI in GISA */
1780                 goto out;
1781
1782         if (!inti)
1783                 /* AI in GISA but no classical IO int */
1784                 goto gisa_out;
1785
1786         /* both types of interrupts present */
1787         if (int_word_to_isc(inti->io.io_int_word) <= isc) {
1788                 /* classical IO int with higher priority */
1789                 gisa_set_ipm_gisc(gi->origin, isc);
1790                 goto out;
1791         }
1792 gisa_out:
1793         tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
1794         if (tmp_inti) {
1795                 tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
1796                 tmp_inti->io.io_int_word = isc_to_int_word(isc);
1797                 if (inti)
1798                         kvm_s390_reinject_io_int(kvm, inti);
1799                 inti = tmp_inti;
1800         } else
1801                 gisa_set_ipm_gisc(gi->origin, isc);
1802 out:
1803         return inti;
1804 }
1805
1806 static int __inject_service(struct kvm *kvm,
1807                              struct kvm_s390_interrupt_info *inti)
1808 {
1809         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1810
1811         kvm->stat.inject_service_signal++;
1812         spin_lock(&fi->lock);
1813         fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
1814
1815         /* We always allow events, track them separately from the sccb ints */
1816         if (fi->srv_signal.ext_params & SCCB_EVENT_PENDING)
1817                 set_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
1818
1819         /*
1820          * Early versions of the QEMU s390 bios will inject several
1821          * service interrupts one after another without handling a
1822          * condition code indicating busy.
1823          * We will silently ignore those superfluous sccb values.
1824          * A future version of QEMU will take care of serialization
1825          * of servc requests.
1826          */
1827         if (fi->srv_signal.ext_params & SCCB_MASK)
1828                 goto out;
1829         fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1830         set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1831 out:
1832         spin_unlock(&fi->lock);
1833         kfree(inti);
1834         return 0;
1835 }
1836
1837 static int __inject_virtio(struct kvm *kvm,
1838                             struct kvm_s390_interrupt_info *inti)
1839 {
1840         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1841
1842         kvm->stat.inject_virtio++;
1843         spin_lock(&fi->lock);
1844         if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1845                 spin_unlock(&fi->lock);
1846                 return -EBUSY;
1847         }
1848         fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1849         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1850         set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1851         spin_unlock(&fi->lock);
1852         return 0;
1853 }
1854
1855 static int __inject_pfault_done(struct kvm *kvm,
1856                                  struct kvm_s390_interrupt_info *inti)
1857 {
1858         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1859
1860         kvm->stat.inject_pfault_done++;
1861         spin_lock(&fi->lock);
1862         if (fi->counters[FIRQ_CNTR_PFAULT] >=
1863                 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1864                 spin_unlock(&fi->lock);
1865                 return -EBUSY;
1866         }
1867         fi->counters[FIRQ_CNTR_PFAULT] += 1;
1868         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1869         set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1870         spin_unlock(&fi->lock);
1871         return 0;
1872 }
1873
1874 #define CR_PENDING_SUBCLASS 28
1875 static int __inject_float_mchk(struct kvm *kvm,
1876                                 struct kvm_s390_interrupt_info *inti)
1877 {
1878         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1879
1880         kvm->stat.inject_float_mchk++;
1881         spin_lock(&fi->lock);
1882         fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1883         fi->mchk.mcic |= inti->mchk.mcic;
1884         set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1885         spin_unlock(&fi->lock);
1886         kfree(inti);
1887         return 0;
1888 }
1889
1890 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1891 {
1892         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1893         struct kvm_s390_float_interrupt *fi;
1894         struct list_head *list;
1895         int isc;
1896
1897         kvm->stat.inject_io++;
1898         isc = int_word_to_isc(inti->io.io_int_word);
1899
1900         /*
1901          * We do not use the lock checking variant as this is just a
1902          * performance optimization and we do not hold the lock here.
1903          * This is ok as the code will pick interrupts from both "lists"
1904          * for delivery.
1905          */
1906         if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
1907                 VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
1908                 gisa_set_ipm_gisc(gi->origin, isc);
1909                 kfree(inti);
1910                 return 0;
1911         }
1912
1913         fi = &kvm->arch.float_int;
1914         spin_lock(&fi->lock);
1915         if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1916                 spin_unlock(&fi->lock);
1917                 return -EBUSY;
1918         }
1919         fi->counters[FIRQ_CNTR_IO] += 1;
1920
1921         if (inti->type & KVM_S390_INT_IO_AI_MASK)
1922                 VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
1923         else
1924                 VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
1925                         inti->io.subchannel_id >> 8,
1926                         inti->io.subchannel_id >> 1 & 0x3,
1927                         inti->io.subchannel_nr);
1928         list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1929         list_add_tail(&inti->list, list);
1930         set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1931         spin_unlock(&fi->lock);
1932         return 0;
1933 }
1934
1935 /*
1936  * Find a destination VCPU for a floating irq and kick it.
1937  */
1938 static void __floating_irq_kick(struct kvm *kvm, u64 type)
1939 {
1940         struct kvm_vcpu *dst_vcpu;
1941         int sigcpu, online_vcpus, nr_tries = 0;
1942
1943         online_vcpus = atomic_read(&kvm->online_vcpus);
1944         if (!online_vcpus)
1945                 return;
1946
1947         /* find idle VCPUs first, then round robin */
1948         sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
1949         if (sigcpu == online_vcpus) {
1950                 do {
1951                         sigcpu = kvm->arch.float_int.next_rr_cpu++;
1952                         kvm->arch.float_int.next_rr_cpu %= online_vcpus;
1953                         /* avoid endless loops if all vcpus are stopped */
1954                         if (nr_tries++ >= online_vcpus)
1955                                 return;
1956                 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1957         }
1958         dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1959
1960         /* make the VCPU drop out of the SIE, or wake it up if sleeping */
1961         switch (type) {
1962         case KVM_S390_MCHK:
1963                 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
1964                 break;
1965         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1966                 if (!(type & KVM_S390_INT_IO_AI_MASK &&
1967                       kvm->arch.gisa_int.origin) ||
1968                       kvm_s390_pv_cpu_get_handle(dst_vcpu))
1969                         kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
1970                 break;
1971         default:
1972                 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
1973                 break;
1974         }
1975         kvm_s390_vcpu_wakeup(dst_vcpu);
1976 }
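/*
 * Illustrative sketch, not from this file: the kick-target selection above
 * prefers an idle VCPU and only then walks a round-robin cursor, skipping
 * stopped VCPUs and giving up after one full pass.  The round-robin part in
 * isolation (VCPU states are made up):
 */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
        bool stopped[4] = { true, false, true, false };
        int next_rr_cpu = 0, online_vcpus = 4, sigcpu, nr_tries = 0;

        do {
                sigcpu = next_rr_cpu++;
                next_rr_cpu %= online_vcpus;
                if (nr_tries++ >= online_vcpus)
                        return 1;               /* all stopped: no kick */
        } while (stopped[sigcpu]);

        printf("kick vcpu %d\n", sigcpu);       /* prints "kick vcpu 1" */
        return 0;
}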
1977
1978 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1979 {
1980         u64 type = READ_ONCE(inti->type);
1981         int rc;
1982
1983         switch (type) {
1984         case KVM_S390_MCHK:
1985                 rc = __inject_float_mchk(kvm, inti);
1986                 break;
1987         case KVM_S390_INT_VIRTIO:
1988                 rc = __inject_virtio(kvm, inti);
1989                 break;
1990         case KVM_S390_INT_SERVICE:
1991                 rc = __inject_service(kvm, inti);
1992                 break;
1993         case KVM_S390_INT_PFAULT_DONE:
1994                 rc = __inject_pfault_done(kvm, inti);
1995                 break;
1996         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1997                 rc = __inject_io(kvm, inti);
1998                 break;
1999         default:
2000                 rc = -EINVAL;
2001         }
2002         if (rc)
2003                 return rc;
2004
2005         __floating_irq_kick(kvm, type);
2006         return 0;
2007 }
2008
2009 int kvm_s390_inject_vm(struct kvm *kvm,
2010                        struct kvm_s390_interrupt *s390int)
2011 {
2012         struct kvm_s390_interrupt_info *inti;
2013         int rc;
2014
2015         inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
2016         if (!inti)
2017                 return -ENOMEM;
2018
2019         inti->type = s390int->type;
2020         switch (inti->type) {
2021         case KVM_S390_INT_VIRTIO:
2022                 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
2023                          s390int->parm, s390int->parm64);
2024                 inti->ext.ext_params = s390int->parm;
2025                 inti->ext.ext_params2 = s390int->parm64;
2026                 break;
2027         case KVM_S390_INT_SERVICE:
2028                 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
2029                 inti->ext.ext_params = s390int->parm;
2030                 break;
2031         case KVM_S390_INT_PFAULT_DONE:
2032                 inti->ext.ext_params2 = s390int->parm64;
2033                 break;
2034         case KVM_S390_MCHK:
2035                 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
2036                          s390int->parm64);
2037                 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
2038                 inti->mchk.mcic = s390int->parm64;
2039                 break;
2040         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2041                 inti->io.subchannel_id = s390int->parm >> 16;
2042                 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
2043                 inti->io.io_int_parm = s390int->parm64 >> 32;
2044                 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
2045                 break;
2046         default:
2047                 kfree(inti);
2048                 return -EINVAL;
2049         }
2050         trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
2051                                  2);
2052
2053         rc = __inject_vm(kvm, inti);
2054         if (rc)
2055                 kfree(inti);
2056         return rc;
2057 }
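/*
 * Illustrative sketch, not from this file: how the parm/parm64 fields of a
 * struct kvm_s390_interrupt map onto a classic I/O interrupt, mirroring the
 * KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX case above (all field values
 * are made up):
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t parm = (0x0001u << 16) | 0x4711u;      /* subchannel id, nr */
        uint64_t parm64 = ((uint64_t)0xcafeu << 32) | 0x80000000u;

        printf("schid %04x.%04x parm %08x word %08x\n",
               parm >> 16, parm & 0x0000ffffu,
               (unsigned int)(parm64 >> 32),
               (unsigned int)(parm64 & 0xffffffffu));
        return 0;
}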
2058
2059 int kvm_s390_reinject_io_int(struct kvm *kvm,
2060                               struct kvm_s390_interrupt_info *inti)
2061 {
2062         return __inject_vm(kvm, inti);
2063 }
2064
2065 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
2066                        struct kvm_s390_irq *irq)
2067 {
2068         irq->type = s390int->type;
2069         switch (irq->type) {
2070         case KVM_S390_PROGRAM_INT:
2071                 if (s390int->parm & 0xffff0000)
2072                         return -EINVAL;
2073                 irq->u.pgm.code = s390int->parm;
2074                 break;
2075         case KVM_S390_SIGP_SET_PREFIX:
2076                 irq->u.prefix.address = s390int->parm;
2077                 break;
2078         case KVM_S390_SIGP_STOP:
2079                 irq->u.stop.flags = s390int->parm;
2080                 break;
2081         case KVM_S390_INT_EXTERNAL_CALL:
2082                 if (s390int->parm & 0xffff0000)
2083                         return -EINVAL;
2084                 irq->u.extcall.code = s390int->parm;
2085                 break;
2086         case KVM_S390_INT_EMERGENCY:
2087                 if (s390int->parm & 0xffff0000)
2088                         return -EINVAL;
2089                 irq->u.emerg.code = s390int->parm;
2090                 break;
2091         case KVM_S390_MCHK:
2092                 irq->u.mchk.mcic = s390int->parm64;
2093                 break;
2094         case KVM_S390_INT_PFAULT_INIT:
2095                 irq->u.ext.ext_params = s390int->parm;
2096                 irq->u.ext.ext_params2 = s390int->parm64;
2097                 break;
2098         case KVM_S390_RESTART:
2099         case KVM_S390_INT_CLOCK_COMP:
2100         case KVM_S390_INT_CPU_TIMER:
2101                 break;
2102         default:
2103                 return -EINVAL;
2104         }
2105         return 0;
2106 }
2107
2108 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
2109 {
2110         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2111
2112         return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
2113 }
2114
2115 int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
2116 {
2117         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2118
2119         return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
2120 }
2121
2122 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
2123 {
2124         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2125
2126         spin_lock(&li->lock);
2127         li->irq.stop.flags = 0;
2128         clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
2129         spin_unlock(&li->lock);
2130 }
2131
2132 static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
2133 {
2134         int rc;
2135
2136         switch (irq->type) {
2137         case KVM_S390_PROGRAM_INT:
2138                 rc = __inject_prog(vcpu, irq);
2139                 break;
2140         case KVM_S390_SIGP_SET_PREFIX:
2141                 rc = __inject_set_prefix(vcpu, irq);
2142                 break;
2143         case KVM_S390_SIGP_STOP:
2144                 rc = __inject_sigp_stop(vcpu, irq);
2145                 break;
2146         case KVM_S390_RESTART:
2147                 rc = __inject_sigp_restart(vcpu);
2148                 break;
2149         case KVM_S390_INT_CLOCK_COMP:
2150                 rc = __inject_ckc(vcpu);
2151                 break;
2152         case KVM_S390_INT_CPU_TIMER:
2153                 rc = __inject_cpu_timer(vcpu);
2154                 break;
2155         case KVM_S390_INT_EXTERNAL_CALL:
2156                 rc = __inject_extcall(vcpu, irq);
2157                 break;
2158         case KVM_S390_INT_EMERGENCY:
2159                 rc = __inject_sigp_emergency(vcpu, irq);
2160                 break;
2161         case KVM_S390_MCHK:
2162                 rc = __inject_mchk(vcpu, irq);
2163                 break;
2164         case KVM_S390_INT_PFAULT_INIT:
2165                 rc = __inject_pfault_init(vcpu, irq);
2166                 break;
2167         case KVM_S390_INT_VIRTIO:
2168         case KVM_S390_INT_SERVICE:
2169         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2170         default:
2171                 rc = -EINVAL;
2172         }
2173
2174         return rc;
2175 }
2176
2177 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
2178 {
2179         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2180         int rc;
2181
2182         spin_lock(&li->lock);
2183         rc = do_inject_vcpu(vcpu, irq);
2184         spin_unlock(&li->lock);
2185         if (!rc)
2186                 kvm_s390_vcpu_wakeup(vcpu);
2187         return rc;
2188 }
2189
2190 static inline void clear_irq_list(struct list_head *_list)
2191 {
2192         struct kvm_s390_interrupt_info *inti, *n;
2193
2194         list_for_each_entry_safe(inti, n, _list, list) {
2195                 list_del(&inti->list);
2196                 kfree(inti);
2197         }
2198 }
2199
2200 static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
2201                        struct kvm_s390_irq *irq)
2202 {
2203         irq->type = inti->type;
2204         switch (inti->type) {
2205         case KVM_S390_INT_PFAULT_INIT:
2206         case KVM_S390_INT_PFAULT_DONE:
2207         case KVM_S390_INT_VIRTIO:
2208                 irq->u.ext = inti->ext;
2209                 break;
2210         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2211                 irq->u.io = inti->io;
2212                 break;
2213         }
2214 }
2215
2216 void kvm_s390_clear_float_irqs(struct kvm *kvm)
2217 {
2218         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2219         int i;
2220
2221         mutex_lock(&kvm->lock);
2222         if (!kvm_s390_pv_is_protected(kvm))
2223                 fi->masked_irqs = 0;
2224         mutex_unlock(&kvm->lock);
2225         spin_lock(&fi->lock);
2226         fi->pending_irqs = 0;
2227         memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
2228         memset(&fi->mchk, 0, sizeof(fi->mchk));
2229         for (i = 0; i < FIRQ_LIST_COUNT; i++)
2230                 clear_irq_list(&fi->lists[i]);
2231         for (i = 0; i < FIRQ_MAX_COUNT; i++)
2232                 fi->counters[i] = 0;
2233         spin_unlock(&fi->lock);
2234         kvm_s390_gisa_clear(kvm);
2235 }
2236
2237 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
2238 {
2239         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
2240         struct kvm_s390_interrupt_info *inti;
2241         struct kvm_s390_float_interrupt *fi;
2242         struct kvm_s390_irq *buf;
2243         struct kvm_s390_irq *irq;
2244         int max_irqs;
2245         int ret = 0;
2246         int n = 0;
2247         int i;
2248
2249         if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
2250                 return -EINVAL;
2251
2252         /*
2253          * We are already using -ENOMEM to signal
2254          * userspace that it may retry with a bigger buffer,
2255          * so we need to use something else for this case.
2256          */
2257         buf = vzalloc(len);
2258         if (!buf)
2259                 return -ENOBUFS;
2260
2261         max_irqs = len / sizeof(struct kvm_s390_irq);
2262
2263         if (gi->origin && gisa_get_ipm(gi->origin)) {
2264                 for (i = 0; i <= MAX_ISC; i++) {
2265                         if (n == max_irqs) {
2266                                 /* signal userspace to try again */
2267                                 ret = -ENOMEM;
2268                                 goto out_nolock;
2269                         }
2270                         if (gisa_tac_ipm_gisc(gi->origin, i)) {
2271                                 irq = (struct kvm_s390_irq *) &buf[n];
2272                                 irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
2273                                 irq->u.io.io_int_word = isc_to_int_word(i);
2274                                 n++;
2275                         }
2276                 }
2277         }
2278         fi = &kvm->arch.float_int;
2279         spin_lock(&fi->lock);
2280         for (i = 0; i < FIRQ_LIST_COUNT; i++) {
2281                 list_for_each_entry(inti, &fi->lists[i], list) {
2282                         if (n == max_irqs) {
2283                                 /* signal userspace to try again */
2284                                 ret = -ENOMEM;
2285                                 goto out;
2286                         }
2287                         inti_to_irq(inti, &buf[n]);
2288                         n++;
2289                 }
2290         }
2291         if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs) ||
2292             test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs)) {
2293                 if (n == max_irqs) {
2294                         /* signal userspace to try again */
2295                         ret = -ENOMEM;
2296                         goto out;
2297                 }
2298                 irq = (struct kvm_s390_irq *) &buf[n];
2299                 irq->type = KVM_S390_INT_SERVICE;
2300                 irq->u.ext = fi->srv_signal;
2301                 n++;
2302         }
2303         if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
2304                 if (n == max_irqs) {
2305                         /* signal userspace to try again */
2306                         ret = -ENOMEM;
2307                         goto out;
2308                 }
2309                 irq = (struct kvm_s390_irq *) &buf[n];
2310                 irq->type = KVM_S390_MCHK;
2311                 irq->u.mchk = fi->mchk;
2312                 n++;
2313         }
2314
2315 out:
2316         spin_unlock(&fi->lock);
2317 out_nolock:
2318         if (!ret && n > 0) {
2319                 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
2320                         ret = -EFAULT;
2321         }
2322         vfree(buf);
2323
2324         return ret < 0 ? ret : n;
2325 }
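/*
 * Illustrative sketch, not from this file: userspace reacts to -ENOMEM by
 * retrying with a bigger buffer, which is why the function above reserves
 * -ENOBUFS for its own allocation failure.  A possible retry loop over a
 * FLIC device fd on an s390 host (hypothetical helper, minimal error
 * handling; *buf must start out NULL and is freed by the caller):
 */
#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_flic_irqs(int flic_fd, struct kvm_s390_irq **buf)
{
        struct kvm_device_attr attr = { .group = KVM_DEV_FLIC_GET_ALL_IRQS };
        __u64 len = 64 * sizeof(struct kvm_s390_irq);
        int r;

        do {
                *buf = realloc(*buf, len);
                if (!*buf)
                        return -1;
                attr.attr = len;
                attr.addr = (__u64)(unsigned long)*buf;
                r = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
                len *= 2;               /* kernel asked for a bigger buffer */
        } while (r < 0 && errno == ENOMEM);

        return r;       /* number of pending irqs, or -1 with errno set */
}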
2326
2327 static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
2328 {
2329         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2330         struct kvm_s390_ais_all ais;
2331
2332         if (attr->attr < sizeof(ais))
2333                 return -EINVAL;
2334
2335         if (!test_kvm_facility(kvm, 72))
2336                 return -EOPNOTSUPP;
2337
2338         mutex_lock(&fi->ais_lock);
2339         ais.simm = fi->simm;
2340         ais.nimm = fi->nimm;
2341         mutex_unlock(&fi->ais_lock);
2342
2343         if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
2344                 return -EFAULT;
2345
2346         return 0;
2347 }
2348
2349 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2350 {
2351         int r;
2352
2353         switch (attr->group) {
2354         case KVM_DEV_FLIC_GET_ALL_IRQS:
2355                 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
2356                                           attr->attr);
2357                 break;
2358         case KVM_DEV_FLIC_AISM_ALL:
2359                 r = flic_ais_mode_get_all(dev->kvm, attr);
2360                 break;
2361         default:
2362                 r = -EINVAL;
2363         }
2364
2365         return r;
2366 }
2367
2368 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
2369                                      u64 addr)
2370 {
2371         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
2372         void *target = NULL;
2373         void __user *source;
2374         u64 size;
2375
2376         if (get_user(inti->type, (u64 __user *)addr))
2377                 return -EFAULT;
2378
2379         switch (inti->type) {
2380         case KVM_S390_INT_PFAULT_INIT:
2381         case KVM_S390_INT_PFAULT_DONE:
2382         case KVM_S390_INT_VIRTIO:
2383         case KVM_S390_INT_SERVICE:
2384                 target = (void *) &inti->ext;
2385                 source = &uptr->u.ext;
2386                 size = sizeof(inti->ext);
2387                 break;
2388         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2389                 target = (void *) &inti->io;
2390                 source = &uptr->u.io;
2391                 size = sizeof(inti->io);
2392                 break;
2393         case KVM_S390_MCHK:
2394                 target = (void *) &inti->mchk;
2395                 source = &uptr->u.mchk;
2396                 size = sizeof(inti->mchk);
2397                 break;
2398         default:
2399                 return -EINVAL;
2400         }
2401
2402         if (copy_from_user(target, source, size))
2403                 return -EFAULT;
2404
2405         return 0;
2406 }
2407
2408 static int enqueue_floating_irq(struct kvm_device *dev,
2409                                 struct kvm_device_attr *attr)
2410 {
2411         struct kvm_s390_interrupt_info *inti = NULL;
2412         int r = 0;
2413         int len = attr->attr;
2414
2415         if (len % sizeof(struct kvm_s390_irq) != 0)
2416                 return -EINVAL;
2417         else if (len > KVM_S390_FLIC_MAX_BUFFER)
2418                 return -EINVAL;
2419
2420         while (len >= sizeof(struct kvm_s390_irq)) {
2421                 inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
2422                 if (!inti)
2423                         return -ENOMEM;
2424
2425                 r = copy_irq_from_user(inti, attr->addr);
2426                 if (r) {
2427                         kfree(inti);
2428                         return r;
2429                 }
2430                 r = __inject_vm(dev->kvm, inti);
2431                 if (r) {
2432                         kfree(inti);
2433                         return r;
2434                 }
2435                 len -= sizeof(struct kvm_s390_irq);
2436                 attr->addr += sizeof(struct kvm_s390_irq);
2437         }
2438
2439         return r;
2440 }
2441
2442 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
2443 {
2444         if (id >= MAX_S390_IO_ADAPTERS)
2445                 return NULL;
2446         id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
2447         return kvm->arch.adapters[id];
2448 }
2449
2450 static int register_io_adapter(struct kvm_device *dev,
2451                                struct kvm_device_attr *attr)
2452 {
2453         struct s390_io_adapter *adapter;
2454         struct kvm_s390_io_adapter adapter_info;
2455
2456         if (copy_from_user(&adapter_info,
2457                            (void __user *)attr->addr, sizeof(adapter_info)))
2458                 return -EFAULT;
2459
2460         if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
2461                 return -EINVAL;
2462
2463         adapter_info.id = array_index_nospec(adapter_info.id,
2464                                              MAX_S390_IO_ADAPTERS);
2465
2466         if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
2467                 return -EINVAL;
2468
2469         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL_ACCOUNT);
2470         if (!adapter)
2471                 return -ENOMEM;
2472
2473         adapter->id = adapter_info.id;
2474         adapter->isc = adapter_info.isc;
2475         adapter->maskable = adapter_info.maskable;
2476         adapter->masked = false;
2477         adapter->swap = adapter_info.swap;
2478         adapter->suppressible = (adapter_info.flags) &
2479                                 KVM_S390_ADAPTER_SUPPRESSIBLE;
2480         dev->kvm->arch.adapters[adapter->id] = adapter;
2481
2482         return 0;
2483 }
2484
2485 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
2486 {
2487         int ret;
2488         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2489
2490         if (!adapter || !adapter->maskable)
2491                 return -EINVAL;
2492         ret = adapter->masked;
2493         adapter->masked = masked;
2494         return ret;
2495 }
2496
2497 void kvm_s390_destroy_adapters(struct kvm *kvm)
2498 {
2499         int i;
2500
2501         for (i = 0; i < MAX_S390_IO_ADAPTERS; i++)
2502                 kfree(kvm->arch.adapters[i]);
2503 }
2504
2505 static int modify_io_adapter(struct kvm_device *dev,
2506                              struct kvm_device_attr *attr)
2507 {
2508         struct kvm_s390_io_adapter_req req;
2509         struct s390_io_adapter *adapter;
2510         int ret;
2511
2512         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2513                 return -EFAULT;
2514
2515         adapter = get_io_adapter(dev->kvm, req.id);
2516         if (!adapter)
2517                 return -EINVAL;
2518         switch (req.type) {
2519         case KVM_S390_IO_ADAPTER_MASK:
2520                 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2521                 if (ret > 0)
2522                         ret = 0;
2523                 break;
2524         /*
2525          * The following operations are no longer needed and therefore no-ops.
2526          * The gpa to hva translation is done when an IRQ route is set up. The
2527          * set_irq code uses get_user_pages_remote() to do the actual write.
2528          */
2529         case KVM_S390_IO_ADAPTER_MAP:
2530         case KVM_S390_IO_ADAPTER_UNMAP:
2531                 ret = 0;
2532                 break;
2533         default:
2534                 ret = -EINVAL;
2535         }
2536
2537         return ret;
2538 }
2539
2540 static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
2542 {
2543         const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2544         u32 schid;
2545
2546         if (attr->flags)
2547                 return -EINVAL;
2548         if (attr->attr != sizeof(schid))
2549                 return -EINVAL;
2550         if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2551                 return -EFAULT;
2552         if (!schid)
2553                 return -EINVAL;
2554         kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2555         /*
2556          * If userspace is conforming to the architecture, we can have at most
2557          * one pending I/O interrupt per subchannel, so this is effectively a
2558          * clear all.
2559          */
2560         return 0;
2561 }
2562
2563 static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
2564 {
2565         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2566         struct kvm_s390_ais_req req;
2567         int ret = 0;
2568
2569         if (!test_kvm_facility(kvm, 72))
2570                 return -EOPNOTSUPP;
2571
2572         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2573                 return -EFAULT;
2574
2575         if (req.isc > MAX_ISC)
2576                 return -EINVAL;
2577
2578         trace_kvm_s390_modify_ais_mode(req.isc,
2579                                        (fi->simm & AIS_MODE_MASK(req.isc)) ?
2580                                        (fi->nimm & AIS_MODE_MASK(req.isc)) ?
2581                                        2 : KVM_S390_AIS_MODE_SINGLE :
2582                                        KVM_S390_AIS_MODE_ALL, req.mode);
2583
2584         mutex_lock(&fi->ais_lock);
2585         switch (req.mode) {
2586         case KVM_S390_AIS_MODE_ALL:
2587                 fi->simm &= ~AIS_MODE_MASK(req.isc);
2588                 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2589                 break;
2590         case KVM_S390_AIS_MODE_SINGLE:
2591                 fi->simm |= AIS_MODE_MASK(req.isc);
2592                 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2593                 break;
2594         default:
2595                 ret = -EINVAL;
2596         }
2597         mutex_unlock(&fi->ais_lock);
2598
2599         return ret;
2600 }
2601
2602 static int kvm_s390_inject_airq(struct kvm *kvm,
2603                                 struct s390_io_adapter *adapter)
2604 {
2605         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2606         struct kvm_s390_interrupt s390int = {
2607                 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2608                 .parm = 0,
2609                 .parm64 = isc_to_int_word(adapter->isc),
2610         };
2611         int ret = 0;
2612
2613         if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
2614                 return kvm_s390_inject_vm(kvm, &s390int);
2615
2616         mutex_lock(&fi->ais_lock);
2617         if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
2618                 trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
2619                 goto out;
2620         }
2621
2622         ret = kvm_s390_inject_vm(kvm, &s390int);
2623         if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
2624                 fi->nimm |= AIS_MODE_MASK(adapter->isc);
2625                 trace_kvm_s390_modify_ais_mode(adapter->isc,
2626                                                KVM_S390_AIS_MODE_SINGLE, 2);
2627         }
2628 out:
2629         mutex_unlock(&fi->ais_lock);
2630         return ret;
2631 }
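/*
 * Illustrative sketch, not from this file: per ISC, the simm/nimm bit pair
 * above implements adapter-interruption suppression.  Assuming
 * AIS_MODE_MASK() selects one bit per ISC, SINGLE mode lets exactly one
 * airq through and then suppresses the rest until the mode is reprogrammed:
 */
#include <stdio.h>

#define MY_AIS_MODE_MASK(isc) (0x80 >> (isc))

int main(void)
{
        unsigned char simm = 0, nimm = 0;
        int isc = 5, injected = 0, i;

        simm |= MY_AIS_MODE_MASK(isc);          /* SINGLE mode for isc 5 */

        for (i = 0; i < 3; i++) {
                if (nimm & MY_AIS_MODE_MASK(isc))
                        continue;               /* suppressed */
                injected++;
                if (simm & MY_AIS_MODE_MASK(isc))
                        nimm |= MY_AIS_MODE_MASK(isc);
        }
        printf("injected %d of 3\n", injected); /* prints "injected 1 of 3" */
        return 0;
}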
2632
2633 static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
2634 {
2635         unsigned int id = attr->attr;
2636         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2637
2638         if (!adapter)
2639                 return -EINVAL;
2640
2641         return kvm_s390_inject_airq(kvm, adapter);
2642 }
2643
2644 static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
2645 {
2646         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2647         struct kvm_s390_ais_all ais;
2648
2649         if (!test_kvm_facility(kvm, 72))
2650                 return -EOPNOTSUPP;
2651
2652         if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
2653                 return -EFAULT;
2654
2655         mutex_lock(&fi->ais_lock);
2656         fi->simm = ais.simm;
2657         fi->nimm = ais.nimm;
2658         mutex_unlock(&fi->ais_lock);
2659
2660         return 0;
2661 }
2662
2663 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2664 {
2665         int r = 0;
2666         unsigned long i;
2667         struct kvm_vcpu *vcpu;
2668
2669         switch (attr->group) {
2670         case KVM_DEV_FLIC_ENQUEUE:
2671                 r = enqueue_floating_irq(dev, attr);
2672                 break;
2673         case KVM_DEV_FLIC_CLEAR_IRQS:
2674                 kvm_s390_clear_float_irqs(dev->kvm);
2675                 break;
2676         case KVM_DEV_FLIC_APF_ENABLE:
2677                 dev->kvm->arch.gmap->pfault_enabled = 1;
2678                 break;
2679         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2680                 dev->kvm->arch.gmap->pfault_enabled = 0;
2681                 /*
2682                  * Make sure no async faults are in transition when
2683                  * clearing the queues, so we don't need to worry
2684                  * about late-coming workers.
2685                  */
2686                 synchronize_srcu(&dev->kvm->srcu);
2687                 kvm_for_each_vcpu(i, vcpu, dev->kvm)
2688                         kvm_clear_async_pf_completion_queue(vcpu);
2689                 break;
2690         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2691                 r = register_io_adapter(dev, attr);
2692                 break;
2693         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2694                 r = modify_io_adapter(dev, attr);
2695                 break;
2696         case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2697                 r = clear_io_irq(dev->kvm, attr);
2698                 break;
2699         case KVM_DEV_FLIC_AISM:
2700                 r = modify_ais_mode(dev->kvm, attr);
2701                 break;
2702         case KVM_DEV_FLIC_AIRQ_INJECT:
2703                 r = flic_inject_airq(dev->kvm, attr);
2704                 break;
2705         case KVM_DEV_FLIC_AISM_ALL:
2706                 r = flic_ais_mode_set_all(dev->kvm, attr);
2707                 break;
2708         default:
2709                 r = -EINVAL;
2710         }
2711
2712         return r;
2713 }
2714
2715 static int flic_has_attr(struct kvm_device *dev,
2716                              struct kvm_device_attr *attr)
2717 {
2718         switch (attr->group) {
2719         case KVM_DEV_FLIC_GET_ALL_IRQS:
2720         case KVM_DEV_FLIC_ENQUEUE:
2721         case KVM_DEV_FLIC_CLEAR_IRQS:
2722         case KVM_DEV_FLIC_APF_ENABLE:
2723         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2724         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2725         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2726         case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2727         case KVM_DEV_FLIC_AISM:
2728         case KVM_DEV_FLIC_AIRQ_INJECT:
2729         case KVM_DEV_FLIC_AISM_ALL:
2730                 return 0;
2731         }
2732         return -ENXIO;
2733 }
2734
2735 static int flic_create(struct kvm_device *dev, u32 type)
2736 {
2737         if (!dev)
2738                 return -EINVAL;
2739         if (dev->kvm->arch.flic)
2740                 return -EINVAL;
2741         dev->kvm->arch.flic = dev;
2742         return 0;
2743 }
2744
2745 static void flic_destroy(struct kvm_device *dev)
2746 {
2747         dev->kvm->arch.flic = NULL;
2748         kfree(dev);
2749 }
2750
2751 /* s390 floating irq controller (flic) */
2752 struct kvm_device_ops kvm_flic_ops = {
2753         .name = "kvm-flic",
2754         .get_attr = flic_get_attr,
2755         .set_attr = flic_set_attr,
2756         .has_attr = flic_has_attr,
2757         .create = flic_create,
2758         .destroy = flic_destroy,
2759 };
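/*
 * Illustrative sketch, not from this file: userspace instantiates this
 * controller once per VM via KVM_CREATE_DEVICE (flic_create() above rejects
 * a second instance).  A minimal helper, assuming vm_fd is an open KVM VM
 * file descriptor:
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_flic(int vm_fd)
{
        struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };

        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
                return -1;
        return cd.fd;   /* use with KVM_[GS]ET_DEVICE_ATTR, as sketched above */
}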
2760
2761 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2762 {
2763         unsigned long bit;
2764
2765         bit = bit_nr + (addr % PAGE_SIZE) * 8;
2766
2767         return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2768 }
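/*
 * Illustrative sketch, not from this file: for a "swapped" adapter the bit
 * number inside the indicator page is mirrored within each 64-bit word by
 * the XOR above.  A worked example with made-up addresses:
 */
#include <stdio.h>

#define MY_BITS_PER_LONG 64
#define MY_PAGE_SIZE 4096UL

static unsigned long my_get_ind_bit(unsigned long addr, unsigned long bit_nr,
                                    int swap)
{
        unsigned long bit = bit_nr + (addr % MY_PAGE_SIZE) * 8;

        return swap ? (bit ^ (MY_BITS_PER_LONG - 1)) : bit;
}

int main(void)
{
        /* indicator byte at page offset 8, bit 0, swapped adapter */
        printf("%lu\n", my_get_ind_bit(0x1008, 0, 1));  /* prints 127 */
        return 0;
}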
2769
2770 static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
2771 {
2772         struct page *page = NULL;
2773
2774         mmap_read_lock(kvm->mm);
2775         get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE,
2776                               &page, NULL, NULL);
2777         mmap_read_unlock(kvm->mm);
2778         return page;
2779 }
2780
2781 static int adapter_indicators_set(struct kvm *kvm,
2782                                   struct s390_io_adapter *adapter,
2783                                   struct kvm_s390_adapter_int *adapter_int)
2784 {
2785         unsigned long bit;
2786         int summary_set, idx;
2787         struct page *ind_page, *summary_page;
2788         void *map;
2789
2790         ind_page = get_map_page(kvm, adapter_int->ind_addr);
2791         if (!ind_page)
2792                 return -1;
2793         summary_page = get_map_page(kvm, adapter_int->summary_addr);
2794         if (!summary_page) {
2795                 put_page(ind_page);
2796                 return -1;
2797         }
2798
2799         idx = srcu_read_lock(&kvm->srcu);
2800         map = page_address(ind_page);
2801         bit = get_ind_bit(adapter_int->ind_addr,
2802                           adapter_int->ind_offset, adapter->swap);
2803         set_bit(bit, map);
2804         mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT);
2805         set_page_dirty_lock(ind_page);
2806         map = page_address(summary_page);
2807         bit = get_ind_bit(adapter_int->summary_addr,
2808                           adapter_int->summary_offset, adapter->swap);
2809         summary_set = test_and_set_bit(bit, map);
2810         mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT);
2811         set_page_dirty_lock(summary_page);
2812         srcu_read_unlock(&kvm->srcu, idx);
2813
2814         put_page(ind_page);
2815         put_page(summary_page);
2816         return summary_set ? 0 : 1;
2817 }
2818
2819 /*
2820  * < 0 - not injected due to error
2821  * = 0 - coalesced, summary indicator already active
2822  * > 0 - injected interrupt
2823  */
2824 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2825                            struct kvm *kvm, int irq_source_id, int level,
2826                            bool line_status)
2827 {
2828         int ret;
2829         struct s390_io_adapter *adapter;
2830
2831         /* We're only interested in the 0->1 transition. */
2832         if (!level)
2833                 return 0;
2834         adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2835         if (!adapter)
2836                 return -1;
2837         ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2838         if ((ret > 0) && !adapter->masked) {
2839                 ret = kvm_s390_inject_airq(kvm, adapter);
2840                 if (ret == 0)
2841                         ret = 1;
2842         }
2843         return ret;
2844 }
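
     /*
      * set_adapter_int() becomes the ->set callback for
      * KVM_IRQ_ROUTING_S390_ADAPTER routes (see kvm_set_routing_entry()
      * below), so it runs whenever e.g. an irqfd bound to such a route is
      * signalled.
      */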
2845
2846 /*
2847  * Inject the machine check to the guest.
2848  */
2849 void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
2850                                      struct mcck_volatile_info *mcck_info)
2851 {
2852         struct kvm_s390_interrupt_info inti;
2853         struct kvm_s390_irq irq;
2854         struct kvm_s390_mchk_info *mchk;
2855         union mci mci;
2856         __u64 cr14 = 0;         /* upper bits are not used */
2857         int rc;
2858
2859         mci.val = mcck_info->mcic;
2860         if (mci.sr)
2861                 cr14 |= CR14_RECOVERY_SUBMASK;
2862         if (mci.dg)
2863                 cr14 |= CR14_DEGRADATION_SUBMASK;
2864         if (mci.w)
2865                 cr14 |= CR14_WARNING_SUBMASK;
2866
2867         mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
2868         mchk->cr14 = cr14;
2869         mchk->mcic = mcck_info->mcic;
2870         mchk->ext_damage_code = mcck_info->ext_damage_code;
2871         mchk->failing_storage_address = mcck_info->failing_storage_address;
2872         if (mci.ck) {
2873                 /* Inject the floating machine check */
2874                 inti.type = KVM_S390_MCHK;
2875                 rc = __inject_vm(vcpu->kvm, &inti);
2876         } else {
2877                 /* Inject the machine check to the specified vcpu */
2878                 irq.type = KVM_S390_MCHK;
2879                 rc = kvm_s390_inject_vcpu(vcpu, &irq);
2880         }
2881         WARN_ON_ONCE(rc);
2882 }
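
     /*
      * Note: when mci.ck is set the machine check is replayed as a
      * floating interrupt for the whole VM, presumably because channel
      * subsystem related conditions are not bound to a single CPU; all
      * other machine checks go back to the vcpu that took them.
      */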
2883
2884 int kvm_set_routing_entry(struct kvm *kvm,
2885                           struct kvm_kernel_irq_routing_entry *e,
2886                           const struct kvm_irq_routing_entry *ue)
2887 {
2888         u64 uaddr;
2889
2890         switch (ue->type) {
2891         /* we store the userspace addresses instead of the guest addresses */
2892         case KVM_IRQ_ROUTING_S390_ADAPTER:
2893                 e->set = set_adapter_int;
2894                 uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
2895                 if (uaddr == -EFAULT)
2896                         return -EFAULT;
2897                 e->adapter.summary_addr = uaddr;
2898                 uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr);
2899                 if (uaddr == -EFAULT)
2900                         return -EFAULT;
2901                 e->adapter.ind_addr = uaddr;
2902                 e->adapter.summary_offset = ue->u.adapter.summary_offset;
2903                 e->adapter.ind_offset = ue->u.adapter.ind_offset;
2904                 e->adapter.adapter_id = ue->u.adapter.adapter_id;
2905                 return 0;
2906         default:
2907                 return -EINVAL;
2908         }
2909 }
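
     /*
      * Hypothetical userspace sketch (not part of this file): an adapter
      * route is installed with KVM_SET_GSI_ROUTING and carries guest
      * addresses, which the code above converts to userspace addresses
      * via gmap_translate():
      *
      *	struct kvm_irq_routing_entry e = {
      *		.gsi = gsi,
      *		.type = KVM_IRQ_ROUTING_S390_ADAPTER,
      *		.u.adapter = {
      *			.ind_addr = guest_ind_addr,
      *			.summary_addr = guest_summary_addr,
      *			.ind_offset = ind_bit,
      *			.summary_offset = summary_bit,
      *			.adapter_id = id,
      *		},
      *	};
      */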
2910
2911 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2912                 int irq_source_id, int level, bool line_status)
2913 {
2914         return -EINVAL;
2915 }
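
     /*
      * MSI routes are rejected here: s390 delivers PCI device interrupts
      * as adapter I/O interrupts, so only the adapter routing type above
      * is meaningful.
      */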
2916
2917 int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2918 {
2919         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2920         struct kvm_s390_irq *buf;
2921         int r = 0;
2922         int n;
2923
2924         buf = vmalloc(len);
2925         if (!buf)
2926                 return -ENOMEM;
2927
2928         if (copy_from_user((void *) buf, irqstate, len)) {
2929                 r = -EFAULT;
2930                 goto out_free;
2931         }
2932
2933         /*
2934          * Don't allow setting the interrupt state
2935          * when there are already interrupts pending
2936          */
2937         spin_lock(&li->lock);
2938         if (li->pending_irqs) {
2939                 r = -EBUSY;
2940                 goto out_unlock;
2941         }
2942
2943         for (n = 0; n < len / sizeof(*buf); n++) {
2944                 r = do_inject_vcpu(vcpu, &buf[n]);
2945                 if (r)
2946                         break;
2947         }
2948
2949 out_unlock:
2950         spin_unlock(&li->lock);
2951 out_free:
2952         vfree(buf);
2953
2954         return r;
2955 }
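
     /*
      * This backs the KVM_S390_SET_IRQ_STATE vcpu ioctl: irqstate points
      * to an array of struct kvm_s390_irq, typically produced by
      * kvm_s390_get_irq_state() below, and is only accepted while no
      * local interrupts are pending (-EBUSY otherwise).
      */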
2956
2957 static void store_local_irq(struct kvm_s390_local_interrupt *li,
2958                             struct kvm_s390_irq *irq,
2959                             unsigned long irq_type)
2960 {
2961         switch (irq_type) {
2962         case IRQ_PEND_MCHK_EX:
2963         case IRQ_PEND_MCHK_REP:
2964                 irq->type = KVM_S390_MCHK;
2965                 irq->u.mchk = li->irq.mchk;
2966                 break;
2967         case IRQ_PEND_PROG:
2968                 irq->type = KVM_S390_PROGRAM_INT;
2969                 irq->u.pgm = li->irq.pgm;
2970                 break;
2971         case IRQ_PEND_PFAULT_INIT:
2972                 irq->type = KVM_S390_INT_PFAULT_INIT;
2973                 irq->u.ext = li->irq.ext;
2974                 break;
2975         case IRQ_PEND_EXT_EXTERNAL:
2976                 irq->type = KVM_S390_INT_EXTERNAL_CALL;
2977                 irq->u.extcall = li->irq.extcall;
2978                 break;
2979         case IRQ_PEND_EXT_CLOCK_COMP:
2980                 irq->type = KVM_S390_INT_CLOCK_COMP;
2981                 break;
2982         case IRQ_PEND_EXT_CPU_TIMER:
2983                 irq->type = KVM_S390_INT_CPU_TIMER;
2984                 break;
2985         case IRQ_PEND_SIGP_STOP:
2986                 irq->type = KVM_S390_SIGP_STOP;
2987                 irq->u.stop = li->irq.stop;
2988                 break;
2989         case IRQ_PEND_RESTART:
2990                 irq->type = KVM_S390_RESTART;
2991                 break;
2992         case IRQ_PEND_SET_PREFIX:
2993                 irq->type = KVM_S390_SIGP_SET_PREFIX;
2994                 irq->u.prefix = li->irq.prefix;
2995                 break;
2996         }
2997 }
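
     /*
      * Translates one internal IRQ_PEND_* bit back into the uapi
      * struct kvm_s390_irq layout.  IRQ_PEND_EXT_EMERGENCY is absent on
      * purpose: emergency signals are tracked per source cpu and are
      * expanded from the sigp_emerg_pending bitmap by the caller below.
      */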
2998
2999 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
3000 {
3001         int scn;
3002         DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
3003         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
3004         unsigned long pending_irqs;
3005         struct kvm_s390_irq irq;
3006         unsigned long irq_type;
3007         int cpuaddr;
3008         int n = 0;
3009
3010         spin_lock(&li->lock);
3011         pending_irqs = li->pending_irqs;
3012         memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
3013                sizeof(sigp_emerg_pending));
3014         spin_unlock(&li->lock);
3015
3016         for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
3017                 memset(&irq, 0, sizeof(irq));
3018                 if (irq_type == IRQ_PEND_EXT_EMERGENCY)
3019                         continue;
3020                 if (n + sizeof(irq) > len)
3021                         return -ENOBUFS;
3022                 store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
3023                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
3024                         return -EFAULT;
3025                 n += sizeof(irq);
3026         }
3027
3028         if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
3029                 for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
3030                         memset(&irq, 0, sizeof(irq));
3031                         if (n + sizeof(irq) > len)
3032                                 return -ENOBUFS;
3033                         irq.type = KVM_S390_INT_EMERGENCY;
3034                         irq.u.emerg.code = cpuaddr;
3035                         if (copy_to_user(&buf[n], &irq, sizeof(irq)))
3036                                 return -EFAULT;
3037                         n += sizeof(irq);
3038                 }
3039         }
3040
3041         if (sca_ext_call_pending(vcpu, &scn)) {
3042                 if (n + sizeof(irq) > len)
3043                         return -ENOBUFS;
3044                 memset(&irq, 0, sizeof(irq));
3045                 irq.type = KVM_S390_INT_EXTERNAL_CALL;
3046                 irq.u.extcall.code = scn;
3047                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
3048                         return -EFAULT;
3049                 n += sizeof(irq);
3050         }
3051
3052         return n;
3053 }
3054
3055 static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
3056 {
3057         int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
3058         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3059         struct kvm_vcpu *vcpu;
3060         u8 vcpu_isc_mask;
3061
3062         for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
3063                 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
3064                 if (psw_ioint_disabled(vcpu))
3065                         continue;
3066                 vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
3067                 if (deliverable_mask & vcpu_isc_mask) {
3068                         /* recently kicked but not yet running */
3069                         if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
3070                                 return;
3071                         kvm_s390_vcpu_wakeup(vcpu);
3072                         return;
3073                 }
3074         }
3075 }
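
     /*
      * Bits 32-39 of CR6 hold the vcpu's I/O interruption subclass mask,
      * extracted above with (u8)(gcr[6] >> 24), one bit per ISC with
      * ISC 0 as the most significant bit.  gi->kicked_mask ensures a vcpu
      * that was already kicked but has not run yet is not kicked twice.
      */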
3076
3077 static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
3078 {
3079         struct kvm_s390_gisa_interrupt *gi =
3080                 container_of(timer, struct kvm_s390_gisa_interrupt, timer);
3081         struct kvm *kvm =
3082                 container_of(gi->origin, struct sie_page2, gisa)->kvm;
3083         u8 pending_mask;
3084
3085         pending_mask = gisa_get_ipm_or_restore_iam(gi);
3086         if (pending_mask) {
3087                 __airqs_kick_single_vcpu(kvm, pending_mask);
3088                 hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
3089                 return HRTIMER_RESTART;
3090         }
3091
3092         return HRTIMER_NORESTART;
3093 }
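
     /*
      * As long as pending ISC bits remain in the IPM the timer re-arms
      * itself every gi->expires nanoseconds (50 usec, see
      * kvm_s390_gisa_init() below) and keeps trying to kick a suitable
      * idle vcpu; once the IPM is clean, the IAM is restored and the
      * timer stops.
      */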
3094
3095 #define NULL_GISA_ADDR 0x00000000UL
3096 #define NONE_GISA_ADDR 0x00000001UL
3097 #define GISA_ADDR_MASK 0xfffff000UL
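
     /*
      * The alert list origin doubles as a state word: GISAs are 4k
      * aligned (GISA_ADDR_MASK), so the low bit can encode "alerts
      * suppressed" (NONE_GISA_ADDR) while an all-zero origin
      * (NULL_GISA_ADDR) means GAL interruptions are enabled again.
      */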
3098
3099 static void process_gib_alert_list(void)
3100 {
3101         struct kvm_s390_gisa_interrupt *gi;
3102         struct kvm_s390_gisa *gisa;
3103         struct kvm *kvm;
3104         u32 final, origin = 0UL;
3105
3106         do {
3107                 /*
3108                  * If the NONE_GISA_ADDR is still stored in the alert list
3109                  * origin, we will leave the outer loop. No further GISA has
3110                  * been added to the alert list by millicode while processing
3111                  * the current alert list.
3112                  */
3113                 final = (origin & NONE_GISA_ADDR);
3114                 /*
3115                  * Cut off the alert list and store the NONE_GISA_ADDR in the
3116                  * alert list origin to avoid further GAL interruptions.
3117                  * A new alert list can be built up by millicode in parallel
3118                  * for guests not in the just cut-off alert list. When in the
3119                  * final loop, store the NULL_GISA_ADDR instead. This
3120                  * re-enables GAL interruptions on the host.
3121                  */
3122                 origin = xchg(&gib->alert_list_origin,
3123                               (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR);
3124                 /*
3125                  * Loop through the just cut-off alert list and start the
3126                  * gisa timers to kick idle vcpus to consume the pending
3127                  * interruptions asap.
3128                  */
3129                 while (origin & GISA_ADDR_MASK) {
3130                         gisa = (struct kvm_s390_gisa *)(u64)origin;
3131                         origin = gisa->next_alert;
3132                         gisa->next_alert = (u32)(u64)gisa;
3133                         kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
3134                         gi = &kvm->arch.gisa_int;
3135                         if (hrtimer_active(&gi->timer))
3136                                 hrtimer_cancel(&gi->timer);
3137                         hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
3138                 }
3139         } while (!final);
3140
3141 }
3142
3143 void kvm_s390_gisa_clear(struct kvm *kvm)
3144 {
3145         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3146
3147         if (!gi->origin)
3148                 return;
3149         gisa_clear_ipm(gi->origin);
3150         VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
3151 }
3152
3153 void kvm_s390_gisa_init(struct kvm *kvm)
3154 {
3155         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3156
3157         if (!css_general_characteristics.aiv)
3158                 return;
3159         gi->origin = &kvm->arch.sie_page2->gisa;
3160         gi->alert.mask = 0;
3161         spin_lock_init(&gi->alert.ref_lock);
3162         gi->expires = 50 * 1000; /* 50 usec */
3163         hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3164         gi->timer.function = gisa_vcpu_kicker;
3165         memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
3166         gi->origin->next_alert = (u32)(u64)gi->origin;
3167         VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
3168 }
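
     /*
      * A GISA whose next_alert field references itself (as set up above)
      * is not queued on the GIB alert list; process_gib_alert_list()
      * restores this self-reference when unlinking a GISA, and
      * gisa_in_alert_list() presumably tests the same invariant.
      */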
3169
3170 void kvm_s390_gisa_enable(struct kvm *kvm)
3171 {
3172         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3173         struct kvm_vcpu *vcpu;
3174         unsigned long i;
3175         u32 gisa_desc;
3176
3177         if (gi->origin)
3178                 return;
3179         kvm_s390_gisa_init(kvm);
3180         gisa_desc = kvm_s390_get_gisa_desc(kvm);
3181         if (!gisa_desc)
3182                 return;
3183         kvm_for_each_vcpu(i, vcpu, kvm) {
3184                 mutex_lock(&vcpu->mutex);
3185                 vcpu->arch.sie_block->gd = gisa_desc;
3186                 vcpu->arch.sie_block->eca |= ECA_AIV;
3187                 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3188                            vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3189                 mutex_unlock(&vcpu->mutex);
3190         }
3191 }
3192
3193 void kvm_s390_gisa_destroy(struct kvm *kvm)
3194 {
3195         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3196         struct kvm_s390_gisa *gisa = gi->origin;
3197
3198         if (!gi->origin)
3199                 return;
3200         if (gi->alert.mask)
3201                 KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
3202                           kvm, gi->alert.mask);
3203         while (gisa_in_alert_list(gi->origin))
3204                 cpu_relax();
3205         hrtimer_cancel(&gi->timer);
3206         gi->origin = NULL;
3207         VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
3208 }
3209
3210 void kvm_s390_gisa_disable(struct kvm *kvm)
3211 {
3212         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3213         struct kvm_vcpu *vcpu;
3214         unsigned long i;
3215
3216         if (!gi->origin)
3217                 return;
3218         kvm_for_each_vcpu(i, vcpu, kvm) {
3219                 mutex_lock(&vcpu->mutex);
3220                 vcpu->arch.sie_block->eca &= ~ECA_AIV;
3221                 vcpu->arch.sie_block->gd = 0U;
3222                 mutex_unlock(&vcpu->mutex);
3223                 VCPU_EVENT(vcpu, 3, "AIV disabled for cpu %03u", vcpu->vcpu_id);
3224         }
3225         kvm_s390_gisa_destroy(kvm);
3226 }
3227
3228 /**
3229  * kvm_s390_gisc_register - register a guest ISC
3230  *
3231  * @kvm:  the kernel vm to work with
3232  * @gisc: the guest interruption sub class to register
3233  *
3234  * The function extends the vm specific alert mask in use.
3235  * The effective IAM mask in the GISA is updated as well
3236  * in case the GISA is not part of the GIB alert list.
3237  * It will be updated at the latest when the IAM gets restored
3238  * by gisa_get_ipm_or_restore_iam().
3239  *
3240  * Returns: the nonspecific ISC (NISC) the gib alert mechanism
3241  *          has registered with the channel subsystem.
3242  *          -ENODEV in case the vm uses no GISA
3243  *          -ERANGE in case the guest ISC is invalid
3244  */
3245 int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
3246 {
3247         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3248
3249         if (!gi->origin)
3250                 return -ENODEV;
3251         if (gisc > MAX_ISC)
3252                 return -ERANGE;
3253
3254         spin_lock(&gi->alert.ref_lock);
3255         gi->alert.ref_count[gisc]++;
3256         if (gi->alert.ref_count[gisc] == 1) {
3257                 gi->alert.mask |= 0x80 >> gisc;
3258                 gisa_set_iam(gi->origin, gi->alert.mask);
3259         }
3260         spin_unlock(&gi->alert.ref_lock);
3261
3262         return gib->nisc;
3263 }
3264 EXPORT_SYMBOL_GPL(kvm_s390_gisc_register);
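
     /*
      * Hypothetical caller sketch (not from this file): a host side
      * driver forwarding adapter events for guest ISC 3 could do
      *
      *	int nisc = kvm_s390_gisc_register(kvm, 3);
      *
      *	if (nisc < 0)
      *		return nisc;	// no GISA, or invalid ISC
      *
      * then register its host interrupt on "nisc" with the channel
      * subsystem, and mirror the call with
      * kvm_s390_gisc_unregister(kvm, 3) on teardown.
      */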
3265
3266 /**
3267  * kvm_s390_gisc_unregister - unregister a guest ISC
3268  *
3269  * @kvm:  the kernel vm to work with
3270  * @gisc: the guest interruption sub class to unregister
3271  *
3272  * The function reduces the vm specific alert mask in use.
3273  * The effective IAM mask in the GISA is updated as well
3274  * in case the GISA is not part of the GIB alert list.
3275  * It will be updated at the latest when the IAM gets restored
3276  * by gisa_get_ipm_or_restore_iam().
3277  *
3278  * Returns: 0 in case the guest ISC was successfully unregistered,
3279  *          i.e. its alert mask reference count was decremented
3280  *          -ENODEV in case the vm uses no GISA
3281  *          -ERANGE in case the guest ISC is invalid
3282  *          -EINVAL in case the guest ISC is not registered
3283  */
3284 int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
3285 {
3286         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3287         int rc = 0;
3288
3289         if (!gi->origin)
3290                 return -ENODEV;
3291         if (gisc > MAX_ISC)
3292                 return -ERANGE;
3293
3294         spin_lock(&gi->alert.ref_lock);
3295         if (gi->alert.ref_count[gisc] == 0) {
3296                 rc = -EINVAL;
3297                 goto out;
3298         }
3299         gi->alert.ref_count[gisc]--;
3300         if (gi->alert.ref_count[gisc] == 0) {
3301                 gi->alert.mask &= ~(0x80 >> gisc);
3302                 gisa_set_iam(gi->origin, gi->alert.mask);
3303         }
3304 out:
3305         spin_unlock(&gi->alert.ref_lock);
3306
3307         return rc;
3308 }
3309 EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);
3310
3311 static void aen_host_forward(unsigned long si)
3312 {
3313         struct kvm_s390_gisa_interrupt *gi;
3314         struct zpci_gaite *gaite;
3315         struct kvm *kvm;
3316
3317         gaite = (struct zpci_gaite *)aift->gait +
3318                 (si * sizeof(struct zpci_gaite));
3319         if (gaite->count == 0)
3320                 return;
3321         if (gaite->aisb != 0)
3322                 set_bit_inv(gaite->aisbo, phys_to_virt(gaite->aisb));
3323
3324         kvm = kvm_s390_pci_si_to_kvm(aift, si);
3325         if (!kvm)
3326                 return;
3327         gi = &kvm->arch.gisa_int;
3328
3329         if (!(gi->origin->g1.simm & AIS_MODE_MASK(gaite->gisc)) ||
3330             !(gi->origin->g1.nimm & AIS_MODE_MASK(gaite->gisc))) {
3331                 gisa_set_ipm_gisc(gi->origin, gaite->gisc);
3332                 if (hrtimer_active(&gi->timer))
3333                         hrtimer_cancel(&gi->timer);
3334                 hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
3335                 kvm->stat.aen_forward++;
3336         }
3337 }
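
     /*
      * The simm/nimm check above skips the injection only when the guest
      * has put the ISC into the AIS "no interruptions" mode (both mask
      * bits set); in every other mode the IPM bit is set and the vcpu
      * kick timer is (re)started right away.
      */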
3338
3339 static void aen_process_gait(u8 isc)
3340 {
3341         bool found = false, first = true;
3342         union zpci_sic_iib iib = {{0}};
3343         unsigned long si, flags;
3344
3345         spin_lock_irqsave(&aift->gait_lock, flags);
3346
3347         if (!aift->gait) {
3348                 spin_unlock_irqrestore(&aift->gait_lock, flags);
3349                 return;
3350         }
3351
3352         for (si = 0;;) {
3353                 /* Scan adapter summary indicator bit vector */
3354                 si = airq_iv_scan(aift->sbv, si, airq_iv_end(aift->sbv));
3355                 if (si == -1UL) {
3356                         if (first || found) {
3357                                 /* Re-enable interrupts. */
3358                                 zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, isc,
3359                                                   &iib);
3360                                 first = found = false;
3361                         } else {
3362                                 /* Interrupts on and all bits processed */
3363                                 break;
3364                         }
3365                         found = false;
3366                         si = 0;
3367                         /* Scan again after re-enabling interrupts */
3368                         continue;
3369                 }
3370                 found = true;
3371                 aen_host_forward(si);
3372         }
3373
3374         spin_unlock_irqrestore(&aift->gait_lock, flags);
3375 }
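
     /*
      * The rescan after re-enabling interruptions closes a race: a
      * summary bit set between the last airq_iv_scan() and
      * zpci_set_irq_ctrl() would otherwise be neither scanned nor
      * signalled.  The loop only ends once a full pass over the summary
      * bit vector finds nothing new while interruptions are enabled.
      */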
3376
3377 static void gib_alert_irq_handler(struct airq_struct *airq,
3378                                   struct tpi_info *tpi_info)
3379 {
3380         struct tpi_adapter_info *info = (struct tpi_adapter_info *)tpi_info;
3381
3382         inc_irq_stat(IRQIO_GAL);
3383
3384         if ((info->forward || info->error) &&
3385             IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
3386                 aen_process_gait(info->isc);
3387                 if (info->aism != 0)
3388                         process_gib_alert_list();
3389         } else {
3390                 process_gib_alert_list();
3391         }
3392 }
3393
3394 static struct airq_struct gib_alert_irq = {
3395         .handler = gib_alert_irq_handler,
3396         .lsi_ptr = &gib_alert_irq.lsi_mask,
3397 };
3398
3399 void kvm_s390_gib_destroy(void)
3400 {
3401         if (!gib)
3402                 return;
3403         if (kvm_s390_pci_interp_allowed() && aift) {
3404                 mutex_lock(&aift->aift_lock);
3405                 kvm_s390_pci_aen_exit();
3406                 mutex_unlock(&aift->aift_lock);
3407         }
3408         chsc_sgib(0);
3409         unregister_adapter_interrupt(&gib_alert_irq);
3410         free_page((unsigned long)gib);
3411         gib = NULL;
3412 }
3413
3414 int kvm_s390_gib_init(u8 nisc)
3415 {
3416         int rc = 0;
3417
3418         if (!css_general_characteristics.aiv) {
3419                 KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
3420                 goto out;
3421         }
3422
3423         gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3424         if (!gib) {
3425                 rc = -ENOMEM;
3426                 goto out;
3427         }
3428
3429         gib_alert_irq.isc = nisc;
3430         if (register_adapter_interrupt(&gib_alert_irq)) {
3431                 pr_err("Registering the GIB alert interruption handler failed\n");
3432                 rc = -EIO;
3433                 goto out_free_gib;
3434         }
3435
3436         gib->nisc = nisc;
3437         if (chsc_sgib((u32)(u64)gib)) {
3438                 pr_err("Associating the GIB with the AIV facility failed\n");
3439                 free_page((unsigned long)gib);
3440                 gib = NULL;
3441                 rc = -EIO;
3442                 goto out_unreg_gal;
3443         }
3444
3445         if (kvm_s390_pci_interp_allowed()) {
3446                 if (kvm_s390_pci_aen_init(nisc)) {
3447                         pr_err("Initializing AEN for PCI failed\n");
3448                         rc = -EIO;
3449                         goto out_unreg_gal;
3450                 }
3451         }
3452
3453         KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
3454         goto out;
3455
3456 out_unreg_gal:
3457         unregister_adapter_interrupt(&gib_alert_irq);
3458 out_free_gib:
3459         free_page((unsigned long)gib);
3460         gib = NULL;
3461 out:
3462         return rc;
3463 }