]> Git Repo - qemu.git/blame - target-s390x/kvm.c
configure: add Linux libnuma detection
[qemu.git] / target-s390x / kvm.c
CommitLineData
0e60a699
AG
1/*
2 * QEMU S390x KVM implementation
3 *
4 * Copyright (c) 2009 Alexander Graf <[email protected]>
ccb084d3 5 * Copyright IBM Corp. 2012
0e60a699
AG
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
ccb084d3
CB
17 * Contributions after 2012-10-29 are licensed under the terms of the
18 * GNU GPL, version 2 or (at your option) any later version.
19 *
20 * You should have received a copy of the GNU (Lesser) General Public
0e60a699
AG
21 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 */
23
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <inttypes.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "hw/hw.h"
#include "cpu.h"
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"
#include "exec/gdbstub.h"
#include "trace.h"
0e60a699
AG
42
43/* #define DEBUG_KVM */
44
45#ifdef DEBUG_KVM
e67137c6 46#define DPRINTF(fmt, ...) \
0e60a699
AG
47 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
48#else
e67137c6 49#define DPRINTF(fmt, ...) \
0e60a699
AG
50 do { } while (0)
51#endif
52
53#define IPA0_DIAG 0x8300
54#define IPA0_SIGP 0xae00
09b99878
CH
55#define IPA0_B2 0xb200
56#define IPA0_B9 0xb900
57#define IPA0_EB 0xeb00
0e60a699 58
1eecf41b
FB
59#define PRIV_B2_SCLP_CALL 0x20
60#define PRIV_B2_CSCH 0x30
61#define PRIV_B2_HSCH 0x31
62#define PRIV_B2_MSCH 0x32
63#define PRIV_B2_SSCH 0x33
64#define PRIV_B2_STSCH 0x34
65#define PRIV_B2_TSCH 0x35
66#define PRIV_B2_TPI 0x36
67#define PRIV_B2_SAL 0x37
68#define PRIV_B2_RSCH 0x38
69#define PRIV_B2_STCRW 0x39
70#define PRIV_B2_STCPS 0x3a
71#define PRIV_B2_RCHP 0x3b
72#define PRIV_B2_SCHM 0x3c
73#define PRIV_B2_CHSC 0x5f
74#define PRIV_B2_SIGA 0x74
75#define PRIV_B2_XSCH 0x76
76
77#define PRIV_EB_SQBS 0x8a
78
79#define PRIV_B9_EQBS 0x9c
80
268846ba 81#define DIAG_IPL 0x308
0e60a699
AG
82#define DIAG_KVM_HYPERCALL 0x500
83#define DIAG_KVM_BREAKPOINT 0x501
84
0e60a699 85#define ICPT_INSTRUCTION 0x04
6449a41a 86#define ICPT_PROGRAM 0x08
a2689242 87#define ICPT_EXT_INT 0x14
0e60a699
AG
88#define ICPT_WAITPSW 0x1c
89#define ICPT_SOFT_INTERCEPT 0x24
90#define ICPT_CPU_STOP 0x28
91#define ICPT_IO 0x40
92
770a6379
DH
93static CPUWatchpoint hw_watchpoint;
94/*
95 * We don't use a list because this structure is also used to transmit the
96 * hardware breakpoints to the kernel.
97 */
98static struct kvm_hw_breakpoint *hw_breakpoints;
99static int nb_hw_breakpoints;
100
94a8d39a
JK
101const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
102 KVM_CAP_LAST_INFO
103};
104
5b08b344 105static int cap_sync_regs;
819bd309 106static int cap_async_pf;
5b08b344 107
575ddeb4 108static void *legacy_s390_alloc(size_t size);
91138037 109
4cb88c3c
DD
110static int kvm_s390_check_clear_cmma(KVMState *s)
111{
112 struct kvm_device_attr attr = {
113 .group = KVM_S390_VM_MEM_CTRL,
114 .attr = KVM_S390_VM_MEM_CLR_CMMA,
115 };
116
117 return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr);
118}
119
120static int kvm_s390_check_enable_cmma(KVMState *s)
121{
122 struct kvm_device_attr attr = {
123 .group = KVM_S390_VM_MEM_CTRL,
124 .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
125 };
126
127 return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr);
128}
129
130void kvm_s390_clear_cmma_callback(void *opaque)
131{
132 int rc;
133 KVMState *s = opaque;
134 struct kvm_device_attr attr = {
135 .group = KVM_S390_VM_MEM_CTRL,
136 .attr = KVM_S390_VM_MEM_CLR_CMMA,
137 };
138
139 rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
140 trace_kvm_clear_cmma(rc);
141}
142
143static void kvm_s390_enable_cmma(KVMState *s)
144{
145 int rc;
146 struct kvm_device_attr attr = {
147 .group = KVM_S390_VM_MEM_CTRL,
148 .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
149 };
150
151 if (kvm_s390_check_enable_cmma(s) || kvm_s390_check_clear_cmma(s)) {
152 return;
153 }
154
155 rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
156 if (!rc) {
157 qemu_register_reset(kvm_s390_clear_cmma_callback, s);
158 }
159 trace_kvm_enable_cmma(rc);
160}
161
cad1e282 162int kvm_arch_init(KVMState *s)
0e60a699 163{
5b08b344 164 cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
819bd309 165 cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
4cb88c3c
DD
166
167 if (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES)) {
168 kvm_s390_enable_cmma(s);
169 }
170
91138037
MA
171 if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
172 || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
173 phys_mem_set_alloc(legacy_s390_alloc);
174 }
0e60a699
AG
175 return 0;
176}
177
b164e48e
EH
178unsigned long kvm_arch_vcpu_id(CPUState *cpu)
179{
180 return cpu->cpu_index;
181}
182
20d695a9 183int kvm_arch_init_vcpu(CPUState *cpu)
0e60a699 184{
1c9d2a1d
CB
185 /* nothing todo yet */
186 return 0;
0e60a699
AG
187}
188
50a2c6e5 189void kvm_s390_reset_vcpu(S390CPU *cpu)
0e60a699 190{
50a2c6e5
PB
191 CPUState *cs = CPU(cpu);
192
419831d7
AG
193 /* The initial reset call is needed here to reset in-kernel
194 * vcpu data that we can't access directly from QEMU
195 * (i.e. with older kernels which don't support sync_regs/ONE_REG).
196 * Before this ioctl cpu_synchronize_state() is called in common kvm
197 * code (kvm-all) */
50a2c6e5 198 if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
70bada03
JF
199 perror("Can't reset vcpu\n");
200 }
0e60a699
AG
201}
202
20d695a9 203int kvm_arch_put_registers(CPUState *cs, int level)
0e60a699 204{
20d695a9
AF
205 S390CPU *cpu = S390_CPU(cs);
206 CPUS390XState *env = &cpu->env;
5b08b344 207 struct kvm_sregs sregs;
0e60a699 208 struct kvm_regs regs;
860643bc 209 int r;
0e60a699
AG
210 int i;
211
5b08b344 212 /* always save the PSW and the GPRS*/
f7575c96
AF
213 cs->kvm_run->psw_addr = env->psw.addr;
214 cs->kvm_run->psw_mask = env->psw.mask;
0e60a699 215
f7575c96 216 if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
5b08b344 217 for (i = 0; i < 16; i++) {
f7575c96
AF
218 cs->kvm_run->s.regs.gprs[i] = env->regs[i];
219 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
5b08b344
CB
220 }
221 } else {
222 for (i = 0; i < 16; i++) {
223 regs.gprs[i] = env->regs[i];
224 }
860643bc
CB
225 r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
226 if (r < 0) {
227 return r;
5b08b344 228 }
0e60a699
AG
229 }
230
44c68de0
DD
231 /* Do we need to save more than that? */
232 if (level == KVM_PUT_RUNTIME_STATE) {
233 return 0;
234 }
420840e5 235
860643bc
CB
236 /*
237 * These ONE_REGS are not protected by a capability. As they are only
238 * necessary for migration we just trace a possible error, but don't
239 * return with an error return code.
240 */
241 kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
242 kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
243 kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
44b0c0bb
CB
244 kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
245 kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
0e60a699 246
819bd309 247 if (cap_async_pf) {
860643bc
CB
248 r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
249 if (r < 0) {
250 return r;
819bd309 251 }
860643bc
CB
252 r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
253 if (r < 0) {
254 return r;
819bd309 255 }
860643bc
CB
256 r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
257 if (r < 0) {
258 return r;
819bd309
DD
259 }
260 }
261
5b08b344 262 if (cap_sync_regs &&
f7575c96
AF
263 cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
264 cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
5b08b344 265 for (i = 0; i < 16; i++) {
f7575c96
AF
266 cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
267 cs->kvm_run->s.regs.crs[i] = env->cregs[i];
5b08b344 268 }
f7575c96
AF
269 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
270 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
5b08b344
CB
271 } else {
272 for (i = 0; i < 16; i++) {
273 sregs.acrs[i] = env->aregs[i];
274 sregs.crs[i] = env->cregs[i];
275 }
860643bc
CB
276 r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
277 if (r < 0) {
278 return r;
5b08b344
CB
279 }
280 }
0e60a699 281
5b08b344 282 /* Finally the prefix */
f7575c96
AF
283 if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
284 cs->kvm_run->s.regs.prefix = env->psa;
285 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
5b08b344
CB
286 } else {
287 /* prefix is only supported via sync regs */
288 }
289 return 0;
0e60a699
AG
290}
291
20d695a9 292int kvm_arch_get_registers(CPUState *cs)
420840e5
JH
293{
294 S390CPU *cpu = S390_CPU(cs);
295 CPUS390XState *env = &cpu->env;
5b08b344 296 struct kvm_sregs sregs;
0e60a699 297 struct kvm_regs regs;
44c68de0 298 int i, r;
420840e5 299
5b08b344 300 /* get the PSW */
f7575c96
AF
301 env->psw.addr = cs->kvm_run->psw_addr;
302 env->psw.mask = cs->kvm_run->psw_mask;
5b08b344
CB
303
304 /* the GPRS */
f7575c96 305 if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
5b08b344 306 for (i = 0; i < 16; i++) {
f7575c96 307 env->regs[i] = cs->kvm_run->s.regs.gprs[i];
5b08b344
CB
308 }
309 } else {
44c68de0
DD
310 r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
311 if (r < 0) {
312 return r;
5b08b344
CB
313 }
314 for (i = 0; i < 16; i++) {
315 env->regs[i] = regs.gprs[i];
316 }
0e60a699
AG
317 }
318
5b08b344
CB
319 /* The ACRS and CRS */
320 if (cap_sync_regs &&
f7575c96
AF
321 cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
322 cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
5b08b344 323 for (i = 0; i < 16; i++) {
f7575c96
AF
324 env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
325 env->cregs[i] = cs->kvm_run->s.regs.crs[i];
5b08b344
CB
326 }
327 } else {
44c68de0
DD
328 r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
329 if (r < 0) {
330 return r;
5b08b344
CB
331 }
332 for (i = 0; i < 16; i++) {
333 env->aregs[i] = sregs.acrs[i];
334 env->cregs[i] = sregs.crs[i];
335 }
0e60a699
AG
336 }
337
44c68de0 338 /* The prefix */
f7575c96
AF
339 if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
340 env->psa = cs->kvm_run->s.regs.prefix;
5b08b344 341 }
0e60a699 342
860643bc
CB
343 /*
344 * These ONE_REGS are not protected by a capability. As they are only
345 * necessary for migration we just trace a possible error, but don't
346 * return with an error return code.
347 */
348 kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
349 kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
350 kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
44b0c0bb
CB
351 kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
352 kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
44c68de0 353
819bd309 354 if (cap_async_pf) {
860643bc 355 r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
819bd309
DD
356 if (r < 0) {
357 return r;
358 }
860643bc 359 r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
819bd309
DD
360 if (r < 0) {
361 return r;
362 }
860643bc 363 r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
819bd309
DD
364 if (r < 0) {
365 return r;
366 }
367 }
368
0e60a699
AG
369 return 0;
370}
371
fdec9918
CB
372/*
373 * Legacy layout for s390:
374 * Older S390 KVM requires the topmost vma of the RAM to be
375 * smaller than an system defined value, which is at least 256GB.
376 * Larger systems have larger values. We put the guest between
377 * the end of data segment (system break) and this value. We
378 * use 32GB as a base to have enough room for the system break
379 * to grow. We also have to use MAP parameters that avoid
380 * read-only mapping of guest pages.
381 */
575ddeb4 382static void *legacy_s390_alloc(size_t size)
fdec9918
CB
383{
384 void *mem;
385
386 mem = mmap((void *) 0x800000000ULL, size,
387 PROT_EXEC|PROT_READ|PROT_WRITE,
388 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
39228250 389 return mem == MAP_FAILED ? NULL : mem;
fdec9918
CB
390}
391
8e4e86af
DH
392/* DIAG 501 is used for sw breakpoints */
393static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
394
20d695a9 395int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
0e60a699 396{
0e60a699 397
8e4e86af
DH
398 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
399 sizeof(diag_501), 0) ||
400 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501,
401 sizeof(diag_501), 1)) {
0e60a699
AG
402 return -EINVAL;
403 }
404 return 0;
405}
406
20d695a9 407int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
0e60a699 408{
8e4e86af 409 uint8_t t[sizeof(diag_501)];
0e60a699 410
8e4e86af 411 if (cpu_memory_rw_debug(cs, bp->pc, t, sizeof(diag_501), 0)) {
0e60a699 412 return -EINVAL;
8e4e86af 413 } else if (memcmp(t, diag_501, sizeof(diag_501))) {
0e60a699 414 return -EINVAL;
8e4e86af
DH
415 } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
416 sizeof(diag_501), 1)) {
0e60a699
AG
417 return -EINVAL;
418 }
419
420 return 0;
421}
422
770a6379
DH
423static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
424 int len, int type)
425{
426 int n;
427
428 for (n = 0; n < nb_hw_breakpoints; n++) {
429 if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
430 (hw_breakpoints[n].len == len || len == -1)) {
431 return &hw_breakpoints[n];
432 }
433 }
434
435 return NULL;
436}
437
438static int insert_hw_breakpoint(target_ulong addr, int len, int type)
439{
440 int size;
441
442 if (find_hw_breakpoint(addr, len, type)) {
443 return -EEXIST;
444 }
445
446 size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);
447
448 if (!hw_breakpoints) {
449 nb_hw_breakpoints = 0;
450 hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
451 } else {
452 hw_breakpoints =
453 (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
454 }
455
456 if (!hw_breakpoints) {
457 nb_hw_breakpoints = 0;
458 return -ENOMEM;
459 }
460
461 hw_breakpoints[nb_hw_breakpoints].addr = addr;
462 hw_breakpoints[nb_hw_breakpoints].len = len;
463 hw_breakpoints[nb_hw_breakpoints].type = type;
464
465 nb_hw_breakpoints++;
466
467 return 0;
468}
469
8c012449
DH
470int kvm_arch_insert_hw_breakpoint(target_ulong addr,
471 target_ulong len, int type)
472{
770a6379
DH
473 switch (type) {
474 case GDB_BREAKPOINT_HW:
475 type = KVM_HW_BP;
476 break;
477 case GDB_WATCHPOINT_WRITE:
478 if (len < 1) {
479 return -EINVAL;
480 }
481 type = KVM_HW_WP_WRITE;
482 break;
483 default:
484 return -ENOSYS;
485 }
486 return insert_hw_breakpoint(addr, len, type);
8c012449
DH
487}
488
489int kvm_arch_remove_hw_breakpoint(target_ulong addr,
490 target_ulong len, int type)
491{
770a6379
DH
492 int size;
493 struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);
494
495 if (bp == NULL) {
496 return -ENOENT;
497 }
498
499 nb_hw_breakpoints--;
500 if (nb_hw_breakpoints > 0) {
501 /*
502 * In order to trim the array, move the last element to the position to
503 * be removed - if necessary.
504 */
505 if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
506 *bp = hw_breakpoints[nb_hw_breakpoints];
507 }
508 size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
509 hw_breakpoints =
510 (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
511 } else {
512 g_free(hw_breakpoints);
513 hw_breakpoints = NULL;
514 }
515
516 return 0;
8c012449
DH
517}
518
519void kvm_arch_remove_all_hw_breakpoints(void)
520{
770a6379
DH
521 nb_hw_breakpoints = 0;
522 g_free(hw_breakpoints);
523 hw_breakpoints = NULL;
8c012449
DH
524}
525
526void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
527{
770a6379
DH
528 int i;
529
530 if (nb_hw_breakpoints > 0) {
531 dbg->arch.nr_hw_bp = nb_hw_breakpoints;
532 dbg->arch.hw_bp = hw_breakpoints;
533
534 for (i = 0; i < nb_hw_breakpoints; ++i) {
535 hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
536 hw_breakpoints[i].addr);
537 }
538 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
539 } else {
540 dbg->arch.nr_hw_bp = 0;
541 dbg->arch.hw_bp = NULL;
542 }
8c012449
DH
543}
544
20d695a9 545void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
0e60a699 546{
0e60a699
AG
547}
548
20d695a9 549void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
0e60a699 550{
0e60a699
AG
551}
552
20d695a9 553int kvm_arch_process_async_events(CPUState *cs)
0af691d7 554{
225dc991 555 return cs->halted;
0af691d7
MT
556}
557
66ad0893
CH
558static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
559 struct kvm_s390_interrupt *interrupt)
560{
561 int r = 0;
562
563 interrupt->type = irq->type;
564 switch (irq->type) {
565 case KVM_S390_INT_VIRTIO:
566 interrupt->parm = irq->u.ext.ext_params;
567 /* fall through */
568 case KVM_S390_INT_PFAULT_INIT:
569 case KVM_S390_INT_PFAULT_DONE:
570 interrupt->parm64 = irq->u.ext.ext_params2;
571 break;
572 case KVM_S390_PROGRAM_INT:
573 interrupt->parm = irq->u.pgm.code;
574 break;
575 case KVM_S390_SIGP_SET_PREFIX:
576 interrupt->parm = irq->u.prefix.address;
577 break;
578 case KVM_S390_INT_SERVICE:
579 interrupt->parm = irq->u.ext.ext_params;
580 break;
581 case KVM_S390_MCHK:
582 interrupt->parm = irq->u.mchk.cr14;
583 interrupt->parm64 = irq->u.mchk.mcic;
584 break;
585 case KVM_S390_INT_EXTERNAL_CALL:
586 interrupt->parm = irq->u.extcall.code;
587 break;
588 case KVM_S390_INT_EMERGENCY:
589 interrupt->parm = irq->u.emerg.code;
590 break;
591 case KVM_S390_SIGP_STOP:
592 case KVM_S390_RESTART:
593 break; /* These types have no parameters */
594 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
595 interrupt->parm = irq->u.io.subchannel_id << 16;
596 interrupt->parm |= irq->u.io.subchannel_nr;
597 interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
598 interrupt->parm64 |= irq->u.io.io_int_word;
599 break;
600 default:
601 r = -EINVAL;
602 break;
603 }
604 return r;
605}
606
607void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
608{
609 struct kvm_s390_interrupt kvmint = {};
610 CPUState *cs = CPU(cpu);
611 int r;
612
613 r = s390_kvm_irq_to_interrupt(irq, &kvmint);
614 if (r < 0) {
615 fprintf(stderr, "%s called with bogus interrupt\n", __func__);
616 exit(1);
617 }
618
619 r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
620 if (r < 0) {
621 fprintf(stderr, "KVM failed to inject interrupt\n");
622 exit(1);
623 }
624}
625
bbd8bb8e 626static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
66ad0893
CH
627{
628 struct kvm_s390_interrupt kvmint = {};
629 int r;
630
631 r = s390_kvm_irq_to_interrupt(irq, &kvmint);
632 if (r < 0) {
633 fprintf(stderr, "%s called with bogus interrupt\n", __func__);
634 exit(1);
635 }
636
637 r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
638 if (r < 0) {
639 fprintf(stderr, "KVM failed to inject interrupt\n");
640 exit(1);
641 }
642}
643
bbd8bb8e
CH
644void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
645{
646 static bool use_flic = true;
647 int r;
648
649 if (use_flic) {
650 r = kvm_s390_inject_flic(irq);
651 if (r == -ENOSYS) {
652 use_flic = false;
653 }
654 if (!r) {
655 return;
656 }
657 }
658 __kvm_s390_floating_interrupt(irq);
659}
660
de13d216 661void kvm_s390_virtio_irq(int config_change, uint64_t token)
0e60a699 662{
de13d216
CH
663 struct kvm_s390_irq irq = {
664 .type = KVM_S390_INT_VIRTIO,
665 .u.ext.ext_params = config_change,
666 .u.ext.ext_params2 = token,
667 };
0e60a699 668
de13d216 669 kvm_s390_floating_interrupt(&irq);
0e60a699
AG
670}
671
de13d216 672void kvm_s390_service_interrupt(uint32_t parm)
0e60a699 673{
de13d216
CH
674 struct kvm_s390_irq irq = {
675 .type = KVM_S390_INT_SERVICE,
676 .u.ext.ext_params = parm,
677 };
0e60a699 678
de13d216 679 kvm_s390_floating_interrupt(&irq);
79afc36d
CH
680}
681
1bc22652 682static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
0e60a699 683{
de13d216
CH
684 struct kvm_s390_irq irq = {
685 .type = KVM_S390_PROGRAM_INT,
686 .u.pgm.code = code,
687 };
688
689 kvm_s390_vcpu_interrupt(cpu, &irq);
0e60a699
AG
690}
691
1bc22652 692static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
bcec36ea 693 uint16_t ipbh0)
0e60a699 694{
1bc22652 695 CPUS390XState *env = &cpu->env;
a0fa2cb8
TH
696 uint64_t sccb;
697 uint32_t code;
0e60a699
AG
698 int r = 0;
699
cb446eca 700 cpu_synchronize_state(CPU(cpu));
0e60a699
AG
701 sccb = env->regs[ipbh0 & 0xf];
702 code = env->regs[(ipbh0 & 0xf0) >> 4];
703
6e252802 704 r = sclp_service_call(env, sccb, code);
9abf567d 705 if (r < 0) {
1bc22652 706 enter_pgmcheck(cpu, -r);
e8803d93
TH
707 } else {
708 setcc(cpu, r);
0e60a699 709 }
81f7c56c 710
0e60a699
AG
711 return 0;
712}
713
1eecf41b 714static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
09b99878 715{
09b99878 716 CPUS390XState *env = &cpu->env;
1eecf41b
FB
717 int rc = 0;
718 uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;
3474b679 719
44c68de0 720 cpu_synchronize_state(CPU(cpu));
3474b679 721
09b99878 722 switch (ipa1) {
1eecf41b 723 case PRIV_B2_XSCH:
5d9bf1c0 724 ioinst_handle_xsch(cpu, env->regs[1]);
09b99878 725 break;
1eecf41b 726 case PRIV_B2_CSCH:
5d9bf1c0 727 ioinst_handle_csch(cpu, env->regs[1]);
09b99878 728 break;
1eecf41b 729 case PRIV_B2_HSCH:
5d9bf1c0 730 ioinst_handle_hsch(cpu, env->regs[1]);
09b99878 731 break;
1eecf41b 732 case PRIV_B2_MSCH:
5d9bf1c0 733 ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
09b99878 734 break;
1eecf41b 735 case PRIV_B2_SSCH:
5d9bf1c0 736 ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
09b99878 737 break;
1eecf41b 738 case PRIV_B2_STCRW:
5d9bf1c0 739 ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
09b99878 740 break;
1eecf41b 741 case PRIV_B2_STSCH:
5d9bf1c0 742 ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
09b99878 743 break;
1eecf41b 744 case PRIV_B2_TSCH:
09b99878
CH
745 /* We should only get tsch via KVM_EXIT_S390_TSCH. */
746 fprintf(stderr, "Spurious tsch intercept\n");
747 break;
1eecf41b 748 case PRIV_B2_CHSC:
5d9bf1c0 749 ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
09b99878 750 break;
1eecf41b 751 case PRIV_B2_TPI:
09b99878
CH
752 /* This should have been handled by kvm already. */
753 fprintf(stderr, "Spurious tpi intercept\n");
754 break;
1eecf41b 755 case PRIV_B2_SCHM:
5d9bf1c0
TH
756 ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
757 run->s390_sieic.ipb);
09b99878 758 break;
1eecf41b 759 case PRIV_B2_RSCH:
5d9bf1c0 760 ioinst_handle_rsch(cpu, env->regs[1]);
09b99878 761 break;
1eecf41b 762 case PRIV_B2_RCHP:
5d9bf1c0 763 ioinst_handle_rchp(cpu, env->regs[1]);
09b99878 764 break;
1eecf41b 765 case PRIV_B2_STCPS:
09b99878 766 /* We do not provide this instruction, it is suppressed. */
09b99878 767 break;
1eecf41b 768 case PRIV_B2_SAL:
5d9bf1c0 769 ioinst_handle_sal(cpu, env->regs[1]);
09b99878 770 break;
1eecf41b 771 case PRIV_B2_SIGA:
c1e8dfb5 772 /* Not provided, set CC = 3 for subchannel not operational */
5d9bf1c0 773 setcc(cpu, 3);
09b99878 774 break;
1eecf41b
FB
775 case PRIV_B2_SCLP_CALL:
776 rc = kvm_sclp_service_call(cpu, run, ipbh0);
777 break;
c1e8dfb5 778 default:
1eecf41b
FB
779 rc = -1;
780 DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
781 break;
09b99878
CH
782 }
783
1eecf41b 784 return rc;
09b99878
CH
785}
786
1eecf41b 787static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
0e60a699
AG
788{
789 int r = 0;
0e60a699 790
0e60a699 791 switch (ipa1) {
1eecf41b
FB
792 case PRIV_B9_EQBS:
793 /* just inject exception */
794 r = -1;
795 break;
796 default:
797 r = -1;
798 DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
799 break;
800 }
801
802 return r;
803}
804
805static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
806{
807 int r = 0;
808
809 switch (ipa1) {
810 case PRIV_EB_SQBS:
811 /* just inject exception */
812 r = -1;
813 break;
814 default:
815 r = -1;
816 DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipa1);
817 break;
0e60a699
AG
818 }
819
820 return r;
821}
822
4fd6dd06 823static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
0e60a699 824{
4fd6dd06 825 CPUS390XState *env = &cpu->env;
77319f22 826 int ret;
3474b679 827
44c68de0 828 cpu_synchronize_state(CPU(cpu));
77319f22
TH
829 ret = s390_virtio_hypercall(env);
830 if (ret == -EINVAL) {
831 enter_pgmcheck(cpu, PGM_SPECIFICATION);
832 return 0;
833 }
0e60a699 834
77319f22 835 return ret;
0e60a699
AG
836}
837
268846ba
ED
838static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
839{
840 uint64_t r1, r3;
841
842 cpu_synchronize_state(CPU(cpu));
843 r1 = (run->s390_sieic.ipa & 0x00f0) >> 8;
844 r3 = run->s390_sieic.ipa & 0x000f;
845 handle_diag_308(&cpu->env, r1, r3);
846}
847
b30f4dfb
DH
848static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
849{
850 CPUS390XState *env = &cpu->env;
851 unsigned long pc;
852
853 cpu_synchronize_state(CPU(cpu));
854
855 pc = env->psw.addr - 4;
856 if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
857 env->psw.addr = pc;
858 return EXCP_DEBUG;
859 }
860
861 return -ENOENT;
862}
863
638129ff
CH
864#define DIAG_KVM_CODE_MASK 0x000000000000ffff
865
866static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
0e60a699
AG
867{
868 int r = 0;
638129ff
CH
869 uint16_t func_code;
870
871 /*
872 * For any diagnose call we support, bits 48-63 of the resulting
873 * address specify the function code; the remainder is ignored.
874 */
875 func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
876 switch (func_code) {
268846ba
ED
877 case DIAG_IPL:
878 kvm_handle_diag_308(cpu, run);
879 break;
39fbc5c6
CB
880 case DIAG_KVM_HYPERCALL:
881 r = handle_hypercall(cpu, run);
882 break;
883 case DIAG_KVM_BREAKPOINT:
b30f4dfb 884 r = handle_sw_breakpoint(cpu, run);
39fbc5c6
CB
885 break;
886 default:
638129ff 887 DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
39fbc5c6
CB
888 r = -1;
889 break;
0e60a699
AG
890 }
891
892 return r;
893}
894
b20a461f
TH
895static int kvm_s390_cpu_start(S390CPU *cpu)
896{
897 s390_add_running_cpu(cpu);
898 qemu_cpu_kick(CPU(cpu));
899 DPRINTF("DONE: KVM cpu start: %p\n", &cpu->env);
900 return 0;
901}
902
7f7f9752 903int kvm_s390_cpu_restart(S390CPU *cpu)
0e60a699 904{
de13d216
CH
905 struct kvm_s390_irq irq = {
906 .type = KVM_S390_RESTART,
907 };
908
909 kvm_s390_vcpu_interrupt(cpu, &irq);
49e15878 910 s390_add_running_cpu(cpu);
c08d7424 911 qemu_cpu_kick(CPU(cpu));
7f7f9752 912 DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
0e60a699
AG
913 return 0;
914}
915
f7d3e466 916static void sigp_initial_cpu_reset(void *arg)
0e60a699 917{
f7d3e466
TH
918 CPUState *cpu = arg;
919 S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
d5900813 920
f7d3e466
TH
921 cpu_synchronize_state(cpu);
922 scc->initial_cpu_reset(cpu);
0e60a699
AG
923}
924
04c2b516
TH
925static void sigp_cpu_reset(void *arg)
926{
927 CPUState *cpu = arg;
928 S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
929
930 cpu_synchronize_state(cpu);
931 scc->cpu_reset(cpu);
932}
933
b8031adb
TH
934#define SIGP_ORDER_MASK 0x000000ff
935
f7575c96 936static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
0e60a699 937{
f7575c96 938 CPUS390XState *env = &cpu->env;
0e60a699 939 uint8_t order_code;
0e60a699 940 uint16_t cpu_addr;
45fa769b 941 S390CPU *target_cpu;
3796f0e1
TH
942 uint64_t *statusreg = &env->regs[ipa1 >> 4];
943 int cc;
0e60a699 944
cb446eca 945 cpu_synchronize_state(CPU(cpu));
0e60a699
AG
946
947 /* get order code */
b8031adb 948 order_code = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;
0e60a699 949
0e60a699 950 cpu_addr = env->regs[ipa1 & 0x0f];
45fa769b
AF
951 target_cpu = s390_cpu_addr2state(cpu_addr);
952 if (target_cpu == NULL) {
3796f0e1 953 cc = 3; /* not operational */
0e60a699
AG
954 goto out;
955 }
956
957 switch (order_code) {
b20a461f 958 case SIGP_START:
3796f0e1 959 cc = kvm_s390_cpu_start(target_cpu);
b20a461f 960 break;
0b9972a2 961 case SIGP_RESTART:
3796f0e1 962 cc = kvm_s390_cpu_restart(target_cpu);
0b9972a2
TH
963 break;
964 case SIGP_SET_ARCH:
0788082a
TH
965 *statusreg &= 0xffffffff00000000UL;
966 *statusreg |= SIGP_STAT_INVALID_PARAMETER;
967 cc = 1; /* status stored */
968 break;
0b9972a2 969 case SIGP_INITIAL_CPU_RESET:
f7d3e466
TH
970 run_on_cpu(CPU(target_cpu), sigp_initial_cpu_reset, CPU(target_cpu));
971 cc = 0;
0b9972a2 972 break;
04c2b516
TH
973 case SIGP_CPU_RESET:
974 run_on_cpu(CPU(target_cpu), sigp_cpu_reset, CPU(target_cpu));
975 cc = 0;
976 break;
0b9972a2 977 default:
3796f0e1
TH
978 DPRINTF("KVM: unknown SIGP: 0x%x\n", order_code);
979 *statusreg &= 0xffffffff00000000UL;
980 *statusreg |= SIGP_STAT_INVALID_ORDER;
981 cc = 1; /* status stored */
0b9972a2 982 break;
0e60a699
AG
983 }
984
985out:
3796f0e1 986 setcc(cpu, cc);
0e60a699
AG
987 return 0;
988}
989
b30f4dfb 990static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
0e60a699
AG
991{
992 unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
993 uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
d7963c43 994 int r = -1;
0e60a699 995
e67137c6
PM
996 DPRINTF("handle_instruction 0x%x 0x%x\n",
997 run->s390_sieic.ipa, run->s390_sieic.ipb);
0e60a699 998 switch (ipa0) {
09b99878 999 case IPA0_B2:
1eecf41b
FB
1000 r = handle_b2(cpu, run, ipa1);
1001 break;
09b99878 1002 case IPA0_B9:
1eecf41b
FB
1003 r = handle_b9(cpu, run, ipa1);
1004 break;
09b99878 1005 case IPA0_EB:
1eecf41b 1006 r = handle_eb(cpu, run, ipa1);
09b99878
CH
1007 break;
1008 case IPA0_DIAG:
638129ff 1009 r = handle_diag(cpu, run, run->s390_sieic.ipb);
09b99878
CH
1010 break;
1011 case IPA0_SIGP:
1012 r = handle_sigp(cpu, run, ipa1);
1013 break;
0e60a699
AG
1014 }
1015
1016 if (r < 0) {
b30f4dfb 1017 r = 0;
1bc22652 1018 enter_pgmcheck(cpu, 0x0001);
0e60a699 1019 }
b30f4dfb
DH
1020
1021 return r;
0e60a699
AG
1022}
1023
f7575c96 1024static bool is_special_wait_psw(CPUState *cs)
eca3ed03
CB
1025{
1026 /* signal quiesce */
f7575c96 1027 return cs->kvm_run->psw_addr == 0xfffUL;
eca3ed03
CB
1028}
1029
a2689242
TH
1030static void guest_panicked(void)
1031{
1032 QObject *data;
1033
1034 data = qobject_from_jsonf("{ 'action': %s }", "pause");
1035 monitor_protocol_event(QEVENT_GUEST_PANICKED, data);
1036 qobject_decref(data);
1037
1038 vm_stop(RUN_STATE_GUEST_PANICKED);
1039}
1040
1041static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
1042{
1043 CPUState *cs = CPU(cpu);
1044
1045 error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
1046 str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
1047 ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
1048 s390_del_running_cpu(cpu);
1049 guest_panicked();
1050}
1051
1bc22652 1052static int handle_intercept(S390CPU *cpu)
0e60a699 1053{
f7575c96
AF
1054 CPUState *cs = CPU(cpu);
1055 struct kvm_run *run = cs->kvm_run;
0e60a699
AG
1056 int icpt_code = run->s390_sieic.icptcode;
1057 int r = 0;
1058
e67137c6 1059 DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
f7575c96 1060 (long)cs->kvm_run->psw_addr);
0e60a699
AG
1061 switch (icpt_code) {
1062 case ICPT_INSTRUCTION:
b30f4dfb 1063 r = handle_instruction(cpu, run);
0e60a699 1064 break;
6449a41a
TH
1065 case ICPT_PROGRAM:
1066 unmanageable_intercept(cpu, "program interrupt",
1067 offsetof(LowCore, program_new_psw));
1068 r = EXCP_HALTED;
1069 break;
a2689242
TH
1070 case ICPT_EXT_INT:
1071 unmanageable_intercept(cpu, "external interrupt",
1072 offsetof(LowCore, external_new_psw));
1073 r = EXCP_HALTED;
1074 break;
0e60a699 1075 case ICPT_WAITPSW:
08eb8c85
CB
1076 /* disabled wait, since enabled wait is handled in kernel */
1077 if (s390_del_running_cpu(cpu) == 0) {
1078 if (is_special_wait_psw(cs)) {
1079 qemu_system_shutdown_request();
1080 } else {
a2689242 1081 guest_panicked();
08eb8c85 1082 }
eca3ed03
CB
1083 }
1084 r = EXCP_HALTED;
1085 break;
854e42f3 1086 case ICPT_CPU_STOP:
49e15878 1087 if (s390_del_running_cpu(cpu) == 0) {
854e42f3
CB
1088 qemu_system_shutdown_request();
1089 }
1090 r = EXCP_HALTED;
0e60a699
AG
1091 break;
1092 case ICPT_SOFT_INTERCEPT:
1093 fprintf(stderr, "KVM unimplemented icpt SOFT\n");
1094 exit(1);
1095 break;
0e60a699
AG
1096 case ICPT_IO:
1097 fprintf(stderr, "KVM unimplemented icpt IO\n");
1098 exit(1);
1099 break;
1100 default:
1101 fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
1102 exit(1);
1103 break;
1104 }
1105
1106 return r;
1107}
1108
/*
 * Handle a KVM_EXIT_S390_TSCH exit: emulate the TEST SUBCHANNEL
 * instruction for a subchannel the kernel could not handle itself.
 *
 * Returns 0 on success or on a handled failure. A return of exactly -1
 * from ioinst_handle_tsch() falls through both branches and is returned
 * unchanged — NOTE(review): presumably the "fatal / report to caller"
 * case; confirm against ioinst_handle_tsch()'s contract.
 */
static int handle_tsch(S390CPU *cpu)
{
    CPUS390XState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    /* Registers may be stale; pull current vcpu state from the kernel. */
    cpu_synchronize_state(cs);

    /* r1 holds the subchannel id operand; the ipb carries the insn text. */
    ret = ioinst_handle_tsch(env, env->regs[1], run->s390_tsch.ipb);
    if (ret >= 0) {
        /* Success; set condition code. */
        setcc(cpu, ret);
        ret = 0;
    } else if (ret < -1) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            kvm_s390_io_interrupt(run->s390_tsch.subchannel_id,
                                  run->s390_tsch.subchannel_nr,
                                  run->s390_tsch.io_int_parm,
                                  run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}
1138
8c012449
DH
1139static int kvm_arch_handle_debug_exit(S390CPU *cpu)
1140{
770a6379
DH
1141 CPUState *cs = CPU(cpu);
1142 struct kvm_run *run = cs->kvm_run;
1143
1144 int ret = 0;
1145 struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
1146
1147 switch (arch_info->type) {
1148 case KVM_HW_WP_WRITE:
1149 if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
1150 cs->watchpoint_hit = &hw_watchpoint;
1151 hw_watchpoint.vaddr = arch_info->addr;
1152 hw_watchpoint.flags = BP_MEM_WRITE;
1153 ret = EXCP_DEBUG;
1154 }
1155 break;
1156 case KVM_HW_BP:
1157 if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
1158 ret = EXCP_DEBUG;
1159 }
1160 break;
1161 case KVM_SINGLESTEP:
1162 if (cs->singlestep_enabled) {
1163 ret = EXCP_DEBUG;
1164 }
1165 break;
1166 default:
1167 ret = -ENOSYS;
1168 }
1169
1170 return ret;
8c012449
DH
1171}
1172
20d695a9 1173int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
0e60a699 1174{
20d695a9 1175 S390CPU *cpu = S390_CPU(cs);
0e60a699
AG
1176 int ret = 0;
1177
1178 switch (run->exit_reason) {
1179 case KVM_EXIT_S390_SIEIC:
1bc22652 1180 ret = handle_intercept(cpu);
0e60a699
AG
1181 break;
1182 case KVM_EXIT_S390_RESET:
add142e0 1183 qemu_system_reset_request();
0e60a699 1184 break;
09b99878
CH
1185 case KVM_EXIT_S390_TSCH:
1186 ret = handle_tsch(cpu);
1187 break;
8c012449
DH
1188 case KVM_EXIT_DEBUG:
1189 ret = kvm_arch_handle_debug_exit(cpu);
1190 break;
0e60a699
AG
1191 default:
1192 fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
1193 break;
1194 }
1195
bb4ea393
JK
1196 if (ret == 0) {
1197 ret = EXCP_INTERRUPT;
bb4ea393 1198 }
0e60a699
AG
1199 return ret;
1200}
4513d923 1201
20d695a9 1202bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
4513d923
GN
1203{
1204 return true;
1205}
a1b87fe0 1206
20d695a9 1207int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
a1b87fe0
JK
1208{
1209 return 1;
1210}
1211
/* SIGBUS outside vcpu context: likewise unhandled on s390. */
int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}
09b99878 1216
de13d216 1217void kvm_s390_io_interrupt(uint16_t subchannel_id,
09b99878
CH
1218 uint16_t subchannel_nr, uint32_t io_int_parm,
1219 uint32_t io_int_word)
1220{
de13d216
CH
1221 struct kvm_s390_irq irq = {
1222 .u.io.subchannel_id = subchannel_id,
1223 .u.io.subchannel_nr = subchannel_nr,
1224 .u.io.io_int_parm = io_int_parm,
1225 .u.io.io_int_word = io_int_word,
1226 };
09b99878 1227
7e749462 1228 if (io_int_word & IO_INT_WORD_AI) {
de13d216 1229 irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
7e749462 1230 } else {
de13d216 1231 irq.type = ((subchannel_id & 0xff00) << 24) |
7e749462
CH
1232 ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
1233 }
de13d216 1234 kvm_s390_floating_interrupt(&irq);
09b99878
CH
1235}
1236
de13d216 1237void kvm_s390_crw_mchk(void)
09b99878 1238{
de13d216
CH
1239 struct kvm_s390_irq irq = {
1240 .type = KVM_S390_MCHK,
1241 .u.mchk.cr14 = 1 << 28,
1242 .u.mchk.mcic = 0x00400f1d40330000,
1243 };
1244 kvm_s390_floating_interrupt(&irq);
09b99878
CH
1245}
1246
1247void kvm_s390_enable_css_support(S390CPU *cpu)
1248{
09b99878
CH
1249 int r;
1250
1251 /* Activate host kernel channel subsystem support. */
e080f0fd 1252 r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
09b99878
CH
1253 assert(r == 0);
1254}
48475e14
AK
1255
1256void kvm_arch_init_irq_routing(KVMState *s)
1257{
d426d9fb
CH
1258 /*
1259 * Note that while irqchip capabilities generally imply that cpustates
1260 * are handled in-kernel, it is not true for s390 (yet); therefore, we
1261 * have to override the common code kvm_halt_in_kernel_allowed setting.
1262 */
1263 if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
1264 kvm_irqfds_allowed = true;
1265 kvm_gsi_routing_allowed = true;
1266 kvm_halt_in_kernel_allowed = false;
1267 }
48475e14 1268}
b4436a0b 1269
cc3ac9c4
CH
1270int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
1271 int vq, bool assign)
b4436a0b
CH
1272{
1273 struct kvm_ioeventfd kick = {
1274 .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
1275 KVM_IOEVENTFD_FLAG_DATAMATCH,
cc3ac9c4 1276 .fd = event_notifier_get_fd(notifier),
b4436a0b
CH
1277 .datamatch = vq,
1278 .addr = sch,
1279 .len = 8,
1280 };
1281 if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
1282 return -ENOSYS;
1283 }
1284 if (!assign) {
1285 kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1286 }
1287 return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
1288}
This page took 0.639158 seconds and 4 git commands to generate.