/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <[email protected]>
 * Copyright IBM Corp. 2012
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "hw/hw.h"
#include "cpu.h"
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"
#include "exec/gdbstub.h"
#include "trace.h"
#include "qapi-event.h"

/* #define DEBUG_KVM */

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a

#define PRIV_B9_EQBS                    0x9c

#define DIAG_IPL                        0x308
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_IO                         0x40

static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_sync_regs;
static int cap_async_pf;

static void *legacy_s390_alloc(size_t size, uint64_t *align);

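/*
 * Probe whether the kernel exposes the CMMA (collaborative memory
 * management assist) clear/enable attributes in the KVM_S390_VM_MEM_CTRL
 * group; KVM_HAS_DEVICE_ATTR returns 0 when the attribute is supported.
 */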
static int kvm_s390_check_clear_cmma(KVMState *s)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr);
}

static int kvm_s390_check_enable_cmma(KVMState *s)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr);
}

void kvm_s390_clear_cmma_callback(void *opaque)
{
    int rc;
    KVMState *s = opaque;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}

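/*
 * Enable CMMA if both the enable and the clear attribute are available,
 * and register a reset handler so that guest page usage states are
 * cleared again on machine reset.
 */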
static void kvm_s390_enable_cmma(KVMState *s)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (kvm_s390_check_enable_cmma(s) || kvm_s390_check_clear_cmma(s)) {
        return;
    }

    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
    if (!rc) {
        qemu_register_reset(kvm_s390_clear_cmma_callback, s);
    }
    trace_kvm_enable_cmma(rc);
}

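/*
 * Architecture-specific KVM setup: cache the capabilities we rely on,
 * enable CMMA when the kernel offers VM attributes, and fall back to the
 * legacy allocator on kernels without GMAP/COW support.
 */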
int kvm_arch_init(KVMState *s)
{
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);

    if (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES)) {
        kvm_s390_enable_cmma(s);
    }

    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    return 0;
}

void kvm_s390_reset_vcpu(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* The initial reset call is needed here to reset in-kernel
     * vcpu data that we can't access directly from QEMU
     * (i.e. with older kernels which don't support sync_regs/ONE_REG).
     * Before this ioctl cpu_synchronize_state() is called in common kvm
     * code (kvm-all) */
    if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
        error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
    }
}

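/*
 * Copy the QEMU CPU state into the kernel: the PSW always goes through
 * the shared kvm_run area, GPRs/ACRs/CRs/prefix use sync_regs when the
 * kernel supports it, and the rest is transferred via dedicated ioctls
 * and ONE_REG accessors.
 */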
int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    /* Floating point */
    for (i = 0; i < 16; i++) {
        fpu.fprs[i] = env->fregs[i].ll;
    }
    fpu.fpc = env->fpc;

    r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
    if (r < 0) {
        return r;
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    /*
     * These ONE_REGS are not protected by a capability. As they are only
     * necessary for migration we just trace a possible error, but don't
     * return with an error return code.
     */
    kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
    kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
    kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
    kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
    kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);

    if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    if (cap_sync_regs &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    /* Finally the prefix */
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}

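/*
 * Refresh the QEMU CPU state from the kernel, mirroring
 * kvm_arch_put_registers(): prefer the sync_regs area where valid and
 * fall back to the GET_REGS/GET_SREGS/GET_FPU ioctls otherwise.
 */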
int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (cap_sync_regs &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point */
    r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
    if (r < 0) {
        return r;
    }
    for (i = 0; i < 16; i++) {
        env->fregs[i].ll = fpu.fprs[i];
    }
    env->fpc = fpu.fpc;

    /* The prefix */
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    /*
     * These ONE_REGS are not protected by a capability. As they are only
     * necessary for migration we just trace a possible error, but don't
     * return with an error return code.
     */
    kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
    kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
    kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
    kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
    kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);

    if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than a system-defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size, uint64_t *align)
{
    void *mem;

    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    return mem == MAP_FAILED ? NULL : mem;
}

/* DIAG 501 is used for sw breakpoints */
static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(diag_501), 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501,
                            sizeof(diag_501), 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[sizeof(diag_501)];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sizeof(diag_501), 0)) {
        return -EINVAL;
    } else if (memcmp(t, diag_501, sizeof(diag_501))) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sizeof(diag_501), 1)) {
        return -EINVAL;
    }

    return 0;
}

static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

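/*
 * Translate QEMU's kvm_s390_irq description into the legacy
 * kvm_s390_interrupt layout used by the KVM_S390_INTERRUPT ioctl.
 */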
static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}

void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    CPUState *cs = CPU(cpu);
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

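/*
 * Prefer the in-kernel floating interrupt controller (FLIC) for floating
 * interrupts; fall back to the legacy VM ioctl once the kernel reports
 * -ENOSYS for it.
 */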
void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    static bool use_flic = true;
    int r;

    if (use_flic) {
        r = kvm_s390_inject_flic(irq);
        if (r == -ENOSYS) {
            use_flic = false;
        }
        if (!r) {
            return;
        }
    }
    __kvm_s390_floating_interrupt(irq);
}

void kvm_s390_virtio_irq(int config_change, uint64_t token)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_VIRTIO,
        .u.ext.ext_params = config_change,
        .u.ext.ext_params2 = token,
    };

    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_service_interrupt(uint32_t parm)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_SERVICE,
        .u.ext.ext_params = parm,
    };

    kvm_s390_floating_interrupt(&irq);
}

static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r = 0;

    cpu_synchronize_state(CPU(cpu));
    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        enter_pgmcheck(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}

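/*
 * Handle intercepted privileged instructions with primary opcode 0xb2,
 * mostly the channel I/O instructions; returning -1 makes the caller
 * inject an operation exception.
 */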
static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    cpu_synchronize_state(CPU(cpu));

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1]);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1]);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1]);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}

static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    cpu_synchronize_state(CPU(cpu));
    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 8;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3);
}

static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    cpu_synchronize_state(CPU(cpu));

    pc = env->psw.addr - 4;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        r = -1;
        break;
    }

    return r;
}

static void sigp_cpu_start(void *arg)
{
    CPUState *cs = arg;
    S390CPU *cpu = S390_CPU(cs);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    DPRINTF("DONE: KVM cpu start: %p\n", &cpu->env);
}

static void sigp_cpu_restart(void *arg)
{
    CPUState *cs = arg;
    S390CPU *cpu = S390_CPU(cs);
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
}

int kvm_s390_cpu_restart(S390CPU *cpu)
{
    run_on_cpu(CPU(cpu), sigp_cpu_restart, CPU(cpu));
    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
    return 0;
}

static void sigp_initial_cpu_reset(void *arg)
{
    CPUState *cpu = arg;
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);

    cpu_synchronize_state(cpu);
    scc->initial_cpu_reset(cpu);
    cpu_synchronize_post_reset(cpu);
}

static void sigp_cpu_reset(void *arg)
{
    CPUState *cpu = arg;
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);

    cpu_synchronize_state(cpu);
    scc->cpu_reset(cpu);
    cpu_synchronize_post_reset(cpu);
}

#define SIGP_ORDER_MASK 0x000000ff

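/*
 * Emulate the SIGP instruction: decode the order code, resolve the target
 * CPU from its address, run the order on the target CPU and report the
 * result in the condition code (and, where required, the status register).
 */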
static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    uint8_t order_code;
    uint16_t cpu_addr;
    S390CPU *target_cpu;
    uint64_t *statusreg = &env->regs[ipa1 >> 4];
    int cc;

    cpu_synchronize_state(CPU(cpu));

    /* get order code */
    order_code = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;

    cpu_addr = env->regs[ipa1 & 0x0f];
    target_cpu = s390_cpu_addr2state(cpu_addr);
    if (target_cpu == NULL) {
        cc = 3;   /* not operational */
        goto out;
    }

    switch (order_code) {
    case SIGP_START:
        run_on_cpu(CPU(target_cpu), sigp_cpu_start, CPU(target_cpu));
        cc = 0;
        break;
    case SIGP_RESTART:
        run_on_cpu(CPU(target_cpu), sigp_cpu_restart, CPU(target_cpu));
        cc = 0;
        break;
    case SIGP_SET_ARCH:
        *statusreg &= 0xffffffff00000000UL;
        *statusreg |= SIGP_STAT_INVALID_PARAMETER;
        cc = 1;   /* status stored */
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(target_cpu), sigp_initial_cpu_reset, CPU(target_cpu));
        cc = 0;
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(target_cpu), sigp_cpu_reset, CPU(target_cpu));
        cc = 0;
        break;
    default:
        DPRINTF("KVM: unknown SIGP: 0x%x\n", order_code);
        *statusreg &= 0xffffffff00000000UL;
        *statusreg |= SIGP_STAT_INVALID_ORDER;
        cc = 1;   /* status stored */
        break;
    }

out:
    setcc(cpu, cc);
    return 0;
}

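/*
 * Dispatch an instruction intercept on the first opcode byte; anything we
 * cannot handle is answered with an operation exception (program
 * interruption code 0x0001).
 */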
static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = handle_sigp(cpu, run, ipa1);
        break;
    }

    if (r < 0) {
        r = 0;
        enter_pgmcheck(cpu, 0x0001);
    }

    return r;
}

static bool is_special_wait_psw(CPUState *cs)
{
    /* signal quiesce */
    return cs->kvm_run->psw_addr == 0xfffUL;
}

static void guest_panicked(void)
{
    qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE,
                                   &error_abort);
    vm_stop(RUN_STATE_GUEST_PANICKED);
}

static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
{
    CPUState *cs = CPU(cpu);

    error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
                 str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
                 ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
    s390_cpu_halt(cpu);
    guest_panicked();
}

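/*
 * Handle a SIE intercept (KVM_EXIT_S390_SIEIC): instruction intercepts are
 * emulated, disabled-wait and CPU-stop states halt the VCPU loop, and
 * intercepts we cannot manage stop the guest.
 */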
static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, "program interrupt",
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, "external interrupt",
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        cpu_synchronize_state(cs);
        if (s390_cpu_halt(cpu) == 0) {
            if (is_special_wait_psw(cs)) {
                qemu_system_shutdown_request();
            } else {
                guest_panicked();
            }
        }
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
            qemu_system_shutdown_request();
        }
        r = EXCP_HALTED;
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}

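/*
 * Complete a TEST SUBCHANNEL intercept (KVM_EXIT_S390_TSCH): run the
 * instruction through QEMU's channel subsystem emulation and reinject any
 * I/O interrupt the kernel had already dequeued if the instruction failed.
 */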
static int handle_tsch(S390CPU *cpu)
{
    CPUS390XState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    cpu_synchronize_state(cs);

    ret = ioinst_handle_tsch(env, env->regs[1], run->s390_tsch.ipb);
    if (ret >= 0) {
        /* Success; set condition code. */
        setcc(cpu, ret);
        ret = 0;
    } else if (ret < -1) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            kvm_s390_io_interrupt(run->s390_tsch.subchannel_id,
                                  run->s390_tsch.subchannel_nr,
                                  run->s390_tsch.io_int_parm,
                                  run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}

static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        qemu_system_reset_request();
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}

void kvm_s390_io_interrupt(uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word)
{
    struct kvm_s390_irq irq = {
        .u.io.subchannel_id = subchannel_id,
        .u.io.subchannel_nr = subchannel_nr,
        .u.io.io_int_parm = io_int_parm,
        .u.io.io_int_word = io_int_word,
    };

    if (io_int_word & IO_INT_WORD_AI) {
        irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
    } else {
        irq.type = ((subchannel_id & 0xff00) << 24) |
            ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
    }
    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_crw_mchk(void)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_MCHK,
        .u.mchk.cr14 = 1 << 28,
        .u.mchk.mcic = 0x00400f1d40330000ULL,
    };
    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}

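/*
 * Register (or deregister) an ioeventfd so that virtio-ccw queue
 * notifications for the given subchannel and virtqueue are signalled
 * directly by the kernel.
 */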
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
        KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}

int kvm_s390_get_memslot_count(KVMState *s)
{
    return kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
}

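/*
 * Propagate a QEMU CPU state change to the kernel by mapping it onto the
 * corresponding KVM MP state; failures are traced and returned to the
 * caller.
 */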
int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}