/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <[email protected]>
 * Copyright IBM Corp. 2012
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "cpu.h"
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"
#include "exec/gdbstub.h"
#include "trace.h"

/* #define DEBUG_KVM */

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

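/*
 * Dispatch constants used below: IPA0_* match the high byte of
 * s390_sieic.ipa (the first opcode byte of the intercepted instruction),
 * PRIV_* are matched against the second byte (ipa1) to pick the specific
 * privileged B2/B9/EB instruction, DIAG_* are DIAGNOSE function codes,
 * and ICPT_* are the SIE intercept codes reported by the kernel in
 * s390_sieic.icptcode.
 */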
#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a

#define PRIV_B9_EQBS                    0x9c

#define DIAG_IPL                        0x308
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_IO                         0x40

static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_sync_regs;
static int cap_async_pf;

static void *legacy_s390_alloc(size_t size);

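/*
 * kvm_arch_init() probes the KVM capabilities this code optionally uses:
 * KVM_CAP_SYNC_REGS (register state shared via the kvm_run page) and
 * KVM_CAP_ASYNC_PF (pfault state via ONE_REG). If the kernel lacks the
 * GMAP/COW extensions, guest memory is allocated with the legacy fixed
 * mapping implemented further down in legacy_s390_alloc().
 */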
int kvm_arch_init(KVMState *s)
{
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cpu)
{
    /* nothing to do yet */
    return 0;
}

void kvm_s390_reset_vcpu(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* The initial reset call is needed here to reset in-kernel
     * vcpu data that we can't access directly from QEMU
     * (i.e. with older kernels which don't support sync_regs/ONE_REG).
     * Before this ioctl cpu_synchronize_state() is called in common kvm
     * code (kvm-all) */
    if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
        perror("Can't reset vcpu\n");
    }
}

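/*
 * Register transfer strategy for kvm_arch_put_registers() and its
 * counterpart kvm_arch_get_registers(): when KVM_CAP_SYNC_REGS is
 * available, the GPRS, ACRS, CRS and prefix are exchanged through the
 * shared kvm_run area (no extra ioctl needed); otherwise the code falls
 * back to the KVM_SET_REGS/KVM_SET_SREGS ioctls. The PSW always travels
 * via kvm_run, and migration-only state goes through ONE_REG.
 */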
int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    int r;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    /*
     * These ONE_REGS are not protected by a capability. As they are only
     * necessary for migration we just trace a possible error, but don't
     * return with an error return code.
     */
    kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
    kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
    kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
    kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
    kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);

    if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    if (cap_sync_regs &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    /* Finally the prefix */
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (cap_sync_regs &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* The prefix */
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    /*
     * These ONE_REGS are not protected by a capability. As they are only
     * necessary for migration we just trace a possible error, but don't
     * return with an error return code.
     */
    kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
    kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
    kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
    kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
    kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);

    if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than a system-defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size)
{
    void *mem;

    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    return mem == MAP_FAILED ? NULL : mem;
}

/* DIAG 501 is used for sw breakpoints */
static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};

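/*
 * Software breakpoints: the original guest instruction is saved in
 * bp->saved_insn and overwritten with the 4-byte DIAGNOSE 0x501 sequence
 * above. When the diagnose traps (see handle_sw_breakpoint() below), the
 * PSW is rewound by the 4 bytes of the diagnose so the guest resumes at
 * the breakpointed instruction once it has been restored.
 */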
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(diag_501), 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501,
                            sizeof(diag_501), 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[sizeof(diag_501)];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sizeof(diag_501), 0)) {
        return -EINVAL;
    } else if (memcmp(t, diag_501, sizeof(diag_501))) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sizeof(diag_501), 1)) {
        return -EINVAL;
    }

    return 0;
}

static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

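/*
 * The gdbstub-facing hooks below translate GDB breakpoint/watchpoint types
 * into the KVM_HW_BP/KVM_HW_WP_WRITE types stored in hw_breakpoints[];
 * find_hw_breakpoint() treats len == -1 as a wildcard so debug exits can
 * be matched by address and type alone.
 */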
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

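/*
 * Interrupt injection: kvm_s390_interrupt_internal() fills a struct
 * kvm_s390_interrupt and injects it either as a floating interrupt via the
 * VM ioctl (vm != 0) or directly into this vcpu via the vcpu ioctl.
 */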
void kvm_s390_interrupt_internal(S390CPU *cpu, int type, uint32_t parm,
                                 uint64_t parm64, int vm)
{
    CPUState *cs = CPU(cpu);
    struct kvm_s390_interrupt kvmint;
    int r;

    if (!cs->kvm_state) {
        return;
    }

    kvmint.type = type;
    kvmint.parm = parm;
    kvmint.parm64 = parm64;

    if (vm) {
        r = kvm_vm_ioctl(cs->kvm_state, KVM_S390_INTERRUPT, &kvmint);
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    }

    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_virtio_irq(S390CPU *cpu, int config_change, uint64_t token)
{
    kvm_s390_interrupt_internal(cpu, KVM_S390_INT_VIRTIO, config_change,
                                token, 1);
}

void kvm_s390_interrupt(S390CPU *cpu, int type, uint32_t code)
{
    kvm_s390_interrupt_internal(cpu, type, code, 0, 0);
}

static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
{
    kvm_s390_interrupt(cpu, KVM_S390_PROGRAM_INT, code);
}

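/*
 * SCLP service call: the two register numbers encoded in ipbh0 hold the
 * SCCB address and the command code. A negative result from
 * sclp_service_call() is reflected back as a program check; otherwise the
 * return value becomes the guest's condition code.
 */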
static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r = 0;

    cpu_synchronize_state(CPU(cpu));
    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        enter_pgmcheck(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}

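/*
 * handle_b2() dispatches the privileged 0xb2xx instructions on the second
 * opcode byte. The channel I/O instructions are forwarded to the
 * ioinst_handle_*() helpers; anything unhandled returns -1, which the
 * caller turns into a program check.
 */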
static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    cpu_synchronize_state(CPU(cpu));

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1]);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1]);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1]);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}

static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    cpu_synchronize_state(CPU(cpu));
    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    cpu_synchronize_state(CPU(cpu));
    /* r1 and r3 are encoded in the second opcode byte of the instruction */
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3);
}

static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    cpu_synchronize_state(CPU(cpu));

    pc = env->psw.addr - 4;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

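/*
 * DIAGNOSE dispatching: the function code is taken from the low 16 bits of
 * the base/displacement address computed from the instruction. 0x308 is
 * the IPL/reipl interface, 0x500 the KVM virtio hypercall and 0x501 the
 * software breakpoint defined above.
 */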
#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        r = -1;
        break;
    }

    return r;
}

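/*
 * CPU start/restart: both helpers mark the target cpu as running for the
 * s390 bookkeeping and kick it out of its halted state; restart
 * additionally injects a KVM_S390_RESTART interrupt first.
 */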
static int kvm_s390_cpu_start(S390CPU *cpu)
{
    s390_add_running_cpu(cpu);
    qemu_cpu_kick(CPU(cpu));
    DPRINTF("DONE: KVM cpu start: %p\n", &cpu->env);
    return 0;
}

int kvm_s390_cpu_restart(S390CPU *cpu)
{
    kvm_s390_interrupt(cpu, KVM_S390_RESTART, 0);
    s390_add_running_cpu(cpu);
    qemu_cpu_kick(CPU(cpu));
    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
    return 0;
}

static void sigp_initial_cpu_reset(void *arg)
{
    CPUState *cpu = arg;
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);

    cpu_synchronize_state(cpu);
    scc->initial_cpu_reset(cpu);
}

static void sigp_cpu_reset(void *arg)
{
    CPUState *cpu = arg;
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);

    cpu_synchronize_state(cpu);
    scc->cpu_reset(cpu);
}

#define SIGP_ORDER_MASK 0x000000ff

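/*
 * handle_sigp(): the order code comes from the base/displacement field of
 * the instruction, the target cpu address from the register selected by
 * the low nibble of ipa1. The result is reported as a condition code:
 * 0 for an accepted order, 1 when status (e.g. an invalid-order
 * indication) has been stored into the status register, 3 when the
 * addressed cpu is not operational.
 */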
static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    uint8_t order_code;
    uint16_t cpu_addr;
    S390CPU *target_cpu;
    uint64_t *statusreg = &env->regs[ipa1 >> 4];
    int cc;

    cpu_synchronize_state(CPU(cpu));

    /* get order code */
    order_code = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;

    cpu_addr = env->regs[ipa1 & 0x0f];
    target_cpu = s390_cpu_addr2state(cpu_addr);
    if (target_cpu == NULL) {
        cc = 3;   /* not operational */
        goto out;
    }

    switch (order_code) {
    case SIGP_START:
        cc = kvm_s390_cpu_start(target_cpu);
        break;
    case SIGP_RESTART:
        cc = kvm_s390_cpu_restart(target_cpu);
        break;
    case SIGP_SET_ARCH:
        *statusreg &= 0xffffffff00000000UL;
        *statusreg |= SIGP_STAT_INVALID_PARAMETER;
        cc = 1;   /* status stored */
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(target_cpu), sigp_initial_cpu_reset, CPU(target_cpu));
        cc = 0;
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(target_cpu), sigp_cpu_reset, CPU(target_cpu));
        cc = 0;
        break;
    default:
        DPRINTF("KVM: unknown SIGP: 0x%x\n", order_code);
        *statusreg &= 0xffffffff00000000UL;
        *statusreg |= SIGP_STAT_INVALID_ORDER;
        cc = 1;   /* status stored */
        break;
    }

out:
    setcc(cpu, cc);
    return 0;
}

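/*
 * handle_instruction() fans out on the first opcode byte (IPA0_*). A
 * negative return from any handler means "not handled here"; in that case
 * a program check with code 0x0001 (operation exception) is injected into
 * the guest instead of reporting an error to the caller.
 */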
static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, ipa1);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = handle_sigp(cpu, run, ipa1);
        break;
    }

    if (r < 0) {
        r = 0;
        enter_pgmcheck(cpu, 0x0001);
    }

    return r;
}

static bool is_special_wait_psw(CPUState *cs)
{
    /* signal quiesce */
    return cs->kvm_run->psw_addr == 0xfffUL;
}

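/*
 * handle_intercept() processes SIE intercepts (ICPT_*): instruction
 * intercepts are emulated above, a disabled-wait PSW either triggers a
 * shutdown (for the special quiesce PSW) or pauses the VM as a guest
 * panic, and a cpu-stop of the last running cpu shuts the machine down.
 */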
static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        if (s390_del_running_cpu(cpu) == 0) {
            if (is_special_wait_psw(cs)) {
                qemu_system_shutdown_request();
            } else {
                QObject *data;

                data = qobject_from_jsonf("{ 'action': %s }", "pause");
                monitor_protocol_event(QEVENT_GUEST_PANICKED, data);
                qobject_decref(data);
                vm_stop(RUN_STATE_GUEST_PANICKED);
            }
        }
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        if (s390_del_running_cpu(cpu) == 0) {
            qemu_system_shutdown_request();
        }
        r = EXCP_HALTED;
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}

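/*
 * handle_tsch() is reached via KVM_EXIT_S390_TSCH: the kernel has already
 * dequeued a pending I/O interrupt (if any). When instruction emulation
 * fails, that interrupt must be reinjected so it is not lost, using the
 * same subchannel-id derived type encoding as kvm_s390_io_interrupt().
 */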
static int handle_tsch(S390CPU *cpu)
{
    CPUS390XState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    cpu_synchronize_state(cs);

    ret = ioinst_handle_tsch(env, env->regs[1], run->s390_tsch.ipb);
    if (ret >= 0) {
        /* Success; set condition code. */
        setcc(cpu, ret);
        ret = 0;
    } else if (ret < -1) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            uint16_t subchannel_id = run->s390_tsch.subchannel_id;
            uint16_t subchannel_nr = run->s390_tsch.subchannel_nr;
            uint32_t io_int_parm = run->s390_tsch.io_int_parm;
            uint32_t io_int_word = run->s390_tsch.io_int_word;
            uint32_t type = ((subchannel_id & 0xff00) << 24) |
                ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);

            kvm_s390_interrupt_internal(cpu, type,
                                        ((uint32_t)subchannel_id << 16)
                                        | subchannel_nr,
                                        ((uint64_t)io_int_parm << 32)
                                        | io_int_word, 1);
        }
        ret = 0;
    }
    return ret;
}

static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        qemu_system_reset_request();
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}

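/*
 * I/O interrupt injection: adapter interrupts (IO_INT_WORD_AI) use the
 * KVM_S390_INT_IO(1, 0, 0, 0) type, while classic subchannel interrupts
 * encode the subchannel id and number into the interrupt type and
 * parameters before being injected as floating interrupts.
 */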
void kvm_s390_io_interrupt(S390CPU *cpu, uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word)
{
    uint32_t type;

    if (io_int_word & IO_INT_WORD_AI) {
        type = KVM_S390_INT_IO(1, 0, 0, 0);
    } else {
        type = ((subchannel_id & 0xff00) << 24) |
            ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
    }
    kvm_s390_interrupt_internal(cpu, type,
                                ((uint32_t)subchannel_id << 16) | subchannel_nr,
                                ((uint64_t)io_int_parm << 32) | io_int_word, 1);
}

void kvm_s390_crw_mchk(S390CPU *cpu)
{
    kvm_s390_interrupt_internal(cpu, KVM_S390_MCHK, 1 << 28,
                                0x00400f1d40330000, 1);
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_irqfds_allowed = true;
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}

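/*
 * ioeventfd setup for virtio-ccw: the notifier fd is matched on the
 * subchannel id (addr) and the virtqueue index (datamatch), so a guest
 * notification on that queue kicks the eventfd without a full exit to
 * QEMU; pass assign = false to tear the binding down again.
 */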
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
        KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}