/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

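/*
 * In user-only mode there is no lowcore and no asynchronous
 * interrupts; faults are simply reported back to cpu_loop().  The
 * full lowcore-based delivery below is system-emulation only.
 */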
#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

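/*
 * TLB fill handler: translate the virtual address through DAT for the
 * address space implied by mmu_idx and, on success, install the
 * resulting mapping in the softmmu TLB.  Returns 0 on success and 1
 * when a program exception has been triggered instead.
 */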
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

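/*
 * Deliver a program interruption: determine the instruction length
 * code, advance the PSW past the instruction for the non-nullifying
 * exception classes below, store the interruption parameters and the
 * old PSW into the lowcore, and resume at the program-new PSW.
 *
 * Illustrative caller sketch (not a fixed API contract): a helper
 * that detects an invalid operand would typically do
 *
 *     trigger_pgm_exception(env, PGM_SPECIFICATION, ILEN_AUTO);
 *     cpu_loop_exit(cs);
 *
 * and the resulting EXCP_PGM is then routed here from
 * s390_cpu_do_interrupt().
 */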
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

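/*
 * Deliver a SUPERVISOR CALL interruption: store the SVC number and
 * instruction length into the lowcore, save the old PSW pointing past
 * the SVC instruction, and resume at the svc-new PSW.  A pending PER
 * event is delivered immediately afterwards.
 */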
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

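/*
 * Deliver the most recently queued external interruption: store the
 * interruption code and parameters into the lowcore, save the old PSW
 * and resume at the external-new PSW.  Entries are queued by
 * cpu_inject_ext() and popped off here in LIFO order.
 */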
#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->core_id | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

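/*
 * Deliver one pending I/O interruption: scan the per-ISC queues and
 * take the first entry whose interruption subclass is enabled in CR6.
 * INTERRUPT_IO is only cleared once no deliverable entries remain in
 * any queue.
 */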
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

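/*
 * Deliver a machine-check interruption: save the architected register
 * state into the lowcore save areas, store a fixed interruption code
 * (channel report pending), and resume at the mcck-new PSW.
 */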
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

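/*
 * Top-level interrupt dispatcher, invoked from the main execution
 * loop.  Each interruption class is considered only while its PSW
 * mask bit is set, and the first eligible class (machine check,
 * external, I/O) wins; EXCP_PGM and EXCP_SVC arrive with
 * cs->exception_index already set by the translator/helpers.
 */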
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

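/*
 * Asynchronous interrupt check from the TCG main loop.  Interrupts
 * are not recognised in the middle of an interrupted EXECUTE, since
 * execution of the target instruction is indivisible from the parent
 * EXECUTE instruction.
 */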
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

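/*
 * Debug exception handler for PER: a BP_CPU watchpoint hit is turned
 * into a pending storage-alteration PER event, the watchpoints are
 * dropped and execution restarts so that the PER program interruption
 * is raised on the next step.
 */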
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint API.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space
           used to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This holds except when the MVCP and MVCS
           instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }
    program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
}

#endif /* CONFIG_USER_ONLY */