/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#endif

/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

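/* Fill the TLB on a miss: translate the virtual address through the MMU
 * (or, for MMU_REAL_IDX, through real-space translation) and install the
 * mapping.  Returns 0 on success, 1 if a program exception was triggered
 * instead. */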
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    vaddr = orig_vaddr;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
            return 1;
        }
    } else if (mmu_idx == MMU_REAL_IDX) {
        if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
            return 1;
        }
    } else {
        abort();
    }

    /* check out of RAM access */
    if (!address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, rw)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

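/* Deliver a program interruption: store the interruption data and the old
 * PSW into the lowcore, then continue from the program-new PSW. */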
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

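/* Deliver a supervisor-call interruption through the SVC old/new PSW pair,
 * chaining a PER program interrupt afterwards if one is pending. */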
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

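/* Deliver the pending external interruption with the highest priority that
 * is enabled by the subclass-mask bits in control register 0. */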
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

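/* Deliver an I/O interruption dequeued from the floating interrupt
 * controller, subject to the I/O interruption subclass mask in CR6. */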
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__, env->psw.mask,
            env->psw.addr);
    load_psw(env, mask, addr);
}

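/* Deliver a channel-report machine check: save the architected register
 * state into the lowcore and continue from the machine-check new PSW. */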
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(s390_build_validity_mcic() | MCIC_SC_CP);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

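/* Top-level interruption delivery: pick the highest-priority pending
 * interruption class and deliver it, looping as long as something remains
 * deliverable. */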
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

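/* Called from the main execution loop when CPU_INTERRUPT_HARD is set;
 * returns true if an interruption was actually delivered. */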
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint API.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC, which holds unless the MVCP and MVCS
           instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
}

#endif /* CONFIG_USER_ONLY */