/*
 * s390x exception / interrupt helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)

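/*
 * User-only emulation has no lowcore and no asynchronous interrupts;
 * all we can do here is discard the pending exception so that cpu_loop
 * resumes normal execution.
 */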
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

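/*
 * In user-only mode every memory fault is reported as an addressing
 * exception; cpu_loop picks the faulting address up from __excp_addr
 * and delivers the corresponding signal to the guest process.
 */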
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

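/* Map a QEMU MMU index back to the architectural address-space control. */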
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

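/*
 * Resolve a virtual address for the softmmu slow path: translate it
 * through the DAT tables, check that the result lies inside guest RAM,
 * and install the mapping in the QEMU TLB.  Returns 0 on success, or 1
 * if a program exception has been raised instead.
 */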
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access; raddr == ram_size is already the first
       byte past the end of RAM, hence >= rather than > */
    if (raddr >= ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " >= ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

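/*
 * Deliver a program interruption: store the interruption code, ILC and
 * old PSW in the lowcore, merge in a concurrent PER event if one is
 * pending, and load the program-new PSW.  The PSW is first advanced
 * past the instruction for the exception types that do not nullify, so
 * that the stored old PSW points at the next instruction.
 */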
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

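/*
 * Deliver a SUPERVISOR CALL interruption: store the SVC interruption
 * code and ILC in the lowcore, swap the SVC old/new PSWs (the old PSW
 * points past the SVC instruction), and chase it with a PER program
 * interruption if one is pending, since PER has to be presented
 * immediately after the SVC.
 */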
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

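/*
 * Deliver the most recently queued external interruption: store the
 * external interruption code and parameters in the lowcore and swap
 * the external old/new PSWs.  Being called with external interrupts
 * masked off is a bug in the caller, hence the cpu_abort().
 */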
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

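/*
 * Deliver one pending I/O interruption.  Each interruption subclass
 * (ISC) has its own queue; an ISC is only eligible when its mask bit
 * in control register 6 is set.  The first eligible interrupt found is
 * stored in the lowcore and the I/O old/new PSWs are swapped; the
 * INTERRUPT_IO pending flag is only cleared once every enabled queue
 * has been drained.
 */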
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

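/*
 * Deliver a machine-check interruption.  Only the channel-report-
 * pending type (type 1) is implemented, and it is only presented when
 * the CRW subclass mask in control register 14 allows it.  The
 * architected register save areas in the lowcore are filled in and a
 * fixed machine-check interruption code is stored before the PSW swap.
 */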
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

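/*
 * Main interrupt dispatcher for system emulation.  A synchronous
 * exception (program check, SVC) arrives with exception_index already
 * set; otherwise the pending-interrupt bits are inspected, respecting
 * both the PSW mask bits and the machine check > external > I/O
 * priority, and the corresponding delivery routine is invoked.
 */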
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

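/*
 * Called by the cpu-exec loop when CPU_INTERRUPT_HARD is set.  The
 * PSW_MASK_EXT test here serves as a coarse "any interrupt enabled"
 * gate; the individual delivery routines still check their own mask
 * bits.  Returning true tells the loop an interrupt was taken.
 */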
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

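/*
 * Debug exception handler: QEMU watchpoints are used to model PER
 * storage-alteration events.  When one fires, the PER state is
 * recorded, all watchpoints are dropped and the instruction is
 * re-executed, so that the PER program interruption is raised through
 * the normal path.
 */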
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space
           used to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This holds except when the MVCP and MVCS
           instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }
    program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
}

#endif /* CONFIG_USER_ONLY */