/*
 * S/390 condition code helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
23 #include "exec/helper-proto.h"
24 #include "qemu/host-utils.h"
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
/*
 * Signed 32-bit compare: cc 0 = operands equal, 1 = first operand low,
 * 2 = first operand high.
 */
static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst)
{
    if (src == dst) {
        return 0;
    }
    return src < dst ? 1 : 2;
}
/*
 * Signed 32-bit compare against zero (LOAD AND TEST and friends);
 * inlined form of cc_calc_ltgt_32(dst, 0).
 */
static uint32_t cc_calc_ltgt0_32(int32_t dst)
{
    if (dst == 0) {
        return 0;
    }
    return dst < 0 ? 1 : 2;
}
/*
 * Signed 64-bit compare: cc 0 = operands equal, 1 = first operand low,
 * 2 = first operand high.
 */
static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst)
{
    if (src == dst) {
        return 0;
    }
    return src < dst ? 1 : 2;
}
/*
 * Signed 64-bit compare against zero; inlined form of
 * cc_calc_ltgt_64(dst, 0).
 */
static uint32_t cc_calc_ltgt0_64(int64_t dst)
{
    if (dst == 0) {
        return 0;
    }
    return dst < 0 ? 1 : 2;
}
/*
 * Unsigned (logical) 32-bit compare: cc 0 = equal, 1 = first operand
 * low, 2 = first operand high.
 */
static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst)
{
    if (src == dst) {
        return 0;
    }
    return src < dst ? 1 : 2;
}
/*
 * Unsigned (logical) 64-bit compare: cc 0 = equal, 1 = first operand
 * low, 2 = first operand high.
 */
static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst)
{
    if (src == dst) {
        return 0;
    }
    return src < dst ? 1 : 2;
}
/*
 * TEST UNDER MASK (32-bit): cc 0 = selected bits all zero,
 * 3 = selected bits all one, 1 = mixed.
 */
static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask)
{
    uint32_t selected = val & mask;

    if (selected == 0) {
        return 0;
    }
    return selected == mask ? 3 : 1;
}
/*
 * TEST UNDER MASK (64-bit): cc 0 = selected bits all zero, 3 = all one,
 * otherwise 2 when the leftmost selected bit is one, 1 when it is zero.
 */
static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask)
{
    uint64_t selected = val & mask;

    if (selected == 0) {
        return 0;
    }
    if (selected == mask) {
        return 3;
    }
    /*
     * Mixed result: test the leftmost selected bit.  mask is non-zero
     * here (selected != 0), so the builtin is well defined and equals
     * clz64(mask).
     */
    if ((int64_t)(val << __builtin_clzll(mask)) < 0) {
        return 2;
    }
    return 1;
}
118 static uint32_t cc_calc_nz(uint64_t dst)
/*
 * Signed 64-bit add: cc 0 = zero, 1 = negative, 2 = positive,
 * 3 = overflow.
 */
static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
{
    /* Overflow iff both addends share a sign that the result lacks. */
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3;
    }
    if (ar < 0) {
        return 1;
    }
    return ar > 0 ? 2 : 0;
}
/*
 * Unsigned (logical) 64-bit add: cc bit 1 = carry-out, cc bit 0 =
 * result non-zero, giving cc 0..3.
 */
static uint32_t cc_calc_addu_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    /* A carry occurred iff the result wrapped below the first addend. */
    uint32_t nonzero = (ar != 0);
    uint32_t carry = (ar < a1);

    return nonzero + 2 * carry;
}
/*
 * Unsigned 64-bit add-with-carry: same cc layout as cc_calc_addu_64.
 * The carry-in itself is not passed, so a2 + carry_in is recovered
 * from ar - a1.
 */
static uint32_t cc_calc_addc_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    uint64_t a2c = ar - a1;   /* a2 + carry_in */
    int carry_out;

    /* Either a2 + carry_in wrapped, or a1 + a2c wrapped. */
    carry_out = (a2c < a2) || (ar < a1);
    return (ar != 0) + (carry_out ? 2 : 0);
}
/*
 * Signed 64-bit subtract: cc 0 = zero, 1 = negative, 2 = positive,
 * 3 = overflow.
 */
static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
{
    /* Overflow iff operand signs differ and the result matches a2. */
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3;
    }
    if (ar < 0) {
        return 1;
    }
    return ar > 0 ? 2 : 0;
}
168 static uint32_t cc_calc_subu_64(uint64_t a1, uint64_t a2, uint64_t ar)
/*
 * Unsigned 64-bit subtract-with-borrow: cc bit 1 = no borrow-out,
 * cc bit 0 = result non-zero.  The borrow-in is not passed explicitly;
 * it is inferred from ar differing from a1 - a2.
 */
static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    int borrowed_in = (ar != a1 - a2);
    int borrow_out = borrowed_in ? (a2 >= a1) : (a2 > a1);

    return (ar != 0) + 2 * !borrow_out;
}
/*
 * LOAD POSITIVE (64-bit), cc on the already-computed result:
 * 0 = zero, 2 = positive, 3 = overflow (|INT64_MIN| is not
 * representable, so the result is still INT64_MIN).
 */
static uint32_t cc_calc_abs_64(int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3;
    }
    return dst ? 2 : 0;
}
205 static uint32_t cc_calc_nabs_64(int64_t dst)
/*
 * LOAD COMPLEMENT (64-bit): cc 0 = zero, 1 = negative, 2 = positive,
 * 3 = overflow (negating INT64_MIN).
 */
static uint32_t cc_calc_comp_64(int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3;
    }
    if (dst < 0) {
        return 1;
    }
    return dst > 0 ? 2 : 0;
}
/*
 * Signed 32-bit add: cc 0 = zero, 1 = negative, 2 = positive,
 * 3 = overflow.
 */
static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar)
{
    /* Overflow iff both addends share a sign that the result lacks. */
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3;
    }
    if (ar < 0) {
        return 1;
    }
    return ar > 0 ? 2 : 0;
}
/*
 * Unsigned (logical) 32-bit add: cc bit 1 = carry-out, cc bit 0 =
 * result non-zero.
 */
static uint32_t cc_calc_addu_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    uint32_t nonzero = (ar != 0);
    uint32_t carry = (ar < a1);

    return nonzero + 2 * carry;
}
/*
 * Unsigned 32-bit add-with-carry: same cc layout as cc_calc_addu_32;
 * a2 + carry_in is recovered from ar - a1.
 */
static uint32_t cc_calc_addc_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    uint32_t a2c = ar - a1;   /* a2 + carry_in */
    int carry_out;

    /* Either a2 + carry_in wrapped, or a1 + a2c wrapped. */
    carry_out = (a2c < a2) || (ar < a1);
    return (ar != 0) + (carry_out ? 2 : 0);
}
/*
 * Signed 32-bit subtract: cc 0 = zero, 1 = negative, 2 = positive,
 * 3 = overflow.
 */
static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
{
    /* Overflow iff operand signs differ and the result matches a2. */
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3;
    }
    if (ar < 0) {
        return 1;
    }
    return ar > 0 ? 2 : 0;
}
269 static uint32_t cc_calc_subu_32(uint32_t a1, uint32_t a2, uint32_t ar)
/*
 * Unsigned 32-bit subtract-with-borrow: cc bit 1 = no borrow-out,
 * cc bit 0 = result non-zero.  The borrow-in is inferred from ar
 * differing from a1 - a2.
 */
static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    int borrowed_in = (ar != a1 - a2);
    int borrow_out = borrowed_in ? (a2 >= a1) : (a2 > a1);

    return (ar != 0) + 2 * !borrow_out;
}
/*
 * LOAD POSITIVE (32-bit), cc on the already-computed result:
 * 0 = zero, 2 = positive, 3 = overflow (|INT32_MIN|).
 */
static uint32_t cc_calc_abs_32(int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    }
    return dst ? 2 : 0;
}
306 static uint32_t cc_calc_nabs_32(int32_t dst)
/*
 * LOAD COMPLEMENT (32-bit): cc 0 = zero, 1 = negative, 2 = positive,
 * 3 = overflow (negating INT32_MIN).
 */
static uint32_t cc_calc_comp_32(int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    }
    if (dst < 0) {
        return 1;
    }
    return dst > 0 ? 2 : 0;
}
/*
 * INSERT CHARACTERS UNDER MASK: cc 0 = all inserted bits zero (or
 * empty mask), 1 = leftmost inserted bit one, 2 = leftmost inserted
 * bit zero but some inserted bit one.
 */
static uint32_t cc_calc_icm(uint64_t mask, uint64_t val)
{
    if ((val & mask) == 0) {
        return 0;
    }
    /* mask != 0 here, so the builtin is well defined (== clz64(mask)). */
    if ((int64_t)(val << __builtin_clzll(mask)) < 0) {
        return 1;
    }
    return 2;
}
/*
 * SHIFT LEFT SINGLE (32-bit): the sign bit is untouched while the 31
 * magnitude bits shift left.  cc 3 = overflow (some bit unlike the
 * sign was shifted out of, or into, the sign position); otherwise the
 * cc reflects the sign of the result (0 zero, 1 negative, 2 positive).
 *
 * Fixes vs. the previous version:
 *  - shift == 0 no longer evaluates `x << 32` (undefined behaviour);
 *  - the overflow mask now also covers bit (31 - shift), the highest
 *    magnitude bit that lands in the sign position, so cases such as
 *    0x40000000 << 1 are correctly flagged as overflow.
 * Assumes 0 <= shift <= 31 -- TODO confirm the translator masks the
 * shift count to this range before calling.
 */
static uint32_t cc_calc_sla_32(uint32_t src, int shift)
{
    uint32_t sign = 1U << 31;
    /* The sign bit plus every magnitude bit that reaches it. */
    uint32_t mask = ~0U << (31 - shift);
    uint32_t match = (src & sign) ? mask : 0;
    int32_t r;

    /* Overflow unless all masked bits equal the sign bit. */
    if ((src & mask) != match) {
        return 3;
    }

    /* Shift the magnitude, re-attach the original sign. */
    r = ((src << shift) & ~sign) | (src & sign);
    if (r == 0) {
        return 0;
    }
    return r < 0 ? 1 : 2;
}
/*
 * SHIFT LEFT SINGLE (64-bit): the sign bit is untouched while the 63
 * magnitude bits shift left.  cc 3 = overflow (some bit unlike the
 * sign was shifted out of, or into, the sign position); otherwise the
 * cc reflects the sign of the result (0 zero, 1 negative, 2 positive).
 *
 * Fixes vs. the previous version:
 *  - shift == 0 no longer evaluates `x << 64` (undefined behaviour);
 *  - the overflow mask now also covers bit (63 - shift), the highest
 *    magnitude bit that lands in the sign position, so overflows such
 *    as (1 << 62) << 1 are detected.
 * Assumes 0 <= shift <= 63 -- TODO confirm the translator masks the
 * shift count to this range before calling.
 */
static uint32_t cc_calc_sla_64(uint64_t src, int shift)
{
    uint64_t sign = 1ULL << 63;
    /* The sign bit plus every magnitude bit that reaches it. */
    uint64_t mask = ~0ULL << (63 - shift);
    uint64_t match = (src & sign) ? mask : 0;
    int64_t r;

    /* Overflow unless all masked bits equal the sign bit. */
    if ((src & mask) != match) {
        return 3;
    }

    /* Shift the magnitude, re-attach the original sign. */
    r = ((src << shift) & ~sign) | (src & sign);
    if (r == 0) {
        return 0;
    }
    return r < 0 ? 1 : 2;
}
393 static uint32_t cc_calc_flogr(uint64_t dst)
398 static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
399 uint64_t src, uint64_t dst, uint64_t vr)
401 S390CPU *cpu = s390_env_get_cpu(env);
409 /* cc_op value _is_ cc */
413 r = cc_calc_ltgt0_32(dst);
416 r = cc_calc_ltgt0_64(dst);
419 r = cc_calc_ltgt_32(src, dst);
422 r = cc_calc_ltgt_64(src, dst);
424 case CC_OP_LTUGTU_32:
425 r = cc_calc_ltugtu_32(src, dst);
427 case CC_OP_LTUGTU_64:
428 r = cc_calc_ltugtu_64(src, dst);
431 r = cc_calc_tm_32(src, dst);
434 r = cc_calc_tm_64(src, dst);
440 r = cc_calc_add_64(src, dst, vr);
443 r = cc_calc_addu_64(src, dst, vr);
446 r = cc_calc_addc_64(src, dst, vr);
449 r = cc_calc_sub_64(src, dst, vr);
452 r = cc_calc_subu_64(src, dst, vr);
455 r = cc_calc_subb_64(src, dst, vr);
458 r = cc_calc_abs_64(dst);
461 r = cc_calc_nabs_64(dst);
464 r = cc_calc_comp_64(dst);
468 r = cc_calc_add_32(src, dst, vr);
471 r = cc_calc_addu_32(src, dst, vr);
474 r = cc_calc_addc_32(src, dst, vr);
477 r = cc_calc_sub_32(src, dst, vr);
480 r = cc_calc_subu_32(src, dst, vr);
483 r = cc_calc_subb_32(src, dst, vr);
486 r = cc_calc_abs_32(dst);
489 r = cc_calc_nabs_32(dst);
492 r = cc_calc_comp_32(dst);
496 r = cc_calc_icm(src, dst);
499 r = cc_calc_sla_32(src, dst);
502 r = cc_calc_sla_64(src, dst);
505 r = cc_calc_flogr(dst);
509 r = set_cc_nz_f32(dst);
512 r = set_cc_nz_f64(dst);
515 r = set_cc_nz_f128(make_float128(src, dst));
519 cpu_abort(CPU(cpu), "Unknown CC operation: %s\n", cc_name(cc_op));
522 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__,
523 cc_name(cc_op), src, dst, vr, r);
527 uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
530 return do_calc_cc(env, cc_op, src, dst, vr);
533 uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
534 uint64_t dst, uint64_t vr)
536 return do_calc_cc(env, cc_op, src, dst, vr);
539 #ifndef CONFIG_USER_ONLY
540 void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
542 load_psw(env, mask, addr);
543 cpu_loop_exit(CPU(s390_env_get_cpu(env)));
546 void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
548 HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1);
550 switch (a1 & 0xf00) {
552 env->psw.mask &= ~PSW_MASK_ASC;
553 env->psw.mask |= PSW_ASC_PRIMARY;
556 env->psw.mask &= ~PSW_MASK_ASC;
557 env->psw.mask |= PSW_ASC_SECONDARY;
560 env->psw.mask &= ~PSW_MASK_ASC;
561 env->psw.mask |= PSW_ASC_HOME;
564 HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1);
565 program_interrupt(env, PGM_SPECIFICATION, 2);