/*
 * S/390 condition code helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

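/*
 * Signed and unsigned comparisons set the standard compare condition
 * code: 0 when the operands are equal, 1 when the first operand is
 * low, 2 when the first operand is high.
 */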
static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t cc_calc_ltgt0_32(int32_t dst)
{
    return cc_calc_ltgt_32(dst, 0);
}

static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t cc_calc_ltgt0_64(int64_t dst)
{
    return cc_calc_ltgt_64(dst, 0);
}

static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

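/*
 * TEST UNDER MASK: CC 0 when the selected bits are all zeros (or the
 * mask is zero), CC 3 when they are all ones, and a mixed result
 * otherwise.  The 64-bit variant additionally distinguishes whether
 * the leftmost selected bit is zero (CC 1) or one (CC 2).
 */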
static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask)
{
    uint32_t r = val & mask;

    if (r == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        return 1;
    }
}

static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask)
{
    uint64_t r = val & mask;

    if (r == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        int top = clz64(mask);
        if ((int64_t)(val << top) < 0) {
            return 2;
        } else {
            return 1;
        }
    }
}

static uint32_t cc_calc_nz(uint64_t dst)
{
    return !!dst;
}

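/*
 * Signed ADD: CC 3 on two's-complement overflow, otherwise CC 0/1/2
 * for a zero/negative/positive result.  ADD LOGICAL encodes "result
 * non-zero" in bit 0 of the CC and the carry in bit 1, giving
 * 0 = zero/no carry, 1 = non-zero/no carry, 2 = zero/carry,
 * 3 = non-zero/carry.
 */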
static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static uint32_t cc_calc_addu_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    return (ar != 0) + 2 * (ar < a1);
}

static uint32_t cc_calc_addc_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    /* Recover a2 + carry_in. */
    uint64_t a2c = ar - a1;
    /* Check for a2+carry_in overflow, then a1+a2c overflow. */
    int carry_out = (a2c < a2) || (ar < a1);

    return (ar != 0) + 2 * carry_out;
}

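/*
 * Signed SUBTRACT: CC 3 on overflow, otherwise CC 0/1/2 for a
 * zero/negative/positive difference.  SUBTRACT LOGICAL reports
 * 1 = non-zero/borrow, 2 = zero/no borrow, 3 = non-zero/no borrow;
 * CC 0 cannot occur because a zero result never involves a borrow.
 */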
static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static uint32_t cc_calc_subu_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    if (ar == 0) {
        return 2;
    } else {
        if (a2 > a1) {
            return 1;
        } else {
            return 3;
        }
    }
}

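/*
 * SUBTRACT LOGICAL WITH BORROW only receives the operands and the
 * result, so the borrow-in has to be reconstructed: ar differs from
 * a1 - a2 exactly when a borrow-in was applied, which shifts the
 * borrow-out test from (a2 > a1) to (a2 >= a1).
 */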
static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    int borrow_out;

    if (ar != a1 - a2) { /* difference means borrow-in */
        borrow_out = (a2 >= a1);
    } else {
        borrow_out = (a2 > a1);
    }

    return (ar != 0) + 2 * !borrow_out;
}

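/*
 * LOAD POSITIVE, LOAD NEGATIVE and LOAD COMPLEMENT: the most negative
 * value has no representable positive counterpart, so LOAD POSITIVE
 * and LOAD COMPLEMENT report CC 3 (overflow) for it; LOAD NEGATIVE
 * cannot overflow.
 */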
static uint32_t cc_calc_abs_64(int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3;
    } else if (dst) {
        return 1;
    } else {
        return 0;
    }
}

static uint32_t cc_calc_nabs_64(int64_t dst)
{
    return !!dst;
}

static uint32_t cc_calc_comp_64(int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3;
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}

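/* 32-bit variants of the arithmetic helpers above. */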
static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static uint32_t cc_calc_addu_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    return (ar != 0) + 2 * (ar < a1);
}

static uint32_t cc_calc_addc_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    /* Recover a2 + carry_in. */
    uint32_t a2c = ar - a1;
    /* Check for a2+carry_in overflow, then a1+a2c overflow. */
    int carry_out = (a2c < a2) || (ar < a1);

    return (ar != 0) + 2 * carry_out;
}

static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static uint32_t cc_calc_subu_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    if (ar == 0) {
        return 2;
    } else {
        if (a2 > a1) {
            return 1;
        } else {
            return 3;
        }
    }
}

static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    int borrow_out;

    if (ar != a1 - a2) { /* difference means borrow-in */
        borrow_out = (a2 >= a1);
    } else {
        borrow_out = (a2 > a1);
    }

    return (ar != 0) + 2 * !borrow_out;
}

static uint32_t cc_calc_abs_32(int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    } else if (dst) {
        return 1;
    } else {
        return 0;
    }
}

static uint32_t cc_calc_nabs_32(int32_t dst)
{
    return !!dst;
}

static uint32_t cc_calc_comp_32(int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}

/* calculate condition code for insert character under mask insn */
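/*
 * CC 0: all inserted bits are zero (or the mask is zero);
 * CC 1: the leftmost inserted bit is one;
 * CC 2: the leftmost inserted bit is zero, but not all of them are.
 */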
static uint32_t cc_calc_icm(uint64_t mask, uint64_t val)
{
    if ((val & mask) == 0) {
        return 0;
    } else {
        int top = clz64(mask);
        if ((int64_t)(val << top) < 0) {
            return 1;
        } else {
            return 2;
        }
    }
}

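/*
 * SHIFT LEFT SINGLE (arithmetic): CC 3 when any bit shifted out of the
 * numeric part differs from the sign bit (overflow), otherwise CC 0/1/2
 * for a zero/negative/positive result.  The sign bit itself is
 * preserved by the shift.
 */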
static uint32_t cc_calc_sla_32(uint32_t src, int shift)
{
    /* The sign bit plus every bit that will be shifted out of the
       numeric part; all of them must equal the sign bit, or the
       shift overflows. */
    uint32_t mask = -1U << (31 - shift);
    uint32_t sign = 1U << 31;
    uint32_t match;
    int32_t r;

    /* Check that the bits shifted out do not change the sign. */
    if (src & sign) {
        match = mask;
    } else {
        match = 0;
    }
    if ((src & mask) != match) {
        /* Overflow. */
        return 3;
    }

    r = ((src << shift) & ~sign) | (src & sign);
    if (r == 0) {
        return 0;
    } else if (r < 0) {
        return 1;
    }
    return 2;
}

static uint32_t cc_calc_sla_64(uint64_t src, int shift)
{
    /* The sign bit plus every bit that will be shifted out of the
       numeric part; all of them must equal the sign bit, or the
       shift overflows. */
    uint64_t mask = -1ULL << (63 - shift);
    uint64_t sign = 1ULL << 63;
    uint64_t match;
    int64_t r;

    /* Check that the bits shifted out do not change the sign. */
    if (src & sign) {
        match = mask;
    } else {
        match = 0;
    }
    if ((src & mask) != match) {
        /* Overflow. */
        return 3;
    }

    r = ((src << shift) & ~sign) | (src & sign);
    if (r == 0) {
        return 0;
    } else if (r < 0) {
        return 1;
    }
    return 2;
}

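/* FIND LEFTMOST ONE: CC 0 when the tested value is zero, CC 2 otherwise. */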
static uint32_t cc_calc_flogr(uint64_t dst)
{
    return dst ? 2 : 0;
}

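/*
 * Compute the condition code from a deferred cc_op and the operands
 * that were saved along with it (src, dst, vr).
 */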
static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
                           uint64_t src, uint64_t dst, uint64_t vr)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint32_t r = 0;

    switch (cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* cc_op value _is_ cc */
        r = cc_op;
        break;
    case CC_OP_LTGT0_32:
        r = cc_calc_ltgt0_32(dst);
        break;
    case CC_OP_LTGT0_64:
        r = cc_calc_ltgt0_64(dst);
        break;
    case CC_OP_LTGT_32:
        r = cc_calc_ltgt_32(src, dst);
        break;
    case CC_OP_LTGT_64:
        r = cc_calc_ltgt_64(src, dst);
        break;
    case CC_OP_LTUGTU_32:
        r = cc_calc_ltugtu_32(src, dst);
        break;
    case CC_OP_LTUGTU_64:
        r = cc_calc_ltugtu_64(src, dst);
        break;
    case CC_OP_TM_32:
        r = cc_calc_tm_32(src, dst);
        break;
    case CC_OP_TM_64:
        r = cc_calc_tm_64(src, dst);
        break;
    case CC_OP_NZ:
        r = cc_calc_nz(dst);
        break;
    case CC_OP_ADD_64:
        r = cc_calc_add_64(src, dst, vr);
        break;
    case CC_OP_ADDU_64:
        r = cc_calc_addu_64(src, dst, vr);
        break;
    case CC_OP_ADDC_64:
        r = cc_calc_addc_64(src, dst, vr);
        break;
    case CC_OP_SUB_64:
        r = cc_calc_sub_64(src, dst, vr);
        break;
    case CC_OP_SUBU_64:
        r = cc_calc_subu_64(src, dst, vr);
        break;
    case CC_OP_SUBB_64:
        r = cc_calc_subb_64(src, dst, vr);
        break;
    case CC_OP_ABS_64:
        r = cc_calc_abs_64(dst);
        break;
    case CC_OP_NABS_64:
        r = cc_calc_nabs_64(dst);
        break;
    case CC_OP_COMP_64:
        r = cc_calc_comp_64(dst);
        break;

    case CC_OP_ADD_32:
        r = cc_calc_add_32(src, dst, vr);
        break;
    case CC_OP_ADDU_32:
        r = cc_calc_addu_32(src, dst, vr);
        break;
    case CC_OP_ADDC_32:
        r = cc_calc_addc_32(src, dst, vr);
        break;
    case CC_OP_SUB_32:
        r = cc_calc_sub_32(src, dst, vr);
        break;
    case CC_OP_SUBU_32:
        r = cc_calc_subu_32(src, dst, vr);
        break;
    case CC_OP_SUBB_32:
        r = cc_calc_subb_32(src, dst, vr);
        break;
    case CC_OP_ABS_32:
        r = cc_calc_abs_32(dst);
        break;
    case CC_OP_NABS_32:
        r = cc_calc_nabs_32(dst);
        break;
    case CC_OP_COMP_32:
        r = cc_calc_comp_32(dst);
        break;

    case CC_OP_ICM:
        r = cc_calc_icm(src, dst);
        break;
    case CC_OP_SLA_32:
        r = cc_calc_sla_32(src, dst);
        break;
    case CC_OP_SLA_64:
        r = cc_calc_sla_64(src, dst);
        break;
    case CC_OP_FLOGR:
        r = cc_calc_flogr(dst);
        break;

    case CC_OP_NZ_F32:
        r = set_cc_nz_f32(dst);
        break;
    case CC_OP_NZ_F64:
        r = set_cc_nz_f64(dst);
        break;
    case CC_OP_NZ_F128:
        r = set_cc_nz_f128(make_float128(src, dst));
        break;

    default:
        cpu_abort(CPU(cpu), "Unknown CC operation: %s\n", cc_name(cc_op));
    }

    HELPER_LOG("%s: %15s 0x%016" PRIx64 " 0x%016" PRIx64 " 0x%016" PRIx64
               " = %d\n", __func__, cc_name(cc_op), src, dst, vr, r);
    return r;
}

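/*
 * calc_cc is the variant used from C code elsewhere in the target;
 * HELPER(calc_cc) is the entry point for TCG-generated code.
 */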
uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}

uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
                         uint64_t dst, uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}

#ifndef CONFIG_USER_ONLY
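/*
 * Install a complete new PSW (mask and address) and leave the current
 * translation block, since the execution context may have changed.
 */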
void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    load_psw(env, mask, addr);
    cpu_loop_exit(CPU(s390_env_get_cpu(env)));
}

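/*
 * SET ADDRESS SPACE CONTROL (FAST): the code in the second-operand
 * address selects the primary (0x000), secondary (0x100) or home
 * (0x300) address space.  Access-register mode (0x200) is not handled
 * here and, like any other unsupported code, ends up raising a
 * specification exception.
 */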
void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
{
    HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1);

    switch (a1 & 0xf00) {
    case 0x000:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_PRIMARY;
        break;
    case 0x100:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_SECONDARY;
        break;
    case 0x300:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_HOME;
        break;
    default:
        qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
        program_interrupt(env, PGM_SPECIFICATION, 2);
        break;
    }
}
#endif