]> Git Repo - qemu.git/blame - target/microblaze/translate.c
target/microblaze: Tidy mb_cpu_dump_state
[qemu.git] / target / microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
dcb32f1d 25#include "tcg/tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
4acb54ba 27#include "microblaze-decode.h"
f08b6170 28#include "exec/cpu_ldst.h"
2ef6175a 29#include "exec/helper-gen.h"
77fc6f5e 30#include "exec/translator.h"
90c84c56 31#include "qemu/qemu-print.h"
4acb54ba 32
a7e30d84 33#include "trace-tcg.h"
508127e2 34#include "exec/log.h"
a7e30d84 35
4acb54ba
EI
/*
 * Extract the inclusive bit range [start, end] from src.
 * All three arguments are fully parenthesized so callers may pass
 * arbitrary expressions without precedence surprises.
 */
#define EXTRACT_FIELD(src, start, end) \
    (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
38
77fc6f5e
LV
39/* is_jmp field values */
40#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
41#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
77fc6f5e 42
cfeea807 43static TCGv_i32 cpu_R[32];
0f96e96b 44static TCGv_i32 cpu_pc;
3e0e16ae 45static TCGv_i32 cpu_msr;
1074c0fb 46static TCGv_i32 cpu_msr_c;
9b158558
RH
47static TCGv_i32 cpu_imm;
48static TCGv_i32 cpu_btaken;
0f96e96b 49static TCGv_i32 cpu_btarget;
9b158558
RH
50static TCGv_i32 cpu_iflags;
51static TCGv cpu_res_addr;
52static TCGv_i32 cpu_res_val;
4acb54ba 53
022c62cb 54#include "exec/gen-icount.h"
4acb54ba
EI
55
56/* This is the state at translation time. */
57typedef struct DisasContext {
d4705ae0 58 DisasContextBase base;
0063ebd6 59 MicroBlazeCPU *cpu;
4acb54ba 60
683a247e
RH
61 /* TCG op of the current insn_start. */
62 TCGOp *insn_start;
63
20800179
RH
64 TCGv_i32 r0;
65 bool r0_set;
66
4acb54ba
EI
67 /* Decoder. */
68 int type_b;
69 uint32_t ir;
d7ecb757 70 uint32_t ext_imm;
4acb54ba
EI
71 uint8_t opcode;
72 uint8_t rd, ra, rb;
73 uint16_t imm;
74
75 unsigned int cpustate_changed;
683a247e 76 unsigned int tb_flags;
6f9642d7 77 unsigned int tb_flags_to_set;
287b1def 78 int mem_index;
4acb54ba 79
844bab60
EI
80#define JMP_NOJMP 0
81#define JMP_DIRECT 1
82#define JMP_DIRECT_CC 2
83#define JMP_INDIRECT 3
4acb54ba
EI
84 unsigned int jmp;
85 uint32_t jmp_pc;
86
87 int abort_at_next_insn;
4acb54ba
EI
88} DisasContext;
89
20800179
RH
90static int typeb_imm(DisasContext *dc, int x)
91{
92 if (dc->tb_flags & IMM_FLAG) {
93 return deposit32(dc->ext_imm, 0, 16, x);
94 }
95 return x;
96}
97
44d1432b
RH
98/* Include the auto-generated decoder. */
99#include "decode-insns.c.inc"
100
683a247e 101static void t_sync_flags(DisasContext *dc)
4acb54ba 102{
4abf79a4 103 /* Synch the tb dependent flags between translator and runtime. */
683a247e
RH
104 if ((dc->tb_flags ^ dc->base.tb->flags) & ~MSR_TB_MASK) {
105 tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & ~MSR_TB_MASK);
4acb54ba
EI
106 }
107}
108
d8e59c4a
RH
109static inline void sync_jmpstate(DisasContext *dc)
110{
111 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
112 if (dc->jmp == JMP_DIRECT) {
113 tcg_gen_movi_i32(cpu_btaken, 1);
114 }
115 dc->jmp = JMP_INDIRECT;
116 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
117 }
118}
119
41ba37c4 120static void gen_raise_exception(DisasContext *dc, uint32_t index)
4acb54ba
EI
121{
122 TCGv_i32 tmp = tcg_const_i32(index);
123
64254eba 124 gen_helper_raise_exception(cpu_env, tmp);
4acb54ba 125 tcg_temp_free_i32(tmp);
d4705ae0 126 dc->base.is_jmp = DISAS_NORETURN;
4acb54ba
EI
127}
128
41ba37c4
RH
129static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
130{
131 t_sync_flags(dc);
d4705ae0 132 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
41ba37c4
RH
133 gen_raise_exception(dc, index);
134}
135
136static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
137{
138 TCGv_i32 tmp = tcg_const_i32(esr_ec);
139 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
140 tcg_temp_free_i32(tmp);
141
142 gen_raise_exception_sync(dc, EXCP_HW_EXCP);
143}
144
90aa39a1
SF
145static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
146{
147#ifndef CONFIG_USER_ONLY
d4705ae0 148 return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
90aa39a1
SF
149#else
150 return true;
151#endif
152}
153
4acb54ba
EI
154static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
155{
d4705ae0 156 if (dc->base.singlestep_enabled) {
0b46fa08
RH
157 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
158 tcg_gen_movi_i32(cpu_pc, dest);
159 gen_helper_raise_exception(cpu_env, tmp);
160 tcg_temp_free_i32(tmp);
161 } else if (use_goto_tb(dc, dest)) {
4acb54ba 162 tcg_gen_goto_tb(n);
0f96e96b 163 tcg_gen_movi_i32(cpu_pc, dest);
d4705ae0 164 tcg_gen_exit_tb(dc->base.tb, n);
4acb54ba 165 } else {
0f96e96b 166 tcg_gen_movi_i32(cpu_pc, dest);
07ea28b4 167 tcg_gen_exit_tb(NULL, 0);
4acb54ba 168 }
d4705ae0 169 dc->base.is_jmp = DISAS_NORETURN;
4acb54ba
EI
170}
171
9ba8cd45
EI
172/*
173 * Returns true if the insn an illegal operation.
174 * If exceptions are enabled, an exception is raised.
175 */
176static bool trap_illegal(DisasContext *dc, bool cond)
177{
2c32179f 178 if (cond && (dc->tb_flags & MSR_EE)
5143fdf3 179 && dc->cpu->cfg.illegal_opcode_exception) {
41ba37c4 180 gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
9ba8cd45
EI
181 }
182 return cond;
183}
184
bdfc1e88
EI
185/*
186 * Returns true if the insn is illegal in userspace.
187 * If exceptions are enabled, an exception is raised.
188 */
189static bool trap_userspace(DisasContext *dc, bool cond)
190{
287b1def 191 bool cond_user = cond && dc->mem_index == MMU_USER_IDX;
bdfc1e88 192
2c32179f 193 if (cond_user && (dc->tb_flags & MSR_EE)) {
41ba37c4 194 gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
bdfc1e88
EI
195 }
196 return cond_user;
197}
198
d7ecb757 199static int32_t dec_alu_typeb_imm(DisasContext *dc)
61204ce8 200{
d7ecb757 201 tcg_debug_assert(dc->type_b);
20800179 202 return typeb_imm(dc, (int16_t)dc->imm);
61204ce8
EI
203}
204
cfeea807 205static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
4acb54ba
EI
206{
207 if (dc->type_b) {
d7ecb757 208 tcg_gen_movi_i32(cpu_imm, dec_alu_typeb_imm(dc));
9b158558 209 return &cpu_imm;
d7ecb757
RH
210 }
211 return &cpu_R[dc->rb];
4acb54ba
EI
212}
213
20800179 214static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
4acb54ba 215{
20800179
RH
216 if (likely(reg != 0)) {
217 return cpu_R[reg];
218 }
219 if (!dc->r0_set) {
220 if (dc->r0 == NULL) {
221 dc->r0 = tcg_temp_new_i32();
222 }
223 tcg_gen_movi_i32(dc->r0, 0);
224 dc->r0_set = true;
225 }
226 return dc->r0;
227}
4acb54ba 228
20800179
RH
229static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
230{
231 if (likely(reg != 0)) {
232 return cpu_R[reg];
233 }
234 if (dc->r0 == NULL) {
235 dc->r0 = tcg_temp_new_i32();
236 }
237 return dc->r0;
238}
4acb54ba 239
20800179
RH
240static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
241 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
242{
243 TCGv_i32 rd, ra, rb;
40cbf5b7 244
20800179
RH
245 if (arg->rd == 0 && !side_effects) {
246 return true;
40cbf5b7
EI
247 }
248
20800179
RH
249 rd = reg_for_write(dc, arg->rd);
250 ra = reg_for_read(dc, arg->ra);
251 rb = reg_for_read(dc, arg->rb);
252 fn(rd, ra, rb);
253 return true;
254}
255
39cf3864
RH
256static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
257 void (*fn)(TCGv_i32, TCGv_i32))
258{
259 TCGv_i32 rd, ra;
260
261 if (arg->rd == 0 && !side_effects) {
262 return true;
263 }
264
265 rd = reg_for_write(dc, arg->rd);
266 ra = reg_for_read(dc, arg->ra);
267 fn(rd, ra);
268 return true;
269}
270
20800179
RH
271static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
272 void (*fni)(TCGv_i32, TCGv_i32, int32_t))
273{
274 TCGv_i32 rd, ra;
275
276 if (arg->rd == 0 && !side_effects) {
277 return true;
40cbf5b7
EI
278 }
279
20800179
RH
280 rd = reg_for_write(dc, arg->rd);
281 ra = reg_for_read(dc, arg->ra);
282 fni(rd, ra, arg->imm);
283 return true;
284}
285
286static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
287 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
288{
289 TCGv_i32 rd, ra, imm;
290
291 if (arg->rd == 0 && !side_effects) {
292 return true;
4acb54ba 293 }
20800179
RH
294
295 rd = reg_for_write(dc, arg->rd);
296 ra = reg_for_read(dc, arg->ra);
297 imm = tcg_const_i32(arg->imm);
298
299 fn(rd, ra, imm);
300
301 tcg_temp_free_i32(imm);
302 return true;
303}
304
/* Expand a decodetree callback for a type-A (register-register) insn. */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

/* As DO_TYPEA, but gated on a CPU configuration option. */
#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cpu->cfg.CFG && do_typea(dc, a, SE, FN); }

/* Expand a callback for a two-operand (rd, ra) insn. */
#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cpu->cfg.CFG && do_typea0(dc, a, SE, FN); }

/* Expand a callback for a type-B insn with a host immediate. */
#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cpu->cfg.CFG && do_typeb_imm(dc, a, SE, FNI); }

/* Expand a callback for a type-B insn with the immediate in a TCG value. */
#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrap helpers that take cpu_env as their first argument. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }
340
20800179
RH
341/* No input carry, but output carry. */
342static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
343{
344 TCGv_i32 zero = tcg_const_i32(0);
345
346 tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
347
348 tcg_temp_free_i32(zero);
349}
350
351/* Input and output carry. */
352static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
353{
354 TCGv_i32 zero = tcg_const_i32(0);
355 TCGv_i32 tmp = tcg_temp_new_i32();
356
357 tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
358 tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
359
360 tcg_temp_free_i32(tmp);
361 tcg_temp_free_i32(zero);
362}
363
364/* Input carry, but no output carry. */
365static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
366{
367 tcg_gen_add_i32(out, ina, inb);
368 tcg_gen_add_i32(out, out, cpu_msr_c);
369}
370
371DO_TYPEA(add, true, gen_add)
372DO_TYPEA(addc, true, gen_addc)
373DO_TYPEA(addk, false, tcg_gen_add_i32)
374DO_TYPEA(addkc, true, gen_addkc)
375
376DO_TYPEBV(addi, true, gen_add)
377DO_TYPEBV(addic, true, gen_addc)
378DO_TYPEBI(addik, false, tcg_gen_addi_i32)
379DO_TYPEBV(addikc, true, gen_addkc)
380
cb0a0a4c
RH
381static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
382{
383 tcg_gen_andi_i32(out, ina, ~imm);
384}
385
386DO_TYPEA(and, false, tcg_gen_and_i32)
387DO_TYPEBI(andi, false, tcg_gen_andi_i32)
388DO_TYPEA(andn, false, tcg_gen_andc_i32)
389DO_TYPEBI(andni, false, gen_andni)
390
081d8e02
RH
391static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
392{
393 TCGv_i32 tmp = tcg_temp_new_i32();
394 tcg_gen_andi_i32(tmp, inb, 31);
395 tcg_gen_sar_i32(out, ina, tmp);
396 tcg_temp_free_i32(tmp);
397}
398
399static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
400{
401 TCGv_i32 tmp = tcg_temp_new_i32();
402 tcg_gen_andi_i32(tmp, inb, 31);
403 tcg_gen_shr_i32(out, ina, tmp);
404 tcg_temp_free_i32(tmp);
405}
406
407static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
408{
409 TCGv_i32 tmp = tcg_temp_new_i32();
410 tcg_gen_andi_i32(tmp, inb, 31);
411 tcg_gen_shl_i32(out, ina, tmp);
412 tcg_temp_free_i32(tmp);
413}
414
415static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
416{
417 /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
418 int imm_w = extract32(imm, 5, 5);
419 int imm_s = extract32(imm, 0, 5);
420
421 if (imm_w + imm_s > 32 || imm_w == 0) {
422 /* These inputs have an undefined behavior. */
423 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
424 imm_w, imm_s);
425 } else {
426 tcg_gen_extract_i32(out, ina, imm_s, imm_w);
427 }
428}
429
430static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
431{
432 /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
433 int imm_w = extract32(imm, 5, 5);
434 int imm_s = extract32(imm, 0, 5);
435 int width = imm_w - imm_s + 1;
436
437 if (imm_w < imm_s) {
438 /* These inputs have an undefined behavior. */
439 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
440 imm_w, imm_s);
441 } else {
442 tcg_gen_deposit_i32(out, out, ina, imm_s, width);
443 }
444}
445
446DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
447DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
448DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)
449
450DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
451DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
452DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)
453
454DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
455DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
456
39cf3864
RH
457static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
458{
459 tcg_gen_clzi_i32(out, ina, 32);
460}
461
462DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
463
58b48b63
RH
464static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
465{
466 TCGv_i32 lt = tcg_temp_new_i32();
467
468 tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
469 tcg_gen_sub_i32(out, inb, ina);
470 tcg_gen_deposit_i32(out, out, lt, 31, 1);
471 tcg_temp_free_i32(lt);
472}
473
474static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
475{
476 TCGv_i32 lt = tcg_temp_new_i32();
477
478 tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
479 tcg_gen_sub_i32(out, inb, ina);
480 tcg_gen_deposit_i32(out, out, lt, 31, 1);
481 tcg_temp_free_i32(lt);
482}
483
484DO_TYPEA(cmp, false, gen_cmp)
485DO_TYPEA(cmpu, false, gen_cmpu)
a2b0b90e 486
d5aead3d
RH
487ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
488ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
489ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
490ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
491ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
492ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
493ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
494ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
495ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
496ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
497ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)
498
499DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
500DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
501DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
502DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
503DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
504DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
505DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
506DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
507DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
508DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
509DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)
510
511ENV_WRAPPER2(gen_flt, gen_helper_flt)
512ENV_WRAPPER2(gen_fint, gen_helper_fint)
513ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)
514
515DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
516DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
517DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
518
519/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
b1354342
RH
520static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
521{
522 gen_helper_divs(out, cpu_env, inb, ina);
523}
524
525static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
526{
527 gen_helper_divu(out, cpu_env, inb, ina);
528}
529
530DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
531DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
532
e64b2e5c
RH
533static bool trans_imm(DisasContext *dc, arg_imm *arg)
534{
535 dc->ext_imm = arg->imm << 16;
536 tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
6f9642d7 537 dc->tb_flags_to_set = IMM_FLAG;
e64b2e5c
RH
538 return true;
539}
540
97955ceb
RH
541static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
542{
543 TCGv_i32 tmp = tcg_temp_new_i32();
544 tcg_gen_muls2_i32(tmp, out, ina, inb);
545 tcg_temp_free_i32(tmp);
546}
547
548static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
549{
550 TCGv_i32 tmp = tcg_temp_new_i32();
551 tcg_gen_mulu2_i32(tmp, out, ina, inb);
552 tcg_temp_free_i32(tmp);
553}
554
555static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
556{
557 TCGv_i32 tmp = tcg_temp_new_i32();
558 tcg_gen_mulsu2_i32(tmp, out, ina, inb);
559 tcg_temp_free_i32(tmp);
560}
561
562DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
563DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
564DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
565DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
566DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
567
cb0a0a4c
RH
568DO_TYPEA(or, false, tcg_gen_or_i32)
569DO_TYPEBI(ori, false, tcg_gen_ori_i32)
570
607f5767
RH
571static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
572{
573 tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
574}
575
576static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
577{
578 tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
579}
580
581DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
582DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
583DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
584
a2b0b90e
RH
585/* No input carry, but output carry. */
586static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
587{
588 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
589 tcg_gen_sub_i32(out, inb, ina);
590}
591
592/* Input and output carry. */
593static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
594{
595 TCGv_i32 zero = tcg_const_i32(0);
596 TCGv_i32 tmp = tcg_temp_new_i32();
597
598 tcg_gen_not_i32(tmp, ina);
599 tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
600 tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
601
602 tcg_temp_free_i32(zero);
603 tcg_temp_free_i32(tmp);
604}
605
606/* No input or output carry. */
607static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
608{
609 tcg_gen_sub_i32(out, inb, ina);
610}
611
612/* Input carry, no output carry. */
613static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
614{
615 TCGv_i32 nota = tcg_temp_new_i32();
616
617 tcg_gen_not_i32(nota, ina);
618 tcg_gen_add_i32(out, inb, nota);
619 tcg_gen_add_i32(out, out, cpu_msr_c);
620
621 tcg_temp_free_i32(nota);
622}
623
624DO_TYPEA(rsub, true, gen_rsub)
625DO_TYPEA(rsubc, true, gen_rsubc)
626DO_TYPEA(rsubk, false, gen_rsubk)
627DO_TYPEA(rsubkc, true, gen_rsubkc)
628
629DO_TYPEBV(rsubi, true, gen_rsub)
630DO_TYPEBV(rsubic, true, gen_rsubc)
631DO_TYPEBV(rsubik, false, gen_rsubk)
632DO_TYPEBV(rsubikc, true, gen_rsubkc)
633
39cf3864
RH
634DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
635DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)
636
637static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
638{
639 tcg_gen_andi_i32(cpu_msr_c, ina, 1);
640 tcg_gen_sari_i32(out, ina, 1);
641}
642
643static void gen_src(TCGv_i32 out, TCGv_i32 ina)
644{
645 TCGv_i32 tmp = tcg_temp_new_i32();
646
647 tcg_gen_mov_i32(tmp, cpu_msr_c);
648 tcg_gen_andi_i32(cpu_msr_c, ina, 1);
649 tcg_gen_extract2_i32(out, ina, tmp, 1);
650
651 tcg_temp_free_i32(tmp);
652}
653
654static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
655{
656 tcg_gen_andi_i32(cpu_msr_c, ina, 1);
657 tcg_gen_shri_i32(out, ina, 1);
658}
659
660DO_TYPEA0(sra, false, gen_sra)
661DO_TYPEA0(src, false, gen_src)
662DO_TYPEA0(srl, false, gen_srl)
663
664static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
665{
666 tcg_gen_rotri_i32(out, ina, 16);
667}
668
669DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
670DO_TYPEA0(swaph, false, gen_swaph)
671
672static bool trans_wdic(DisasContext *dc, arg_wdic *a)
673{
674 /* Cache operations are nops: only check for supervisor mode. */
675 trap_userspace(dc, true);
676 return true;
677}
678
cb0a0a4c
RH
679DO_TYPEA(xor, false, tcg_gen_xor_i32)
680DO_TYPEBI(xori, false, tcg_gen_xori_i32)
681
d8e59c4a
RH
682static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
683{
684 TCGv ret = tcg_temp_new();
685
686 /* If any of the regs is r0, set t to the value of the other reg. */
687 if (ra && rb) {
688 TCGv_i32 tmp = tcg_temp_new_i32();
689 tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
690 tcg_gen_extu_i32_tl(ret, tmp);
691 tcg_temp_free_i32(tmp);
692 } else if (ra) {
693 tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
694 } else if (rb) {
695 tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
696 } else {
697 tcg_gen_movi_tl(ret, 0);
698 }
699
700 if ((ra == 1 || rb == 1) && dc->cpu->cfg.stackprot) {
701 gen_helper_stackprot(cpu_env, ret);
702 }
703 return ret;
704}
705
706static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
707{
708 TCGv ret = tcg_temp_new();
709
710 /* If any of the regs is r0, set t to the value of the other reg. */
711 if (ra) {
712 TCGv_i32 tmp = tcg_temp_new_i32();
713 tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
714 tcg_gen_extu_i32_tl(ret, tmp);
715 tcg_temp_free_i32(tmp);
716 } else {
717 tcg_gen_movi_tl(ret, (uint32_t)imm);
718 }
719
720 if (ra == 1 && dc->cpu->cfg.stackprot) {
721 gen_helper_stackprot(cpu_env, ret);
722 }
723 return ret;
724}
725
726static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
727{
728 int addr_size = dc->cpu->cfg.addr_size;
729 TCGv ret = tcg_temp_new();
730
731 if (addr_size == 32 || ra == 0) {
732 if (rb) {
733 tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
734 } else {
735 tcg_gen_movi_tl(ret, 0);
736 }
737 } else {
738 if (rb) {
739 tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
740 } else {
741 tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
742 tcg_gen_shli_tl(ret, ret, 32);
743 }
744 if (addr_size < 64) {
745 /* Mask off out of range bits. */
746 tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
747 }
748 }
749 return ret;
750}
751
ab0c8d0f
RH
752static void record_unaligned_ess(DisasContext *dc, int rd,
753 MemOp size, bool store)
754{
755 uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);
756
757 iflags |= ESR_ESS_FLAG;
758 iflags |= rd << 5;
759 iflags |= store * ESR_S;
760 iflags |= (size == MO_32) * ESR_W;
761
762 tcg_set_insn_start_param(dc->insn_start, 1, iflags);
763}
764
d8e59c4a
RH
765static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
766 int mem_index, bool rev)
767{
d8e59c4a
RH
768 MemOp size = mop & MO_SIZE;
769
770 /*
771 * When doing reverse accesses we need to do two things.
772 *
773 * 1. Reverse the address wrt endianness.
774 * 2. Byteswap the data lanes on the way back into the CPU core.
775 */
776 if (rev) {
777 if (size > MO_8) {
778 mop ^= MO_BSWAP;
779 }
780 if (size < MO_32) {
781 tcg_gen_xori_tl(addr, addr, 3 - size);
782 }
783 }
784
d8e59c4a
RH
785 sync_jmpstate(dc);
786
ab0c8d0f
RH
787 if (size > MO_8 &&
788 (dc->tb_flags & MSR_EE) &&
789 dc->cpu->cfg.unaligned_exceptions) {
790 record_unaligned_ess(dc, rd, size, false);
791 mop |= MO_ALIGN;
d8e59c4a
RH
792 }
793
ab0c8d0f 794 tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
d8e59c4a 795
d8e59c4a
RH
796 tcg_temp_free(addr);
797 return true;
798}
799
800static bool trans_lbu(DisasContext *dc, arg_typea *arg)
801{
802 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
803 return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
804}
805
806static bool trans_lbur(DisasContext *dc, arg_typea *arg)
807{
808 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
809 return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
810}
811
812static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
813{
814 if (trap_userspace(dc, true)) {
815 return true;
816 }
817 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
818 return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
819}
820
821static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
822{
823 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
824 return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
825}
826
827static bool trans_lhu(DisasContext *dc, arg_typea *arg)
828{
829 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
830 return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
831}
832
833static bool trans_lhur(DisasContext *dc, arg_typea *arg)
834{
835 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
836 return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
837}
838
839static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
840{
841 if (trap_userspace(dc, true)) {
842 return true;
843 }
844 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
845 return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
846}
847
848static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
849{
850 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
851 return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
852}
853
854static bool trans_lw(DisasContext *dc, arg_typea *arg)
855{
856 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
857 return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
858}
859
860static bool trans_lwr(DisasContext *dc, arg_typea *arg)
861{
862 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
863 return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
864}
865
866static bool trans_lwea(DisasContext *dc, arg_typea *arg)
867{
868 if (trap_userspace(dc, true)) {
869 return true;
870 }
871 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
872 return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
873}
874
875static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
876{
877 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
878 return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
879}
880
881static bool trans_lwx(DisasContext *dc, arg_typea *arg)
882{
883 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
884
885 /* lwx does not throw unaligned access errors, so force alignment */
886 tcg_gen_andi_tl(addr, addr, ~3);
887
d8e59c4a
RH
888 sync_jmpstate(dc);
889
890 tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
891 tcg_gen_mov_tl(cpu_res_addr, addr);
892 tcg_temp_free(addr);
893
894 if (arg->rd) {
895 tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
896 }
897
898 /* No support for AXI exclusive so always clear C */
899 tcg_gen_movi_i32(cpu_msr_c, 0);
900 return true;
901}
902
903static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
904 int mem_index, bool rev)
905{
906 MemOp size = mop & MO_SIZE;
907
908 /*
909 * When doing reverse accesses we need to do two things.
910 *
911 * 1. Reverse the address wrt endianness.
912 * 2. Byteswap the data lanes on the way back into the CPU core.
913 */
914 if (rev) {
915 if (size > MO_8) {
916 mop ^= MO_BSWAP;
917 }
918 if (size < MO_32) {
919 tcg_gen_xori_tl(addr, addr, 3 - size);
920 }
921 }
922
d8e59c4a
RH
923 sync_jmpstate(dc);
924
ab0c8d0f
RH
925 if (size > MO_8 &&
926 (dc->tb_flags & MSR_EE) &&
927 dc->cpu->cfg.unaligned_exceptions) {
928 record_unaligned_ess(dc, rd, size, true);
929 mop |= MO_ALIGN;
d8e59c4a
RH
930 }
931
ab0c8d0f
RH
932 tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
933
d8e59c4a
RH
934 tcg_temp_free(addr);
935 return true;
936}
937
938static bool trans_sb(DisasContext *dc, arg_typea *arg)
939{
940 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
941 return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
942}
943
944static bool trans_sbr(DisasContext *dc, arg_typea *arg)
945{
946 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
947 return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
948}
949
950static bool trans_sbea(DisasContext *dc, arg_typea *arg)
951{
952 if (trap_userspace(dc, true)) {
953 return true;
954 }
955 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
956 return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
957}
958
959static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
960{
961 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
962 return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
963}
964
965static bool trans_sh(DisasContext *dc, arg_typea *arg)
966{
967 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
968 return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
969}
970
971static bool trans_shr(DisasContext *dc, arg_typea *arg)
972{
973 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
974 return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
975}
976
977static bool trans_shea(DisasContext *dc, arg_typea *arg)
978{
979 if (trap_userspace(dc, true)) {
980 return true;
981 }
982 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
983 return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
984}
985
986static bool trans_shi(DisasContext *dc, arg_typeb *arg)
987{
988 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
989 return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
990}
991
992static bool trans_sw(DisasContext *dc, arg_typea *arg)
993{
994 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
995 return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
996}
997
998static bool trans_swr(DisasContext *dc, arg_typea *arg)
999{
1000 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
1001 return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
1002}
1003
1004static bool trans_swea(DisasContext *dc, arg_typea *arg)
1005{
1006 if (trap_userspace(dc, true)) {
1007 return true;
1008 }
1009 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
1010 return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
1011}
1012
1013static bool trans_swi(DisasContext *dc, arg_typeb *arg)
1014{
1015 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
1016 return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
1017}
1018
1019static bool trans_swx(DisasContext *dc, arg_typea *arg)
1020{
1021 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
1022 TCGLabel *swx_done = gen_new_label();
1023 TCGLabel *swx_fail = gen_new_label();
1024 TCGv_i32 tval;
1025
d8e59c4a
RH
1026 sync_jmpstate(dc);
1027
1028 /* swx does not throw unaligned access errors, so force alignment */
1029 tcg_gen_andi_tl(addr, addr, ~3);
1030
1031 /*
1032 * Compare the address vs the one we used during lwx.
1033 * On mismatch, the operation fails. On match, addr dies at the
1034 * branch, but we know we can use the equal version in the global.
1035 * In either case, addr is no longer needed.
1036 */
1037 tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
1038 tcg_temp_free(addr);
1039
1040 /*
1041 * Compare the value loaded during lwx with current contents of
1042 * the reserved location.
1043 */
1044 tval = tcg_temp_new_i32();
1045
1046 tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
1047 reg_for_write(dc, arg->rd),
1048 dc->mem_index, MO_TEUL);
1049
1050 tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
1051 tcg_temp_free_i32(tval);
1052
1053 /* Success */
1054 tcg_gen_movi_i32(cpu_msr_c, 0);
1055 tcg_gen_br(swx_done);
1056
1057 /* Failure */
1058 gen_set_label(swx_fail);
1059 tcg_gen_movi_i32(cpu_msr_c, 1);
1060
1061 gen_set_label(swx_done);
1062
1063 /*
1064 * Prevent the saved address from working again without another ldx.
1065 * Akin to the pseudocode setting reservation = 0.
1066 */
1067 tcg_gen_movi_tl(cpu_res_addr, -1);
1068 return true;
1069}
1070
20800179
RH
1071static bool trans_zero(DisasContext *dc, arg_zero *arg)
1072{
1073 /* If opcode_0_illegal, trap. */
1074 if (dc->cpu->cfg.opcode_0_illegal) {
1075 trap_illegal(dc, true);
1076 return true;
1077 }
1078 /*
1079 * Otherwise, this is "add r0, r0, r0".
1080 * Continue to trans_add so that MSR[C] gets cleared.
1081 */
1082 return false;
4acb54ba
EI
1083}
1084
1074c0fb 1085static void msr_read(DisasContext *dc, TCGv_i32 d)
4acb54ba 1086{
1074c0fb
RH
1087 TCGv_i32 t;
1088
1089 /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
1090 t = tcg_temp_new_i32();
1091 tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
1092 tcg_gen_or_i32(d, cpu_msr, t);
1093 tcg_temp_free_i32(t);
4acb54ba
EI
1094}
1095
1074c0fb 1096static void msr_write(DisasContext *dc, TCGv_i32 v)
4acb54ba
EI
1097{
1098 dc->cpustate_changed = 1;
1074c0fb
RH
1099
1100 /* Install MSR_C. */
1101 tcg_gen_extract_i32(cpu_msr_c, v, 2, 1);
1102
1103 /* Clear MSR_C and MSR_CC; MSR_PVR is not writable, and is always clear. */
1104 tcg_gen_andi_i32(cpu_msr, v, ~(MSR_C | MSR_CC | MSR_PVR));
4acb54ba
EI
1105}
1106
1107static void dec_msr(DisasContext *dc)
1108{
0063ebd6 1109 CPUState *cs = CPU(dc->cpu);
cfeea807 1110 TCGv_i32 t0, t1;
2023e9a3 1111 unsigned int sr, rn;
f0f7e7f7 1112 bool to, clrset, extended = false;
4acb54ba 1113
2023e9a3
EI
1114 sr = extract32(dc->imm, 0, 14);
1115 to = extract32(dc->imm, 14, 1);
1116 clrset = extract32(dc->imm, 15, 1) == 0;
4acb54ba 1117 dc->type_b = 1;
2023e9a3 1118 if (to) {
4acb54ba 1119 dc->cpustate_changed = 1;
f0f7e7f7
EI
1120 }
1121
1122 /* Extended MSRs are only available if addr_size > 32. */
1123 if (dc->cpu->cfg.addr_size > 32) {
1124 /* The E-bit is encoded differently for To/From MSR. */
1125 static const unsigned int e_bit[] = { 19, 24 };
1126
1127 extended = extract32(dc->imm, e_bit[to], 1);
2023e9a3 1128 }
4acb54ba
EI
1129
1130 /* msrclr and msrset. */
2023e9a3
EI
1131 if (clrset) {
1132 bool clr = extract32(dc->ir, 16, 1);
4acb54ba 1133
56837509 1134 if (!dc->cpu->cfg.use_msr_instr) {
1567a005
EI
1135 /* nop??? */
1136 return;
1137 }
1138
bdfc1e88 1139 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
1567a005
EI
1140 return;
1141 }
1142
4acb54ba
EI
1143 if (dc->rd)
1144 msr_read(dc, cpu_R[dc->rd]);
1145
cfeea807
EI
1146 t0 = tcg_temp_new_i32();
1147 t1 = tcg_temp_new_i32();
4acb54ba 1148 msr_read(dc, t0);
cfeea807 1149 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
4acb54ba
EI
1150
1151 if (clr) {
cfeea807
EI
1152 tcg_gen_not_i32(t1, t1);
1153 tcg_gen_and_i32(t0, t0, t1);
4acb54ba 1154 } else
cfeea807 1155 tcg_gen_or_i32(t0, t0, t1);
4acb54ba 1156 msr_write(dc, t0);
cfeea807
EI
1157 tcg_temp_free_i32(t0);
1158 tcg_temp_free_i32(t1);
d4705ae0
RH
1159 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
1160 dc->base.is_jmp = DISAS_UPDATE;
4acb54ba
EI
1161 return;
1162 }
1163
bdfc1e88
EI
1164 if (trap_userspace(dc, to)) {
1165 return;
1567a005
EI
1166 }
1167
4acb54ba
EI
1168#if !defined(CONFIG_USER_ONLY)
1169 /* Catch read/writes to the mmu block. */
1170 if ((sr & ~0xff) == 0x1000) {
f0f7e7f7 1171 TCGv_i32 tmp_ext = tcg_const_i32(extended);
05a9a651
EI
1172 TCGv_i32 tmp_sr;
1173
4acb54ba 1174 sr &= 7;
05a9a651 1175 tmp_sr = tcg_const_i32(sr);
05a9a651 1176 if (to) {
f0f7e7f7 1177 gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
05a9a651 1178 } else {
f0f7e7f7 1179 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
05a9a651
EI
1180 }
1181 tcg_temp_free_i32(tmp_sr);
f0f7e7f7 1182 tcg_temp_free_i32(tmp_ext);
4acb54ba
EI
1183 return;
1184 }
1185#endif
1186
1187 if (to) {
4acb54ba 1188 switch (sr) {
aa28e6d4 1189 case SR_PC:
4acb54ba 1190 break;
aa28e6d4 1191 case SR_MSR:
4acb54ba
EI
1192 msr_write(dc, cpu_R[dc->ra]);
1193 break;
351527b7 1194 case SR_EAR:
dbdb77c4
RH
1195 {
1196 TCGv_i64 t64 = tcg_temp_new_i64();
1197 tcg_gen_extu_i32_i64(t64, cpu_R[dc->ra]);
1198 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUMBState, ear));
1199 tcg_temp_free_i64(t64);
1200 }
aa28e6d4 1201 break;
351527b7 1202 case SR_ESR:
41ba37c4
RH
1203 tcg_gen_st_i32(cpu_R[dc->ra],
1204 cpu_env, offsetof(CPUMBState, esr));
aa28e6d4 1205 break;
ab6dd380 1206 case SR_FSR:
86017ccf
RH
1207 tcg_gen_st_i32(cpu_R[dc->ra],
1208 cpu_env, offsetof(CPUMBState, fsr));
aa28e6d4
RH
1209 break;
1210 case SR_BTR:
ccf628b7
RH
1211 tcg_gen_st_i32(cpu_R[dc->ra],
1212 cpu_env, offsetof(CPUMBState, btr));
aa28e6d4
RH
1213 break;
1214 case SR_EDR:
39db007e
RH
1215 tcg_gen_st_i32(cpu_R[dc->ra],
1216 cpu_env, offsetof(CPUMBState, edr));
4acb54ba 1217 break;
5818dee5 1218 case 0x800:
cfeea807
EI
1219 tcg_gen_st_i32(cpu_R[dc->ra],
1220 cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
1221 break;
1222 case 0x802:
cfeea807
EI
1223 tcg_gen_st_i32(cpu_R[dc->ra],
1224 cpu_env, offsetof(CPUMBState, shr));
5818dee5 1225 break;
4acb54ba 1226 default:
0063ebd6 1227 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
4acb54ba
EI
1228 break;
1229 }
1230 } else {
4acb54ba 1231 switch (sr) {
aa28e6d4 1232 case SR_PC:
d4705ae0 1233 tcg_gen_movi_i32(cpu_R[dc->rd], dc->base.pc_next);
4acb54ba 1234 break;
aa28e6d4 1235 case SR_MSR:
4acb54ba
EI
1236 msr_read(dc, cpu_R[dc->rd]);
1237 break;
351527b7 1238 case SR_EAR:
dbdb77c4
RH
1239 {
1240 TCGv_i64 t64 = tcg_temp_new_i64();
1241 tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
1242 if (extended) {
1243 tcg_gen_extrh_i64_i32(cpu_R[dc->rd], t64);
1244 } else {
1245 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], t64);
1246 }
1247 tcg_temp_free_i64(t64);
a1b48e3a 1248 }
aa28e6d4 1249 break;
351527b7 1250 case SR_ESR:
41ba37c4
RH
1251 tcg_gen_ld_i32(cpu_R[dc->rd],
1252 cpu_env, offsetof(CPUMBState, esr));
aa28e6d4 1253 break;
351527b7 1254 case SR_FSR:
86017ccf
RH
1255 tcg_gen_ld_i32(cpu_R[dc->rd],
1256 cpu_env, offsetof(CPUMBState, fsr));
aa28e6d4 1257 break;
351527b7 1258 case SR_BTR:
ccf628b7
RH
1259 tcg_gen_ld_i32(cpu_R[dc->rd],
1260 cpu_env, offsetof(CPUMBState, btr));
aa28e6d4 1261 break;
7cdae31d 1262 case SR_EDR:
39db007e
RH
1263 tcg_gen_ld_i32(cpu_R[dc->rd],
1264 cpu_env, offsetof(CPUMBState, edr));
4acb54ba 1265 break;
5818dee5 1266 case 0x800:
cfeea807
EI
1267 tcg_gen_ld_i32(cpu_R[dc->rd],
1268 cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
1269 break;
1270 case 0x802:
cfeea807
EI
1271 tcg_gen_ld_i32(cpu_R[dc->rd],
1272 cpu_env, offsetof(CPUMBState, shr));
5818dee5 1273 break;
351527b7 1274 case 0x2000 ... 0x200c:
4acb54ba 1275 rn = sr & 0xf;
cfeea807 1276 tcg_gen_ld_i32(cpu_R[dc->rd],
68cee38a 1277 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
4acb54ba
EI
1278 break;
1279 default:
a47dddd7 1280 cpu_abort(cs, "unknown mfs reg %x\n", sr);
4acb54ba
EI
1281 break;
1282 }
1283 }
ee7dbcf8
EI
1284
1285 if (dc->rd == 0) {
cfeea807 1286 tcg_gen_movi_i32(cpu_R[0], 0);
ee7dbcf8 1287 }
4acb54ba
EI
1288}
1289
4acb54ba 1290static inline void eval_cc(DisasContext *dc, unsigned int cc,
9e6e1828 1291 TCGv_i32 d, TCGv_i32 a)
4acb54ba 1292{
d89b86e9
EI
1293 static const int mb_to_tcg_cc[] = {
1294 [CC_EQ] = TCG_COND_EQ,
1295 [CC_NE] = TCG_COND_NE,
1296 [CC_LT] = TCG_COND_LT,
1297 [CC_LE] = TCG_COND_LE,
1298 [CC_GE] = TCG_COND_GE,
1299 [CC_GT] = TCG_COND_GT,
1300 };
1301
4acb54ba 1302 switch (cc) {
d89b86e9
EI
1303 case CC_EQ:
1304 case CC_NE:
1305 case CC_LT:
1306 case CC_LE:
1307 case CC_GE:
1308 case CC_GT:
9e6e1828 1309 tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
d89b86e9
EI
1310 break;
1311 default:
1312 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1313 break;
4acb54ba
EI
1314 }
1315}
1316
0f96e96b 1317static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
4acb54ba 1318{
0f96e96b 1319 TCGv_i32 zero = tcg_const_i32(0);
e956caf2 1320
0f96e96b 1321 tcg_gen_movcond_i32(TCG_COND_NE, cpu_pc,
9b158558 1322 cpu_btaken, zero,
e956caf2
EI
1323 pc_true, pc_false);
1324
0f96e96b 1325 tcg_temp_free_i32(zero);
4acb54ba
EI
1326}
1327
f91c60f0
EI
1328static void dec_setup_dslot(DisasContext *dc)
1329{
1e521ce3 1330 dc->tb_flags_to_set |= D_FLAG;
7b34f45f 1331 if (dc->type_b && (dc->tb_flags & IMM_FLAG)) {
1e521ce3 1332 dc->tb_flags_to_set |= BIMM_FLAG;
7b34f45f 1333 }
f91c60f0
EI
1334}
1335
4acb54ba
EI
1336static void dec_bcc(DisasContext *dc)
1337{
1338 unsigned int cc;
1339 unsigned int dslot;
1340
1341 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1342 dslot = dc->ir & (1 << 25);
4acb54ba 1343
4acb54ba 1344 if (dslot) {
f91c60f0 1345 dec_setup_dslot(dc);
4acb54ba
EI
1346 }
1347
d7ecb757 1348 if (dc->type_b) {
844bab60 1349 dc->jmp = JMP_DIRECT_CC;
d7ecb757
RH
1350 dc->jmp_pc = dc->base.pc_next + dec_alu_typeb_imm(dc);
1351 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
61204ce8 1352 } else {
23979dc5 1353 dc->jmp = JMP_INDIRECT;
d7ecb757 1354 tcg_gen_addi_i32(cpu_btarget, cpu_R[dc->rb], dc->base.pc_next);
61204ce8 1355 }
9b158558 1356 eval_cc(dc, cc, cpu_btaken, cpu_R[dc->ra]);
4acb54ba
EI
1357}
1358
1359static void dec_br(DisasContext *dc)
1360{
9f6113c7 1361 unsigned int dslot, link, abs, mbar;
4acb54ba
EI
1362
1363 dslot = dc->ir & (1 << 20);
1364 abs = dc->ir & (1 << 19);
1365 link = dc->ir & (1 << 18);
9f6113c7
EI
1366
1367 /* Memory barrier. */
1368 mbar = (dc->ir >> 16) & 31;
1369 if (mbar == 2 && dc->imm == 4) {
badcbf9d
EI
1370 uint16_t mbar_imm = dc->rd;
1371
3f172744
EI
1372 /* Data access memory barrier. */
1373 if ((mbar_imm & 2) == 0) {
1374 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1375 }
1376
5d45de97 1377 /* mbar IMM & 16 decodes to sleep. */
badcbf9d 1378 if (mbar_imm & 16) {
41ba37c4 1379 TCGv_i32 tmp_1;
5d45de97 1380
b4919e7d
EI
1381 if (trap_userspace(dc, true)) {
1382 /* Sleep is a privileged instruction. */
1383 return;
1384 }
1385
5d45de97 1386 t_sync_flags(dc);
41ba37c4
RH
1387
1388 tmp_1 = tcg_const_i32(1);
5d45de97
EI
1389 tcg_gen_st_i32(tmp_1, cpu_env,
1390 -offsetof(MicroBlazeCPU, env)
1391 +offsetof(CPUState, halted));
5d45de97 1392 tcg_temp_free_i32(tmp_1);
41ba37c4 1393
d4705ae0 1394 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
41ba37c4
RH
1395
1396 gen_raise_exception(dc, EXCP_HLT);
5d45de97
EI
1397 return;
1398 }
9f6113c7
EI
1399 /* Break the TB. */
1400 dc->cpustate_changed = 1;
1401 return;
1402 }
1403
d7ecb757
RH
1404 if (abs && link && !dslot) {
1405 if (dc->type_b) {
1406 /* BRKI */
1407 uint32_t imm = dec_alu_typeb_imm(dc);
1408 if (trap_userspace(dc, imm != 8 && imm != 0x18)) {
1409 return;
1410 }
1411 } else {
1412 /* BRK */
1413 if (trap_userspace(dc, true)) {
1414 return;
1415 }
1416 }
1417 }
1418
4acb54ba 1419 if (dslot) {
f91c60f0 1420 dec_setup_dslot(dc);
4acb54ba 1421 }
d7ecb757 1422 if (link && dc->rd) {
d4705ae0 1423 tcg_gen_movi_i32(cpu_R[dc->rd], dc->base.pc_next);
d7ecb757 1424 }
4acb54ba 1425
4acb54ba 1426 if (abs) {
d7ecb757
RH
1427 if (dc->type_b) {
1428 uint32_t dest = dec_alu_typeb_imm(dc);
1429
1430 dc->jmp = JMP_DIRECT;
1431 dc->jmp_pc = dest;
1432 tcg_gen_movi_i32(cpu_btarget, dest);
1433 if (link && !dslot) {
1434 switch (dest) {
1435 case 8:
1436 case 0x18:
1437 gen_raise_exception_sync(dc, EXCP_BREAK);
1438 break;
1439 case 0:
1440 gen_raise_exception_sync(dc, EXCP_DEBUG);
1441 break;
ff21f70a 1442 }
ff21f70a 1443 }
61204ce8 1444 } else {
d7ecb757
RH
1445 dc->jmp = JMP_INDIRECT;
1446 tcg_gen_mov_i32(cpu_btarget, cpu_R[dc->rb]);
1447 if (link && !dslot) {
1448 gen_raise_exception_sync(dc, EXCP_BREAK);
1449 }
4acb54ba 1450 }
d7ecb757
RH
1451 } else if (dc->type_b) {
1452 dc->jmp = JMP_DIRECT;
1453 dc->jmp_pc = dc->base.pc_next + dec_alu_typeb_imm(dc);
1454 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
1455 } else {
1456 dc->jmp = JMP_INDIRECT;
1457 tcg_gen_addi_i32(cpu_btarget, cpu_R[dc->rb], dc->base.pc_next);
4acb54ba 1458 }
d7ecb757 1459 tcg_gen_movi_i32(cpu_btaken, 1);
4acb54ba
EI
1460}
1461
1462static inline void do_rti(DisasContext *dc)
1463{
cfeea807
EI
1464 TCGv_i32 t0, t1;
1465 t0 = tcg_temp_new_i32();
1466 t1 = tcg_temp_new_i32();
3e0e16ae 1467 tcg_gen_mov_i32(t1, cpu_msr);
0a22f8cf
EI
1468 tcg_gen_shri_i32(t0, t1, 1);
1469 tcg_gen_ori_i32(t1, t1, MSR_IE);
cfeea807
EI
1470 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1471
1472 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1473 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1474 msr_write(dc, t1);
cfeea807
EI
1475 tcg_temp_free_i32(t1);
1476 tcg_temp_free_i32(t0);
4acb54ba
EI
1477 dc->tb_flags &= ~DRTI_FLAG;
1478}
1479
1480static inline void do_rtb(DisasContext *dc)
1481{
cfeea807
EI
1482 TCGv_i32 t0, t1;
1483 t0 = tcg_temp_new_i32();
1484 t1 = tcg_temp_new_i32();
3e0e16ae 1485 tcg_gen_mov_i32(t1, cpu_msr);
0a22f8cf 1486 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
cfeea807
EI
1487 tcg_gen_shri_i32(t0, t1, 1);
1488 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1489
1490 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1491 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1492 msr_write(dc, t1);
cfeea807
EI
1493 tcg_temp_free_i32(t1);
1494 tcg_temp_free_i32(t0);
4acb54ba
EI
1495 dc->tb_flags &= ~DRTB_FLAG;
1496}
1497
1498static inline void do_rte(DisasContext *dc)
1499{
cfeea807
EI
1500 TCGv_i32 t0, t1;
1501 t0 = tcg_temp_new_i32();
1502 t1 = tcg_temp_new_i32();
4acb54ba 1503
3e0e16ae 1504 tcg_gen_mov_i32(t1, cpu_msr);
0a22f8cf 1505 tcg_gen_ori_i32(t1, t1, MSR_EE);
cfeea807
EI
1506 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1507 tcg_gen_shri_i32(t0, t1, 1);
1508 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
4acb54ba 1509
cfeea807
EI
1510 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1511 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1512 msr_write(dc, t1);
cfeea807
EI
1513 tcg_temp_free_i32(t1);
1514 tcg_temp_free_i32(t0);
4acb54ba
EI
1515 dc->tb_flags &= ~DRTE_FLAG;
1516}
1517
1518static void dec_rts(DisasContext *dc)
1519{
1520 unsigned int b_bit, i_bit, e_bit;
1521
1522 i_bit = dc->ir & (1 << 21);
1523 b_bit = dc->ir & (1 << 22);
1524 e_bit = dc->ir & (1 << 23);
1525
bdfc1e88
EI
1526 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1527 return;
1528 }
1529
f91c60f0 1530 dec_setup_dslot(dc);
4acb54ba
EI
1531
1532 if (i_bit) {
4acb54ba
EI
1533 dc->tb_flags |= DRTI_FLAG;
1534 } else if (b_bit) {
4acb54ba
EI
1535 dc->tb_flags |= DRTB_FLAG;
1536 } else if (e_bit) {
4acb54ba 1537 dc->tb_flags |= DRTE_FLAG;
11105d67 1538 }
4acb54ba 1539
23979dc5 1540 dc->jmp = JMP_INDIRECT;
9b158558 1541 tcg_gen_movi_i32(cpu_btaken, 1);
0f96e96b 1542 tcg_gen_add_i32(cpu_btarget, cpu_R[dc->ra], *dec_alu_op_b(dc));
4acb54ba
EI
1543}
1544
1545static void dec_null(DisasContext *dc)
1546{
9ba8cd45 1547 if (trap_illegal(dc, true)) {
02b33596
EI
1548 return;
1549 }
d4705ae0
RH
1550 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
1551 (uint32_t)dc->base.pc_next, dc->opcode);
4acb54ba
EI
1552 dc->abort_at_next_insn = 1;
1553}
1554
6d76d23e
EI
1555/* Insns connected to FSL or AXI stream attached devices. */
1556static void dec_stream(DisasContext *dc)
1557{
6d76d23e
EI
1558 TCGv_i32 t_id, t_ctrl;
1559 int ctrl;
1560
bdfc1e88 1561 if (trap_userspace(dc, true)) {
6d76d23e
EI
1562 return;
1563 }
1564
cfeea807 1565 t_id = tcg_temp_new_i32();
6d76d23e 1566 if (dc->type_b) {
cfeea807 1567 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
6d76d23e
EI
1568 ctrl = dc->imm >> 10;
1569 } else {
cfeea807 1570 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
6d76d23e
EI
1571 ctrl = dc->imm >> 5;
1572 }
1573
cfeea807 1574 t_ctrl = tcg_const_i32(ctrl);
6d76d23e
EI
1575
1576 if (dc->rd == 0) {
1577 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1578 } else {
1579 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1580 }
cfeea807
EI
1581 tcg_temp_free_i32(t_id);
1582 tcg_temp_free_i32(t_ctrl);
6d76d23e
EI
1583}
1584
4acb54ba
EI
1585static struct decoder_info {
1586 struct {
1587 uint32_t bits;
1588 uint32_t mask;
1589 };
1590 void (*dec)(DisasContext *dc);
1591} decinfo[] = {
4acb54ba
EI
1592 {DEC_BR, dec_br},
1593 {DEC_BCC, dec_bcc},
1594 {DEC_RTS, dec_rts},
4acb54ba 1595 {DEC_MSR, dec_msr},
6d76d23e 1596 {DEC_STREAM, dec_stream},
4acb54ba
EI
1597 {{0, 0}, dec_null}
1598};
1599
44d1432b 1600static void old_decode(DisasContext *dc, uint32_t ir)
4acb54ba 1601{
4acb54ba
EI
1602 int i;
1603
64254eba 1604 dc->ir = ir;
4acb54ba 1605
4acb54ba
EI
1606 /* bit 2 seems to indicate insn type. */
1607 dc->type_b = ir & (1 << 29);
1608
1609 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1610 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1611 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1612 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1613 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1614
1615 /* Large switch for all insns. */
1616 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1617 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1618 decinfo[i].dec(dc);
1619 break;
1620 }
1621 }
1622}
1623
372122e3 1624static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
4acb54ba 1625{
372122e3
RH
1626 DisasContext *dc = container_of(dcb, DisasContext, base);
1627 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1628 int bound;
4acb54ba 1629
372122e3 1630 dc->cpu = cpu;
683a247e 1631 dc->tb_flags = dc->base.tb->flags;
1e521ce3 1632 dc->jmp = dc->tb_flags & D_FLAG ? JMP_INDIRECT : JMP_NOJMP;
4acb54ba
EI
1633 dc->cpustate_changed = 0;
1634 dc->abort_at_next_insn = 0;
d7ecb757 1635 dc->ext_imm = dc->base.tb->cs_base;
20800179
RH
1636 dc->r0 = NULL;
1637 dc->r0_set = false;
287b1def 1638 dc->mem_index = cpu_mmu_index(&cpu->env, false);
4acb54ba 1639
372122e3
RH
1640 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
1641 dc->base.max_insns = MIN(dc->base.max_insns, bound);
1642}
4acb54ba 1643
372122e3
RH
1644static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
1645{
1646}
4acb54ba 1647
372122e3
RH
1648static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
1649{
683a247e
RH
1650 DisasContext *dc = container_of(dcb, DisasContext, base);
1651
1652 tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
1653 dc->insn_start = tcg_last_op();
372122e3 1654}
4acb54ba 1655
372122e3
RH
1656static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
1657 const CPUBreakpoint *bp)
1658{
1659 DisasContext *dc = container_of(dcb, DisasContext, base);
b933066a 1660
372122e3 1661 gen_raise_exception_sync(dc, EXCP_DEBUG);
4acb54ba 1662
372122e3
RH
1663 /*
1664 * The address covered by the breakpoint must be included in
1665 * [tb->pc, tb->pc + tb->size) in order to for it to be
1666 * properly cleared -- thus we increment the PC here so that
1667 * the logic setting tb->size below does the right thing.
1668 */
1669 dc->base.pc_next += 4;
1670 return true;
1671}
4acb54ba 1672
372122e3
RH
1673static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
1674{
1675 DisasContext *dc = container_of(dcb, DisasContext, base);
1676 CPUMBState *env = cs->env_ptr;
44d1432b 1677 uint32_t ir;
372122e3
RH
1678
1679 /* TODO: This should raise an exception, not terminate qemu. */
1680 if (dc->base.pc_next & 3) {
1681 cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
1682 (uint32_t)dc->base.pc_next);
1683 }
1684
6f9642d7
RH
1685 dc->tb_flags_to_set = 0;
1686
44d1432b
RH
1687 ir = cpu_ldl_code(env, dc->base.pc_next);
1688 if (!decode(dc, ir)) {
1689 old_decode(dc, ir);
1690 }
20800179
RH
1691
1692 if (dc->r0) {
1693 tcg_temp_free_i32(dc->r0);
1694 dc->r0 = NULL;
1695 dc->r0_set = false;
1696 }
1697
6f9642d7
RH
1698 /* Discard the imm global when its contents cannot be used. */
1699 if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
d7ecb757 1700 tcg_gen_discard_i32(cpu_imm);
372122e3 1701 }
6f9642d7 1702
1e521ce3 1703 dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
6f9642d7 1704 dc->tb_flags |= dc->tb_flags_to_set;
372122e3
RH
1705 dc->base.pc_next += 4;
1706
1e521ce3 1707 if (dc->jmp != JMP_NOJMP && !(dc->tb_flags & D_FLAG)) {
372122e3
RH
1708 if (dc->tb_flags & DRTI_FLAG) {
1709 do_rti(dc);
4acb54ba 1710 }
372122e3
RH
1711 if (dc->tb_flags & DRTB_FLAG) {
1712 do_rtb(dc);
ed2803da 1713 }
372122e3
RH
1714 if (dc->tb_flags & DRTE_FLAG) {
1715 do_rte(dc);
1716 }
372122e3 1717 dc->base.is_jmp = DISAS_JUMP;
4acb54ba
EI
1718 }
1719
372122e3
RH
1720 /* Force an exit if the per-tb cpu state has changed. */
1721 if (dc->base.is_jmp == DISAS_NEXT && dc->cpustate_changed) {
d4705ae0 1722 dc->base.is_jmp = DISAS_UPDATE;
372122e3 1723 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
4acb54ba 1724 }
372122e3
RH
1725}
1726
1727static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
1728{
1729 DisasContext *dc = container_of(dcb, DisasContext, base);
1730
1731 assert(!dc->abort_at_next_insn);
4acb54ba 1732
d4705ae0 1733 if (dc->base.is_jmp == DISAS_NORETURN) {
372122e3
RH
1734 /* We have already exited the TB. */
1735 return;
1736 }
1737
1738 t_sync_flags(dc);
1739 if (dc->tb_flags & D_FLAG) {
1740 sync_jmpstate(dc);
1741 dc->jmp = JMP_NOJMP;
1742 }
1743
1744 switch (dc->base.is_jmp) {
1745 case DISAS_TOO_MANY:
1746 assert(dc->jmp == JMP_NOJMP);
1747 gen_goto_tb(dc, 0, dc->base.pc_next);
1748 return;
6c5f738d 1749
372122e3
RH
1750 case DISAS_UPDATE:
1751 assert(dc->jmp == JMP_NOJMP);
1752 if (unlikely(cs->singlestep_enabled)) {
1753 gen_raise_exception(dc, EXCP_DEBUG);
1754 } else {
1755 tcg_gen_exit_tb(NULL, 0);
6c5f738d 1756 }
372122e3
RH
1757 return;
1758
1759 case DISAS_JUMP:
1760 switch (dc->jmp) {
1761 case JMP_INDIRECT:
1762 {
1763 TCGv_i32 tmp_pc = tcg_const_i32(dc->base.pc_next);
1764 eval_cond_jmp(dc, cpu_btarget, tmp_pc);
1765 tcg_temp_free_i32(tmp_pc);
1766
1767 if (unlikely(cs->singlestep_enabled)) {
1768 gen_raise_exception(dc, EXCP_DEBUG);
1769 } else {
1770 tcg_gen_exit_tb(NULL, 0);
1771 }
1772 }
1773 return;
1774
1775 case JMP_DIRECT_CC:
1776 {
1777 TCGLabel *l1 = gen_new_label();
1778 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_btaken, 0, l1);
1779 gen_goto_tb(dc, 1, dc->base.pc_next);
1780 gen_set_label(l1);
1781 }
1782 /* fall through */
1783
1784 case JMP_DIRECT:
1785 gen_goto_tb(dc, 0, dc->jmp_pc);
1786 return;
4acb54ba 1787 }
372122e3 1788 /* fall through */
0a7df5da 1789
372122e3
RH
1790 default:
1791 g_assert_not_reached();
1792 }
1793}
4acb54ba 1794
372122e3
RH
1795static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
1796{
372122e3
RH
1797 qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
1798 log_target_disas(cs, dcb->pc_first, dcb->tb->size);
372122e3
RH
1799}
1800
1801static const TranslatorOps mb_tr_ops = {
1802 .init_disas_context = mb_tr_init_disas_context,
1803 .tb_start = mb_tr_tb_start,
1804 .insn_start = mb_tr_insn_start,
1805 .breakpoint_check = mb_tr_breakpoint_check,
1806 .translate_insn = mb_tr_translate_insn,
1807 .tb_stop = mb_tr_tb_stop,
1808 .disas_log = mb_tr_disas_log,
1809};
1810
1811void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
1812{
1813 DisasContext dc;
1814 translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
4acb54ba
EI
1815}
1816
90c84c56 1817void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
4acb54ba 1818{
878096ee
AF
1819 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1820 CPUMBState *env = &cpu->env;
0c3da918 1821 uint32_t iflags;
4acb54ba
EI
1822 int i;
1823
0c3da918
RH
1824 qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
1825 env->pc, env->msr,
2e5282ca
RH
1826 (env->msr & MSR_UM) ? "user" : "kernel",
1827 (env->msr & MSR_UMS) ? "user" : "kernel",
1828 (bool)(env->msr & MSR_EIP),
1829 (bool)(env->msr & MSR_IE));
0c3da918
RH
1830
1831 iflags = env->iflags;
1832 qemu_fprintf(f, "iflags: 0x%08x", iflags);
1833 if (iflags & IMM_FLAG) {
1834 qemu_fprintf(f, " IMM(0x%08x)", env->imm);
1835 }
1836 if (iflags & BIMM_FLAG) {
1837 qemu_fprintf(f, " BIMM");
1838 }
1839 if (iflags & D_FLAG) {
1840 qemu_fprintf(f, " D(btaken=%d btarget=0x%08x)",
1841 env->btaken, env->btarget);
1842 }
1843 if (iflags & DRTI_FLAG) {
1844 qemu_fprintf(f, " DRTI");
1845 }
1846 if (iflags & DRTE_FLAG) {
1847 qemu_fprintf(f, " DRTE");
1848 }
1849 if (iflags & DRTB_FLAG) {
1850 qemu_fprintf(f, " DRTB");
1851 }
1852 if (iflags & ESR_ESS_FLAG) {
1853 qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
1854 }
1855
1856 qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
1857 "ear=0x%016" PRIx64 " slr=0x%x shr=0x%x\n",
1858 env->esr, env->fsr, env->btr, env->edr,
1859 env->ear, env->slr, env->shr);
1860
2ead1b18 1861 for (i = 0; i < 12; i++) {
0c3da918
RH
1862 qemu_fprintf(f, "rpvr%-2d=%08x%c",
1863 i, env->pvr.regs[i], i % 4 == 3 ? '\n' : ' ');
2ead1b18 1864 }
17c52a43 1865
4acb54ba 1866 for (i = 0; i < 32; i++) {
0c3da918
RH
1867 qemu_fprintf(f, "r%2.2d=%08x%c",
1868 i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
1869 }
1870 qemu_fprintf(f, "\n");
4acb54ba
EI
1871}
1872
cd0c24f9
AF
1873void mb_tcg_init(void)
1874{
480d29a8
RH
1875#define R(X) { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
1876#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }
1877
1878 static const struct {
1879 TCGv_i32 *var; int ofs; char name[8];
1880 } i32s[] = {
1881 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
1882 R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
1883 R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
1884 R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
1885
1886 SP(pc),
1887 SP(msr),
1074c0fb 1888 SP(msr_c),
480d29a8
RH
1889 SP(imm),
1890 SP(iflags),
1891 SP(btaken),
1892 SP(btarget),
1893 SP(res_val),
1894 };
1895
1896#undef R
1897#undef SP
1898
1899 for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
1900 *i32s[i].var =
1901 tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
1902 }
4acb54ba 1903
480d29a8
RH
1904 cpu_res_addr =
1905 tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
4acb54ba
EI
1906}
1907
bad729e2
RH
1908void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1909 target_ulong *data)
4acb54ba 1910{
76e8187d 1911 env->pc = data[0];
683a247e 1912 env->iflags = data[1];
4acb54ba 1913}
This page took 1.268262 seconds and 4 git commands to generate.