]> Git Repo - qemu.git/blame - target/microblaze/translate.c
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
[qemu.git] / target / microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
ee452036 10 * version 2.1 of the License, or (at your option) any later version.
4acb54ba
EI
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
dcb32f1d 25#include "tcg/tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
f08b6170 27#include "exec/cpu_ldst.h"
2ef6175a 28#include "exec/helper-gen.h"
77fc6f5e 29#include "exec/translator.h"
90c84c56 30#include "qemu/qemu-print.h"
4acb54ba 31
508127e2 32#include "exec/log.h"
a7e30d84 33
4acb54ba
EI
/*
 * Extract the inclusive bit-field [start, end] (LSB-relative numbering)
 * from src.  All macro parameters are fully parenthesized so that
 * expression arguments such as EXTRACT_FIELD(x, s + 1, e) expand
 * correctly; the original form applied >> and - to the raw tokens.
 * Note: requires end - start + 1 < 32 (1 << 32 would be undefined).
 */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
36
77fc6f5e
LV
37/* is_jmp field values */
38#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
17e77796 39#define DISAS_EXIT DISAS_TARGET_1 /* all cpu state modified dynamically */
77fc6f5e 40
f6278ca9
RH
41/* cpu state besides pc was modified dynamically; update pc to next */
42#define DISAS_EXIT_NEXT DISAS_TARGET_2
43/* cpu state besides pc was modified dynamically; update pc to btarget */
44#define DISAS_EXIT_JUMP DISAS_TARGET_3
45
cfeea807 46static TCGv_i32 cpu_R[32];
0f96e96b 47static TCGv_i32 cpu_pc;
3e0e16ae 48static TCGv_i32 cpu_msr;
1074c0fb 49static TCGv_i32 cpu_msr_c;
9b158558 50static TCGv_i32 cpu_imm;
b9c58aab 51static TCGv_i32 cpu_bvalue;
0f96e96b 52static TCGv_i32 cpu_btarget;
9b158558
RH
53static TCGv_i32 cpu_iflags;
54static TCGv cpu_res_addr;
55static TCGv_i32 cpu_res_val;
4acb54ba 56
022c62cb 57#include "exec/gen-icount.h"
4acb54ba
EI
58
59/* This is the state at translation time. */
60typedef struct DisasContext {
d4705ae0 61 DisasContextBase base;
4b893631 62 const MicroBlazeCPUConfig *cfg;
4acb54ba 63
683a247e
RH
64 /* TCG op of the current insn_start. */
65 TCGOp *insn_start;
66
20800179
RH
67 TCGv_i32 r0;
68 bool r0_set;
69
4acb54ba 70 /* Decoder. */
d7ecb757 71 uint32_t ext_imm;
683a247e 72 unsigned int tb_flags;
6f9642d7 73 unsigned int tb_flags_to_set;
287b1def 74 int mem_index;
4acb54ba 75
b9c58aab
RH
76 /* Condition under which to jump, including NEVER and ALWAYS. */
77 TCGCond jmp_cond;
78
79 /* Immediate branch-taken destination, or -1 for indirect. */
80 uint32_t jmp_dest;
4acb54ba
EI
81} DisasContext;
82
20800179
RH
83static int typeb_imm(DisasContext *dc, int x)
84{
85 if (dc->tb_flags & IMM_FLAG) {
86 return deposit32(dc->ext_imm, 0, 16, x);
87 }
88 return x;
89}
90
44d1432b
RH
91/* Include the auto-generated decoder. */
92#include "decode-insns.c.inc"
93
/*
 * Write the translation-time iflags back to the CPU state.
 * Only emit the store when the current flags differ from those the TB
 * was translated with, to avoid redundant tcg moves.
 */
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}
101
/*
 * Emit a call to the raise-exception helper with the given exception
 * index and mark the TB as ended (the helper longjmps out at runtime,
 * so no code after it is reachable).
 */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->base.is_jmp = DISAS_NORETURN;
}
110
41ba37c4
RH
/*
 * As gen_raise_exception, but first synchronize iflags and PC with the
 * CPU state so the exception handler sees the correct context.
 */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
117
/*
 * Raise a hardware exception with exception status register cause
 * esr_ec (stored into env->esr before the EXCP_HW_EXCP is delivered).
 */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
126
4acb54ba
EI
/*
 * End the TB with a jump to dest.  Use a direct goto_tb chain (slot n)
 * when the translator allows it, otherwise fall back to an indirect
 * lookup-and-goto through the TB hash.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
139
9ba8cd45
EI
/*
 * Returns true if the insn an illegal operation.
 * If exceptions are enabled, an exception is raised.
 *
 * Note: the exception is only generated when MSR_EE is set and the CPU
 * configuration enables illegal-opcode exceptions; the caller must
 * still treat a true return as "insn handled".
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}
152
bdfc1e88
EI
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 *
 * The privilege check is based on the translation-time MMU index, so
 * cond only matters when the TB was translated for user mode.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}
166
2a7567a2
RH
/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 *
 * insn_type is only used for the diagnostic message; the architectural
 * behavior of such sequences is undefined, so we merely log and skip.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}
181
/*
 * Return the TCG value to read for register reg.  r0 always reads as
 * zero; a lazily-allocated temp (dc->r0) stands in for it so that
 * generated ops never see a NULL register.  r0_set tracks whether the
 * temp currently holds zero, avoiding a redundant movi per use.
 */
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}
4acb54ba 196
20800179
RH
/*
 * Return the TCG value to write for register reg.  Writes to r0 are
 * architecturally discarded: they go to the scratch temp dc->r0, which
 * is deliberately NOT marked r0_set (its contents are garbage).
 */
static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}
4acb54ba 207
20800179
RH
/*
 * Generic type-A (register-register) translation: rd = fn(ra, rb).
 * When rd is r0 and the op has no side effects (no carry/flag update),
 * skip codegen entirely.  Always returns true ("insn handled").
 */
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}
223
39cf3864
RH
/*
 * Type-A translation with a single register source: rd = fn(ra).
 * Skips codegen for a side-effect-free write to r0.
 */
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}
238
20800179
RH
/*
 * Type-B (register-immediate) translation using a tcg_gen_*i-style
 * callback that takes the immediate as a host integer:
 * rd = fni(ra, imm).  arg->imm has already been combined with any IMM
 * prefix by the decoder (%typeb_imm via typeb_imm()).
 */
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}
253
/*
 * Type-B translation for callbacks that only accept TCG values: the
 * immediate is materialized into a temp so the same generator can
 * serve both the register (type-A) and immediate (type-B) forms.
 */
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_const_i32(arg->imm);

    fn(rd, ra, imm);

    tcg_temp_free_i32(imm);
    return true;
}
272
/*
 * Boilerplate generators for decodetree trans_* callbacks.
 * NAME is the insn name, SE is the "has side effects" flag passed to
 * the do_type* dispatcher, FN/FNI the code generator.  The _CFG
 * variants additionally gate the insn on a MicroBlazeCPUConfig
 * feature field; returning false there lets decodetree fall through
 * to the illegal-opcode path.
 */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrap a helper that needs cpu_env as an extra first argument. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }
308
20800179
RH
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    /* add2 produces the 64-bit sum; the high word is exactly MSR[C]. */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}
318
/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /*
     * Two chained add2 ops: first fold the incoming carry into ina,
     * then add inb; cpu_msr_c accumulates the final carry-out.
     * tmp is needed because out may alias ina/inb.
     */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}
331
/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

/* add family: K = keep carry (no C output), C = use carry input. */
DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)
348
cb0a0a4c
RH
/* andni: and with the complement of the immediate (no andci_i32 exists). */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
358
081d8e02
RH
/* Barrel shift right arithmetic; shift amount is inb masked to 5 bits. */
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}
366
/* Barrel shift right logical; shift amount is inb masked to 5 bits. */
static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}
374
/* Barrel shift left logical; shift amount is inb masked to 5 bits. */
static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}
382
/* Bit-field extract immediate: out = ina[imm_s +: imm_w], zero-extended. */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}
397
/*
 * Bit-field insert immediate: deposit ina into out at [imm_s, imm_w].
 * Note out is both source and destination (read-modify-write).
 */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

/* All barrel-shift insns require the use_barrel config option. */
DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
424
39cf3864
RH
/* Count leading zeros; a zero input yields 32 by definition. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
431
58b48b63
RH
/*
 * Signed compare: out = inb - ina, with bit 31 forced to the signed
 * "inb < ina" result (the MicroBlaze cmp convention).
 */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}
441
/* Unsigned compare: as gen_cmp but using an unsigned less-than. */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
a2b0b90e 454
d5aead3d
RH
/*
 * FPU insns all go through env helpers (they may raise FP exceptions).
 * The two-operand ops require use_fpu; conversions and sqrt require
 * the extended FPU (use_fpu >= 2).
 */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
486
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
/* idiv computes rb / ra (divisor in ra), hence the operand swap. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
500
e64b2e5c
RH
/*
 * IMM prefix insn: latch the high 16 bits of the next type-B insn's
 * immediate.  ext_imm is kept both in the DisasContext (for the
 * decoder) and in cpu_imm (so an exception between IMM and its
 * consumer can restore it).  IMM_FLAG is set for the *next* insn via
 * tb_flags_to_set.
 */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
511
97955ceb
RH
/* High word of the 64-bit signed product; the low word is discarded. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* High word of the 64-bit unsigned product. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* High word of the signed(ina) * unsigned(inb) product. */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* High-word multiplies require the full multiplier (use_hw_mul >= 2). */
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
538
cb0a0a4c
RH
DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

/* pcmpeq/pcmpne produce a 0/1 boolean, not a full mask. */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
555
a2b0b90e
RH
/* No input carry, but output carry. */
/* rsub computes inb - ina; MSR[C] is the borrow-free flag (inb >= ina). */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}
562
/* Input and output carry. */
/* Implemented as inb + ~ina + C, matching the hardware's adder. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}
576
/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}
582
/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    /* out = inb + ~ina + C  (two's-complement subtract with borrow). */
    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);

    tcg_temp_free_i32(nota);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)
604
39cf3864
RH
DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

/* Shift right arithmetic by 1; the shifted-out bit becomes MSR[C]. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}
613
/*
 * Shift right through carry: the old MSR[C] enters at bit 31 and the
 * shifted-out bit becomes the new MSR[C].  The old carry is copied to
 * a temp first because the andi clobbers cpu_msr_c.
 */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);

    tcg_temp_free_i32(tmp);
}
624
/* Shift right logical by 1; the shifted-out bit becomes MSR[C]. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)
634
/* Swap the two 16-bit halfwords == rotate by 16. */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)
642
static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
652
d8e59c4a
RH
/*
 * Compute the effective address ra + rb for a type-A load/store,
 * zero-extended to target_ulong.  Returns a new temp the caller frees.
 * Accesses relative to r1 go through the stack-protector helper when
 * the CPU config enables it.
 */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
676
/*
 * Compute the effective address ra + imm for a type-B load/store,
 * zero-extended to target_ulong.  Returns a new temp the caller frees.
 */
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
696
#ifndef CONFIG_USER_ONLY
/*
 * Compute the extended (up to 64-bit) address for the *ea load/store
 * insns: ra supplies the high 32 bits, rb the low 32 bits.  With a
 * 32-bit configured address size (or ra == r0) this degenerates to
 * just rb.  Out-of-range bits are masked for sizes between 33 and 63.
 */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
d8e59c4a 724
#ifndef CONFIG_USER_ONLY
/*
 * Record the destination register, store flag and word-size flag of a
 * potentially-unaligned access into the iflags operand of the current
 * insn_start op.  The unaligned-access fault handler recovers these
 * ESR_ESS bits from the restored iflags to build the guest ESR.
 */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
#endif
ab0c8d0f 739
d8e59c4a
RH
/*
 * Common load path: load into rd from addr with the given MemOp and
 * MMU index.  rev selects the byte-reversed (lbur/lhur/lwr) variants.
 * Consumes (frees) addr.  Always returns true ("insn handled").
 */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
779
/* Byte loads: plain, reversed, extended-address and immediate forms. */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    /* Extended-address loads are privileged and bypass the MMU. */
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
810
/* Halfword loads: plain, reversed, extended-address and immediate forms. */
static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}
841
/* Word loads: plain, reversed, extended-address and immediate forms. */
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
872
/*
 * lwx: load-exclusive.  Records the reservation address and loaded
 * value in cpu_res_addr/cpu_res_val for the matching swx.
 */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);
    tcg_temp_free(addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
892
/*
 * Common store path, mirroring do_load: store rd to addr with the
 * given MemOp and MMU index; rev selects the byte-reversed variants.
 * Consumes (frees) addr.  Always returns true ("insn handled").
 */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
932
/* Byte stores: plain, reversed, extended-address and immediate forms. */
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
963
/* Halfword stores: plain, reversed, extended-address and immediate forms. */
static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}
994
/* Word stores: plain, reversed, extended-address and immediate forms. */
static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
1025
/*
 * swx: store-exclusive.  Succeeds (MSR[C] = 0) only if the address
 * matches the lwx reservation and the memory word still holds the
 * value loaded by lwx (checked with an atomic cmpxchg).  The
 * reservation is always invalidated afterwards.
 */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
    tcg_temp_free(addr);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
    tcg_temp_free_i32(tval);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1075
/*
 * Request that the next insn be treated as a delay slot (D_FLAG).
 * For type-b branches issued under a pending imm prefix, also record
 * BIMM_FLAG so the slot knows the branch carried an immediate.
 */
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}
1083
/*
 * Common expansion for unconditional branches.
 * dest_rb >= 0 selects a register target; dest_rb < 0 uses dest_imm.
 * delay: branch has a delay slot.  abs: target is absolute, not
 * pc-relative.  link: register number to receive the return address
 * (0 for no link).  Always "taken": jmp_cond is set to ALWAYS.
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget. */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        /* Register target: destination unknown at translate time. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
1112
/*
 * Expand the unconditional branch pairs: NAME takes the target from a
 * register (type-a), NAMEI from an immediate (type-b).
 * DELAY = delay slot, ABS = absolute target, LINK = write rd.
 */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
1125
/*
 * Common expansion for conditional branches (compare rA against zero).
 * dest_rb >= 0 selects a register target; dest_rb < 0 uses dest_imm.
 * btarget ends up holding the *final* destination: the branch target
 * if the condition holds, otherwise the fall-through address.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot. */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget. */
    if (dest_rb > 0) {
        /* Register target: destination unknown at translate time. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget. */
    zero = tcg_const_i32(0);
    next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(next);

    return true;
}
1163
/*
 * Expand the four variants of each conditional branch: plain,
 * delay-slot (d), immediate (i), and immediate+delay-slot (id).
 */
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
1180
/*
 * brk: privileged break.  Jumps to the address in rb, optionally links
 * the return address into rd, sets MSR[BIP], and kills any lwx
 * reservation.  Ends the TB with DISAS_EXIT since MSR changed.
 */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
1200
/*
 * brki: break to immediate vector.  Vectors 0x8 (syscall) and 0x18
 * (debug) are allowed from userspace; everything else is privileged.
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Any lwx reservation dies on a break. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8: /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default: /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
1247
/*
 * mbar: memory barrier / sleep.  imm bit 1 clear = data barrier,
 * imm bit 4 set = privileged sleep (halt until interrupt).
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        TCGv_i32 tmp_1;

        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        /* Set cs->halted; CPUState sits at a negative offset from env. */
        tmp_1 = tcg_const_i32(1);
        tcg_gen_st_i32(tmp_1, cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));
        tcg_temp_free_i32(tmp_1);

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
1298
/*
 * Common expansion for rtsd/rtid/rtbd/rted.  to_set is the DRT?_FLAG
 * to latch for the delay slot (0 for plain rtsd, which is also the
 * only unprivileged variant).  Target is rA + imm, always taken.
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
1316
/* Expand the four return-from-subroutine/interrupt/break/exception forms. */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1325
20800179
RH
1326static bool trans_zero(DisasContext *dc, arg_zero *arg)
1327{
1328 /* If opcode_0_illegal, trap. */
4b893631 1329 if (dc->cfg->opcode_0_illegal) {
20800179
RH
1330 trap_illegal(dc, true);
1331 return true;
1332 }
1333 /*
1334 * Otherwise, this is "add r0, r0, r0".
1335 * Continue to trans_add so that MSR[C] gets cleared.
1336 */
1337 return false;
4acb54ba
EI
1338}
1339
1074c0fb 1340static void msr_read(DisasContext *dc, TCGv_i32 d)
4acb54ba 1341{
1074c0fb
RH
1342 TCGv_i32 t;
1343
1344 /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
1345 t = tcg_temp_new_i32();
1346 tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
1347 tcg_gen_or_i32(d, cpu_msr, t);
1348 tcg_temp_free_i32(t);
4acb54ba
EI
1349}
1350
/*
 * Common expansion for msrclr/msrset: read old MSR into rd (if any),
 * then clear (set=false) or set (set=true) the bits in arg->imm.
 * Only MSR_C may be touched from userspace.
 */
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        /* Other MSR bits changed: return to the main loop after this insn. */
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}
1387
/* msrclr: clear the MSR bits given by the immediate. */
static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}
1392
/* msrset: set the MSR bits given by the immediate. */
static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}
1397
/*
 * mts: move rA to a special-purpose register.  Privileged.  The
 * extended (e) form is only valid for TLBLO (0x1003).
 */
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    /* Unreachable: trap_userspace(true) always traps in user mode. */
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C. */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    /* Writing a special register may affect translation state. */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
1567a005 1458
9df297a2
RH
1459static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
1460{
1461 TCGv_i32 dest = reg_for_write(dc, arg->rd);
05a9a651 1462
9df297a2
RH
1463 if (arg->e) {
1464 switch (arg->rs) {
1465 case SR_EAR:
1466 {
1467 TCGv_i64 t64 = tcg_temp_new_i64();
1468 tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
1469 tcg_gen_extrh_i64_i32(dest, t64);
1470 tcg_temp_free_i64(t64);
1471 }
1472 return true;
1473#ifndef CONFIG_USER_ONLY
1474 case 0x1003: /* TLBLO */
1475 /* Handled below. */
1476 break;
1477#endif
1478 case 0x2006 ... 0x2009:
1479 /* High bits of PVR6-9 not implemented. */
1480 tcg_gen_movi_i32(dest, 0);
1481 return true;
1482 default:
1483 qemu_log_mask(LOG_GUEST_ERROR,
1484 "Invalid extended mfs reg 0x%x\n", arg->rs);
1485 return true;
05a9a651 1486 }
4acb54ba 1487 }
4acb54ba 1488
9df297a2
RH
1489 switch (arg->rs) {
1490 case SR_PC:
1491 tcg_gen_movi_i32(dest, dc->base.pc_next);
1492 break;
1493 case SR_MSR:
1494 msr_read(dc, dest);
1495 break;
1496 case SR_EAR:
1497 {
1498 TCGv_i64 t64 = tcg_temp_new_i64();
1499 tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
1500 tcg_gen_extrl_i64_i32(dest, t64);
1501 tcg_temp_free_i64(t64);
4acb54ba 1502 }
9df297a2
RH
1503 break;
1504 case SR_ESR:
1505 tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
1506 break;
1507 case SR_FSR:
1508 tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
1509 break;
1510 case SR_BTR:
1511 tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
1512 break;
1513 case SR_EDR:
1514 tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
1515 break;
1516 case 0x800:
1517 tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
1518 break;
1519 case 0x802:
1520 tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
1521 break;
1522
1523#ifndef CONFIG_USER_ONLY
1524 case 0x1000: /* PID */
1525 case 0x1001: /* ZPR */
1526 case 0x1002: /* TLBX */
1527 case 0x1003: /* TLBLO */
1528 case 0x1004: /* TLBHI */
1529 case 0x1005: /* TLBSX */
1530 {
1531 TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
1532 TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);
1533
1534 gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
1535 tcg_temp_free_i32(tmp_reg);
1536 tcg_temp_free_i32(tmp_ext);
4acb54ba 1537 }
9df297a2
RH
1538 break;
1539#endif
ee7dbcf8 1540
9df297a2
RH
1541 case 0x2000 ... 0x200c:
1542 tcg_gen_ld_i32(dest, cpu_env,
a4bcfc33
RH
1543 offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
1544 - offsetof(MicroBlazeCPU, env));
9df297a2
RH
1545 break;
1546 default:
1547 qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
1548 break;
ee7dbcf8 1549 }
9df297a2 1550 return true;
4acb54ba
EI
1551}
1552
3fb394fd 1553static void do_rti(DisasContext *dc)
4acb54ba 1554{
3fb394fd
RH
1555 TCGv_i32 tmp = tcg_temp_new_i32();
1556
1557 tcg_gen_shri_i32(tmp, cpu_msr, 1);
1558 tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
1559 tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
1560 tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
1561 tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
1562
1563 tcg_temp_free_i32(tmp);
4acb54ba
EI
1564}
1565
3fb394fd 1566static void do_rtb(DisasContext *dc)
4acb54ba 1567{
3fb394fd
RH
1568 TCGv_i32 tmp = tcg_temp_new_i32();
1569
1570 tcg_gen_shri_i32(tmp, cpu_msr, 1);
1571 tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
1572 tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
1573 tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
1574
1575 tcg_temp_free_i32(tmp);
4acb54ba
EI
1576}
1577
3fb394fd 1578static void do_rte(DisasContext *dc)
4acb54ba 1579{
3fb394fd
RH
1580 TCGv_i32 tmp = tcg_temp_new_i32();
1581
1582 tcg_gen_shri_i32(tmp, cpu_msr, 1);
1583 tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
1584 tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
1585 tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
1586 tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
1587
1588 tcg_temp_free_i32(tmp);
4acb54ba
EI
1589}
1590
/* Insns connected to FSL or AXI stream attached devices. */

/*
 * Common expansion for get/getd: read from stream interface into rd.
 * The stream id comes from rb (masked to 4 bits) or from imm;
 * ctrl carries the instruction's control-mode bits.  Privileged.
 */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}
1613
/* get: stream id from the immediate field. */
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}
1618
/* getd: dynamic form, stream id from register rb. */
static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
1623
/*
 * Common expansion for put/putd: write rA to a stream interface.
 * The stream id comes from rb (masked to 4 bits) or from imm;
 * ctrl carries the instruction's control-mode bits.  Privileged.
 */
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}
1645
/* put: stream id from the immediate field. */
static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}
1650
/* putd: dynamic form, stream id from register rb. */
static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
1655
/*
 * Translator hook: initialize per-TB DisasContext state from the TB's
 * flags/cs_base, and bound max_insns so translation never crosses a
 * page boundary.
 */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    /* A TB that begins inside a delay slot must complete the branch. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Number of 4-byte insns remaining until the end of the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
4acb54ba 1674
372122e3
RH
1675static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
1676{
1677}
4acb54ba 1678
372122e3
RH
1679static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
1680{
683a247e
RH
1681 DisasContext *dc = container_of(dcb, DisasContext, base);
1682
1683 tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
1684 dc->insn_start = tcg_last_op();
372122e3 1685}
4acb54ba 1686
372122e3
RH
1687static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
1688{
1689 DisasContext *dc = container_of(dcb, DisasContext, base);
1690 CPUMBState *env = cs->env_ptr;
44d1432b 1691 uint32_t ir;
372122e3
RH
1692
1693 /* TODO: This should raise an exception, not terminate qemu. */
1694 if (dc->base.pc_next & 3) {
1695 cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
1696 (uint32_t)dc->base.pc_next);
1697 }
1698
6f9642d7
RH
1699 dc->tb_flags_to_set = 0;
1700
44d1432b
RH
1701 ir = cpu_ldl_code(env, dc->base.pc_next);
1702 if (!decode(dc, ir)) {
921afa9d 1703 trap_illegal(dc, true);
44d1432b 1704 }
20800179
RH
1705
1706 if (dc->r0) {
1707 tcg_temp_free_i32(dc->r0);
1708 dc->r0 = NULL;
1709 dc->r0_set = false;
1710 }
1711
6f9642d7
RH
1712 /* Discard the imm global when its contents cannot be used. */
1713 if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
d7ecb757 1714 tcg_gen_discard_i32(cpu_imm);
372122e3 1715 }
6f9642d7 1716
1e521ce3 1717 dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
6f9642d7 1718 dc->tb_flags |= dc->tb_flags_to_set;
372122e3
RH
1719 dc->base.pc_next += 4;
1720
b9c58aab 1721 if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
3d35bcc2
RH
1722 /*
1723 * Finish any return-from branch.
3d35bcc2 1724 */
3c745866
RH
1725 uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
1726 if (unlikely(rt_ibe != 0)) {
1727 dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
1728 if (rt_ibe & DRTI_FLAG) {
1729 do_rti(dc);
1730 } else if (rt_ibe & DRTB_FLAG) {
1731 do_rtb(dc);
1732 } else {
1733 do_rte(dc);
1734 }
372122e3 1735 }
3d35bcc2
RH
1736
1737 /* Complete the branch, ending the TB. */
1738 switch (dc->base.is_jmp) {
1739 case DISAS_NORETURN:
1740 /*
1741 * E.g. illegal insn in a delay slot. We've already exited
1742 * and will handle D_FLAG in mb_cpu_do_interrupt.
1743 */
1744 break;
3d35bcc2 1745 case DISAS_NEXT:
3c745866
RH
1746 /*
1747 * Normal insn a delay slot.
1748 * However, the return-from-exception type insns should
1749 * return to the main loop, as they have adjusted MSR.
1750 */
1751 dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
3d35bcc2
RH
1752 break;
1753 case DISAS_EXIT_NEXT:
1754 /*
1755 * E.g. mts insn in a delay slot. Continue with btarget,
1756 * but still return to the main loop.
1757 */
1758 dc->base.is_jmp = DISAS_EXIT_JUMP;
1759 break;
1760 default:
1761 g_assert_not_reached();
1762 }
4acb54ba 1763 }
372122e3
RH
1764}
1765
/*
 * Translator hook: emit the TB epilogue appropriate to how the TB
 * ended — chained goto_tb, exit to the main loop, or (possibly
 * conditional) jump through btarget.
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        /* cpu_pc already updated by the insn. */
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
4acb54ba 1835
372122e3
RH
1836static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
1837{
372122e3
RH
1838 qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
1839 log_target_disas(cs, dcb->pc_first, dcb->tb->size);
372122e3
RH
1840}
1841
/* Hook table consumed by translator_loop for MicroBlaze. */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start = mb_tr_tb_start,
    .insn_start = mb_tr_insn_start,
    .translate_insn = mb_tr_translate_insn,
    .tb_stop = mb_tr_tb_stop,
    .disas_log = mb_tr_disas_log,
};
1850
1851void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
1852{
1853 DisasContext dc;
1854 translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
4acb54ba
EI
1855}
1856
/* Dump CPU registers and translation flags for "info registers" / logging. */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    /* Decode iflags into the symbolic per-insn translation flags. */
    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
1906
/*
 * Create the TCG globals backing the guest register file and the
 * special state (pc, msr, branch/reservation bookkeeping).
 */
void mb_tcg_init(void)
{
#define R(X) { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target_ulong sized, hence the non-_i32 allocator. */
    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1947
/*
 * Rebuild interrupt-relevant CPU state from the values recorded by
 * mb_tr_insn_start (data[0] = pc, data[1] = iflags).
 */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->iflags = data[1];
}
This page took 1.274349 seconds and 4 git commands to generate.