]> Git Repo - qemu.git/blame - target/microblaze/translate.c
tcg: Pass tb and index to tcg_gen_exit_tb separately
[qemu.git] / target / microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
4acb54ba 25#include "tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
4acb54ba 27#include "microblaze-decode.h"
f08b6170 28#include "exec/cpu_ldst.h"
2ef6175a 29#include "exec/helper-gen.h"
77fc6f5e 30#include "exec/translator.h"
4acb54ba 31
a7e30d84 32#include "trace-tcg.h"
508127e2 33#include "exec/log.h"
a7e30d84
LV
34
35
4acb54ba
EI
36#define SIM_COMPAT 0
37#define DISAS_GNU 1
38#define DISAS_MB 1
39#if DISAS_MB && !SIM_COMPAT
40# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
41#else
42# define LOG_DIS(...) do { } while (0)
43#endif
44
45#define D(x)
46
47#define EXTRACT_FIELD(src, start, end) \
48 (((src) >> start) & ((1 << (end - start + 1)) - 1))
49
77fc6f5e
LV
50/* is_jmp field values */
51#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
52#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
53#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
54
cfeea807
EI
55static TCGv_i32 env_debug;
56static TCGv_i32 cpu_R[32];
0a22f8cf 57static TCGv_i64 cpu_SR[14];
cfeea807
EI
58static TCGv_i32 env_imm;
59static TCGv_i32 env_btaken;
43d318b2 60static TCGv_i64 env_btarget;
cfeea807 61static TCGv_i32 env_iflags;
403322ea 62static TCGv env_res_addr;
cfeea807 63static TCGv_i32 env_res_val;
4acb54ba 64
022c62cb 65#include "exec/gen-icount.h"
4acb54ba
EI
66
67/* This is the state at translation time. */
/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;                 /* Guest PC of the insn being translated. */

    /* Decoder.  */
    int type_b;                  /* Non-zero for type-B (immediate) insns. */
    uint32_t ir;                 /* Raw instruction word. */
    uint8_t opcode;
    uint8_t rd, ra, rb;          /* Destination and source register fields. */
    uint16_t imm;                /* 16-bit immediate field. */

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    /* tb dependent flags; synced_flags mirrors what env_iflags holds. */
    unsigned int tb_flags, synced_flags;
    unsigned int clear_imm;      /* Clear IMM_FLAG after this insn. */
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;            /* One of the JMP_* states above. */
    uint32_t jmp_pc;             /* Static branch target for JMP_DIRECT*. */

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
97
/* Printable names for the 32 general-purpose registers, by index. */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
105
/* Printable names for the special registers backing cpu_SR[]. */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};
111
4acb54ba
EI
112static inline void t_sync_flags(DisasContext *dc)
113{
4abf79a4 114 /* Synch the tb dependent flags between translator and runtime. */
4acb54ba 115 if (dc->tb_flags != dc->synced_flags) {
cfeea807 116 tcg_gen_movi_i32(env_iflags, dc->tb_flags);
4acb54ba
EI
117 dc->synced_flags = dc->tb_flags;
118 }
119}
120
/* Emit code that raises exception INDEX at the current insn's PC.
   Flags are synced first so the helper sees a consistent state;
   translation of this block then stops (DISAS_UPDATE).  */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    /* Make SR_PC point at the faulting instruction.  */
    tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
131
90aa39a1
SF
/* Decide whether a direct tb-to-tb jump to DEST is permissible.
   For system emulation the destination must lie in the same guest
   page as this tb; user-mode emulation has no such restriction.  */
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
140
4acb54ba
EI
/* Emit the end-of-tb jump to DEST, using slot N of the direct-jump
   cache when allowed, otherwise falling back to a plain exit.  */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        /* Chainable jump: update PC then exit through slot n.  */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(dc->tb, n);
    } else {
        /* Unchainable: update PC and return to the main loop.  */
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(NULL, 0);
    }
}
152
/* Extract the carry flag into D: MSR bit 31 (the MSR_CC alias kept by
   write_carry) shifted down to bit 0.  */
static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(d, d, 31);
}
158
04ec7df7
EI
159/*
160 * write_carry sets the carry bits in MSR based on bit 0 of v.
161 * v[31:1] are ignored.
162 */
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t0, v);
    /* Deposit bit 0 into MSR_C (bit 2) and the alias MSR_CC (bit 31). */
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 2, 1);
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 31, 1);
    tcg_temp_free_i64(t0);
}
172
65ab5eb4 173static void write_carryi(DisasContext *dc, bool carry)
8cc9b43f 174{
cfeea807
EI
175 TCGv_i32 t0 = tcg_temp_new_i32();
176 tcg_gen_movi_i32(t0, carry);
8cc9b43f 177 write_carry(dc, t0);
cfeea807 178 tcg_temp_free_i32(t0);
8cc9b43f
PC
179}
180
9ba8cd45
EI
181/*
182 * Returns true if the insn an illegal operation.
183 * If exceptions are enabled, an exception is raised.
184 */
/*
 * Returns true if the insn an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    /* The exception fires only with MSR.EE set and the core configured
       to raise illegal-opcode exceptions (PVR2 bit).  */
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond;
}
194
bdfc1e88
EI
195/*
196 * Returns true if the insn is illegal in userspace.
197 * If exceptions are enabled, an exception is raised.
198 */
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond_user;
}
210
61204ce8
EI
211/* True if ALU operand b is a small immediate that may deserve
212 faster treatment. */
213static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
214{
215 /* Immediate insn without the imm prefix ? */
216 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
217}
218
cfeea807 219static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
4acb54ba
EI
220{
221 if (dc->type_b) {
222 if (dc->tb_flags & IMM_FLAG)
cfeea807 223 tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
4acb54ba 224 else
cfeea807 225 tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
4acb54ba
EI
226 return &env_imm;
227 } else
228 return &cpu_R[dc->rb];
229}
230
/* Decode add/addc/addk/addkc (and immediate forms).  Opcode bit 2 (k)
   keeps MSR carry untouched; bit 1 (c) adds carry-in.  */
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        /* Compute the new carry BEFORE writing rd, since rd may alias
           ra/rb; then perform the add and fold in carry-in.  */
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        /* rd == r0: only the carry flag is architecturally visible.  */
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}
284
/* Decode rsub family (and cmp/cmpu, which share the opcode space).
   MicroBlaze subtract is reversed: rd = rb - ra, implemented as
   rb + ~ra + carry with carry-in defaulting to 1.  */
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    /* cmp/cmpu: register form with imm bit 0 set and the k bit.  */
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        /* New carry computed before rd is written (rd may alias inputs). */
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        /* rd == r0: only the carry flag is architecturally visible.  */
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}
355
/* Decode the pattern-compare group (pcmpbf/pcmpeq/pcmpne), selected by
   the low two opcode bits.  Raises an illegal-op trap when the core is
   configured without pattern-compare support.  */
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            /* pcmpeq: rd = (ra == rb).  */
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                    cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            /* pcmpne: rd = (ra != rb).  */
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                    cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}
392
393static void dec_and(DisasContext *dc)
394{
395 unsigned int not;
396
397 if (!dc->type_b && (dc->imm & (1 << 10))) {
398 dec_pattern(dc);
399 return;
400 }
401
402 not = dc->opcode & (1 << 1);
403 LOG_DIS("and%s\n", not ? "n" : "");
404
405 if (!dc->rd)
406 return;
407
408 if (not) {
cfeea807 409 tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba 410 } else
cfeea807 411 tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
412}
413
414static void dec_or(DisasContext *dc)
415{
416 if (!dc->type_b && (dc->imm & (1 << 10))) {
417 dec_pattern(dc);
418 return;
419 }
420
421 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
422 if (dc->rd)
cfeea807 423 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
424}
425
426static void dec_xor(DisasContext *dc)
427{
428 if (!dc->type_b && (dc->imm & (1 << 10))) {
429 dec_pattern(dc);
430 return;
431 }
432
433 LOG_DIS("xor r%d\n", dc->rd);
434 if (dc->rd)
cfeea807 435 tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
436}
437
/* Copy the low 32 bits of MSR into D.  */
static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
}
442
/* Write V into MSR, preserving the read-only PVR bit, and note that
   CPU state changed so the translator ends the tb.  */
static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t;

    t = tcg_temp_new_i64();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_andi_i64(t, t, ~MSR_PVR);
    tcg_gen_andi_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i64(t);
}
456
/* Decode the MSR access group: msrclr/msrset plus mts/mfs to the
   special registers (MSR, EAR, ESR, FSR, BTR, SLR, SHR, PVR, MMU regs). */
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, rn;
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);      /* Special register number. */
    to = extract32(dc->imm, 14, 1);      /* 1 = mts (write), 0 = mfs. */
    clrset = extract32(dc->imm, 15, 1) == 0;  /* msrclr/msrset form. */
    dc->type_b = 1;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* Extended MSRs are only available if addr_size > 32.  */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR.  */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);
    }

    /* msrclr and msrset.  */
    if (clrset) {
        bool clr = extract32(dc->ir, 16, 1);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        /* Only imm == 0 or 4 (carry) are allowed from userspace.  */
        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        /* rd receives the pre-modification MSR value.  */
        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    /* mts is privileged.  */
    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        TCGv_i32 tmp_ext = tcg_const_i32(extended);
        TCGv_i32 tmp_sr;

        sr &= 7;
        tmp_sr = tcg_const_i32(sr);
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to) {
            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
        }
        tcg_temp_free_i32(tmp_sr);
        tcg_temp_free_i32(tmp_ext);
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case SR_EAR:
            case SR_ESR:
            case SR_FSR:
                tcg_gen_extu_i32_i64(cpu_SR[sr], cpu_R[dc->ra]);
                break;
            case 0x800:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case SR_EAR:
                if (extended) {
                    /* Extended read returns the high half of EAR.  */
                    tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                    break;
                }
                /* fall through - non-extended reads take the low half.  */
            case SR_ESR:
            case SR_FSR:
            case SR_BTR:
                tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                break;
            case 0x800:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000 ... 0x200c:
                /* PVR registers.  */
                rn = sr & 0xf;
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    /* r0 must stay zero even if it was named as destination.  */
    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}
609
4acb54ba
EI
/* Multiplier unit.  Decodes mul/muli and the high-half variants
   mulh/mulhsu/mulhu (subcode in the low two immediate bits).  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        /* muli: immediate form only has the plain low-half multiply. */
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    /* tmp receives the unused low half for the mulh* forms.  */
    tmp = tcg_temp_new_i32();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                              cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free_i32(tmp);
}
659
660/* Div unit. */
661static void dec_div(DisasContext *dc)
662{
663 unsigned int u;
664
665 u = dc->imm & 2;
666 LOG_DIS("div\n");
667
9ba8cd45
EI
668 if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
669 return;
1567a005
EI
670 }
671
4acb54ba 672 if (u)
64254eba
BS
673 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
674 cpu_R[dc->ra]);
4acb54ba 675 else
64254eba
BS
676 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
677 cpu_R[dc->ra]);
4acb54ba 678 if (!dc->rd)
cfeea807 679 tcg_gen_movi_i32(cpu_R[dc->rd], 0);
4acb54ba
EI
680}
681
/* Barrel shifter group: bsll/bsra/bsrl and, in immediate mode, the
   bit-field extract (bsefi) and insert (bsifi) variants.  */
static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);   /* Shift left vs right. */
    t = extract32(dc->imm, 9, 1);    /* Arithmetic vs logical right. */
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        /* Only the low 5 bits of the shift amount are used.  */
        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}
743
/* Decode the bit-manipulation group (src/srl/sra, sign-extends,
   cache ops, clz, byte/halfword swaps), selected by ir bits [8:0].  */
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src: shift right through carry.  */
            t0 = tcg_temp_new_i32();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            /* Capture the old carry (MSR_CC, bit 31) before updating it. */
            tcg_gen_extrl_i64_i32(t0, cpu_SR[SR_MSR]);
            tcg_gen_andi_i32(t0, t0, MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                /* Old carry becomes the new MSB.  */
                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free_i32(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl (0x41 logical, 0x01 arithmetic).  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc: data-cache op; privileged, otherwise a nop here.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0x68:
            /* wic: insn-cache op; privileged, otherwise a nop here.  */
            LOG_DIS("wic r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0xe0:
            /* clz: count leading zeros.  */
            if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
                return;
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /*swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}
826
/* Materialize a statically-known pending branch into the runtime
   btaken/btarget state, downgrading it to JMP_INDIRECT.  Used before
   operations that may fault in a delay slot.  */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            /* Unconditional direct branch: taken by definition.  */
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i64(env_btarget, dc->jmp_pc);
    }
}
837
838static void dec_imm(DisasContext *dc)
839{
840 LOG_DIS("imm %x\n", dc->imm << 16);
cfeea807 841 tcg_gen_movi_i32(env_imm, (dc->imm << 16));
4acb54ba
EI
842 dc->tb_flags |= IMM_FLAG;
843 dc->clear_imm = 0;
844}
845
/* Compute the effective address for a load/store into T.  EA selects
   extended addressing (rb:ra concatenated, bypassing the 32-bit form).
   Emits a stack-protection check when r1 is involved and the core is
   configured with stackprot.  */
static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loadstores.  */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        if (ea) {
            int addr_size = dc->cpu->cfg.addr_size;

            if (addr_size == 32) {
                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
                return;
            }

            /* rb supplies the low half, ra the high half.  */
            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
            if (addr_size < 64) {
                /* Mask off out of range bits.  */
                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
            }
            return;
        }

        /* If any of the regs is r0, set t to the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate.  */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        /* No imm prefix: sign-extended 16-bit displacement.  */
        tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
    return;
}
914
/* Decode the load group (lbu/lhu/lw and the reversed, exclusive and
   extended-address variants).  */
static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);    /* Extended address (lea). */
        rev = extract32(dc->ir, 9, 1);   /* Byte-reversed access. */
        ex = extract32(dc->ir, 10, 1);   /* Exclusive (lwx). */
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    /* Extended addressing is privileged.  */
    if (trap_userspace(dc, ea)) {
        return;
    }

    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(0), tcg_const_i32(size - 1));
    }

    if (ex) {
        /* Record the reservation for the matching swx.  */
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}
1030
4acb54ba
EI
/*
 * Decode and translate a store instruction (sb/sh/sw and the i/r/x/ea
 * variants).  Also implements swx (store exclusive): the store is only
 * performed if the reservation established by lwx is still intact.
 */
static void dec_store(DisasContext *dc)
{
    TCGv addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    /* Low two opcode bits select the access size: 1, 2 or 4 bytes. */
    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        /* Register-form stores encode ea/reversed/exclusive modifiers. */
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        /* Byte-reversed store: flip the endianness of the access. */
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    /* NOTE(review): return value deliberately ignored — the insn still
       decodes below even when userspace gets the privilege trap. */
    trap_userspace(dc, ea);

    LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* SWX needs a temp_local.  */
    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    if (ex) { /* swx */
        TCGv_i32 tval;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

        /* Assume failure (carry set) until both checks below pass. */
        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        /* Address must match the one reserved by the preceding lwx. */
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new_i32();
        tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        /* Both checks passed: clear carry to report success. */
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                /* Mirror the byte lane within the 32-bit word. */
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    /* The store happens speculatively; MMU faults take priority over the
       unaligned-access check generated just below. */
    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, thay way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(1), tcg_const_i32(size - 1));
    }

    if (ex) {
        /* Failed swx reservation checks branch here, skipping the store. */
        gen_set_label(swx_skip);
    }

    tcg_temp_free(addr);
}
1144
1145static inline void eval_cc(DisasContext *dc, unsigned int cc,
9e6e1828 1146 TCGv_i32 d, TCGv_i32 a)
4acb54ba 1147{
d89b86e9
EI
1148 static const int mb_to_tcg_cc[] = {
1149 [CC_EQ] = TCG_COND_EQ,
1150 [CC_NE] = TCG_COND_NE,
1151 [CC_LT] = TCG_COND_LT,
1152 [CC_LE] = TCG_COND_LE,
1153 [CC_GE] = TCG_COND_GE,
1154 [CC_GT] = TCG_COND_GT,
1155 };
1156
4acb54ba 1157 switch (cc) {
d89b86e9
EI
1158 case CC_EQ:
1159 case CC_NE:
1160 case CC_LT:
1161 case CC_LE:
1162 case CC_GE:
1163 case CC_GT:
9e6e1828 1164 tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
d89b86e9
EI
1165 break;
1166 default:
1167 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1168 break;
4acb54ba
EI
1169 }
1170}
1171
43d318b2 1172static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
4acb54ba 1173{
e956caf2
EI
1174 TCGv_i64 tmp_btaken = tcg_temp_new_i64();
1175 TCGv_i64 tmp_zero = tcg_const_i64(0);
1176
1177 tcg_gen_extu_i32_i64(tmp_btaken, env_btaken);
1178 tcg_gen_movcond_i64(TCG_COND_NE, cpu_SR[SR_PC],
1179 tmp_btaken, tmp_zero,
1180 pc_true, pc_false);
1181
1182 tcg_temp_free_i64(tmp_btaken);
1183 tcg_temp_free_i64(tmp_zero);
4acb54ba
EI
1184}
1185
/*
 * Decode conditional branches (beq/bne/blt/... and their delay-slot
 * forms).  Computes the branch target into env_btarget (or records a
 * direct target in dc->jmp_pc) and evaluates the condition on rA into
 * env_btaken.  The branch itself is resolved by the main translation
 * loop once the (optional) delay slot has been translated.
 */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        /* Delay-slot form: one more insn is translated before the branch
           takes effect; remember whether the slot's imm was extended. */
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        /* Target is known at translate time: allow direct TB chaining. */
        tcg_gen_movi_i64(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        /* Target depends on a runtime register value. */
        dc->jmp = JMP_INDIRECT;
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
        tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
}
1217
/*
 * Decode unconditional branches (br/bra/brl/... plus delay-slot forms)
 * and the mbar/sleep encodings that share this opcode space.
 */
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            /* Set cs->halted; the offset arithmetic reaches the CPUState
               that precedes env inside MicroBlazeCPU. */
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        /* Delay-slot form: branch resolves after one more insn. */
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        /* Link: save the return address into rd. */
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        /* Absolute branch: target comes straight from op B. */
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            /* brki with imm 8/0x18 is the software-break convention;
               imm 0 is the debugger entry point (kernel mode only). */
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if (trap_userspace(dc, true)) {
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            /* PC-relative with translate-time-constant offset:
               eligible for direct TB chaining. */
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
            tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
            tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
        }
    }
}
1294
1295static inline void do_rti(DisasContext *dc)
1296{
cfeea807
EI
1297 TCGv_i32 t0, t1;
1298 t0 = tcg_temp_new_i32();
1299 t1 = tcg_temp_new_i32();
0a22f8cf
EI
1300 tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
1301 tcg_gen_shri_i32(t0, t1, 1);
1302 tcg_gen_ori_i32(t1, t1, MSR_IE);
cfeea807
EI
1303 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1304
1305 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1306 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1307 msr_write(dc, t1);
cfeea807
EI
1308 tcg_temp_free_i32(t1);
1309 tcg_temp_free_i32(t0);
4acb54ba
EI
1310 dc->tb_flags &= ~DRTI_FLAG;
1311}
1312
1313static inline void do_rtb(DisasContext *dc)
1314{
cfeea807
EI
1315 TCGv_i32 t0, t1;
1316 t0 = tcg_temp_new_i32();
1317 t1 = tcg_temp_new_i32();
0a22f8cf
EI
1318 tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
1319 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
cfeea807
EI
1320 tcg_gen_shri_i32(t0, t1, 1);
1321 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1322
1323 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1324 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1325 msr_write(dc, t1);
cfeea807
EI
1326 tcg_temp_free_i32(t1);
1327 tcg_temp_free_i32(t0);
4acb54ba
EI
1328 dc->tb_flags &= ~DRTB_FLAG;
1329}
1330
1331static inline void do_rte(DisasContext *dc)
1332{
cfeea807
EI
1333 TCGv_i32 t0, t1;
1334 t0 = tcg_temp_new_i32();
1335 t1 = tcg_temp_new_i32();
4acb54ba 1336
0a22f8cf
EI
1337 tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
1338 tcg_gen_ori_i32(t1, t1, MSR_EE);
cfeea807
EI
1339 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1340 tcg_gen_shri_i32(t0, t1, 1);
1341 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
4acb54ba 1342
cfeea807
EI
1343 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1344 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1345 msr_write(dc, t1);
cfeea807
EI
1346 tcg_temp_free_i32(t1);
1347 tcg_temp_free_i32(t0);
4acb54ba
EI
1348 dc->tb_flags &= ~DRTE_FLAG;
1349}
1350
/*
 * Decode the rtsd/rtid/rtbd/rted family (returns with mandatory delay
 * slot).  Sets env_btarget = rA + op_b; the MSR side effects of the
 * privileged variants are applied by do_rti/do_rtb/do_rte once the
 * delay slot has been translated (via the DRT?_FLAG tb_flags).
 */
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    TCGv_i64 tmp64;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    /* The interrupt/break/exception returns are privileged. */
    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
        return;
    }

    /* These insns always have a delay slot. */
    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);

    /* btarget = rA + op_b, truncated to 32 bits. */
    tmp64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
    tcg_gen_extu_i32_i64(tmp64, cpu_R[dc->ra]);
    tcg_gen_add_i64(env_btarget, env_btarget, tmp64);
    tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    tcg_temp_free_i64(tmp64);
}
1391
97694c57
EI
1392static int dec_check_fpuv2(DisasContext *dc)
1393{
be67e9ab 1394 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
0a22f8cf 1395 tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_FPU);
97694c57
EI
1396 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1397 }
be67e9ab 1398 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
97694c57
EI
1399}
1400
1567a005
EI
1401static void dec_fpu(DisasContext *dc)
1402{
97694c57
EI
1403 unsigned int fpu_insn;
1404
9ba8cd45 1405 if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
1567a005
EI
1406 return;
1407 }
1408
97694c57
EI
1409 fpu_insn = (dc->ir >> 7) & 7;
1410
1411 switch (fpu_insn) {
1412 case 0:
64254eba
BS
1413 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1414 cpu_R[dc->rb]);
97694c57
EI
1415 break;
1416
1417 case 1:
64254eba
BS
1418 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1419 cpu_R[dc->rb]);
97694c57
EI
1420 break;
1421
1422 case 2:
64254eba
BS
1423 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1424 cpu_R[dc->rb]);
97694c57
EI
1425 break;
1426
1427 case 3:
64254eba
BS
1428 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1429 cpu_R[dc->rb]);
97694c57
EI
1430 break;
1431
1432 case 4:
1433 switch ((dc->ir >> 4) & 7) {
1434 case 0:
64254eba 1435 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
97694c57
EI
1436 cpu_R[dc->ra], cpu_R[dc->rb]);
1437 break;
1438 case 1:
64254eba 1439 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
97694c57
EI
1440 cpu_R[dc->ra], cpu_R[dc->rb]);
1441 break;
1442 case 2:
64254eba 1443 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
97694c57
EI
1444 cpu_R[dc->ra], cpu_R[dc->rb]);
1445 break;
1446 case 3:
64254eba 1447 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
97694c57
EI
1448 cpu_R[dc->ra], cpu_R[dc->rb]);
1449 break;
1450 case 4:
64254eba 1451 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
97694c57
EI
1452 cpu_R[dc->ra], cpu_R[dc->rb]);
1453 break;
1454 case 5:
64254eba 1455 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
97694c57
EI
1456 cpu_R[dc->ra], cpu_R[dc->rb]);
1457 break;
1458 case 6:
64254eba 1459 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
97694c57
EI
1460 cpu_R[dc->ra], cpu_R[dc->rb]);
1461 break;
1462 default:
71547a3b
BS
1463 qemu_log_mask(LOG_UNIMP,
1464 "unimplemented fcmp fpu_insn=%x pc=%x"
1465 " opc=%x\n",
1466 fpu_insn, dc->pc, dc->opcode);
97694c57
EI
1467 dc->abort_at_next_insn = 1;
1468 break;
1469 }
1470 break;
1471
1472 case 5:
1473 if (!dec_check_fpuv2(dc)) {
1474 return;
1475 }
64254eba 1476 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1477 break;
1478
1479 case 6:
1480 if (!dec_check_fpuv2(dc)) {
1481 return;
1482 }
64254eba 1483 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1484 break;
1485
1486 case 7:
1487 if (!dec_check_fpuv2(dc)) {
1488 return;
1489 }
64254eba 1490 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1491 break;
1492
1493 default:
71547a3b
BS
1494 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1495 " opc=%x\n",
1496 fpu_insn, dc->pc, dc->opcode);
97694c57
EI
1497 dc->abort_at_next_insn = 1;
1498 break;
1499 }
1567a005
EI
1500}
1501
4acb54ba
EI
1502static void dec_null(DisasContext *dc)
1503{
9ba8cd45 1504 if (trap_illegal(dc, true)) {
02b33596
EI
1505 return;
1506 }
1d512a65 1507 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
4acb54ba
EI
1508 dc->abort_at_next_insn = 1;
1509}
1510
6d76d23e
EI
1511/* Insns connected to FSL or AXI stream attached devices. */
1512static void dec_stream(DisasContext *dc)
1513{
6d76d23e
EI
1514 TCGv_i32 t_id, t_ctrl;
1515 int ctrl;
1516
1517 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1518 dc->type_b ? "" : "d", dc->imm);
1519
bdfc1e88 1520 if (trap_userspace(dc, true)) {
6d76d23e
EI
1521 return;
1522 }
1523
cfeea807 1524 t_id = tcg_temp_new_i32();
6d76d23e 1525 if (dc->type_b) {
cfeea807 1526 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
6d76d23e
EI
1527 ctrl = dc->imm >> 10;
1528 } else {
cfeea807 1529 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
6d76d23e
EI
1530 ctrl = dc->imm >> 5;
1531 }
1532
cfeea807 1533 t_ctrl = tcg_const_i32(ctrl);
6d76d23e
EI
1534
1535 if (dc->rd == 0) {
1536 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1537 } else {
1538 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1539 }
cfeea807
EI
1540 tcg_temp_free_i32(t_id);
1541 tcg_temp_free_i32(t_ctrl);
6d76d23e
EI
1542}
1543
4acb54ba
EI
1544static struct decoder_info {
1545 struct {
1546 uint32_t bits;
1547 uint32_t mask;
1548 };
1549 void (*dec)(DisasContext *dc);
1550} decinfo[] = {
1551 {DEC_ADD, dec_add},
1552 {DEC_SUB, dec_sub},
1553 {DEC_AND, dec_and},
1554 {DEC_XOR, dec_xor},
1555 {DEC_OR, dec_or},
1556 {DEC_BIT, dec_bit},
1557 {DEC_BARREL, dec_barrel},
1558 {DEC_LD, dec_load},
1559 {DEC_ST, dec_store},
1560 {DEC_IMM, dec_imm},
1561 {DEC_BR, dec_br},
1562 {DEC_BCC, dec_bcc},
1563 {DEC_RTS, dec_rts},
1567a005 1564 {DEC_FPU, dec_fpu},
4acb54ba
EI
1565 {DEC_MUL, dec_mul},
1566 {DEC_DIV, dec_div},
1567 {DEC_MSR, dec_msr},
6d76d23e 1568 {DEC_STREAM, dec_stream},
4acb54ba
EI
1569 {{0, 0}, dec_null}
1570};
1571
/*
 * Decode one 32-bit instruction word: split it into the common fields
 * (opcode, rd, ra, rb, imm) and dispatch through the decinfo table.
 * Runs of all-zero insn words (nops) are bounded to catch runaway
 * fetches from zeroed memory.
 */
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        /* Opcode 0 may be configured to trap as illegal. */
        trap_illegal(dc, dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK);

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        /* More than 4 consecutive zero insns: almost certainly fetching
           from uninitialized memory — abort rather than spin. */
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
1607
4acb54ba 1608/* generate intermediate code for basic block 'tb'. */
9c489ea6 1609void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
4acb54ba 1610{
9c489ea6 1611 CPUMBState *env = cs->env_ptr;
4e5e1215 1612 MicroBlazeCPU *cpu = mb_env_get_cpu(env);
4acb54ba 1613 uint32_t pc_start;
4acb54ba
EI
1614 struct DisasContext ctx;
1615 struct DisasContext *dc = &ctx;
56371527 1616 uint32_t page_start, org_flags;
cfeea807 1617 uint32_t npc;
4acb54ba
EI
1618 int num_insns;
1619 int max_insns;
1620
4acb54ba 1621 pc_start = tb->pc;
0063ebd6 1622 dc->cpu = cpu;
4acb54ba
EI
1623 dc->tb = tb;
1624 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1625
4acb54ba
EI
1626 dc->is_jmp = DISAS_NEXT;
1627 dc->jmp = 0;
1628 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
23979dc5
EI
1629 if (dc->delayed_branch) {
1630 dc->jmp = JMP_INDIRECT;
1631 }
4acb54ba 1632 dc->pc = pc_start;
ed2803da 1633 dc->singlestep_enabled = cs->singlestep_enabled;
4acb54ba
EI
1634 dc->cpustate_changed = 0;
1635 dc->abort_at_next_insn = 0;
1636 dc->nr_nops = 0;
1637
a47dddd7
AF
1638 if (pc_start & 3) {
1639 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1640 }
4acb54ba 1641
56371527 1642 page_start = pc_start & TARGET_PAGE_MASK;
4acb54ba 1643 num_insns = 0;
c5a49c63 1644 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
190ce7fb 1645 if (max_insns == 0) {
4acb54ba 1646 max_insns = CF_COUNT_MASK;
190ce7fb
RH
1647 }
1648 if (max_insns > TCG_MAX_INSNS) {
1649 max_insns = TCG_MAX_INSNS;
1650 }
4acb54ba 1651
cd42d5b2 1652 gen_tb_start(tb);
4acb54ba
EI
1653 do
1654 {
667b8e29 1655 tcg_gen_insn_start(dc->pc);
959082fc 1656 num_insns++;
4acb54ba 1657
b933066a
RH
1658#if SIM_COMPAT
1659 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
0a22f8cf 1660 tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
b933066a
RH
1661 gen_helper_debug();
1662 }
1663#endif
1664
1665 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1666 t_gen_raise_exception(dc, EXCP_DEBUG);
1667 dc->is_jmp = DISAS_UPDATE;
522a0d4e
RH
1668 /* The address covered by the breakpoint must be included in
1669 [tb->pc, tb->pc + tb->size) in order to for it to be
1670 properly cleared -- thus we increment the PC here so that
1671 the logic setting tb->size below does the right thing. */
1672 dc->pc += 4;
b933066a
RH
1673 break;
1674 }
1675
4acb54ba
EI
1676 /* Pretty disas. */
1677 LOG_DIS("%8.8x:\t", dc->pc);
1678
c5a49c63 1679 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
4acb54ba 1680 gen_io_start();
959082fc 1681 }
4acb54ba
EI
1682
1683 dc->clear_imm = 1;
64254eba 1684 decode(dc, cpu_ldl_code(env, dc->pc));
4acb54ba
EI
1685 if (dc->clear_imm)
1686 dc->tb_flags &= ~IMM_FLAG;
4acb54ba 1687 dc->pc += 4;
4acb54ba
EI
1688
1689 if (dc->delayed_branch) {
1690 dc->delayed_branch--;
1691 if (!dc->delayed_branch) {
1692 if (dc->tb_flags & DRTI_FLAG)
1693 do_rti(dc);
1694 if (dc->tb_flags & DRTB_FLAG)
1695 do_rtb(dc);
1696 if (dc->tb_flags & DRTE_FLAG)
1697 do_rte(dc);
1698 /* Clear the delay slot flag. */
1699 dc->tb_flags &= ~D_FLAG;
1700 /* If it is a direct jump, try direct chaining. */
23979dc5 1701 if (dc->jmp == JMP_INDIRECT) {
0a22f8cf 1702 eval_cond_jmp(dc, env_btarget, tcg_const_i64(dc->pc));
4acb54ba 1703 dc->is_jmp = DISAS_JUMP;
23979dc5 1704 } else if (dc->jmp == JMP_DIRECT) {
844bab60
EI
1705 t_sync_flags(dc);
1706 gen_goto_tb(dc, 0, dc->jmp_pc);
1707 dc->is_jmp = DISAS_TB_JUMP;
1708 } else if (dc->jmp == JMP_DIRECT_CC) {
42a268c2 1709 TCGLabel *l1 = gen_new_label();
23979dc5 1710 t_sync_flags(dc);
23979dc5 1711 /* Conditional jmp. */
cfeea807 1712 tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
23979dc5
EI
1713 gen_goto_tb(dc, 1, dc->pc);
1714 gen_set_label(l1);
1715 gen_goto_tb(dc, 0, dc->jmp_pc);
1716
1717 dc->is_jmp = DISAS_TB_JUMP;
4acb54ba
EI
1718 }
1719 break;
1720 }
1721 }
ed2803da 1722 if (cs->singlestep_enabled) {
4acb54ba 1723 break;
ed2803da 1724 }
4acb54ba 1725 } while (!dc->is_jmp && !dc->cpustate_changed
fe700adb
RH
1726 && !tcg_op_buf_full()
1727 && !singlestep
56371527 1728 && (dc->pc - page_start < TARGET_PAGE_SIZE)
fe700adb 1729 && num_insns < max_insns);
4acb54ba
EI
1730
1731 npc = dc->pc;
844bab60 1732 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
4acb54ba
EI
1733 if (dc->tb_flags & D_FLAG) {
1734 dc->is_jmp = DISAS_UPDATE;
0a22f8cf 1735 tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
4acb54ba
EI
1736 sync_jmpstate(dc);
1737 } else
1738 npc = dc->jmp_pc;
1739 }
1740
c5a49c63 1741 if (tb_cflags(tb) & CF_LAST_IO)
4acb54ba
EI
1742 gen_io_end();
1743 /* Force an update if the per-tb cpu state has changed. */
1744 if (dc->is_jmp == DISAS_NEXT
1745 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1746 dc->is_jmp = DISAS_UPDATE;
0a22f8cf 1747 tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
4acb54ba
EI
1748 }
1749 t_sync_flags(dc);
1750
ed2803da 1751 if (unlikely(cs->singlestep_enabled)) {
6c5f738d
EI
1752 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1753
1754 if (dc->is_jmp != DISAS_JUMP) {
0a22f8cf 1755 tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
6c5f738d 1756 }
64254eba 1757 gen_helper_raise_exception(cpu_env, tmp);
6c5f738d 1758 tcg_temp_free_i32(tmp);
4acb54ba
EI
1759 } else {
1760 switch(dc->is_jmp) {
1761 case DISAS_NEXT:
1762 gen_goto_tb(dc, 1, npc);
1763 break;
1764 default:
1765 case DISAS_JUMP:
1766 case DISAS_UPDATE:
1767 /* indicate that the hash table must be used
1768 to find the next TB */
07ea28b4 1769 tcg_gen_exit_tb(NULL, 0);
4acb54ba
EI
1770 break;
1771 case DISAS_TB_JUMP:
1772 /* nothing more to generate */
1773 break;
1774 }
1775 }
806f352d 1776 gen_tb_end(tb, num_insns);
0a7df5da 1777
4e5e1215
RH
1778 tb->size = dc->pc - pc_start;
1779 tb->icount = num_insns;
4acb54ba
EI
1780
1781#ifdef DEBUG_DISAS
1782#if !SIM_COMPAT
4910e6e4
RH
1783 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1784 && qemu_log_in_addr_range(pc_start)) {
1ee73216 1785 qemu_log_lock();
f01a5e7e 1786 qemu_log("--------------\n");
1d48474d 1787 log_target_disas(cs, pc_start, dc->pc - pc_start);
1ee73216 1788 qemu_log_unlock();
4acb54ba
EI
1789 }
1790#endif
1791#endif
1792 assert(!dc->abort_at_next_insn);
1793}
1794
878096ee
AF
1795void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1796 int flags)
4acb54ba 1797{
878096ee
AF
1798 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1799 CPUMBState *env = &cpu->env;
4acb54ba
EI
1800 int i;
1801
1802 if (!env || !f)
1803 return;
1804
0a22f8cf 1805 cpu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
4acb54ba 1806 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
0a22f8cf
EI
1807 cpu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
1808 "debug=%x imm=%x iflags=%x fsr=%" PRIx64 "\n",
4c24aa0a 1809 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
97694c57 1810 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
43d318b2
EI
1811 cpu_fprintf(f, "btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) "
1812 "eip=%d ie=%d\n",
4acb54ba
EI
1813 env->btaken, env->btarget,
1814 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
17c52a43 1815 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
0a22f8cf
EI
1816 (bool)(env->sregs[SR_MSR] & MSR_EIP),
1817 (bool)(env->sregs[SR_MSR] & MSR_IE));
17c52a43 1818
4acb54ba
EI
1819 for (i = 0; i < 32; i++) {
1820 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1821 if ((i + 1) % 4 == 0)
1822 cpu_fprintf(f, "\n");
1823 }
1824 cpu_fprintf(f, "\n\n");
1825}
1826
cd0c24f9
AF
1827void mb_tcg_init(void)
1828{
1829 int i;
4acb54ba 1830
cfeea807 1831 env_debug = tcg_global_mem_new_i32(cpu_env,
68cee38a 1832 offsetof(CPUMBState, debug),
4acb54ba 1833 "debug0");
cfeea807 1834 env_iflags = tcg_global_mem_new_i32(cpu_env,
68cee38a 1835 offsetof(CPUMBState, iflags),
4acb54ba 1836 "iflags");
cfeea807 1837 env_imm = tcg_global_mem_new_i32(cpu_env,
68cee38a 1838 offsetof(CPUMBState, imm),
4acb54ba 1839 "imm");
43d318b2 1840 env_btarget = tcg_global_mem_new_i64(cpu_env,
68cee38a 1841 offsetof(CPUMBState, btarget),
4acb54ba 1842 "btarget");
cfeea807 1843 env_btaken = tcg_global_mem_new_i32(cpu_env,
68cee38a 1844 offsetof(CPUMBState, btaken),
4acb54ba 1845 "btaken");
403322ea 1846 env_res_addr = tcg_global_mem_new(cpu_env,
4a536270
EI
1847 offsetof(CPUMBState, res_addr),
1848 "res_addr");
cfeea807 1849 env_res_val = tcg_global_mem_new_i32(cpu_env,
11a76217
EI
1850 offsetof(CPUMBState, res_val),
1851 "res_val");
4acb54ba 1852 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
cfeea807 1853 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
68cee38a 1854 offsetof(CPUMBState, regs[i]),
4acb54ba
EI
1855 regnames[i]);
1856 }
1857 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
0a22f8cf 1858 cpu_SR[i] = tcg_global_mem_new_i64(cpu_env,
68cee38a 1859 offsetof(CPUMBState, sregs[i]),
4acb54ba
EI
1860 special_regnames[i]);
1861 }
4acb54ba
EI
1862}
1863
bad729e2
RH
1864void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1865 target_ulong *data)
4acb54ba 1866{
bad729e2 1867 env->sregs[SR_PC] = data[0];
4acb54ba 1868}
This page took 1.093697 seconds and 4 git commands to generate.