]> Git Repo - qemu.git/blame - target/microblaze/translate.c
target/microblaze: Tidy mb_tcg_init
[qemu.git] / target / microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
dcb32f1d 25#include "tcg/tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
4acb54ba 27#include "microblaze-decode.h"
f08b6170 28#include "exec/cpu_ldst.h"
2ef6175a 29#include "exec/helper-gen.h"
77fc6f5e 30#include "exec/translator.h"
90c84c56 31#include "qemu/qemu-print.h"
4acb54ba 32
a7e30d84 33#include "trace-tcg.h"
508127e2 34#include "exec/log.h"
a7e30d84
LV
35
36
4acb54ba
EI
37#define SIM_COMPAT 0
38#define DISAS_GNU 1
39#define DISAS_MB 1
40#if DISAS_MB && !SIM_COMPAT
41# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
42#else
43# define LOG_DIS(...) do { } while (0)
44#endif
45
46#define D(x)
47
/*
 * Extract the bit-field [start, end] (inclusive, LSB-numbered) from src.
 * All three arguments are fully parenthesized so that expression
 * arguments (e.g. EXTRACT_FIELD(x, a + b, c)) bind correctly; the
 * original expansion used bare `start`/`end` and misparsed such calls.
 * Note: end - start + 1 must be < 32 to avoid UB in the shift.
 */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
50
77fc6f5e
LV
51/* is_jmp field values */
52#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
53#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
54#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
55
cfeea807 56static TCGv_i32 cpu_R[32];
0f96e96b 57static TCGv_i32 cpu_pc;
3e0e16ae 58static TCGv_i32 cpu_msr;
9b158558
RH
59static TCGv_i32 cpu_imm;
60static TCGv_i32 cpu_btaken;
0f96e96b 61static TCGv_i32 cpu_btarget;
9b158558
RH
62static TCGv_i32 cpu_iflags;
63static TCGv cpu_res_addr;
64static TCGv_i32 cpu_res_val;
4acb54ba 65
022c62cb 66#include "exec/gen-icount.h"
4acb54ba
EI
67
/* This is the state at translation time. */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;                 /* Guest PC of the insn being translated. */

    /* Decoder. */
    int type_b;                  /* Nonzero for type-B (immediate) insns. */
    uint32_t ir;                 /* Raw 32-bit instruction word. */
    uint8_t opcode;
    uint8_t rd, ra, rb;          /* Destination and source register fields. */
    uint16_t imm;                /* 16-bit immediate field. */

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags. */
    unsigned int clear_imm;      /* Clear IMM_FLAG after this insn. */
    int is_jmp;

/* Values for the jmp field below. */
#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;             /* Branch target for JMP_DIRECT*. */

    int abort_at_next_insn;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
97
4acb54ba
EI
/* Flush translation-time iflags out to the runtime cpu_iflags global,
   but only when they have actually diverged. */
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime. */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
106
41ba37c4 107static void gen_raise_exception(DisasContext *dc, uint32_t index)
4acb54ba
EI
108{
109 TCGv_i32 tmp = tcg_const_i32(index);
110
64254eba 111 gen_helper_raise_exception(cpu_env, tmp);
4acb54ba
EI
112 tcg_temp_free_i32(tmp);
113 dc->is_jmp = DISAS_UPDATE;
114}
115
41ba37c4
RH
/* As gen_raise_exception, but first sync iflags and pc so the helper
   sees accurate architectural state. */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->pc);
    gen_raise_exception(dc, index);
}
122
/* Raise a hardware exception: store the exception cause code into ESR,
   then raise EXCP_HW_EXCP with pc/iflags synced. */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
131
90aa39a1
SF
/* Direct TB chaining is only safe when the destination lies in the same
   guest page as this TB (softmmu); user mode has no such restriction. */
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
140
4acb54ba
EI
/* Jump to dest: chain to TB slot n when allowed, otherwise set pc and
   exit to the main loop for a fresh lookup. */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
}
152
/* Copy the carry flag (MSR bit 31, the MSR_CC alias) into d as 0 or 1. */
static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_shri_i32(d, cpu_msr, 31);
}
157
04ec7df7
EI
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    /* Deposit bit 0 into MSR_C (bit 2) and the alias MSR_CC (bit 31). */
    tcg_gen_deposit_i32(cpu_msr, cpu_msr, v, 2, 1);
    tcg_gen_deposit_i32(cpu_msr, cpu_msr, v, 31, 1);
}
168
65ab5eb4 169static void write_carryi(DisasContext *dc, bool carry)
8cc9b43f 170{
cfeea807
EI
171 TCGv_i32 t0 = tcg_temp_new_i32();
172 tcg_gen_movi_i32(t0, carry);
8cc9b43f 173 write_carry(dc, t0);
cfeea807 174 tcg_temp_free_i32(t0);
8cc9b43f
PC
175}
176
9ba8cd45
EI
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 * Note the return value is cond itself, regardless of whether an
 * exception was actually generated.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && dc->cpu->cfg.illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}
189
bdfc1e88
EI
/*
 * Returns true if the insn is illegal in userspace (cond holds and we
 * are translating for the user MMU index).
 * If exceptions are enabled, a privileged-insn exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}
204
61204ce8
EI
/* True if ALU operand b is a small immediate that may deserve
   faster treatment. */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ? */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}
212
cfeea807 213static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
4acb54ba
EI
214{
215 if (dc->type_b) {
216 if (dc->tb_flags & IMM_FLAG)
9b158558 217 tcg_gen_ori_i32(cpu_imm, cpu_imm, dc->imm);
4acb54ba 218 else
9b158558
RH
219 tcg_gen_movi_i32(cpu_imm, (int32_t)((int16_t)dc->imm));
220 return &cpu_imm;
4acb54ba
EI
221 } else
222 return &cpu_R[dc->rb];
223}
224
/*
 * Decode add/addc/addk/addkc (and their immediate forms).
 * opcode bit 2 (k) = keep carry: MSR.C is not updated.
 * opcode bit 1 (c) = add the incoming carry into the result.
 * Writes to r0 are nops, but carry is still computed when !k.
 */
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first. */
    if (k) {
        /* k - keep carry, no need to update MSR. */
        /* If rd == r0, it's a nop. */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result. */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero. So we need to update MSR. */
    /* Extract carry. */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        /* The helper computes the new carry BEFORE rd is clobbered,
           which also makes rd == ra / rd == rb safe. */
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        /* rd == r0: only the carry flag is architecturally visible. */
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}
278
/*
 * Decode rsub family and cmp/cmpu.
 * Subtraction is computed as d = b + ~a + c with carry-in defaulting
 * to 1 (two's complement). imm bit 0 with k and reg-form selects the
 * compare insns instead.
 */
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        /* cmp/cmpu: signed or unsigned compare via helpers. */
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first. */
    if (k) {
        /* k - keep carry, no need to update MSR. */
        /* If rd == r0, it's a nop. */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result. */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero. So we need to update MSR. */
    /* Extract carry. And complement a into na. */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1. */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        /* New carry is computed before rd is written (rd may alias ra/rb). */
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        /* rd == r0: only the carry flag is architecturally visible. */
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}
349
/*
 * Decode the pattern-compare group (pcmpbf/pcmpeq/pcmpne), selected by
 * the low two opcode bits. Illegal if the core lacks C_USE_PCMP_INSTR.
 * mode 1 is not a defined pattern insn and aborts.
 */
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
    case 0:
        /* pcmpbf. */
        LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd)
            gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        /* pcmpeq: rd = (ra == rb). */
        LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    case 3:
        /* pcmpne: rd = (ra != rb). */
        LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    default:
        cpu_abort(CPU(dc->cpu),
                  "unsupported pattern insn opcode=%x\n", dc->opcode);
        break;
    }
}
386
387static void dec_and(DisasContext *dc)
388{
389 unsigned int not;
390
391 if (!dc->type_b && (dc->imm & (1 << 10))) {
392 dec_pattern(dc);
393 return;
394 }
395
396 not = dc->opcode & (1 << 1);
397 LOG_DIS("and%s\n", not ? "n" : "");
398
399 if (!dc->rd)
400 return;
401
402 if (not) {
cfeea807 403 tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba 404 } else
cfeea807 405 tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
406}
407
408static void dec_or(DisasContext *dc)
409{
410 if (!dc->type_b && (dc->imm & (1 << 10))) {
411 dec_pattern(dc);
412 return;
413 }
414
415 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
416 if (dc->rd)
cfeea807 417 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
418}
419
420static void dec_xor(DisasContext *dc)
421{
422 if (!dc->type_b && (dc->imm & (1 << 10))) {
423 dec_pattern(dc);
424 return;
425 }
426
427 LOG_DIS("xor r%d\n", dc->rd);
428 if (dc->rd)
cfeea807 429 tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
430}
431
/* Copy the current MSR into d. */
static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_mov_i32(d, cpu_msr);
}
436
/* Write v to MSR and flag the cpu state as changed so the translator
   ends the TB. */
static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    dc->cpustate_changed = 1;
    /* PVR bit is not writable, and is never set. */
    tcg_gen_andi_i32(cpu_msr, v, ~MSR_PVR);
}
443
/*
 * Decode the special-register insns: msrclr/msrset and mts/mfs.
 * The imm field selects the register (sr), direction (to) and
 * clr/set sub-form. Extended (64-bit EAR) accesses are only decoded
 * when cfg.addr_size > 32. MMU registers (0x1000..0x10ff) are handled
 * via helpers in softmmu builds.
 */
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, rn;
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);
    to = extract32(dc->imm, 14, 1);
    clrset = extract32(dc->imm, 15, 1) == 0;
    dc->type_b = 1;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* Extended MSRs are only available if addr_size > 32. */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR. */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);
    }

    /* msrclr and msrset. */
    if (clrset) {
        bool clr = extract32(dc->ir, 16, 1);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        /* Only imm 0 and 4 (carry bits) are allowed from userspace. */
        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        /* rd receives the pre-modification MSR value. */
        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        /* MSR changed: end the TB at the next insn. */
        tcg_gen_movi_i32(cpu_pc, dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    /* mts is privileged. */
    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block. */
    if ((sr & ~0xff) == 0x1000) {
        TCGv_i32 tmp_ext = tcg_const_i32(extended);
        TCGv_i32 tmp_sr;

        sr &= 7;
        tmp_sr = tcg_const_i32(sr);
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to) {
            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
        }
        tcg_temp_free_i32(tmp_sr);
        tcg_temp_free_i32(tmp_ext);
        return;
    }
#endif

    if (to) {
        /* mts: move register ra into special register sr. */
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
        case SR_PC:
            /* Writes to PC are ignored here. */
            break;
        case SR_MSR:
            msr_write(dc, cpu_R[dc->ra]);
            break;
        case SR_EAR:
            {
                /* EAR is 64-bit; zero-extend the 32-bit source. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_i32_i64(t64, cpu_R[dc->ra]);
                tcg_gen_st_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_temp_free_i64(t64);
            }
            break;
        case SR_ESR:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, esr));
            break;
        case SR_FSR:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, fsr));
            break;
        case SR_BTR:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, btr));
            break;
        case SR_EDR:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, edr));
            break;
        case 0x800:
            /* SLR: stack low register (stack protection). */
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            /* SHR: stack high register (stack protection). */
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, shr));
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
            break;
        }
    } else {
        /* mfs: move special register sr into register rd. */
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
        case SR_PC:
            tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
            break;
        case SR_MSR:
            msr_read(dc, cpu_R[dc->rd]);
            break;
        case SR_EAR:
            {
                /* Extended form reads the high half of the 64-bit EAR. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                if (extended) {
                    tcg_gen_extrh_i64_i32(cpu_R[dc->rd], t64);
                } else {
                    tcg_gen_extrl_i64_i32(cpu_R[dc->rd], t64);
                }
                tcg_temp_free_i64(t64);
            }
            break;
        case SR_ESR:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, esr));
            break;
        case SR_FSR:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, fsr));
            break;
        case SR_BTR:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, btr));
            break;
        case SR_EDR:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, edr));
            break;
        case 0x800:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, shr));
            break;
        case 0x2000 ... 0x200c:
            /* PVR0..PVR12: processor version registers. */
            rn = sr & 0xf;
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
            break;
        default:
            cpu_abort(cs, "unknown mfs reg %x\n", sr);
            break;
        }
    }

    /* r0 must stay zero even if the decode above wrote it. */
    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}
633
4acb54ba
EI
/* Multiplier unit: mul/mulh/mulhsu/mulhu and muli.
   Illegal if the core has no hardware multiplier (C_USE_HW_MUL). */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    /* tmp receives the unused low half for the mulh* forms. */
    tmp = tcg_temp_new_i32();
    switch (subcode) {
    case 0:
        LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 1:
        LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                          cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                           cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 3:
        LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
        break;
    }
    tcg_temp_free_i32(tmp);
}
683
/* Div unit: idiv/idivu via helpers (which handle div-by-zero).
   Illegal if the core has no divider (C_USE_DIV). Note operand order:
   MicroBlaze divides rb by ra. */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
        return;
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    /* Keep r0 pinned to zero if it was the destination. */
    if (!dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}
705
/*
 * Barrel shifter: bsrl/bsra/bsll and the immediate-only bsefi (extract)
 * and bsifi (insert) forms. Illegal without C_USE_BARREL.
 * Field layout in imm: bit15=i(insert) bit14=e(extract) bit10=s(left)
 * bit9=t(arith) bits6..10=w bits0..4=s-amount.
 */
static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode. */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        /* bsefi: extract the field [imm_s, imm_s+imm_w) from ra. */
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior. */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        /* bsifi: insert bits of ra into rd at [imm_s, imm_w]. */
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior. */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        /* Plain shifts: the shift amount is operand b masked to 5 bits. */
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}
767
/*
 * Decode the bit-manipulation group, dispatched on the low 9 bits of
 * the insn: src/srl/sra, sign-extensions, cache ops (wdc/wic), clz,
 * and byte/halfword swaps.
 */
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
    case 0x21:
        /* src: shift right through carry. Old carry is captured (as the
           MSR_CC bit, already in position 31) before write_carry
           clobbers it, then ORed into the vacated MSB. */
        t0 = tcg_temp_new_i32();

        LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_andi_i32(t0, cpu_msr, MSR_CC);
        write_carry(dc, cpu_R[dc->ra]);
        if (dc->rd) {
            tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
        }
        tcg_temp_free_i32(t0);
        break;

    case 0x1:
    case 0x41:
        /* srl (0x41) / sra (0x1). */
        LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

        /* Update carry. Note that write carry only looks at the LSB. */
        write_carry(dc, cpu_R[dc->ra]);
        if (dc->rd) {
            if (op == 0x41)
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            else
                tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
        }
        break;
    case 0x60:
        LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x61:
        LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x64:
    case 0x66:
    case 0x74:
    case 0x76:
        /* wdc: data-cache writeback/invalidate — privileged nop here. */
        LOG_DIS("wdc r%d\n", dc->ra);
        trap_userspace(dc, true);
        break;
    case 0x68:
        /* wic: insn-cache invalidate — privileged nop here. */
        LOG_DIS("wic r%d\n", dc->ra);
        trap_userspace(dc, true);
        break;
    case 0xe0:
        /* clz: count leading zeros (part of the pcmp group). */
        if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
            return;
        }
        if (dc->cpu->cfg.use_pcmp_instr) {
            tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
        }
        break;
    case 0x1e0:
        /* swapb */
        LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x1e2:
        /*swaph */
        LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
        break;
    default:
        cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                  dc->pc, op, dc->rd, dc->ra, dc->rb);
        break;
    }
}
849
/* Materialize a pending static branch (taken flag + target) into the
   runtime globals, demoting it to JMP_INDIRECT; needed before anything
   that might fault in a delay slot. */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(cpu_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
    }
}
860
/* Decode the IMM prefix insn: latch the high 16 bits of the immediate
   for the following type-B insn and keep IMM_FLAG alive past this insn. */
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(cpu_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
868
/*
 * Compute the effective address for a load/store into t.
 * ea selects the extended-addressing form ({ra, rb} as a 64-bit
 * address, masked to cfg.addr_size bits). Otherwise the address is
 * ra + rb (register form) or ra + imm (immediate form), with r0 reads
 * folded away. Accesses through r1 (and r1-relative reg forms) are
 * checked by the stackprot helper when enabled.
 */
static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loadstores. */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra. */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first. */
    if (!dc->type_b) {
        if (ea) {
            int addr_size = dc->cpu->cfg.addr_size;

            if (addr_size == 32) {
                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
                return;
            }

            /* rb supplies the low 32 bits, ra the high bits. */
            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
            if (addr_size < 64) {
                /* Mask off out of range bits. */
                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
            }
            return;
        }

        /* If any of the regs is r0, set t to the value of the other reg. */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate. */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        /* Small immediate: fold the sign-extended 16 bits directly. */
        tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
    return;
}
937
/*
 * Decode loads: lbu/lhu/lw and the reversed (r), exclusive (lwx) and
 * extended-address (ea) variants. Size comes from the low opcode bits;
 * rev flips both the byte lanes and (for sub-word sizes) the address.
 */
static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    MemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    /* Extended-address loads are privileged. */
    if (trap_userspace(dc, ea)) {
        return;
    }

    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "",
            ea ? "ea" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU. */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr. */
        switch (size) {
            case 1:
            {
                tcg_gen_xori_tl(addr, addr, 3);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00. */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync. */
    sync_jmpstate(dc);

    /* Verify alignment if needed. */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
        TCGv_i32 t0 = tcg_const_i32(0);
        TCGv_i32 treg = tcg_const_i32(dc->rd);
        TCGv_i32 tsize = tcg_const_i32(size - 1);

        tcg_gen_movi_i32(cpu_pc, dc->pc);
        gen_helper_memalign(cpu_env, addr, treg, t0, tsize);

        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);
    }

    if (ex) {
        /* Record the reservation for the matching swx. */
        tcg_gen_mov_tl(cpu_res_addr, addr);
        tcg_gen_mov_i32(cpu_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}
1050
4acb54ba
EI
1051static void dec_store(DisasContext *dc)
1052{
403322ea 1053 TCGv addr;
42a268c2 1054 TCGLabel *swx_skip = NULL;
b51b3d43 1055 unsigned int size;
d248e1be
EI
1056 bool rev = false, ex = false, ea = false;
1057 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
14776ab5 1058 MemOp mop;
4acb54ba 1059
47acdd63
RH
1060 mop = dc->opcode & 3;
1061 size = 1 << mop;
9f8beb66 1062 if (!dc->type_b) {
d248e1be 1063 ea = extract32(dc->ir, 7, 1);
b51b3d43
EI
1064 rev = extract32(dc->ir, 9, 1);
1065 ex = extract32(dc->ir, 10, 1);
9f8beb66 1066 }
47acdd63
RH
1067 mop |= MO_TE;
1068 if (rev) {
1069 mop ^= MO_BSWAP;
1070 }
4acb54ba 1071
9ba8cd45 1072 if (trap_illegal(dc, size > 4)) {
0187688f
EI
1073 return;
1074 }
1075
d248e1be
EI
1076 trap_userspace(dc, ea);
1077
1078 LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1079 ex ? "x" : "",
1080 ea ? "ea" : "");
4acb54ba
EI
1081 t_sync_flags(dc);
1082 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1083 sync_jmpstate(dc);
0dc4af5c 1084 /* SWX needs a temp_local. */
403322ea 1085 addr = ex ? tcg_temp_local_new() : tcg_temp_new();
d248e1be
EI
1086 compute_ldst_addr(dc, ea, addr);
1087 /* Extended addressing bypasses the MMU. */
1088 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
968a40f6 1089
8cc9b43f 1090 if (ex) { /* swx */
cfeea807 1091 TCGv_i32 tval;
8cc9b43f 1092
8cc9b43f 1093 /* swx does not throw unaligned access errors, so force alignment */
403322ea 1094 tcg_gen_andi_tl(addr, addr, ~3);
8cc9b43f 1095
8cc9b43f
PC
1096 write_carryi(dc, 1);
1097 swx_skip = gen_new_label();
9b158558 1098 tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_skip);
11a76217 1099
071cdc67
EI
1100 /*
1101 * Compare the value loaded at lwx with current contents of
1102 * the reserved location.
1103 */
cfeea807 1104 tval = tcg_temp_new_i32();
071cdc67 1105
9b158558 1106 tcg_gen_atomic_cmpxchg_i32(tval, addr, cpu_res_val,
071cdc67
EI
1107 cpu_R[dc->rd], mem_index,
1108 mop);
1109
9b158558 1110 tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_skip);
8cc9b43f 1111 write_carryi(dc, 0);
cfeea807 1112 tcg_temp_free_i32(tval);
8cc9b43f
PC
1113 }
1114
9f8beb66
EI
1115 if (rev && size != 4) {
1116 /* Endian reverse the address. t is addr. */
1117 switch (size) {
1118 case 1:
1119 {
a6338015 1120 tcg_gen_xori_tl(addr, addr, 3);
9f8beb66
EI
1121 break;
1122 }
1123
1124 case 2:
1125 /* 00 -> 10
1126 10 -> 00. */
1127 /* Force addr into the temp. */
403322ea 1128 tcg_gen_xori_tl(addr, addr, 2);
9f8beb66
EI
1129 break;
1130 default:
0063ebd6 1131 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
9f8beb66
EI
1132 break;
1133 }
9f8beb66 1134 }
071cdc67
EI
1135
1136 if (!ex) {
1137 tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
1138 }
a12f6507 1139
968a40f6 1140 /* Verify alignment if needed. */
1507e5f6 1141 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
a6338015
EI
1142 TCGv_i32 t1 = tcg_const_i32(1);
1143 TCGv_i32 treg = tcg_const_i32(dc->rd);
1144 TCGv_i32 tsize = tcg_const_i32(size - 1);
1145
0f96e96b 1146 tcg_gen_movi_i32(cpu_pc, dc->pc);
a12f6507 1147 /* FIXME: if the alignment is wrong, we should restore the value
4abf79a4 1148 * in memory. One possible way to achieve this is to probe
9f8beb66
EI
1149 * the MMU prior to the memaccess, thay way we could put
1150 * the alignment checks in between the probe and the mem
1151 * access.
a12f6507 1152 */
a6338015
EI
1153 gen_helper_memalign(cpu_env, addr, treg, t1, tsize);
1154
1155 tcg_temp_free_i32(t1);
1156 tcg_temp_free_i32(treg);
1157 tcg_temp_free_i32(tsize);
968a40f6 1158 }
083dbf48 1159
8cc9b43f
PC
1160 if (ex) {
1161 gen_set_label(swx_skip);
8cc9b43f 1162 }
968a40f6 1163
403322ea 1164 tcg_temp_free(addr);
4acb54ba
EI
1165}
1166
1167static inline void eval_cc(DisasContext *dc, unsigned int cc,
9e6e1828 1168 TCGv_i32 d, TCGv_i32 a)
4acb54ba 1169{
d89b86e9
EI
1170 static const int mb_to_tcg_cc[] = {
1171 [CC_EQ] = TCG_COND_EQ,
1172 [CC_NE] = TCG_COND_NE,
1173 [CC_LT] = TCG_COND_LT,
1174 [CC_LE] = TCG_COND_LE,
1175 [CC_GE] = TCG_COND_GE,
1176 [CC_GT] = TCG_COND_GT,
1177 };
1178
4acb54ba 1179 switch (cc) {
d89b86e9
EI
1180 case CC_EQ:
1181 case CC_NE:
1182 case CC_LT:
1183 case CC_LE:
1184 case CC_GE:
1185 case CC_GT:
9e6e1828 1186 tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
d89b86e9
EI
1187 break;
1188 default:
1189 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1190 break;
4acb54ba
EI
1191 }
1192}
1193
0f96e96b 1194static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
4acb54ba 1195{
0f96e96b 1196 TCGv_i32 zero = tcg_const_i32(0);
e956caf2 1197
0f96e96b 1198 tcg_gen_movcond_i32(TCG_COND_NE, cpu_pc,
9b158558 1199 cpu_btaken, zero,
e956caf2
EI
1200 pc_true, pc_false);
1201
0f96e96b 1202 tcg_temp_free_i32(zero);
4acb54ba
EI
1203}
1204
f91c60f0
EI
1205static void dec_setup_dslot(DisasContext *dc)
1206{
1207 TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));
1208
1209 dc->delayed_branch = 2;
1210 dc->tb_flags |= D_FLAG;
1211
1212 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
1213 tcg_temp_free_i32(tmp);
1214}
1215
4acb54ba
EI
/*
 * Decode a conditional branch (beq/bne/blt/ble/bge/bgt, optionally with
 * a delay slot).  Computes the branch target into cpu_btarget and the
 * taken/not-taken flag into cpu_btaken; the actual PC update happens
 * when the delay-slot countdown expires in gen_intermediate_code().
 */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dec_setup_dslot(dc);
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        /* Target is a compile-time constant: allow direct TB chaining. */
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i32(cpu_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        /* Target only known at run time (register operand or imm prefix). */
        dc->jmp = JMP_INDIRECT;
        tcg_gen_addi_i32(cpu_btarget, *dec_alu_op_b(dc), dc->pc);
    }
    /* btaken = (rA <cc> 0). */
    eval_cc(dc, cc, cpu_btaken, cpu_R[dc->ra]);
}
1242
/*
 * Decode unconditional branches (br/bra/brl/brald...), plus the mbar
 * barrier and the "sleep" encoding that shares the br major opcode.
 * Also recognizes the brki vectors used for breakpoints (0x8/0x18) and
 * the user-to-debugger entry (imm == 0).
 */
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar's "rd" field is actually its immediate operand. */
        uint16_t mbar_imm = dc->rd;

        LOG_DIS("mbar %d\n", mbar_imm);

        /* Data access memory barrier.  */
        if ((mbar_imm & 2) == 0) {
            tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
        }

        /* mbar IMM & 16 decodes to sleep.  */
        if (mbar_imm & 16) {
            TCGv_i32 tmp_1;

            LOG_DIS("sleep\n");

            if (trap_userspace(dc, true)) {
                /* Sleep is a privileged instruction.  */
                return;
            }

            t_sync_flags(dc);

            /* Halt the CPU: store 1 into CPUState.halted, which lives at a
               negative offset from env within the MicroBlazeCPU object. */
            tmp_1 = tcg_const_i32(1);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp_1);

            tcg_gen_movi_i32(cpu_pc, dc->pc + 4);

            gen_raise_exception(dc, EXCP_HLT);
            return;
        }
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dec_setup_dslot(dc);
    }
    /* Link: save the return address (this insn's PC) into rd. */
    if (link && dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_i32(cpu_btaken, 1);
        tcg_gen_mov_i32(cpu_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            /* brki to vector 0x8/0x18 is the software-break convention. */
            if (!(dc->tb_flags & IMM_FLAG) &&
                (dc->imm == 8 || dc->imm == 0x18)) {
                gen_raise_exception_sync(dc, EXCP_BREAK);
            }
            if (dc->imm == 0) {
                if (trap_userspace(dc, true)) {
                    return;
                }
                gen_raise_exception_sync(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            /* PC-relative constant target: eligible for direct chaining. */
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(cpu_btaken, 1);
            tcg_gen_addi_i32(cpu_btarget, *dec_alu_op_b(dc), dc->pc);
        }
    }
}
1330
1331static inline void do_rti(DisasContext *dc)
1332{
cfeea807
EI
1333 TCGv_i32 t0, t1;
1334 t0 = tcg_temp_new_i32();
1335 t1 = tcg_temp_new_i32();
3e0e16ae 1336 tcg_gen_mov_i32(t1, cpu_msr);
0a22f8cf
EI
1337 tcg_gen_shri_i32(t0, t1, 1);
1338 tcg_gen_ori_i32(t1, t1, MSR_IE);
cfeea807
EI
1339 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1340
1341 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1342 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1343 msr_write(dc, t1);
cfeea807
EI
1344 tcg_temp_free_i32(t1);
1345 tcg_temp_free_i32(t0);
4acb54ba
EI
1346 dc->tb_flags &= ~DRTI_FLAG;
1347}
1348
1349static inline void do_rtb(DisasContext *dc)
1350{
cfeea807
EI
1351 TCGv_i32 t0, t1;
1352 t0 = tcg_temp_new_i32();
1353 t1 = tcg_temp_new_i32();
3e0e16ae 1354 tcg_gen_mov_i32(t1, cpu_msr);
0a22f8cf 1355 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
cfeea807
EI
1356 tcg_gen_shri_i32(t0, t1, 1);
1357 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1358
1359 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1360 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1361 msr_write(dc, t1);
cfeea807
EI
1362 tcg_temp_free_i32(t1);
1363 tcg_temp_free_i32(t0);
4acb54ba
EI
1364 dc->tb_flags &= ~DRTB_FLAG;
1365}
1366
1367static inline void do_rte(DisasContext *dc)
1368{
cfeea807
EI
1369 TCGv_i32 t0, t1;
1370 t0 = tcg_temp_new_i32();
1371 t1 = tcg_temp_new_i32();
4acb54ba 1372
3e0e16ae 1373 tcg_gen_mov_i32(t1, cpu_msr);
0a22f8cf 1374 tcg_gen_ori_i32(t1, t1, MSR_EE);
cfeea807
EI
1375 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1376 tcg_gen_shri_i32(t0, t1, 1);
1377 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
4acb54ba 1378
cfeea807
EI
1379 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1380 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1381 msr_write(dc, t1);
cfeea807
EI
1382 tcg_temp_free_i32(t1);
1383 tcg_temp_free_i32(t0);
4acb54ba
EI
1384 dc->tb_flags &= ~DRTE_FLAG;
1385}
1386
/*
 * Decode the return instructions rtsd/rtid/rtbd/rted.  All of them are
 * delay-slot branches; the i/b/e variants additionally schedule an MSR
 * update (do_rti/do_rtb/do_rte) that runs when the slot completes, and
 * are privileged.
 */
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    /* Only plain rtsd is legal from user mode. */
    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
        return;
    }

    dec_setup_dslot(dc);

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    /* Target = rA + operand B; always taken. */
    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(cpu_btaken, 1);
    tcg_gen_add_i32(cpu_btarget, cpu_R[dc->ra], *dec_alu_op_b(dc));
}
1417
97694c57
EI
1418static int dec_check_fpuv2(DisasContext *dc)
1419{
be67e9ab 1420 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
41ba37c4 1421 gen_raise_hw_excp(dc, ESR_EC_FPU);
97694c57 1422 }
2016a6a7 1423 return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
97694c57
EI
1424}
1425
1567a005
EI
/*
 * Decode floating-point instructions.  Bits 7..9 select the operation
 * (fadd/frsub/fmul/fdiv/fcmp/flt/fint/fsqrt); for fcmp, bits 4..6 select
 * the comparison.  flt/fint/fsqrt additionally require FPUv2.  All real
 * work is done by helpers so that FP flags/exceptions are handled there.
 */
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    /* No FPU configured at all -> illegal opcode. */
    if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
    case 0:
        gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;

    case 1:
        gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                         cpu_R[dc->rb]);
        break;

    case 2:
        gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;

    case 3:
        gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;

    case 4:
        /* fcmp: sub-op in bits 4..6. */
        switch ((dc->ir >> 4) & 7) {
        case 0:
            gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 4:
            gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 5:
            gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 6:
            gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            qemu_log_mask(LOG_UNIMP,
                          "unimplemented fcmp fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
        }
        break;

    case 5:
        /* flt requires FPUv2; dec_check_fpuv2 raises the excp if absent. */
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;

    case 6:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;

    case 7:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;

    default:
        qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                      " opc=%x\n",
                      fpu_insn, dc->pc, dc->opcode);
        dc->abort_at_next_insn = 1;
        break;
    }
}
1526
4acb54ba
EI
1527static void dec_null(DisasContext *dc)
1528{
9ba8cd45 1529 if (trap_illegal(dc, true)) {
02b33596
EI
1530 return;
1531 }
1d512a65 1532 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
4acb54ba
EI
1533 dc->abort_at_next_insn = 1;
1534}
1535
6d76d23e
EI
1536/* Insns connected to FSL or AXI stream attached devices. */
1537static void dec_stream(DisasContext *dc)
1538{
6d76d23e
EI
1539 TCGv_i32 t_id, t_ctrl;
1540 int ctrl;
1541
1542 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1543 dc->type_b ? "" : "d", dc->imm);
1544
bdfc1e88 1545 if (trap_userspace(dc, true)) {
6d76d23e
EI
1546 return;
1547 }
1548
cfeea807 1549 t_id = tcg_temp_new_i32();
6d76d23e 1550 if (dc->type_b) {
cfeea807 1551 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
6d76d23e
EI
1552 ctrl = dc->imm >> 10;
1553 } else {
cfeea807 1554 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
6d76d23e
EI
1555 ctrl = dc->imm >> 5;
1556 }
1557
cfeea807 1558 t_ctrl = tcg_const_i32(ctrl);
6d76d23e
EI
1559
1560 if (dc->rd == 0) {
1561 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1562 } else {
1563 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1564 }
cfeea807
EI
1565 tcg_temp_free_i32(t_id);
1566 tcg_temp_free_i32(t_ctrl);
6d76d23e
EI
1567}
1568
4acb54ba
EI
/*
 * Decoder dispatch table, scanned in order by decode(): the first entry
 * whose (opcode & mask) == bits wins.  The terminating {0, 0} entry
 * matches everything, so dec_null() is the catch-all for unknown insns.
 */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1596
64254eba 1597static inline void decode(DisasContext *dc, uint32_t ir)
4acb54ba 1598{
4acb54ba
EI
1599 int i;
1600
64254eba 1601 dc->ir = ir;
4acb54ba
EI
1602 LOG_DIS("%8.8x\t", dc->ir);
1603
462c2544 1604 if (ir == 0) {
1ee1bd28 1605 trap_illegal(dc, dc->cpu->cfg.opcode_0_illegal);
462c2544
EI
1606 /* Don't decode nop/zero instructions any further. */
1607 return;
4acb54ba 1608 }
462c2544 1609
4acb54ba
EI
1610 /* bit 2 seems to indicate insn type. */
1611 dc->type_b = ir & (1 << 29);
1612
1613 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1614 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1615 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1616 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1617 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1618
1619 /* Large switch for all insns. */
1620 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1621 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1622 decinfo[i].dec(dc);
1623 break;
1624 }
1625 }
1626}
1627
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = env_archcpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* A TB may start inside a delay slot (D_FLAG carried in tb->flags). */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_raise_exception_sync(dc, EXCP_DEBUG);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        /* Decoders set clear_imm = 0 to keep the IMM prefix alive. */
        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* Delay slot just executed: apply any pending MSR update. */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    TCGv_i32 tmp_pc = tcg_const_i32(dc->pc);
                    eval_cond_jmp(dc, cpu_btarget, tmp_pc);
                    tcg_temp_free_i32(tmp_pc);
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, cpu_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            /* TB ended mid delay-slot: fall back to a dynamic jump. */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i32(cpu_pc, npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i32(cpu_pc, npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i32(cpu_pc, npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(NULL, 0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        FILE *logfile = qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock(logfile);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
1797
90c84c56 1798void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
4acb54ba 1799{
878096ee
AF
1800 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1801 CPUMBState *env = &cpu->env;
4acb54ba
EI
1802 int i;
1803
90c84c56 1804 if (!env) {
4acb54ba 1805 return;
90c84c56 1806 }
4acb54ba 1807
0f96e96b 1808 qemu_fprintf(f, "IN: PC=%x %s\n",
76e8187d 1809 env->pc, lookup_symbol(env->pc));
6efd5599 1810 qemu_fprintf(f, "rmsr=%x resr=%x rear=%" PRIx64 " "
eb2022b7 1811 "imm=%x iflags=%x fsr=%x rbtr=%x\n",
78e9caf2 1812 env->msr, env->esr, env->ear,
eb2022b7 1813 env->imm, env->iflags, env->fsr, env->btr);
0f96e96b 1814 qemu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
90c84c56 1815 env->btaken, env->btarget,
2e5282ca
RH
1816 (env->msr & MSR_UM) ? "user" : "kernel",
1817 (env->msr & MSR_UMS) ? "user" : "kernel",
1818 (bool)(env->msr & MSR_EIP),
1819 (bool)(env->msr & MSR_IE));
2ead1b18
JK
1820 for (i = 0; i < 12; i++) {
1821 qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
1822 if ((i + 1) % 4 == 0) {
1823 qemu_fprintf(f, "\n");
1824 }
1825 }
17c52a43 1826
2ead1b18 1827 /* Registers that aren't modeled are reported as 0 */
39db007e 1828 qemu_fprintf(f, "redr=%x rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
af20a93a 1829 "rtlblo=0 rtlbhi=0\n", env->edr);
2ead1b18 1830 qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);
4acb54ba 1831 for (i = 0; i < 32; i++) {
90c84c56 1832 qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
4acb54ba 1833 if ((i + 1) % 4 == 0)
90c84c56 1834 qemu_fprintf(f, "\n");
4acb54ba 1835 }
90c84c56 1836 qemu_fprintf(f, "\n\n");
4acb54ba
EI
1837}
1838
cd0c24f9
AF
/*
 * Allocate the TCG globals backing the guest-visible CPU state: the 32
 * general-purpose registers plus the special registers, each mapped onto
 * its field in CPUMBState.  res_addr is target_long-sized, so it is
 * created separately from the i32 table.
 */
void mb_tcg_init(void)
{
/* Table-entry builders: R(n) maps cpu_R[n] to regs[n], SP(f) maps
   cpu_<f> to the CPUMBState field of the same name. */
#define R(X) { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
        R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(imm),
        SP(iflags),
        SP(btaken),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1872
bad729e2
RH
/*
 * Restore CPU state after an exception mid-TB: data[] holds the values
 * recorded by tcg_gen_insn_start() -- here just the instruction's PC.
 */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}
This page took 1.19848 seconds and 4 git commands to generate.