/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv env_debug;
static TCGv_env cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

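/*
 * The carry flag is kept both in MSR_C and in the carry-copy bit
 * MSR_CC (bit 31).  read_carry extracts it from the copy in bit 31;
 * write_carry below updates both bits from bit 0 of its argument.
 */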
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

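/*
 * add/addc/addk/addkc and their immediate forms.  Opcode bit 2 selects
 * the "k" (keep carry) variants and bit 1 the "c" (carry-in) variants;
 * operand b is either rb or the immediate, picked by dec_alu_op_b().
 */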
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

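/*
 * Pattern compare unit: pcmpbf, pcmpeq and pcmpne, selected by the two
 * low opcode bits.  Reached from the and/or/xor decoders when bit 10 of
 * the register-form immediate field is set.
 */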
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_pcmp_instr) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
    tcg_temp_free(t);
}

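/*
 * MSR and special register moves: msrclr/msrset (immediate bit 15
 * clear) as well as mts/mfs, where immediate bit 14 selects the
 * "to special register" direction.
 */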
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !dc->cpu->cfg.use_hw_mul) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_div) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

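/*
 * Barrel shifter: bsrl/bsra/bsll plus, in immediate mode, the bsefi
 * (extract) and bsifi (insert) forms.  Immediate bits 10 and 9 select
 * left/arithmetic shifts, bits 15 and 14 the insert/extract forms, and
 * imm_w/imm_s give the field width and shift amount.
 */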
static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_barrel) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new();

        tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_tl(t0, t0, 31);

        if (s) {
            tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free(t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

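/*
 * If a direct branch is pending, demote it to the indirect form by
 * materialising the branch-taken flag and target in the CPU state, so
 * that a faulting insn in the delay slot sees consistent branch state.
 */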
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

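/*
 * Compute the effective address of a load/store.  Returns either a
 * pointer to one of the source registers (when the other operand is r0
 * or a zero immediate) or a pointer to *t, which the caller must then
 * free.  Also invokes the stack protection helper when r1 is involved
 * and stack protection is configured.
 */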
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

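/*
 * Loads.  The access size comes from the two low opcode bits; for the
 * register forms, ir bit 9 selects a byte-reversed access and ir bit 10
 * the exclusive (lwx) form.
 */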
static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

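/*
 * Stores, decoded like the loads above: size from the low opcode bits,
 * ir bit 9 for byte-reversed and ir bit 10 for the conditional store
 * (swx) form, which only succeeds if the reservation set up by lwx is
 * still intact.
 */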
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

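/*
 * Conditional branches.  The condition code sits in ir bits 21..23 and
 * ir bit 25 selects the delay-slot form.
 */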
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

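/*
 * Unconditional branches.  ir bit 20 selects the delay-slot form,
 * bit 19 the absolute and bit 18 the linking variants; the mbar and
 * sleep encodings are also caught here.
 */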
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

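/*
 * Return-from helpers.  Each one rebuilds MSR: the shift by one moves
 * the saved UMS/VMS bits down into the UM/VM positions, while the
 * IE/BIP/EE/EIP bits are adjusted according to the kind of return
 * (interrupt, break or exception).
 */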
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

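/*
 * rts/rtid/rtbd/rted: ir bits 21, 22 and 23 select the interrupt,
 * break and exception return flavours.  All of them execute a delay
 * slot and defer the MSR update to the do_rt* helpers above.
 */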
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

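/*
 * Instruction dispatch table.  decode() below matches dc->opcode
 * against bits/mask in order, so the catch-all dec_null entry must
 * remain last.
 */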
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb_cflags(tb) & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    env_debug = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUMBState, debug),
                                   "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                                    offsetof(CPUMBState, iflags),
                                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUMBState, imm),
                                 "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUMBState, btarget),
                                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                                    offsetof(CPUMBState, btaken),
                                    "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUMBState, res_addr),
                                      "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUMBState, res_val),
                                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUMBState, regs[i]),
                                      regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUMBState, sregs[i]),
                                       special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}