/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <[email protected]>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

typedef struct DisasCond {
    TCGCond c;
    TCGv a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;
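
/* Note on the flags above: judging from the field names, a0_is_n records
   that a0 aliases the cpu_psw_n global and a1_is_0 that a1 holds the
   constant zero, so that code consuming a DisasCond can tell which
   operands are safe to free or overwrite.  Nothing in this initial
   commit exercises them yet.  */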

typedef struct DisasContext {
    struct TranslationBlock *tb;
    CPUState *cs;

    target_ulong iaoq_f;
    target_ulong iaoq_b;
    target_ulong iaoq_n;
    TCGv iaoq_n_var;

    int ntemps;
    TCGv temps[8];

    DisasCond null_cond;
    TCGLabel *null_lab;

    bool singlestep_enabled;
    bool psw_n_nonzero;
} DisasContext;
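
/* PA-RISC exposes a two-entry instruction address offset queue: iaoq_f
   ("front") is the offset of the executing instruction and iaoq_b
   ("back") that of the one to execute next, which because of delayed
   branches need not be sequential.  iaoq_n is the translator's value for
   the instruction after that; throughout this file the value -1 is a
   sentinel meaning "not known at translate time", in which case the
   runtime value lives in iaoq_n_var.  */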

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the iaq (for whatever reason), so don't do it again on exit.  */
    EXIT_IAQ_N_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the iaq for the next instruction to be executed.  */
    EXIT_IAQ_N_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

typedef struct DisasInsn {
    uint32_t insn, mask;
    ExitStatus (*trans)(DisasContext *ctx, uint32_t insn,
                        const struct DisasInsn *f);
} DisasInsn;
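
/* A fetched word matches a table entry when (word & mask) == insn;
   translate_table_int below scans a table in order and dispatches on the
   first match.  A hypothetical entry for major opcode 2 might read
   { 0x08000000u, 0xfc000000u, trans_foo }, with trans_foo a handler of
   the signature above; no such tables exist yet in this commit.  */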

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_gr[32];
static TCGv cpu_iaoq_f;
static TCGv cpu_iaoq_b;
static TCGv cpu_sar;
static TCGv cpu_psw_n;
static TCGv cpu_psw_v;
static TCGv cpu_psw_cb;
static TCGv cpu_psw_cb_msb;
static TCGv cpu_cr26;
static TCGv cpu_cr27;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(sar),
        DEF_VAR(cr26),
        DEF_VAR(cr27),
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };

    static bool done_init = false;
    int i;

    if (done_init) {
        return;
    }
    done_init = true;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    TCGV_UNUSED(cpu_gr[0]);
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }
}
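
/* cpu_gr[0] is deliberately left without a backing global: PA-RISC r0
   always reads as zero and writes to it are discarded, which load_gpr
   and dest_gpr below implement with throwaway temporaries instead.  */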

static TCGv get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntemps++;
    g_assert(i < ARRAY_SIZE(ctx->temps));
    return ctx->temps[i] = tcg_temp_new();
}

static TCGv load_const(DisasContext *ctx, target_long v)
{
    TCGv t = get_temp(ctx);
    tcg_gen_movi_tl(t, v);
    return t;
}
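
/* Temporaries allocated through get_temp live for one guest instruction:
   gen_intermediate_code frees and clears ctx->temps after each call to
   translate_one.  */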

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv t = get_temp(ctx);
        tcg_gen_movi_tl(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}
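
/* Reads of r0 materialize the architectural zero in a fresh temporary;
   writes aimed at r0 land in a scratch temporary that is simply thrown
   away when the instruction's temporaries are freed.  */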

static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_tl(dest, vval);
    } else {
        tcg_gen_movi_tl(dest, ival);
    }
}
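
/* copy_iaoq_entry prefers the compile-time constant ival; the sentinel
   -1 means the queue entry is only known at run time and must be copied
   from the variable vval instead.  */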

static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
{
    return ctx->iaoq_f + disp + 8;
}
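
/* PA-RISC branch displacements are relative to the address of the branch
   plus 8, i.e. to the instruction after the delay slot, hence the
   constant in iaoq_dest.  */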

static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    gen_excp_1(exception);
    return EXIT_NORETURN;
}
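
/* Both queue entries are written back to their globals before raising
   the exception, so the helper observes a precise instruction address
   queue; the helper does not return, hence EXIT_NORETURN.  */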

static ExitStatus gen_illegal(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_SIGILL);
}

static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO) || ctx->singlestep_enabled) {
        return false;
    }
    return true;
}
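
/* Note that dest is currently unused; presumably it is reserved for the
   usual "destination on the same guest page" check that direct TB
   chaining would require in a system-mode port.  */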

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ulong f, target_ulong b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_tl(cpu_iaoq_f, f);
        tcg_gen_movi_tl(cpu_iaoq_b, b);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
    }
}
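
/* The first arm emits a directly-chainable jump: both queue entries are
   compile-time constants, so this TB can later be patched to branch
   straight to its successor.  The fallback stores whatever is known
   about the queue and returns to the main loop (exit_tb with a zero
   argument), which is also where single-step debug exceptions are
   raised.  */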

static ExitStatus translate_table_int(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn table[], size_t n)
{
    size_t i;
    for (i = 0; i < n; ++i) {
        if ((insn & table[i].mask) == table[i].insn) {
            return table[i].trans(ctx, insn, &table[i]);
        }
    }
    return gen_illegal(ctx);
}

#define translate_table(ctx, insn, table) \
    translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
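
/* The macro exists so callers can name a table without repeating its
   size; a case in translate_one would then read, for some hypothetical
   table_mem, simply:

       return translate_table(ctx, insn, table_mem);
 */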

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc = extract32(insn, 26, 6);

    switch (opc) {
    default:
        break;
    }
    return gen_illegal(ctx);
}
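
/* Only the framework exists at this point: the six-bit major opcode
   (bits [31:26] of the fetched word) is extracted, but every opcode
   falls through to the illegal-instruction exception.  Later patches
   are expected to fill in the switch with translate_table dispatches.  */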

void gen_intermediate_code(CPUHPPAState *env, struct TranslationBlock *tb)
{
    HPPACPU *cpu = hppa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    ExitStatus ret;
    int num_insns, max_insns, i;

    ctx.tb = tb;
    ctx.cs = cs;
    ctx.iaoq_f = tb->pc;
    ctx.iaoq_b = tb->cs_base;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    ctx.ntemps = 0;
    for (i = 0; i < ARRAY_SIZE(ctx.temps); ++i) {
        TCGV_UNUSED(ctx.temps[i]);
    }

    /* Compute the maximum number of insns to execute, as bounded by
       (1) icount, (2) single-stepping, (3) branch delay slots, or
       (4) the number of insns remaining on the current page.  */
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (ctx.singlestep_enabled || singlestep) {
        max_insns = 1;
    } else if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    num_insns = 0;
    gen_tb_start(tb);

    do {
        tcg_gen_insn_start(ctx.iaoq_f, ctx.iaoq_b);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.iaoq_f, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG);
            break;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        {
            /* Always fetch the insn, even if nullified, so that we check
               the page permissions for execute.  */
            uint32_t insn = cpu_ldl_code(env, ctx.iaoq_f);

            /* Set up the IA queue for the next insn.
               This will be overwritten by a branch.  */
            if (ctx.iaoq_b == -1) {
                ctx.iaoq_n = -1;
                ctx.iaoq_n_var = get_temp(&ctx);
                tcg_gen_addi_tl(ctx.iaoq_n_var, cpu_iaoq_b, 4);
            } else {
                ctx.iaoq_n = ctx.iaoq_b + 4;
                TCGV_UNUSED(ctx.iaoq_n_var);
            }

            ret = translate_one(&ctx, insn);
        }

        for (i = 0; i < ctx.ntemps; ++i) {
            tcg_temp_free(ctx.temps[i]);
            TCGV_UNUSED(ctx.temps[i]);
        }
        ctx.ntemps = 0;

        /* If we see non-linear instructions, exhaust instruction count,
           or run out of buffer space, stop generation.  */
        /* ??? The non-linear instruction restriction is purely due to
           the debugging dump.  Otherwise we *could* follow unconditional
           branches within the same page.  */
        if (ret == NO_EXIT
            && (ctx.iaoq_b != ctx.iaoq_f + 4
                || num_insns >= max_insns
                || tcg_op_buf_full())) {
            ret = EXIT_IAQ_N_STALE;
        }

        ctx.iaoq_f = ctx.iaoq_b;
        ctx.iaoq_b = ctx.iaoq_n;
        if (ret == EXIT_NORETURN
            || ret == EXIT_GOTO_TB
            || ret == EXIT_IAQ_N_UPDATED) {
            break;
        }
        if (ctx.iaoq_f == -1) {
            tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_n, ctx.iaoq_n_var);
            ret = EXIT_IAQ_N_UPDATED;
            break;
        }
        if (ctx.iaoq_b == -1) {
            tcg_gen_mov_tl(cpu_iaoq_b, ctx.iaoq_n_var);
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_IAQ_N_STALE:
        copy_iaoq_entry(cpu_iaoq_f, ctx.iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_b, cpu_iaoq_b);
        /* FALLTHRU */
    case EXIT_IAQ_N_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    tb->size = num_insns * 4;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("IN: %s\n", lookup_symbol(tb->pc));
        log_target_disas(cs, tb->pc, tb->size, 1);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
}
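
/* The two values passed to tcg_gen_insn_start above are what
   restore_state_to_opc receives in data[0] and data[1] when the state
   of a faulting instruction has to be reconstructed.  */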

void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->iaoq_f = data[0];
    if (data[1] != -1) {
        env->iaoq_b = data[1];
    }
    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified.  */
    env->psw_n = 0;
}