/*
 * PA-RISC emulation cpu definitions for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <[email protected]>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HPPA_CPU_H
#define HPPA_CPU_H

#include "qemu-common.h"
#include "cpu-qom.h"

#ifdef TARGET_HPPA64
#define TARGET_LONG_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64
#define TARGET_REGISTER_BITS 64
#define TARGET_PHYS_ADDR_SPACE_BITS 64
#elif defined(CONFIG_USER_ONLY)
#define TARGET_LONG_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#define TARGET_REGISTER_BITS 32
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#else
/* In order to form the GVA from space:offset,
   we need a 64-bit virtual address space.  */
#define TARGET_LONG_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64
#define TARGET_REGISTER_BITS 32
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

#define CPUArchState struct CPUHPPAState

#include "exec/cpu-defs.h"
#include "fpu/softfloat.h"

#define TARGET_PAGE_BITS 12

#define ALIGNED_ONLY
#define NB_MMU_MODES 5
#define MMU_KERNEL_IDX 0
#define MMU_USER_IDX 3
#define MMU_PHYS_IDX 4
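/* Descriptive note: MMU indexes 0-3 correspond to the four privilege levels
   (0 = most privileged, 3 = user), as selected from the low bits of iaoq_f
   in cpu_mmu_index() below; MMU_PHYS_IDX bypasses address translation.  */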
#define TARGET_INSN_START_EXTRA_WORDS 1

/* Hardware exceptions, interrupts, faults, and traps.  */
#define EXCP_HPMC                1  /* high priority machine check */
#define EXCP_POWER_FAIL          2
#define EXCP_RC                  3  /* recovery counter */
#define EXCP_EXT_INTERRUPT       4  /* external interrupt */
#define EXCP_LPMC                5  /* low priority machine check */
#define EXCP_ITLB_MISS           6  /* itlb miss / instruction page fault */
#define EXCP_IMP                 7  /* instruction memory protection trap */
#define EXCP_ILL                 8  /* illegal instruction trap */
#define EXCP_BREAK               9  /* break instruction */
#define EXCP_PRIV_OPR           10  /* privileged operation trap */
#define EXCP_PRIV_REG           11  /* privileged register trap */
#define EXCP_OVERFLOW           12  /* signed overflow trap */
#define EXCP_COND               13  /* trap-on-condition */
#define EXCP_ASSIST             14  /* assist exception trap */
#define EXCP_DTLB_MISS          15  /* dtlb miss / data page fault */
#define EXCP_NA_ITLB_MISS       16  /* non-access itlb miss */
#define EXCP_NA_DTLB_MISS       17  /* non-access dtlb miss */
#define EXCP_DMP                18  /* data memory protection trap */
#define EXCP_DMB                19  /* data memory break trap */
#define EXCP_TLB_DIRTY          20  /* tlb dirty bit trap */
#define EXCP_PAGE_REF           21  /* page reference trap */
#define EXCP_ASSIST_EMU         22  /* assist emulation trap */
#define EXCP_HPT                23  /* high-privilege transfer trap */
#define EXCP_LPT                24  /* low-privilege transfer trap */
#define EXCP_TB                 25  /* taken branch trap */
#define EXCP_DMAR               26  /* data memory access rights trap */
#define EXCP_DMPI               27  /* data memory protection id trap */
#define EXCP_UNALIGN            28  /* unaligned data reference trap */
#define EXCP_PER_INTERRUPT      29  /* performance monitor interrupt */

/* Exceptions for linux-user emulation.  */
#define EXCP_SYSCALL            30
#define EXCP_SYSCALL_LWS        31

/* Taken from Linux kernel: arch/parisc/include/asm/psw.h */
#define PSW_I            0x00000001
#define PSW_D            0x00000002
#define PSW_P            0x00000004
#define PSW_Q            0x00000008
#define PSW_R            0x00000010
#define PSW_F            0x00000020
#define PSW_G            0x00000040 /* PA1.x only */
#define PSW_O            0x00000080 /* PA2.0 only */
#define PSW_CB           0x0000ff00
#define PSW_M            0x00010000
#define PSW_V            0x00020000
#define PSW_C            0x00040000
#define PSW_B            0x00080000
#define PSW_X            0x00100000
#define PSW_N            0x00200000
#define PSW_L            0x00400000
#define PSW_H            0x00800000
#define PSW_T            0x01000000
#define PSW_S            0x02000000
#define PSW_E            0x04000000
#ifdef TARGET_HPPA64
#define PSW_W            0x08000000 /* PA2.0 only */
#else
#define PSW_W            0
#endif
#define PSW_Z            0x40000000 /* PA1.x only */
#define PSW_Y            0x80000000 /* PA1.x only */

#define PSW_SM (PSW_W | PSW_E | PSW_O | PSW_G | PSW_F \
                | PSW_R | PSW_Q | PSW_P | PSW_D | PSW_I)

/* ssm/rsm instructions number PSW_W and PSW_E differently */
#define PSW_SM_I         PSW_I      /* Enable External Interrupts */
#define PSW_SM_D         PSW_D
#define PSW_SM_P         PSW_P
#define PSW_SM_Q         PSW_Q      /* Enable Interrupt State Collection */
#define PSW_SM_R         PSW_R      /* Enable Recover Counter Trap */
#ifdef TARGET_HPPA64
#define PSW_SM_E         0x100
#define PSW_SM_W         0x200      /* PA2.0 only : Enable Wide Mode */
#else
#define PSW_SM_E         0
#define PSW_SM_W         0
#endif
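/* Illustrative example: PSW_SM_I keeps its PSW position (bit 0), so an
   "rsm 1,%r0" disables external interrupts, whereas E and W sit in bits 8
   and 9 of the ssm/rsm operand rather than at their PSW positions
   (bits 26 and 27).  */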

#define CR_RC            0
#define CR_SCRCCR        10
#define CR_SAR           11
#define CR_IVA           14
#define CR_EIEM          15
#define CR_IT            16
#define CR_IIASQ         17
#define CR_IIAOQ         18
#define CR_IIR           19
#define CR_ISR           20
#define CR_IOR           21
#define CR_IPSW          22
#define CR_EIRR          23

typedef struct CPUHPPAState CPUHPPAState;

#if TARGET_REGISTER_BITS == 32
typedef uint32_t target_ureg;
typedef int32_t target_sreg;
#define TREG_FMT_lx "%08"PRIx32
#define TREG_FMT_ld "%"PRId32
#else
typedef uint64_t target_ureg;
typedef int64_t target_sreg;
#define TREG_FMT_lx "%016"PRIx64
#define TREG_FMT_ld "%"PRId64
#endif

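/* Descriptive note: one entry of the unified I/D TLB declared in
   CPUHPPAState below.  The ar_* fields hold the PA-RISC access rights
   (type plus the PL1/PL2 privilege checks), access_id the protection ID,
   and u/t/d/b the per-page attribute bits (uncacheable, page reference
   trap, dirty, and data memory break, going by the trap names above).  */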
typedef struct {
    uint64_t va_b;
    uint64_t va_e;
    target_ureg pa;
    unsigned u : 1;
    unsigned t : 1;
    unsigned d : 1;
    unsigned b : 1;
    unsigned page_size : 4;
    unsigned ar_type : 3;
    unsigned ar_pl1 : 2;
    unsigned ar_pl2 : 2;
    unsigned entry_valid : 1;
    unsigned access_id : 16;
} hppa_tlb_entry;

struct CPUHPPAState {
    target_ureg gr[32];
    uint64_t fr[32];
    uint64_t sr[8];          /* stored shifted into place for gva */

    target_ureg psw;         /* All psw bits except the following:  */
    target_ureg psw_n;       /* boolean */
    target_sreg psw_v;       /* in most significant bit */

    /* Splitting the carry-borrow field into the MSB and "the rest" allows
     * "the rest" to be deleted when it is unused while the MSB remains in
     * use.  In addition, it's easier to compute carry-in for bit B+1 than it
     * is to compute carry-out for bit B (3 vs 4 insns for addition, assuming
     * the host has the appropriate add-with-carry insn to compute the msb).
     * Therefore the carry bits are stored as: cb_msb : cb & 0x11111110.
     */
    target_ureg psw_cb;      /* in least significant bit of next nibble */
    target_ureg psw_cb_msb;  /* boolean */
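    /* Worked example: adding 0x0000000f and 0x00000001 carries out of the
       low nibble only, so within the 0x11111110 mask psw_cb has just bit 4
       set (the least significant bit of the next nibble) and psw_cb_msb,
       the carry out of bit 31, is 0.  */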

    target_ureg iaoq_f;      /* front */
    target_ureg iaoq_b;      /* back, aka next instruction */
    uint64_t iasq_f;
    uint64_t iasq_b;

    uint32_t fr0_shadow;     /* flags, c, ca/cq, rm, d, enables */
    float_status fp_status;

    target_ureg cr[32];      /* control registers */
    target_ureg cr_back[2];  /* back of cr17/cr18 */
    target_ureg shadow[7];   /* shadow registers */

    /* Those resources are used only in QEMU core */
    CPU_COMMON

    /* ??? The number of entries isn't specified by the architecture.  */
    /* ??? Implement a unified itlb/dtlb for the moment.  */
    /* ??? We should use a more intelligent data structure.  */
    hppa_tlb_entry tlb[256];
    uint32_t tlb_last;
};

/**
 * HPPACPU:
 * @env: #CPUHPPAState
 *
 * An HPPA CPU.
 */
struct HPPACPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUHPPAState env;
    QEMUTimer *alarm_timer;
};

static inline HPPACPU *hppa_env_get_cpu(CPUHPPAState *env)
{
    return container_of(env, HPPACPU, env);
}

#define ENV_GET_CPU(e) CPU(hppa_env_get_cpu(e))
#define ENV_OFFSET offsetof(HPPACPU, env)

#include "exec/cpu-all.h"

static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (env->psw & (ifetch ? PSW_C : PSW_D)) {
        return env->iaoq_f & 3;
    }
    return MMU_PHYS_IDX;  /* mmu disabled */
#endif
}

void hppa_translate_init(void);

#define cpu_init(cpu_model) cpu_generic_init(TYPE_HPPA_CPU, cpu_model)

void hppa_cpu_list(FILE *f, fprintf_function cpu_fprintf);

static inline target_ulong hppa_form_gva_psw(target_ureg psw, uint64_t spc,
                                             target_ureg off)
{
#ifdef CONFIG_USER_ONLY
    return off;
#else
    off &= (psw & PSW_W ? 0x3fffffffffffffffull : 0xffffffffull);
    return spc | off;
#endif
}

static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
                                         target_ureg off)
{
    return hppa_form_gva_psw(env->psw, spc, off);
}
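
/* Illustrative example: with PSW_W clear, a space value already shifted
   into the high word (say 0x0000123400000000) OR'd with the 32-bit offset
   0x00004000 yields the global virtual address 0x0000123400004000; with
   PSW_W set the offset is instead truncated to 62 bits.  */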

/* Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
 * TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the
 * same value.
 */
#define TB_FLAG_SR_SAME     PSW_I
#define TB_FLAG_PRIV_SHIFT  8

static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
                                        target_ulong *cs_base,
                                        uint32_t *pflags)
{
    uint32_t flags = env->psw_n * PSW_N;

    /* TB lookup assumes that PC contains the complete virtual address.
       If we leave space+offset separate, we'll get ITLB misses to an
       incomplete virtual address.  This also means that we must separate
       out the current cpu privilege from the low bits of IAOQ_F.  */
#ifdef CONFIG_USER_ONLY
    *pc = env->iaoq_f;
    *cs_base = env->iaoq_b;
#else
    /* ??? E, T, H, L, B, P bits need to be here, when implemented.  */
    flags |= env->psw & (PSW_W | PSW_C | PSW_D);
    flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;

    *pc = (env->psw & PSW_C
           ? hppa_form_gva_psw(env->psw, env->iasq_f, env->iaoq_f & -4)
           : env->iaoq_f & -4);
    *cs_base = env->iasq_f;

    /* Insert a difference between IAOQ_B and IAOQ_F within the otherwise zero
       low 32-bits of CS_BASE.  This will succeed for all direct branches,
       which is the primary case we care about -- using goto_tb within a page.
       Failure is indicated by a zero difference.  */
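    /* For example, in the common sequential case iaoq_f == 0x2000 and
       iaoq_b == 0x2004 within the same space, so the low 32 bits of cs_base
       become 4; if the two queues are in different spaces the difference is
       unknown and those bits stay 0.  */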
    if (env->iasq_f == env->iasq_b) {
        target_sreg diff = env->iaoq_b - env->iaoq_f;
        if (TARGET_REGISTER_BITS == 32 || diff == (int32_t)diff) {
            *cs_base |= (uint32_t)diff;
        }
    }
    if ((env->sr[4] == env->sr[5])
        & (env->sr[4] == env->sr[6])
        & (env->sr[4] == env->sr[7])) {
        flags |= TB_FLAG_SR_SAME;
    }
#endif

    *pflags = flags;
}

target_ureg cpu_hppa_get_psw(CPUHPPAState *env);
void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg);
void cpu_hppa_loaded_fr0(CPUHPPAState *env);

#define cpu_signal_handler cpu_hppa_signal_handler

int cpu_hppa_signal_handler(int host_signum, void *pinfo, void *puc);
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
int hppa_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function, int);
#ifdef CONFIG_USER_ONLY
int hppa_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
                              int rw, int midx);
#else
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot);
extern const MemoryRegionOps hppa_io_eir_ops;
void hppa_cpu_alarm_timer(void *);
#endif
void QEMU_NORETURN hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra);

#endif /* HPPA_CPU_H */