/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qom/cpu.h"

#ifdef CONFIG_USER_ONLY
int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int size, int rw, int mmu_idx)
{
    HPPACPU *cpu = HPPA_CPU(cs);

    /* ??? Test between data page fault and data memory protection trap,
       which would affect si_code.  */
    cs->exception_index = EXCP_DMP;
    cpu->env.cr[CR_IOR] = address;
    return 1;
}
#else
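/* Scan the architectural TLB for an entry covering ADDR.  Linear
   search; deliberately does not test entry_valid, so the ITLBA/ITLBP
   pair below can locate a half-constructed entry.  */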
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            return ent;
        }
    }
    return NULL;
}

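/* Invalidate an architectural TLB entry: flush every target page it
   covers from the per-privilege QEMU softmmu TLBs, then mark the
   entry empty.  A superpage entry covers 4^page_size pages.  */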
static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
    CPUState *cs = CPU(hppa_env_get_cpu(env));
    unsigned i, n = 1 << (2 * ent->page_size);
    uint64_t addr = ent->va_b;

    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
        /* Do not flush MMU_PHYS_IDX.  */
        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
    }

    memset(ent, 0, sizeof(*ent));
    ent->va_b = -1;
}

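/* Choose a victim slot for a new TLB insertion.  Replacement is a
   simple round robin: env->tlb_last tracks the next index to evict.  */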
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i = env->tlb_last;

    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent);
    return ent;
}

int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type & PAGE_EXEC ? EXCP_ITLB_MISS : EXCP_DTLB_MISS);
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

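    /* PA-RISC privilege levels: 0 is most privileged, 3 least, and the
       mmu_idx in use here equals the current privilege level.  Each
       access type is granted when mmu_idx compares suitably against
       the PL1/PL2 bounds in the entry's access-rights field.  */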
    /* Map TLB access_rights field to QEMU protection.  */
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
    }

    /* ??? Check PSW_P and ent->access_prot.  This can remove PAGE_WRITE.  */

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC ? EXCP_IMP : EXCP_DMP);
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType type, int mmu_idx, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    int prot, excp, a_prot;
    hwaddr phys;

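    /* Map the QEMU access type to the protection bit that must be
       present in the translation.  */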
    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx,
                                     a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    /* Success!  Store the translation into the QEMU TLB.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (!ent->entry_valid) {
            empty = ent;
        } else if (ent->va_b <= addr && addr <= ent->va_e) {
            hppa_flush_tlb_ent(env, ent);
            empty = ent;
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
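    /* Bits [24:5] of REG carry the physical page number; shift it
       back into a byte address.  */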
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL || ent->entry_valid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

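    /* Decode the PA 1.1 protection word: access id, the U bit, the
       access-rights bounds and type, and the B, D and T bits; then
       mark the entry valid.  */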
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent);
    }
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));
    CPUState *cpu;
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
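    /* Queue the flush on the issuing cpu as "safe" work: it runs in an
       exclusive section with the other vcpus stopped, which is what
       makes the purge appear synchronous across processors.  */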
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));

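    /* Drop every architectural entry, then flush the four privilege
       mmu indexes (mask 0xf).  MMU_PHYS_IDX is left alone, as the
       direct physical mapping never changes.  */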
    memset(env->tlb, 0, sizeof(env->tlb));
    tlb_flush_by_mmuidx(src, 0xf);
}
#endif /* CONFIG_USER_ONLY */