/*
 * SH4 emulation
 *
 * Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/log.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/sh4/sh_intc.h"
#endif

#if defined(CONFIG_USER_ONLY)

void superh_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

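/*
 * In user-only mode there is no MMU to consult: record the faulting address
 * in TEA and report the access as a protection violation (0x0a0 for reads
 * and instruction fetches, 0x0c0 for writes); the non-zero return value
 * tells the caller that the access could not be completed.
 */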
int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                                int mmu_idx)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;

    env->tea = address;
    cs->exception_index = -1;
    switch (rw) {
    case 0:
        cs->exception_index = 0x0a0;
        break;
    case 1:
        cs->exception_index = 0x0c0;
        break;
    case 2:
        cs->exception_index = 0x0a0;
        break;
    }
    return 1;
}

int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
{
    /* For user mode, only U0 area is cacheable. */
    return !(addr & 0x80000000);
}

#else /* !CONFIG_USER_ONLY */

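/*
 * Return codes for the address translation helpers below.  They are
 * negative so that they can never collide with a (non-negative) TLB
 * entry index returned on success.
 */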
#define MMU_OK                   0
#define MMU_ITLB_MISS            (-1)
#define MMU_ITLB_MULTIPLE        (-2)
#define MMU_ITLB_VIOLATION       (-3)
#define MMU_DTLB_MISS_READ       (-4)
#define MMU_DTLB_MISS_WRITE      (-5)
#define MMU_DTLB_INITIAL_WRITE   (-6)
#define MMU_DTLB_VIOLATION_READ  (-7)
#define MMU_DTLB_VIOLATION_WRITE (-8)
#define MMU_DTLB_MULTIPLE        (-9)
#define MMU_DTLB_MISS            (-10)
#define MMU_IADDR_ERROR          (-11)
#define MMU_DADDR_ERROR_READ     (-12)
#define MMU_DADDR_ERROR_WRITE    (-13)

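/*
 * Exception/interrupt entry: save SR, PC and R15 into SSR, SPC and SGR,
 * set SR.BL/MD/RB, record the event code in EXPEVT or INTEVT, and branch
 * to the handler at VBR + 0x100 (general exceptions), VBR + 0x400 (TLB
 * miss) or VBR + 0x600 (external interrupts); reset-class events jump to
 * 0xa0000000 directly.
 */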
void superh_cpu_do_interrupt(CPUState *cs)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int do_irq = cs->interrupt_request & CPU_INTERRUPT_HARD;
    int do_exp, irq_vector = cs->exception_index;

    /* prioritize exceptions over interrupts */

    do_exp = cs->exception_index != -1;
    do_irq = do_irq && (cs->exception_index == -1);

    if (env->sr & (1u << SR_BL)) {
        if (do_exp && cs->exception_index != 0x1e0) {
            cs->exception_index = 0x000; /* masked exception -> reset */
        }
        if (do_irq && !env->in_sleep) {
            return; /* masked */
        }
    }
    env->in_sleep = 0;

    if (do_irq) {
        irq_vector = sh_intc_get_pending_vector(env->intc_handle,
                                                (env->sr >> 4) & 0xf);
        if (irq_vector == -1) {
            return; /* masked */
        }
    }

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *expname;
        switch (cs->exception_index) {
        case 0x0e0:
            expname = "addr_error";
            break;
        case 0x040:
            expname = "tlb_miss";
            break;
        case 0x0a0:
            expname = "tlb_violation";
            break;
        case 0x180:
            expname = "illegal_instruction";
            break;
        case 0x1a0:
            expname = "slot_illegal_instruction";
            break;
        case 0x800:
            expname = "fpu_disable";
            break;
        case 0x820:
            expname = "slot_fpu";
            break;
        case 0x100:
            expname = "data_write";
            break;
        case 0x060:
            expname = "dtlb_miss_write";
            break;
        case 0x0c0:
            expname = "dtlb_violation_write";
            break;
        case 0x120:
            expname = "fpu_exception";
            break;
        case 0x080:
            expname = "initial_page_write";
            break;
        case 0x160:
            expname = "trapa";
            break;
        default:
            expname = do_irq ? "interrupt" : "???";
            break;
        }
        qemu_log("exception 0x%03x [%s] raised\n",
                 irq_vector, expname);
        log_cpu_state(cs, 0);
    }

    env->ssr = cpu_read_sr(env);
    env->spc = env->pc;
    env->sgr = env->gregs[15];
    env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB);

    if (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        /* Branch instruction should be executed again before delay slot. */
        env->spc -= 2;
        /* Clear flags for exception/interrupt routine. */
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL | DELAY_SLOT_TRUE);
    }
    if (env->flags & DELAY_SLOT_CLEARME)
        env->flags = 0;

    if (do_exp) {
        env->expevt = cs->exception_index;
        switch (cs->exception_index) {
        case 0x000:
        case 0x020:
        case 0x140:
            env->sr &= ~(1u << SR_FD);
            env->sr |= 0xf << 4; /* IMASK */
            env->pc = 0xa0000000;
            break;
        case 0x040:
        case 0x060:
            env->pc = env->vbr + 0x400;
            break;
        case 0x160:
            env->spc += 2; /* special case for TRAPA */
            /* fall through */
        default:
            env->pc = env->vbr + 0x100;
            break;
        }
        return;
    }

    if (do_irq) {
        env->intevt = irq_vector;
        env->pc = env->vbr + 0x600;
        return;
    }
}

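/*
 * The LRUI field in the upper bits of MMUCR tracks which ITLB entries
 * were used most recently.  update_itlb_use() records a use of the given
 * entry; itlb_replacement() decodes the same bits to pick the least
 * recently used entry as the replacement victim.
 */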
static void update_itlb_use(CPUSH4State *env, int itlbnb)
{
    uint8_t or_mask = 0, and_mask = (uint8_t)-1;

    switch (itlbnb) {
    case 0:
        and_mask = 0x1f;
        break;
    case 1:
        and_mask = 0xe7;
        or_mask = 0x80;
        break;
    case 2:
        and_mask = 0xfb;
        or_mask = 0x50;
        break;
    case 3:
        or_mask = 0x2c;
        break;
    }

    env->mmucr &= (and_mask << 24) | 0x00ffffff;
    env->mmucr |= (or_mask << 24);
}

static int itlb_replacement(CPUSH4State *env)
{
    SuperHCPU *cpu = sh_env_get_cpu(env);

    if ((env->mmucr & 0xe0000000) == 0xe0000000) {
        return 0;
    }
    if ((env->mmucr & 0x98000000) == 0x18000000) {
        return 1;
    }
    if ((env->mmucr & 0x54000000) == 0x04000000) {
        return 2;
    }
    if ((env->mmucr & 0x2c000000) == 0x00000000) {
        return 3;
    }
    cpu_abort(CPU(cpu), "Unhandled itlb_replacement");
}

/* Find the corresponding entry in the right TLB
   Return entry, MMU_DTLB_MISS or MMU_DTLB_MULTIPLE
*/
static int find_tlb_entry(CPUSH4State *env, target_ulong address,
                          tlb_t *entries, uint8_t nbtlb, int use_asid)
{
    int match = MMU_DTLB_MISS;
    uint32_t start, end;
    uint8_t asid;
    int i;

    asid = env->pteh & 0xff;

    for (i = 0; i < nbtlb; i++) {
        if (!entries[i].v)
            continue; /* Invalid entry */
        if (!entries[i].sh && use_asid && entries[i].asid != asid)
            continue; /* Bad ASID */
        start = (entries[i].vpn << 10) & ~(entries[i].size - 1);
        end = start + entries[i].size - 1;
        if (address >= start && address <= end) { /* Match */
            if (match != MMU_DTLB_MISS)
                return MMU_DTLB_MULTIPLE; /* Multiple match */
            match = i;
        }
    }
    return match;
}

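/*
 * URC (MMUCR bits 15:10) is the index used when a new entry is loaded
 * into the UTLB.  It is incremented on every UTLB access and reset to 0
 * once it exceeds URB (MMUCR bits 23:18, if non-zero) or the last UTLB
 * entry.
 */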
static void increment_urc(CPUSH4State *env)
{
    uint8_t urb, urc;

    /* Increment URC */
    urb = ((env->mmucr) >> 18) & 0x3f;
    urc = ((env->mmucr) >> 10) & 0x3f;
    urc++;
    if ((urb > 0 && urc > urb) || urc > (UTLB_SIZE - 1))
        urc = 0;
    env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10);
}

/* Copy a UTLB entry into the ITLB
   Return the ITLB entry index
*/
static int copy_utlb_entry_itlb(CPUSH4State *env, int utlb)
{
    int itlb;

    tlb_t *ientry;
    itlb = itlb_replacement(env);
    ientry = &env->itlb[itlb];
    if (ientry->v) {
        tlb_flush_page(CPU(sh_env_get_cpu(env)), ientry->vpn << 10);
    }
    *ientry = env->utlb[utlb];
    update_itlb_use(env, itlb);
    return itlb;
}

/* Find itlb entry
   Return entry, MMU_ITLB_MISS, MMU_ITLB_MULTIPLE or MMU_DTLB_MULTIPLE
*/
static int find_itlb_entry(CPUSH4State *env, target_ulong address,
                           int use_asid)
{
    int e;

    e = find_tlb_entry(env, address, env->itlb, ITLB_SIZE, use_asid);
    if (e == MMU_DTLB_MULTIPLE) {
        e = MMU_ITLB_MULTIPLE;
    } else if (e == MMU_DTLB_MISS) {
        e = MMU_ITLB_MISS;
    } else if (e >= 0) {
        update_itlb_use(env, e);
    }
    return e;
}

/* Find utlb entry
   Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */
static int find_utlb_entry(CPUSH4State *env, target_ulong address, int use_asid)
{
    /* per utlb access */
    increment_urc(env);

    /* Return entry */
    return find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
}

/* Match address against MMU
   Return MMU_OK, MMU_DTLB_MISS_READ, MMU_DTLB_MISS_WRITE,
   MMU_DTLB_INITIAL_WRITE, MMU_DTLB_VIOLATION_READ,
   MMU_DTLB_VIOLATION_WRITE, MMU_ITLB_MISS,
   MMU_ITLB_MULTIPLE, MMU_ITLB_VIOLATION,
   MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.
*/
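/* rw encodes the access type: 0 = data read, 1 = data write,
   2 = instruction fetch. */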
static int get_mmu_address(CPUSH4State *env, target_ulong *physical,
                           int *prot, target_ulong address,
                           int rw, int access_type)
{
    int use_asid, n;
    tlb_t *matching = NULL;

    use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));

    if (rw == 2) {
        n = find_itlb_entry(env, address, use_asid);
        if (n >= 0) {
            matching = &env->itlb[n];
            if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
                n = MMU_ITLB_VIOLATION;
            } else {
                *prot = PAGE_EXEC;
            }
        } else {
            n = find_utlb_entry(env, address, use_asid);
            if (n >= 0) {
                n = copy_utlb_entry_itlb(env, n);
                matching = &env->itlb[n];
                if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
                    n = MMU_ITLB_VIOLATION;
                } else {
                    *prot = PAGE_READ | PAGE_EXEC;
                    if ((matching->pr & 1) && matching->d) {
                        *prot |= PAGE_WRITE;
                    }
                }
            } else if (n == MMU_DTLB_MULTIPLE) {
                n = MMU_ITLB_MULTIPLE;
            } else if (n == MMU_DTLB_MISS) {
                n = MMU_ITLB_MISS;
            }
        }
    } else {
        n = find_utlb_entry(env, address, use_asid);
        if (n >= 0) {
            matching = &env->utlb[n];
            if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
                n = (rw == 1) ? MMU_DTLB_VIOLATION_WRITE :
                    MMU_DTLB_VIOLATION_READ;
            } else if ((rw == 1) && !(matching->pr & 1)) {
                n = MMU_DTLB_VIOLATION_WRITE;
            } else if ((rw == 1) && !matching->d) {
                n = MMU_DTLB_INITIAL_WRITE;
            } else {
                *prot = PAGE_READ;
                if ((matching->pr & 1) && matching->d) {
                    *prot |= PAGE_WRITE;
                }
            }
        } else if (n == MMU_DTLB_MISS) {
            n = (rw == 1) ? MMU_DTLB_MISS_WRITE :
                MMU_DTLB_MISS_READ;
        }
    }
    if (n >= 0) {
        n = MMU_OK;
        *physical = ((matching->ppn << 10) & ~(matching->size - 1)) |
                    (address & (matching->size - 1));
    }
    return n;
}

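/*
 * SH-4 virtual address map: P0/U0 (0x00000000-0x7fffffff) and P3
 * (0xc0000000-0xdfffffff) go through the TLB when address translation is
 * enabled; P1 (0x80000000-0x9fffffff) and P2 (0xa0000000-0xbfffffff) map
 * directly to physical memory by dropping the top three address bits;
 * P4 (0xe0000000-0xffffffff) holds control registers and the store queues
 * (0xe0000000-0xe3ffffff), the only P4 region accessible from user mode.
 */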
static int get_physical_address(CPUSH4State *env, target_ulong *physical,
                                int *prot, target_ulong address,
                                int rw, int access_type)
{
    /* P1, P2 and P4 areas do not use translation */
    if ((address >= 0x80000000 && address < 0xc0000000) ||
        address >= 0xe0000000) {
        if (!(env->sr & (1u << SR_MD))
            && (address < 0xe0000000 || address >= 0xe4000000)) {
            /* Unauthorized access in user mode (only store queues are available) */
            fprintf(stderr, "Unauthorized access\n");
            if (rw == 0)
                return MMU_DADDR_ERROR_READ;
            else if (rw == 1)
                return MMU_DADDR_ERROR_WRITE;
            else
                return MMU_IADDR_ERROR;
        }
        if (address >= 0x80000000 && address < 0xc0000000) {
            /* Mask upper 3 bits for P1 and P2 areas */
            *physical = address & 0x1fffffff;
        } else {
            *physical = address;
        }
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return MMU_OK;
    }

    /* If MMU is disabled, return the corresponding physical page */
    if (!(env->mmucr & MMUCR_AT)) {
        *physical = address & 0x1FFFFFFF;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return MMU_OK;
    }

    /* We need to resort to the MMU */
    return get_mmu_address(env, physical, prot, address, rw, access_type);
}

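/*
 * Returns 0 when the page has been entered into QEMU's TLB via
 * tlb_set_page(), or 1 when an SH-4 exception has been queued in
 * cs->exception_index (with TEA and PTEH updated) for the caller to raise.
 */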
int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                                int mmu_idx)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    target_ulong physical;
    int prot, ret, access_type;

    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address, rw,
                               access_type);

    if (ret != MMU_OK) {
        env->tea = address;
        if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
            env->pteh = (env->pteh & PTEH_ASID_MASK) |
                        (address & PTEH_VPN_MASK);
        }
        switch (ret) {
        case MMU_ITLB_MISS:
        case MMU_DTLB_MISS_READ:
            cs->exception_index = 0x040;
            break;
        case MMU_DTLB_MULTIPLE:
        case MMU_ITLB_MULTIPLE:
            cs->exception_index = 0x140;
            break;
        case MMU_ITLB_VIOLATION:
            cs->exception_index = 0x0a0;
            break;
        case MMU_DTLB_MISS_WRITE:
            cs->exception_index = 0x060;
            break;
        case MMU_DTLB_INITIAL_WRITE:
            cs->exception_index = 0x080;
            break;
        case MMU_DTLB_VIOLATION_READ:
            cs->exception_index = 0x0a0;
            break;
        case MMU_DTLB_VIOLATION_WRITE:
            cs->exception_index = 0x0c0;
            break;
        case MMU_IADDR_ERROR:
        case MMU_DADDR_ERROR_READ:
            cs->exception_index = 0x0e0;
            break;
        case MMU_DADDR_ERROR_WRITE:
            cs->exception_index = 0x100;
            break;
        default:
            cpu_abort(cs, "Unhandled MMU fault");
        }
        return 1;
    }

    address &= TARGET_PAGE_MASK;
    physical &= TARGET_PAGE_MASK;

    tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
    return 0;
}

hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    target_ulong physical;
    int prot;

    get_physical_address(&cpu->env, &physical, &prot, addr, 0, 0);
    return physical;
}

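/*
 * Back end of the LDTLB instruction: copy PTEH/PTEL/PTEA into the UTLB
 * entry selected by MMUCR.URC, flushing any valid mapping it replaces.
 */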
void cpu_load_tlb(CPUSH4State *env)
{
    SuperHCPU *cpu = sh_env_get_cpu(env);
    int n = cpu_mmucr_urc(env->mmucr);
    tlb_t *entry = &env->utlb[n];

    if (entry->v) {
        /* Overwriting valid entry in utlb. */
        target_ulong address = entry->vpn << 10;
        tlb_flush_page(CPU(cpu), address);
    }

    /* Copy the values from the PTEH/PTEL/PTEA registers into the entry. */
    entry->asid = (uint8_t)cpu_pteh_asid(env->pteh);
    entry->vpn = cpu_pteh_vpn(env->pteh);
    entry->v = (uint8_t)cpu_ptel_v(env->ptel);
    entry->ppn = cpu_ptel_ppn(env->ptel);
    entry->sz = (uint8_t)cpu_ptel_sz(env->ptel);
    switch (entry->sz) {
    case 0: /* 00 */
        entry->size = 1024; /* 1K */
        break;
    case 1: /* 01 */
        entry->size = 1024 * 4; /* 4K */
        break;
    case 2: /* 10 */
        entry->size = 1024 * 64; /* 64K */
        break;
    case 3: /* 11 */
        entry->size = 1024 * 1024; /* 1M */
        break;
    default:
        cpu_abort(CPU(cpu), "Unhandled load_tlb");
        break;
    }
    entry->sh = (uint8_t)cpu_ptel_sh(env->ptel);
    entry->c = (uint8_t)cpu_ptel_c(env->ptel);
    entry->pr = (uint8_t)cpu_ptel_pr(env->ptel);
    entry->d = (uint8_t)cpu_ptel_d(env->ptel);
    entry->wt = (uint8_t)cpu_ptel_wt(env->ptel);
    entry->sa = (uint8_t)cpu_ptea_sa(env->ptea);
    entry->tc = (uint8_t)cpu_ptea_tc(env->ptea);
}

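/* Invalidate every UTLB and ITLB entry and drop all of QEMU's cached
   translations; used when software requests a full TLB flush (e.g. by
   setting MMUCR.TI). */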
void cpu_sh4_invalidate_tlb(CPUSH4State *s)
{
    int i;

    /* UTLB */
    for (i = 0; i < UTLB_SIZE; i++) {
        tlb_t *entry = &s->utlb[i];
        entry->v = 0;
    }
    /* ITLB */
    for (i = 0; i < ITLB_SIZE; i++) {
        tlb_t *entry = &s->itlb[i];
        entry->v = 0;
    }

    tlb_flush(CPU(sh_env_get_cpu(s)), 1);
}

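/*
 * The ITLB and UTLB are also visible as memory-mapped arrays in the P4
 * area.  The entry index comes from address bits [9:8] (ITLB) or [13:8]
 * (UTLB), and address bit 23 selects data array 1 (PPN, V, SZ, PR, C, ...)
 * or data array 2 (TC, SA).
 */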
uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s,
                                       hwaddr addr)
{
    int index = (addr & 0x00000300) >> 8;
    tlb_t *entry = &s->itlb[index];

    return (entry->vpn << 10) |
           (entry->v << 8) |
           (entry->asid);
}

void cpu_sh4_write_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr,
                                    uint32_t mem_value)
{
    uint32_t vpn = (mem_value & 0xfffffc00) >> 10;
    uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
    uint8_t asid = (uint8_t)(mem_value & 0x000000ff);

    int index = (addr & 0x00000300) >> 8;
    tlb_t *entry = &s->itlb[index];
    if (entry->v) {
        /* Overwriting valid entry in itlb. */
        target_ulong address = entry->vpn << 10;
        tlb_flush_page(CPU(sh_env_get_cpu(s)), address);
    }
    entry->asid = asid;
    entry->vpn = vpn;
    entry->v = v;
}

uint32_t cpu_sh4_read_mmaped_itlb_data(CPUSH4State *s,
                                       hwaddr addr)
{
    int array = (addr & 0x00800000) >> 23;
    int index = (addr & 0x00000300) >> 8;
    tlb_t *entry = &s->itlb[index];

    if (array == 0) {
        /* ITLB Data Array 1 */
        return (entry->ppn << 10) |
               (entry->v << 8) |
               (entry->pr << 5) |
               ((entry->sz & 1) << 6) |
               ((entry->sz & 2) << 4) |
               (entry->c << 3) |
               (entry->sh << 1);
    } else {
        /* ITLB Data Array 2 */
        return (entry->tc << 1) |
               (entry->sa);
    }
}

void cpu_sh4_write_mmaped_itlb_data(CPUSH4State *s, hwaddr addr,
                                    uint32_t mem_value)
{
    int array = (addr & 0x00800000) >> 23;
    int index = (addr & 0x00000300) >> 8;
    tlb_t *entry = &s->itlb[index];

    if (array == 0) {
        /* ITLB Data Array 1 */
        if (entry->v) {
            /* Overwriting valid entry in itlb. */
            target_ulong address = entry->vpn << 10;
            tlb_flush_page(CPU(sh_env_get_cpu(s)), address);
        }
        entry->ppn = (mem_value & 0x1ffffc00) >> 10;
        entry->v = (mem_value & 0x00000100) >> 8;
        entry->sz = (mem_value & 0x00000080) >> 6 |
                    (mem_value & 0x00000010) >> 4;
        entry->pr = (mem_value & 0x00000040) >> 5;
        entry->c = (mem_value & 0x00000008) >> 3;
        entry->sh = (mem_value & 0x00000002) >> 1;
    } else {
        /* ITLB Data Array 2 */
        entry->tc = (mem_value & 0x00000008) >> 3;
        entry->sa = (mem_value & 0x00000007);
    }
}

uint32_t cpu_sh4_read_mmaped_utlb_addr(CPUSH4State *s,
                                       hwaddr addr)
{
    int index = (addr & 0x00003f00) >> 8;
    tlb_t *entry = &s->utlb[index];

    increment_urc(s); /* per utlb access */

    return (entry->vpn << 10) |
           (entry->v << 8) |
           (entry->asid);
}

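/*
 * A write to the UTLB address array with address bit 7 set is an
 * associative write: every UTLB entry whose VPN (and ASID, when ASIDs are
 * in use) matches has its V and D bits updated, matching ITLB entries are
 * updated as well, and a multiple-hit exception (0x140) is raised if more
 * than one UTLB entry matches.  Without bit 7 the indexed entry is simply
 * rewritten.
 */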
void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
                                    uint32_t mem_value)
{
    int associate = addr & 0x0000080;
    uint32_t vpn = (mem_value & 0xfffffc00) >> 10;
    uint8_t d = (uint8_t)((mem_value & 0x00000200) >> 9);
    uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
    uint8_t asid = (uint8_t)(mem_value & 0x000000ff);
    int use_asid = !(s->mmucr & MMUCR_SV) || !(s->sr & (1u << SR_MD));

    if (associate) {
        int i;
        tlb_t *utlb_match_entry = NULL;
        int needs_tlb_flush = 0;

        /* search UTLB */
        for (i = 0; i < UTLB_SIZE; i++) {
            tlb_t *entry = &s->utlb[i];
            if (!entry->v)
                continue;

            if (entry->vpn == vpn
                && (!use_asid || entry->asid == asid || entry->sh)) {
                if (utlb_match_entry) {
                    CPUState *cs = CPU(sh_env_get_cpu(s));

                    /* Multiple TLB Exception */
                    cs->exception_index = 0x140;
                    s->tea = addr;
                    break;
                }
                if (entry->v && !v)
                    needs_tlb_flush = 1;
                entry->v = v;
                entry->d = d;
                utlb_match_entry = entry;
            }
            increment_urc(s); /* per utlb access */
        }

        /* search ITLB */
        for (i = 0; i < ITLB_SIZE; i++) {
            tlb_t *entry = &s->itlb[i];
            if (entry->vpn == vpn
                && (!use_asid || entry->asid == asid || entry->sh)) {
                if (entry->v && !v)
                    needs_tlb_flush = 1;
                if (utlb_match_entry)
                    *entry = *utlb_match_entry;
                else
                    entry->v = v;
                break;
            }
        }

        if (needs_tlb_flush) {
            tlb_flush_page(CPU(sh_env_get_cpu(s)), vpn << 10);
        }

    } else {
        int index = (addr & 0x00003f00) >> 8;
        tlb_t *entry = &s->utlb[index];
        if (entry->v) {
            CPUState *cs = CPU(sh_env_get_cpu(s));

            /* Overwriting valid entry in utlb. */
            target_ulong address = entry->vpn << 10;
            tlb_flush_page(cs, address);
        }
        entry->asid = asid;
        entry->vpn = vpn;
        entry->d = d;
        entry->v = v;
        increment_urc(s);
    }
}

uint32_t cpu_sh4_read_mmaped_utlb_data(CPUSH4State *s,
                                       hwaddr addr)
{
    int array = (addr & 0x00800000) >> 23;
    int index = (addr & 0x00003f00) >> 8;
    tlb_t *entry = &s->utlb[index];

    increment_urc(s); /* per utlb access */

    if (array == 0) {
        /* UTLB Data Array 1 */
        return (entry->ppn << 10) |
               (entry->v << 8) |
               (entry->pr << 5) |
               ((entry->sz & 1) << 6) |
               ((entry->sz & 2) << 4) |
               (entry->c << 3) |
               (entry->d << 2) |
               (entry->sh << 1) |
               (entry->wt);
    } else {
        /* UTLB Data Array 2 */
        return (entry->tc << 1) |
               (entry->sa);
    }
}

void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
                                    uint32_t mem_value)
{
    int array = (addr & 0x00800000) >> 23;
    int index = (addr & 0x00003f00) >> 8;
    tlb_t *entry = &s->utlb[index];

    increment_urc(s); /* per utlb access */

    if (array == 0) {
        /* UTLB Data Array 1 */
        if (entry->v) {
            /* Overwriting valid entry in utlb. */
            target_ulong address = entry->vpn << 10;
            tlb_flush_page(CPU(sh_env_get_cpu(s)), address);
        }
        entry->ppn = (mem_value & 0x1ffffc00) >> 10;
        entry->v = (mem_value & 0x00000100) >> 8;
        entry->sz = (mem_value & 0x00000080) >> 6 |
                    (mem_value & 0x00000010) >> 4;
        entry->pr = (mem_value & 0x00000060) >> 5;
        entry->c = (mem_value & 0x00000008) >> 3;
        entry->d = (mem_value & 0x00000004) >> 2;
        entry->sh = (mem_value & 0x00000002) >> 1;
        entry->wt = (mem_value & 0x00000001);
    } else {
        /* UTLB Data Array 2 */
        entry->tc = (mem_value & 0x00000008) >> 3;
        entry->sa = (mem_value & 0x00000007);
    }
}

int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
{
    int n;
    int use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));

    /* check area */
    if (env->sr & (1u << SR_MD)) {
        /* For privileged mode, P2 and P4 area is not cacheable. */
        if ((0xA0000000 <= addr && addr < 0xC0000000) || 0xE0000000 <= addr)
            return 0;
    } else {
        /* For user mode, only U0 area is cacheable. */
        if (0x80000000 <= addr)
            return 0;
    }

    /*
     * TODO : Evaluate CCR and check if the cache is on or off.
     *        Now CCR is not in CPUSH4State, but in SH7750State.
     *        When you move the ccr into CPUSH4State, the code will be
     *        as follows.
     */
#if 0
    /* check if operand cache is enabled or not. */
    if (!(env->ccr & 1))
        return 0;
#endif

    /* if MMU is off, no check for TLB. */
    if (env->mmucr & MMUCR_AT)
        return 1;

    /* check TLB */
    n = find_tlb_entry(env, addr, env->itlb, ITLB_SIZE, use_asid);
    if (n >= 0)
        return env->itlb[n].c;

    n = find_tlb_entry(env, addr, env->utlb, UTLB_SIZE, use_asid);
    if (n >= 0)
        return env->utlb[n].c;

    return 0;
}

#endif

bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        superh_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}