#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/hw_accel.h"
#include "sysemu/sysemu.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
#include "kvm_ppc.h"
#include "hw/ppc/spapr_ovec.h"
#include "qemu/error-report.h"
#include "mmu-book3s-v3.h"
struct SPRSyncState {
    int spr;
    target_ulong value;
    target_ulong mask;
};

static void do_spr_sync(CPUState *cs, run_on_cpu_data arg)
{
    struct SPRSyncState *s = arg.host_ptr;
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    cpu_synchronize_state(cs);
    env->spr[s->spr] &= ~s->mask;
    env->spr[s->spr] |= s->value;
}

static void set_spr(CPUState *cs, int spr, target_ulong value,
                    target_ulong mask)
{
    struct SPRSyncState s = {
        .spr = spr,
        .value = value,
        .mask = mask
    };
    run_on_cpu(cs, do_spr_sync, RUN_ON_CPU_HOST_PTR(&s));
}
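/*
 * Note: run_on_cpu() executes do_spr_sync() synchronously on the target
 * vCPU, so passing the on-stack SPRSyncState by pointer is safe; set_spr()
 * does not return until the masked bits of the SPR have been updated.
 */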
static bool has_spr(PowerPCCPU *cpu, int spr)
{
    /* We can test whether the SPR is defined by checking for a valid name */
    return cpu->env.spr_cb[spr].name != NULL;
}
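/*
 * A PTEX (hashed page table entry index) selects one of the 8 slots of a
 * PTEG in its low 3 bits; the bits above select the PTEG itself. The PTEG
 * index must fit under the mask for the currently configured HPT size.
 */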
static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
    /*
     * hash value/pteg group index is normalized by HPT mask
     */
    if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
        return false;
    }
    return true;
}
static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr)
{
    MachineState *machine = MACHINE(spapr);
    MemoryHotplugState *hpms = &spapr->hotplug_memory;

    if (addr < machine->ram_size) {
        return true;
    }
    if ((addr >= hpms->base)
        && ((addr - hpms->base) < memory_region_size(&hpms->mr))) {
        return true;
    }

    return false;
}
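/*
 * H_ENTER: insert an entry into the hashed page table. The WIMG bits are
 * sanity-checked (RAM must be mapped WIMG=0010, IO only cache-inhibited
 * combinations). Without H_EXACT the first free slot of the addressed
 * PTEG is used; with H_EXACT the exact slot given in the low 3 bits of
 * the PTEX must be free. The chosen PTEX is returned in args[0].
 */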
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong slot;
    const ppc_hash_pte64_t *hptes;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        target_ulong wimg_flags;
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));

        if (wimg_flags != HPTE64_R_I &&
            wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    slot = ptex & 7ULL;
    ptex = ptex & ~7ULL;

    if (likely((flags & H_EXACT) == 0)) {
        hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
        for (slot = 0; slot < 8; slot++) {
            if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
        if (slot == 8) {
            return H_PTEG_FULL;
        }
    } else {
        hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
        if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
            ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
            return H_PTEG_FULL;
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
    }

    ppc_hash64_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = ptex + slot;
    return H_SUCCESS;
}
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;
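/*
 * Common HPTE removal logic for H_REMOVE and H_BULK_REMOVE: the entry is
 * removed only if it is valid, its AVPN matches when H_AVPN is set, and
 * all of the avpn bits are clear when H_ANDCOND is set. The previous PTE
 * doublewords are returned through vp/rp.
 */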
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return REMOVE_PARM;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}
static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, ptex, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}
#define H_BULK_REMOVE_TYPE         0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST    0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE   0x8000000000000000ULL
#define   H_BULK_REMOVE_END        0xc000000000000000ULL
#define H_BULK_REMOVE_CODE         0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS    0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND  0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM       0x2000000000000000ULL
#define   H_BULK_REMOVE_HW         0x3000000000000000ULL
#define H_BULK_REMOVE_RC           0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS        0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE   0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND    0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN       0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX         0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH    4
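/*
 * Each H_BULK_REMOVE translation specifier is a doubleword pair: the
 * first (TSH) packs the request type into bits 63:62, a completion code
 * into bits 61:60, the AVPN/ANDCOND flags into bits 57:56 and the PTEX
 * into the low bits; the second (TSL) holds the AVPN or ANDCOND value.
 * Shifting the TSH flag field right by 26 lines it up with the
 * H_AVPN/H_ANDCOND bits that remove_hpte() expects.
 */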
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}
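/*
 * H_PROTECT updates the protection (PP0/PP/N) and key bits of an existing
 * HPTE. The entry is first written back with the valid bit clear and the
 * TLB flushed before the updated entry is stored, so no CPU can pick up a
 * half-modified translation.
 */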
static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    ppc_hash64_store_hpte(cpu, ptex,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    /* Flush the tlb */
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}
static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        ptex &= ~(3ULL);
        n_entries = 4;
    }

    hpte = spapr->htab + (ptex * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}
static target_ulong h_set_sprg0(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}
static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}
static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dabrx = args[1];

    if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
        return H_HARDWARE;
    }

    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
        return H_PARAMETER;
    }

    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_DABRX] = dabrx;
    cpu->env.spr[SPR_DABR] = args[0];

    return H_SUCCESS;
}
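/*
 * H_PAGE_INIT zeroes and/or copies a page of guest memory, optionally
 * synchronizing or invalidating the icache for it. Both addresses must be
 * page-aligned RAM; under KVM the cache maintenance is performed with
 * real dcbst/icbi on the host mapping, while under TCG it is enough to
 * flush the translation block cache.
 */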
static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    hwaddr dst = args[1];
    hwaddr src = args[2];
    hwaddr len = TARGET_PAGE_SIZE;
    uint8_t *pdst, *psrc;
    target_long ret = H_SUCCESS;

    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
                  | H_COPY_PAGE | H_ZERO_PAGE)) {
        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n",
                      flags);
        return H_PARAMETER;
    }

    /* Map-in destination */
    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
        return H_PARAMETER;
    }
    pdst = cpu_physical_memory_map(dst, &len, 1);
    if (!pdst || len != TARGET_PAGE_SIZE) {
        return H_PARAMETER;
    }

    if (flags & H_COPY_PAGE) {
        /* Map-in source, copy to destination, and unmap source again */
        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        psrc = cpu_physical_memory_map(src, &len, 0);
        if (!psrc || len != TARGET_PAGE_SIZE) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        memcpy(pdst, psrc, len);
        cpu_physical_memory_unmap(psrc, len, 0, len);
    } else if (flags & H_ZERO_PAGE) {
        memset(pdst, 0, len);           /* Just clear the destination page */
    }

    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
        kvmppc_dcbst_range(cpu, pdst, len);
    }
    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
        if (kvm_enabled()) {
            kvmppc_icbi_range(cpu, pdst, len);
        } else {
            tb_flush(CPU(cpu));
        }
    }

unmap_out:
    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
    return ret;
}
#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2
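/*
 * The VPA (Virtual Processor Area) is a guest-supplied structure holding
 * its length as a big-endian halfword at VPA_SIZE_OFFSET and a shared-
 * processor indicator byte at VPA_SHARED_PROC_OFFSET. It must be cache
 * line aligned, at least VPA_MIN_SIZE bytes long, and must not cross a
 * 4096-byte boundary.
 */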
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + VPA_SIZE_OFFSET);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}
static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}
static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}
static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}
static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}
static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}
static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}
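/*
 * H_CEDE: the vCPU donates its cycles back to the hypervisor. External
 * interrupts are enabled so the vCPU can be woken, and if it has no
 * pending work it is halted until an interrupt or other event arrives.
 */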
static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}
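/*
 * KVMPPC_H_RTAS bridges the hypercall ABI to the RTAS ABI: args[0] is the
 * guest-real address of an RTAS parameter block containing the 32-bit
 * token, nargs and nret words, followed by nargs argument words and space
 * for nret return words.
 */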
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}
static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}
static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}
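/*
 * KVMPPC_H_LOGICAL_MEMOP copies (op 0) or invert-copies (op 1) a run of
 * 1/2/4/8-byte elements between guest-physical buffers. If the ranges
 * overlap with the destination above the source, the copy walks backwards
 * from the last element so that data is not clobbered mid-copy.
 */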
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst = args[0]; /* Destination address */
    target_ulong src = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op = args[4]; /* 0 = copy, 1 = invert */
    target_ulong tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}
static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}
static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}
static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    if (mflags == AIL_RESERVED) {
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
    }

    return H_SUCCESS;
}
static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}
static target_ulong h_clean_slb(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n",
                  opcode, " (H_CLEAN_SLB)");
    return H_FUNCTION;
}
static target_ulong h_invalidate_pid(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                     target_ulong opcode, target_ulong *args)
{
    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n",
                  opcode, " (H_INVALIDATE_PID)");
    return H_FUNCTION;
}
static void spapr_check_setup_free_hpt(sPAPRMachineState *spapr,
                                       uint64_t patbe_old, uint64_t patbe_new)
{
    /* We have 4 Options:
     * HASH->HASH || RADIX->RADIX || NOTHING->RADIX : Do Nothing
     * HASH->RADIX                                  : Free HPT
     * RADIX->HASH                                  : Allocate HPT
     * NOTHING->HASH                                : Allocate HPT
     * Note: NOTHING implies the case where we said the guest could choose
     *       later and so assumed radix and now it's called H_REG_PROC_TBL
     */

    if ((patbe_old & PATBE1_GR) == (patbe_new & PATBE1_GR)) {
        /* We assume RADIX, so this catches all the "Do Nothing" cases */
    } else if (!(patbe_old & PATBE1_GR)) {
        /* HASH->RADIX : Free HPT */
        g_free(spapr->htab);
        spapr->htab = NULL;
        spapr->htab_shift = 0;
        close_htab_fd(spapr);
    } else if (!(patbe_new & PATBE1_GR)) {
        /* RADIX->HASH || NOTHING->HASH : Allocate HPT */
        spapr_setup_hpt_and_vrma(spapr);
    }
    return;
}
#define FLAGS_MASK              0x01FULL
#define FLAG_MODIFY             0x10
#define FLAG_REGISTER           0x08
#define FLAG_RADIX              0x04
#define FLAG_HASH_PROC_TBL      0x02
#define FLAG_GTSE               0x01
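/*
 * H_REGISTER_PROC_TBL: FLAG_MODIFY | FLAG_REGISTER installs a new radix
 * or hash process table (validating the alignment and size fields),
 * FLAG_MODIFY alone deregisters the current one, and a call without
 * FLAG_MODIFY merely asserts that the existing registration matches the
 * radix/hash mode given in flags.
 */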
static target_ulong h_register_process_table(PowerPCCPU *cpu,
                                             sPAPRMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    CPUState *cs;
    target_ulong flags = args[0];
    target_ulong proc_tbl = args[1];
    target_ulong page_size = args[2];
    target_ulong table_size = args[3];
    uint64_t cproc;

    if (flags & ~FLAGS_MASK) { /* Check no reserved bits are set */
        return H_PARAMETER;
    }
    if (flags & FLAG_MODIFY) {
        if (flags & FLAG_REGISTER) {
            if (flags & FLAG_RADIX) { /* Register new RADIX process table */
                if (proc_tbl & 0xfff || proc_tbl >> 60) {
                    return H_P2;
                } else if (page_size) {
                    return H_P3;
                } else if (table_size > 24) {
                    return H_P4;
                }
                cproc = PATBE1_GR | proc_tbl | table_size;
            } else { /* Register new HPT process table */
                if (flags & FLAG_HASH_PROC_TBL) { /* Hash with Segment Tables */
                    /* TODO - Not Supported */
                    /* Technically caused by flag bits => H_PARAMETER */
                    return H_PARAMETER;
                } else { /* Hash with SLB */
                    if (proc_tbl >> 38) {
                        return H_P2;
                    } else if (page_size & ~0x7) {
                        return H_P3;
                    } else if (table_size > 24) {
                        return H_P4;
                    }
                }
                cproc = (proc_tbl << 25) | page_size << 5 | table_size;
            }

        } else { /* Deregister current process table */
            /* Set to benign value: (current GR) | 0. This allows
             * deregistration in KVM to succeed even if the radix bit in flags
             * doesn't match the radix bit in the old PATB. */
            cproc = spapr->patb_entry & PATBE1_GR;
        }
    } else { /* Maintain current registration */
        if (!(flags & FLAG_RADIX) != !(spapr->patb_entry & PATBE1_GR)) {
            /* Technically caused by flag bits => H_PARAMETER */
            return H_PARAMETER; /* Existing Process Table Mismatch */
        }
        cproc = spapr->patb_entry;
    }

    /* Check if we need to setup OR free the hpt */
    spapr_check_setup_free_hpt(spapr, spapr->patb_entry, cproc);

    spapr->patb_entry = cproc; /* Save new process table */

    /* Update the UPRT and GTSE bits in the LPCR for all cpus */
    CPU_FOREACH(cs) {
        set_spr(cs, SPR_LPCR, LPCR_UPRT | LPCR_GTSE,
                ((flags & (FLAG_RADIX | FLAG_HASH_PROC_TBL)) ? LPCR_UPRT : 0) |
                ((flags & FLAG_GTSE) ? LPCR_GTSE : 0));
    }

    if (kvm_enabled()) {
        return kvmppc_configure_v3_mmu(cpu, flags & FLAG_RADIX,
                                       flags & FLAG_GTSE, cproc);
    }
    return H_SUCCESS;
}
#define H_SIGNAL_SYS_RESET_ALL         -1
#define H_SIGNAL_SYS_RESET_ALLBUTSELF  -2
static target_ulong h_signal_sys_reset(PowerPCCPU *cpu,
                                       sPAPRMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    CPUState *cs;

    if (target < 0) {
        /* Broadcast */
        if (target < H_SIGNAL_SYS_RESET_ALLBUTSELF) {
            return H_PARAMETER;
        }

        CPU_FOREACH(cs) {
            PowerPCCPU *c = POWERPC_CPU(cs);

            if (target == H_SIGNAL_SYS_RESET_ALLBUTSELF) {
                if (c == cpu) {
                    continue;
                }
            }
            run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
        }
        return H_SUCCESS;

    } else {
        /* Unicast: compare the iterated CPU's dt_id, not the caller's */
        CPU_FOREACH(cs) {
            PowerPCCPU *c = POWERPC_CPU(cs);

            if (c->cpu_dt_id == target) {
                run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
                return H_SUCCESS;
            }
        }
        return H_PARAMETER;
    }
}
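/*
 * KVMPPC_H_CAS (ibm,client-architecture-support): negotiate a CPU
 * compatibility mode from the guest's PVR list, then intersect the
 * guest's option vectors with what the platform supports. If the
 * negotiated ov5 set changed in a way that cannot be folded into the
 * existing device tree, a CAS-triggered reboot is requested.
 */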
static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
                                                  sPAPRMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = ppc64_phys_to_real(args[0]);
    target_ulong ov_table;
    bool explicit_match = false; /* Matched the CPU's real PVR */
    uint32_t max_compat = cpu->max_compat;
    uint32_t best_compat = 0;
    int i;
    sPAPROptionVector *ov1_guest, *ov5_guest, *ov5_cas_old, *ov5_updates;
    bool guest_radix;

    /*
     * We scan the supplied table of PVRs looking for two things
     *   1. Is our real CPU PVR in the list?
     *   2. What's the "best" listed logical PVR
     */
    for (i = 0; i < 512; ++i) {
        uint32_t pvr, pvr_mask;

        pvr_mask = ldl_be_phys(&address_space_memory, list);
        pvr = ldl_be_phys(&address_space_memory, list + 4);
        list += 8;

        if (~pvr_mask & pvr) {
            break; /* Terminator record */
        }

        if ((cpu->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask)) {
            explicit_match = true;
        } else {
            if (ppc_check_compat(cpu, pvr, best_compat, max_compat)) {
                best_compat = pvr;
            }
        }
    }

    if ((best_compat == 0) && (!explicit_match || max_compat)) {
        /* We couldn't find a suitable compatibility mode, and either
         * the guest doesn't support "raw" mode for this CPU, or raw
         * mode is disabled because a maximum compat mode is set */
        return H_HARDWARE;
    }

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu->compat_pvr, explicit_match, best_compat);

    /* Update CPUs */
    if (cpu->compat_pvr != best_compat) {
        Error *local_err = NULL;

        ppc_set_compat_all(best_compat, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* For the future use: here @ov_table points to the first option vector */
    ov_table = list;

    ov1_guest = spapr_ovec_parse_vector(ov_table, 1);
    ov5_guest = spapr_ovec_parse_vector(ov_table, 5);
    if (spapr_ovec_test(ov5_guest, OV5_MMU_BOTH)) {
        error_report("guest requested hash and radix MMU, which is invalid.");
        exit(EXIT_FAILURE);
    }

    /* The radix/hash bit in byte 24 requires special handling: */
    guest_radix = spapr_ovec_test(ov5_guest, OV5_MMU_RADIX_300);
    spapr_ovec_clear(ov5_guest, OV5_MMU_RADIX_300);

    /* NOTE: there are actually a number of ov5 bits where input from the
     * guest is always zero, and the platform/QEMU enables them independently
     * of guest input. To model these properly we'd want some sort of mask,
     * but since they only currently apply to memory migration as defined
     * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need
     * to worry about this for now.
     */
    ov5_cas_old = spapr_ovec_clone(spapr->ov5_cas);
    /* full range of negotiated ov5 capabilities */
    spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest);
    spapr_ovec_cleanup(ov5_guest);
    /* capabilities that have been added since CAS-generated guest reset.
     * if capabilities have since been removed, generate another reset
     */
    ov5_updates = spapr_ovec_new();
    spapr->cas_reboot = spapr_ovec_diff(ov5_updates,
                                        ov5_cas_old, spapr->ov5_cas);
    /* Now that processing is finished, set the radix/hash bit for the
     * guest if it requested a valid mode; otherwise terminate the boot. */
    if (guest_radix) {
        if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
            error_report("Guest requested unavailable MMU mode (radix).");
            exit(EXIT_FAILURE);
        }
        spapr_ovec_set(spapr->ov5_cas, OV5_MMU_RADIX_300);
    } else {
        if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
            && !kvmppc_has_cap_mmu_hash_v3()) {
            error_report("Guest requested unavailable MMU mode (hash).");
            exit(EXIT_FAILURE);
        }
    }
    spapr->cas_legacy_guest_workaround = !spapr_ovec_test(ov1_guest,
                                                          OV1_PPC_3_00);
    if (!spapr->cas_reboot) {
        spapr->cas_reboot =
            (spapr_h_cas_compose_response(spapr, args[1], args[2],
                                          ov5_updates) != 0);
    }
    spapr_ovec_cleanup(ov5_updates);

    if (spapr->cas_reboot) {
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
    } else {
        /* If ppc_spapr_reset() did not set up a HPT but one is necessary
         * (because the guest isn't going to use radix) then set it up here. */
        if ((spapr->patb_entry & PATBE1_GR) && !guest_radix) {
            /* legacy hash or new hash: */
            spapr_setup_hpt_and_vrma(spapr);
        }
    }

    return H_SUCCESS;
}
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
                  opcode);
    return H_FUNCTION;
}
static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);
    spapr_register_hypercall(H_SIGNAL_SYS_RESET, h_signal_sys_reset);

    /* processor register resource access h-calls */
    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* In Memory Table MMU h-calls */
    spapr_register_hypercall(H_CLEAN_SLB, h_clean_slb);
    spapr_register_hypercall(H_INVALIDATE_PID, h_invalidate_pid);
    spapr_register_hypercall(H_REGISTER_PROC_TBL, h_register_process_table);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
}

type_init(hypercall_register_types)