#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "qemu/log.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
struct SPRSyncState {
    CPUState *cs;
    int spr;
    target_ulong value;
    target_ulong mask;
};

static void do_spr_sync(void *arg)
{
    struct SPRSyncState *s = arg;
    PowerPCCPU *cpu = POWERPC_CPU(s->cs);
    CPUPPCState *env = &cpu->env;

    cpu_synchronize_state(s->cs);
    env->spr[s->spr] &= ~s->mask;
    env->spr[s->spr] |= s->value;
}
static void set_spr(CPUState *cs, int spr, target_ulong value,
                    target_ulong mask)
{
    struct SPRSyncState s = {
        .cs = cs,
        .spr = spr,
        .value = value,
        .mask = mask
    };
    run_on_cpu(cs, do_spr_sync, &s);
}
static bool has_spr(PowerPCCPU *cpu, int spr)
{
    /* We can test whether the SPR is defined by checking for a valid name */
    return cpu->env.spr_cb[spr].name != NULL;
}
static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
{
    /*
     * hash value/pteg group index is normalized by htab_mask
     */
    if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
        return false;
    }
    return true;
}
static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr)
{
    MachineState *machine = MACHINE(spapr);
    MemoryHotplugState *hpms = &spapr->hotplug_memory;

    if (addr < machine->ram_size) {
        return true;
    }
    if ((addr >= hpms->base)
        && ((addr - hpms->base) < memory_region_size(&hpms->mr))) {
        return true;
    }

    return false;
}
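
/*
 * H_ENTER: insert an entry into the hashed page table. The WIMG bits
 * are checked against whether the real address maps RAM or IO, then
 * the new HPTE is written to the first free slot of the PTEG (or to
 * the exact slot requested with H_EXACT).
 */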
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift, spshift;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if (!(ppc_hash64_load_hpte0(cpu, token, index) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_stop_access(cpu, token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(cpu, token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(cpu, token);
    }

    ppc_hash64_store_hpte(cpu, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    return H_SUCCESS;
}
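
/*
 * Common backend for H_REMOVE and H_BULK_REMOVE: validate the index and
 * the AVPN/ANDCOND conditions, then invalidate the HPTE and flush the
 * TLB, returning the previous PTE words through @vp and @rp.
 */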
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    CPUPPCState *env = &cpu->env;
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(cpu, ptex);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}
static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}
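
/*
 * H_BULK_REMOVE packs up to four requests into (tsh, tsl) pairs; the
 * high word encodes the request type, flags and PTE index, and is
 * rewritten in place with the response code for each entry.
 */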
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH        4
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}
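
/*
 * H_PROTECT: update the protection (pp/key/N) bits of an existing HPTE,
 * invalidating the entry around the update so the guest never observes
 * a partially modified PTE.
 */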
static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    ppc_hash64_store_hpte(cpu, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}
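
/*
 * H_READ: return one HPTE (or four consecutive HPTEs with H_READ_4)
 * straight from the externally stored hash table.
 */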
static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64 / 2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}
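
/* Processor register resource access h-calls: update SPRs in the vCPU state */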
static target_ulong h_set_sprg0(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}
static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}
static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dabrx = args[1];

    if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
        return H_HARDWARE;
    }

    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
        return H_PARAMETER;
    }

    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_DABRX] = dabrx;
    cpu->env.spr[SPR_DABR] = args[0];

    return H_SUCCESS;
}
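
/*
 * H_PAGE_INIT: zero or copy a page of guest memory, optionally keeping
 * the instruction cache coherent with the new contents.
 */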
static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    hwaddr dst = args[1];
    hwaddr src = args[2];
    hwaddr len = TARGET_PAGE_SIZE;
    uint8_t *pdst, *psrc;
    target_long ret = H_SUCCESS;

    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
                  | H_COPY_PAGE | H_ZERO_PAGE)) {
        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n",
                      flags);
        return H_PARAMETER;
    }

    /* Map-in destination */
    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
        return H_PARAMETER;
    }
    pdst = cpu_physical_memory_map(dst, &len, 1);
    if (!pdst || len != TARGET_PAGE_SIZE) {
        return H_PARAMETER;
    }

    if (flags & H_COPY_PAGE) {
        /* Map-in source, copy to destination, and unmap source again */
        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        psrc = cpu_physical_memory_map(src, &len, 0);
        if (!psrc || len != TARGET_PAGE_SIZE) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        memcpy(pdst, psrc, len);
        cpu_physical_memory_unmap(psrc, len, 0, len);
    } else if (flags & H_ZERO_PAGE) {
        memset(pdst, 0, len);          /* Just clear the destination page */
    }

    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
        kvmppc_dcbst_range(cpu, pdst, len);
    }
    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
        if (kvm_enabled()) {
            kvmppc_icbi_range(cpu, pdst, len);
        } else {
            tb_flush(CPU(cpu));
        }
    }

unmap_out:
    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
    return ret;
}
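
/* H_REGISTER_VPA flag values and Virtual Processor Area layout offsets */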
#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}
static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}
static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}
static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}
static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}
static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}
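
/* H_CEDE: enable external interrupts and idle the vCPU until it has work */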
static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4 * nargs);
}
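
/*
 * H_LOGICAL_CI_LOAD/STORE (and the CACHE variants): simple 1/2/4/8-byte
 * accesses to guest physical addresses, mainly used by SLOF.
 */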
static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}
static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst = args[0];   /* Destination address */
    target_ulong src = args[1];   /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op = args[4];    /* 0 = copy, 1 = invert */
    target_ulong tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}
static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}
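
/* H_SET_MODE helpers: ILE (interrupt endianness) and AIL resources */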
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}
static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    if (mflags == AIL_RESERVED) {
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
    }

    return H_SUCCESS;
}
static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}
/*
 * Return the offset to the requested option vector @vector in the
 * option vector table @table.
 */
static target_ulong cas_get_option_vector(int vector, target_ulong table)
{
    int i;
    char nr_vectors, nr_entries;

    if (!table) {
        return 0;
    }

    nr_vectors = (ldl_phys(&address_space_memory, table) >> 24) + 1;
    if (!vector || vector > nr_vectors) {
        return 0;
    }

    table++; /* skip nr option vectors */

    for (i = 0; i < vector - 1; i++) {
        nr_entries = ldl_phys(&address_space_memory, table) >> 24;
        table += nr_entries + 2;
    }

    return table;
}
typedef struct {
    PowerPCCPU *cpu;
    uint32_t cpu_version;
    Error *err;
} SetCompatState;

static void do_set_compat(void *arg)
{
    SetCompatState *s = arg;

    cpu_synchronize_state(CPU(s->cpu));
    ppc_set_compat(s->cpu, s->cpu_version, &s->err);
}
#define get_compat_level(cpuver) ( \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)

#define OV5_DRCONF_MEMORY 0x20
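
/*
 * KVMPPC_H_CAS (ibm,client-architecture-support): walk the PVR list
 * supplied by the guest to pick a compatibility level, apply it to all
 * vCPUs, then regenerate the device tree fragments that depend on the
 * negotiated option vectors.
 */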
static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                                                  sPAPRMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = ppc64_phys_to_real(args[0]);
    target_ulong ov_table, ov5;
    PowerPCCPUClass *pcc_ = POWERPC_CPU_GET_CLASS(cpu_);
    CPUState *cs;
    bool cpu_match = false, cpu_update = true, memory_update = false;
    unsigned old_cpu_version = cpu_->cpu_version;
    unsigned compat_lvl = 0, cpu_version = 0;
    unsigned max_lvl = get_compat_level(cpu_->max_compat);
    int counter;
    char ov5_byte2;

    /* Parse PVR list */
    for (counter = 0; counter < 512; ++counter) {
        uint32_t pvr, pvr_mask;

        pvr_mask = ldl_be_phys(&address_space_memory, list);
        list += 4;
        pvr = ldl_be_phys(&address_space_memory, list);
        list += 4;

        trace_spapr_cas_pvr_try(pvr);
        if (!max_lvl &&
            ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
            cpu_match = true;
            cpu_version = 0;
        } else if (pvr == cpu_->cpu_version) {
            cpu_match = true;
            cpu_version = cpu_->cpu_version;
        } else if (!cpu_match) {
            /* If it is a logical PVR, try to determine the highest level */
            unsigned lvl = get_compat_level(pvr);
            if (lvl) {
                bool is205 = (pcc_->pcr_mask & PCR_COMPAT_2_05) &&
                             (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
                bool is206 = (pcc_->pcr_mask & PCR_COMPAT_2_06) &&
                             ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
                              (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));

                if (is205 || is206) {
                    if (!max_lvl) {
                        /* User did not set the level, choose the highest */
                        if (compat_lvl <= lvl) {
                            compat_lvl = lvl;
                            cpu_version = pvr;
                        }
                    } else if (max_lvl >= lvl) {
                        /* User chose the level, don't set higher than this */
                        compat_lvl = lvl;
                        cpu_version = pvr;
                    }
                }
            }
        }
        /* Terminator record */
        if (~pvr_mask & pvr) {
            break;
        }
    }

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
                        cpu_version, pcc_->pcr_mask);

    /* Update CPUs */
    if (old_cpu_version != cpu_version) {
        CPU_FOREACH(cs) {
            SetCompatState s = {
                .cpu = POWERPC_CPU(cs),
                .cpu_version = cpu_version,
                .err = NULL,
            };

            run_on_cpu(cs, do_set_compat, &s);

            if (s.err) {
                error_report_err(s.err);
                return H_HARDWARE;
            }
        }
    }

    if (!cpu_version) {
        cpu_update = false;
    }

    /* For the future use: here @ov_table points to the first option vector */
    ov_table = list;

    ov5 = cas_get_option_vector(5, ov_table);
    if (!ov5) {
        return H_SUCCESS;
    }

    /* @list now points to OV 5 */
    ov5_byte2 = ldub_phys(&address_space_memory, ov5 + 2);
    if (ov5_byte2 & OV5_DRCONF_MEMORY) {
        memory_update = true;
    }

    if (spapr_h_cas_compose_response(spapr, args[1], args[2],
                                     cpu_update, memory_update)) {
        qemu_system_reset_request();
    }

    return H_SUCCESS;
}
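
/*
 * Hypercall dispatch: PAPR hcalls (H_*) and QEMU/KVM-PPC specific hcalls
 * (KVMPPC_H_*) live in two separate tables, indexed by opcode.
 */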
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
                  opcode);
    return H_FUNCTION;
}
static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* processor register resource access h-calls */
    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
}

type_init(hypercall_register_types)