Git Repo - linux.git/commitdiff
LoongArch: KVM: Implement handle mmio exception
authorTianrui Zhao <[email protected]>
Mon, 2 Oct 2023 02:01:28 +0000 (10:01 +0800)
committerHuacai Chen <[email protected]>
Mon, 2 Oct 2023 02:01:28 +0000 (10:01 +0800)
Implement handling of the MMIO exception: set the MMIO info in vcpu_run and
return to user space to handle it.

Reviewed-by: Bibo Mao <[email protected]>
Tested-by: Huacai Chen <[email protected]>
Signed-off-by: Tianrui Zhao <[email protected]>
Signed-off-by: Huacai Chen <[email protected]>
arch/loongarch/kvm/exit.c

index 33d1b4190a62c59f22fe8d019a4591ea3e518171..c31894b75b0757299fb04e7eda21f5d0b449f841 100644 (file)
@@ -321,3 +321,313 @@ static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
 
        return ret;
 }
+
+int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
+{
+       int ret;
+       unsigned int op8, opcode, rd;
+       struct kvm_run *run = vcpu->run;
+
+       run->mmio.phys_addr = vcpu->arch.badv;
+       vcpu->mmio_needed = 2;  /* signed */
+       op8 = (inst.word >> 24) & 0xff;
+       ret = EMULATE_DO_MMIO;
+
+       switch (op8) {
+       case 0x24 ... 0x27:     /* ldptr.w/d process */
+               rd = inst.reg2i14_format.rd;
+               opcode = inst.reg2i14_format.opcode;
+
+               switch (opcode) {
+               case ldptrw_op:
+                       run->mmio.len = 4;
+                       break;
+               case ldptrd_op:
+                       run->mmio.len = 8;
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case 0x28 ... 0x2e:     /* ld.b/h/w/d, ld.bu/hu/wu process */
+               rd = inst.reg2i12_format.rd;
+               opcode = inst.reg2i12_format.opcode;
+
+               switch (opcode) {
+               case ldb_op:
+                       run->mmio.len = 1;
+                       break;
+               case ldbu_op:
+                       vcpu->mmio_needed = 1;  /* unsigned */
+                       run->mmio.len = 1;
+                       break;
+               case ldh_op:
+                       run->mmio.len = 2;
+                       break;
+               case ldhu_op:
+                       vcpu->mmio_needed = 1;  /* unsigned */
+                       run->mmio.len = 2;
+                       break;
+               case ldw_op:
+                       run->mmio.len = 4;
+                       break;
+               case ldwu_op:
+                       vcpu->mmio_needed = 1;  /* unsigned */
+                       run->mmio.len = 4;
+                       break;
+               case ldd_op:
+                       run->mmio.len = 8;
+                       break;
+               default:
+                       ret = EMULATE_FAIL;
+                       break;
+               }
+               break;
+       case 0x38:      /* ldx.b/h/w/d, ldx.bu/hu/wu process */
+               rd = inst.reg3_format.rd;
+               opcode = inst.reg3_format.opcode;
+
+               switch (opcode) {
+               case ldxb_op:
+                       run->mmio.len = 1;
+                       break;
+               case ldxbu_op:
+                       run->mmio.len = 1;
+                       vcpu->mmio_needed = 1;  /* unsigned */
+                       break;
+               case ldxh_op:
+                       run->mmio.len = 2;
+                       break;
+               case ldxhu_op:
+                       run->mmio.len = 2;
+                       vcpu->mmio_needed = 1;  /* unsigned */
+                       break;
+               case ldxw_op:
+                       run->mmio.len = 4;
+                       break;
+               case ldxwu_op:
+                       run->mmio.len = 4;
+                       vcpu->mmio_needed = 1;  /* unsigned */
+                       break;
+               case ldxd_op:
+                       run->mmio.len = 8;
+                       break;
+               default:
+                       ret = EMULATE_FAIL;
+                       break;
+               }
+               break;
+       default:
+               ret = EMULATE_FAIL;
+       }
+
+       if (ret == EMULATE_DO_MMIO) {
+               /* Set for kvm_complete_mmio_read() use */
+               vcpu->arch.io_gpr = rd;
+               run->mmio.is_write = 0;
+               vcpu->mmio_is_write = 0;
+       } else {
+               kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
+                       inst.word, vcpu->arch.pc, vcpu->arch.badv);
+               kvm_arch_vcpu_dump_regs(vcpu);
+               vcpu->mmio_needed = 0;
+       }
+
+       return ret;
+}
+
+int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+
+       /* Update with new PC */
+       update_pc(&vcpu->arch);
+       switch (run->mmio.len) {
+       case 1:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(s8 *)run->mmio.data;
+               else
+                       *gpr = *(u8 *)run->mmio.data;
+               break;
+       case 2:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(s16 *)run->mmio.data;
+               else
+                       *gpr = *(u16 *)run->mmio.data;
+               break;
+       case 4:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(s32 *)run->mmio.data;
+               else
+                       *gpr = *(u32 *)run->mmio.data;
+               break;
+       case 8:
+               *gpr = *(s64 *)run->mmio.data;
+               break;
+       default:
+               kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
+                               run->mmio.len, vcpu->arch.badv);
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
+int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
+{
+       int ret;
+       unsigned int rd, op8, opcode;
+       unsigned long curr_pc, rd_val = 0;
+       struct kvm_run *run = vcpu->run;
+       void *data = run->mmio.data;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       update_pc(&vcpu->arch);
+
+       op8 = (inst.word >> 24) & 0xff;
+       run->mmio.phys_addr = vcpu->arch.badv;
+       ret = EMULATE_DO_MMIO;
+       switch (op8) {
+       case 0x24 ... 0x27:     /* stptr.w/d process */
+               rd = inst.reg2i14_format.rd;
+               opcode = inst.reg2i14_format.opcode;
+
+               switch (opcode) {
+               case stptrw_op:
+                       run->mmio.len = 4;
+                       *(unsigned int *)data = vcpu->arch.gprs[rd];
+                       break;
+               case stptrd_op:
+                       run->mmio.len = 8;
+                       *(unsigned long *)data = vcpu->arch.gprs[rd];
+                       break;
+               default:
+                       ret = EMULATE_FAIL;
+                       break;
+               }
+               break;
+       case 0x28 ... 0x2e:     /* st.b/h/w/d  process */
+               rd = inst.reg2i12_format.rd;
+               opcode = inst.reg2i12_format.opcode;
+               rd_val = vcpu->arch.gprs[rd];
+
+               switch (opcode) {
+               case stb_op:
+                       run->mmio.len = 1;
+                       *(unsigned char *)data = rd_val;
+                       break;
+               case sth_op:
+                       run->mmio.len = 2;
+                       *(unsigned short *)data = rd_val;
+                       break;
+               case stw_op:
+                       run->mmio.len = 4;
+                       *(unsigned int *)data = rd_val;
+                       break;
+               case std_op:
+                       run->mmio.len = 8;
+                       *(unsigned long *)data = rd_val;
+                       break;
+               default:
+                       ret = EMULATE_FAIL;
+                       break;
+               }
+               break;
+       case 0x38:      /* stx.b/h/w/d process */
+               rd = inst.reg3_format.rd;
+               opcode = inst.reg3_format.opcode;
+
+               switch (opcode) {
+               case stxb_op:
+                       run->mmio.len = 1;
+                       *(unsigned char *)data = vcpu->arch.gprs[rd];
+                       break;
+               case stxh_op:
+                       run->mmio.len = 2;
+                       *(unsigned short *)data = vcpu->arch.gprs[rd];
+                       break;
+               case stxw_op:
+                       run->mmio.len = 4;
+                       *(unsigned int *)data = vcpu->arch.gprs[rd];
+                       break;
+               case stxd_op:
+                       run->mmio.len = 8;
+                       *(unsigned long *)data = vcpu->arch.gprs[rd];
+                       break;
+               default:
+                       ret = EMULATE_FAIL;
+                       break;
+               }
+               break;
+       default:
+               ret = EMULATE_FAIL;
+       }
+
+       if (ret == EMULATE_DO_MMIO) {
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+       } else {
+               vcpu->arch.pc = curr_pc;
+               kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
+                       inst.word, vcpu->arch.pc, vcpu->arch.badv);
+               kvm_arch_vcpu_dump_regs(vcpu);
+               /* Rollback PC if emulation was unsuccessful */
+       }
+
+       return ret;
+}
+
+static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
+{
+       int ret;
+       larch_inst inst;
+       enum emulation_result er = EMULATE_DONE;
+       struct kvm_run *run = vcpu->run;
+       unsigned long badv = vcpu->arch.badv;
+
+       ret = kvm_handle_mm_fault(vcpu, badv, write);
+       if (ret) {
+               /* Treat as MMIO */
+               inst.word = vcpu->arch.badi;
+               if (write) {
+                       er = kvm_emu_mmio_write(vcpu, inst);
+               } else {
+                       /* A code fetch fault doesn't count as an MMIO */
+                       if (kvm_is_ifetch_fault(&vcpu->arch)) {
+                               kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
+                               return RESUME_GUEST;
+                       }
+
+                       er = kvm_emu_mmio_read(vcpu, inst);
+               }
+       }
+
+       if (er == EMULATE_DONE) {
+               ret = RESUME_GUEST;
+       } else if (er == EMULATE_DO_MMIO) {
+               run->exit_reason = KVM_EXIT_MMIO;
+               ret = RESUME_HOST;
+       } else {
+               kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
+               ret = RESUME_GUEST;
+       }
+
+       return ret;
+}
+
/* Exit handler for guest load (read) faults; see kvm_handle_rdwr_fault(). */
static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, false);
}
+
/* Exit handler for guest store (write) faults; see kvm_handle_rdwr_fault(). */
static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, true);
}
This page took 0.064178 seconds and 4 git commands to generate.