Git Repo - J-linux.git/commitdiff
Merge branch 'kvm-arm64/pmu-debug-fixes-5.11' into kvmarm-master/next
author Marc Zyngier <[email protected]>
Fri, 12 Feb 2021 14:08:41 +0000 (14:08 +0000)
committer Marc Zyngier <[email protected]>
Fri, 12 Feb 2021 14:08:41 +0000 (14:08 +0000)
Signed-off-by: Marc Zyngier <[email protected]>
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c

index 247422ac78a9e5c01e543146076428d81db061fc,cb16ca2eee9246e68bed14203f04198e25d63969..e9ec08b0b070337d476ec0d00a28d62efa0af89e
@@@ -23,11 -23,11 +23,11 @@@ static void kvm_pmu_stop_counter(struc
  static u32 kvm_pmu_event_mask(struct kvm *kvm)
  {
        switch (kvm->arch.pmuver) {
-       case 1:                 /* ARMv8.0 */
+       case ID_AA64DFR0_PMUVER_8_0:
                return GENMASK(9, 0);
-       case 4:                 /* ARMv8.1 */
-       case 5:                 /* ARMv8.4 */
-       case 6:                 /* ARMv8.5 */
+       case ID_AA64DFR0_PMUVER_8_1:
+       case ID_AA64DFR0_PMUVER_8_4:
+       case ID_AA64DFR0_PMUVER_8_5:
                return GENMASK(15, 0);
        default:                /* Shouldn't be here, just for sanity */
                WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
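Side note (illustrative, not part of the patch): the two GENMASK() values above are simply the PMU event-number field widths -- 10-bit event numbers for ARMv8.0, 16-bit from ARMv8.1 onwards. A quick sanity check, assuming <linux/bits.h> and <linux/build_bug.h>:

    /* Illustrative only: the masks returned by kvm_pmu_event_mask(). */
    static_assert(GENMASK(9, 0)  == 0x3ff);
    static_assert(GENMASK(15, 0) == 0xffff);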
@@@ -788,30 -788,32 +788,36 @@@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu 
  {
        unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
        u64 val, mask = 0;
 -      int base, i;
 +      int base, i, nr_events;
  
        if (!pmceid1) {
                val = read_sysreg(pmceid0_el0);
                base = 0;
        } else {
                val = read_sysreg(pmceid1_el0);
+               /*
+                * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
+                * as RAZ
+                */
+               if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
+                       val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
                base = 32;
        }
  
        if (!bmap)
                return val;
  
 +      nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
 +
        for (i = 0; i < 32; i += 8) {
                u64 byte;
  
                byte = bitmap_get_value8(bmap, base + i);
                mask |= byte << i;
 -              byte = bitmap_get_value8(bmap, 0x4000 + base + i);
 -              mask |= byte << (32 + i);
 +              if (nr_events >= (0x4000 + base + 32)) {
 +                      byte = bitmap_get_value8(bmap, 0x4000 + base + i);
 +                      mask |= byte << (32 + i);
 +              }
        }
  
        return val & mask;
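Side note (illustrative, not part of the patch): pmu_filter is indexed by raw event number, so PMCEID0/PMCEID1 correspond to filter offsets base..base+31 for the common events and 0x4000+base onwards for the ARMv8.1 extended common events. The nr_events guard added above only folds in the extended-range bytes when the guest's event space (kvm_pmu_event_mask() + 1) actually reaches them; with 10-bit event numbers nr_events is 0x400, so the 0x4000-based range is skipped. A hypothetical helper restating the same condition:

    /* Hypothetical helper -- restates the guard added in the hunk above. */
    static inline bool example_filter_covers_extended(struct kvm *kvm, int base)
    {
            int nr_events = kvm_pmu_event_mask(kvm) + 1;

            /* Extended common events sit at 0x4000 + base .. 0x4000 + base + 31 */
            return nr_events >= (0x4000 + base + 32);
    }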
@@@ -854,6 -856,8 +860,6 @@@ int kvm_arm_pmu_v3_enable(struct kvm_vc
                   return -EINVAL;
        }
  
 -      kvm_pmu_vcpu_reset(vcpu);
 -
        return 0;
  }
  
index 7c4f79532406b5ca865e4fdaf37bcc8e5cea9bd2,d9ca200c8b1deb5723197d0934e0012bb0310128..4f2f1e3145debfc90ba0d358eb769e1300539ce8
@@@ -9,6 -9,7 +9,7 @@@
   *          Christoffer Dall <[email protected]>
   */
  
+ #include <linux/bitfield.h>
  #include <linux/bsearch.h>
  #include <linux/kvm_host.h>
  #include <linux/mm.h>
   * 64bit interface.
   */
  
 +#define reg_to_encoding(x)                                            \
 +      sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
 +              (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
 +
  static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 struct sys_reg_params *params,
                                 const struct sys_reg_desc *r)
@@@ -277,7 -274,8 +278,7 @@@ static bool trap_loregion(struct kvm_vc
                          const struct sys_reg_desc *r)
  {
        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 -      u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
 -                       (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
 +      u32 sr = reg_to_encoding(r);
  
        if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
                kvm_inject_undefined(vcpu);
@@@ -593,23 -591,10 +594,23 @@@ static void reset_mpidr(struct kvm_vcp
        vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
  }
  
 +static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
 +                                 const struct sys_reg_desc *r)
 +{
 +      if (kvm_vcpu_has_pmu(vcpu))
 +              return 0;
 +
 +      return REG_HIDDEN;
 +}
 +
  static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
  {
        u64 pmcr, val;
  
 +      /* No PMU available, PMCR_EL0 may UNDEF... */
 +      if (!kvm_arm_support_pmu_v3())
 +              return;
 +
        pmcr = read_sysreg(pmcr_el0);
        /*
         * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
  static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
  {
        u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
 -      bool enabled = kvm_vcpu_has_pmu(vcpu);
 +      bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
  
 -      enabled &= (reg & flags) || vcpu_mode_priv(vcpu);
        if (!enabled)
                kvm_inject_undefined(vcpu);
  
@@@ -700,14 -686,18 +701,18 @@@ static bool access_pmselr(struct kvm_vc
  static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
  {
-       u64 pmceid;
+       u64 pmceid, mask, shift;
  
        BUG_ON(p->is_write);
  
        if (pmu_access_el0_disabled(vcpu))
                return false;
  
+       get_access_mask(r, &mask, &shift);
        pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
+       pmceid &= mask;
+       pmceid >>= shift;
  
        p->regval = pmceid;
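Side note (the get_access_mask() internals are not shown in this diff; its behaviour is assumed here): the AA32(LO)/AA32(HI) tags on the cp15 PMCEID entries further down select which half of the 64-bit PMCEIDn_EL0 value backs a 32-bit AArch32 register, so PMCEID0/1 presumably see mask = bits[31:0], shift = 0 while PMCEID2/3 see mask = bits[63:32], shift = 32. A hypothetical sketch of that selection:

    /* Hypothetical sketch -- assumed LO/HI window selection, not the kernel helper. */
    static void example_pmceid_window(bool hi, u64 *mask, u64 *shift)
    {
            *mask  = hi ? GENMASK_ULL(63, 32) : GENMASK_ULL(31, 0);
            *shift = hi ? 32 : 0;
    }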
  
@@@ -911,6 -901,11 +916,6 @@@ static bool access_pmswinc(struct kvm_v
  static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                             const struct sys_reg_desc *r)
  {
 -      if (!kvm_vcpu_has_pmu(vcpu)) {
 -              kvm_inject_undefined(vcpu);
 -              return false;
 -      }
 -
        if (p->is_write) {
                if (!vcpu_mode_priv(vcpu)) {
                        kvm_inject_undefined(vcpu);
        return true;
  }
  
 -#define reg_to_encoding(x)                                            \
 -      sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
 -              (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
 -
  /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
  #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                    \
        { SYS_DESC(SYS_DBGBVRn_EL1(n)),                                 \
        { SYS_DESC(SYS_DBGWCRn_EL1(n)),                                 \
          trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }
  
 +#define PMU_SYS_REG(r)                                                \
 +      SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
 +
  /* Macro to expand the PMEVCNTRn_EL0 register */
  #define PMU_PMEVCNTR_EL0(n)                                           \
 -      { SYS_DESC(SYS_PMEVCNTRn_EL0(n)),                                       \
 -        access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
 +      { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),                            \
 +        .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
  
  /* Macro to expand the PMEVTYPERn_EL0 register */
  #define PMU_PMEVTYPER_EL0(n)                                          \
 -      { SYS_DESC(SYS_PMEVTYPERn_EL0(n)),                                      \
 -        access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
 +      { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),                           \
 +        .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
  
  static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
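Side note (illustrative; every name other than REG_HIDDEN and the sys_reg_desc fields is an assumption): the .visibility = pmu_visibility hook installed by PMU_SYS_REG() above lets the common trap and userspace paths hide a register outright when the vCPU has no PMU, instead of every access handler open-coding the check (compare the removal in access_pmuserenr earlier in this file). Conceptually the test looks like:

    /* Hypothetical helper -- the real checks live in the common sys_regs code. */
    static bool example_sysreg_hidden(const struct kvm_vcpu *vcpu,
                                      const struct sys_reg_desc *r)
    {
            if (!r->visibility)
                    return false;   /* no hook: always visible */

            return r->visibility(vcpu, r) & REG_HIDDEN;
    }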
@@@ -1021,43 -1017,51 +1026,50 @@@ static bool access_arch_timer(struct kv
        return true;
  }
  
+ #define FEATURE(x)    (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))
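Side note (illustrative, not in the patch): FEATURE(x) builds the 4-bit mask of an ID-register field from its *_SHIFT constant, and FIELD_PREP() from <linux/bitfield.h> then places a value into that field. With ID_AA64PFR0_CSV2_SHIFT == 56, for example:

    /* Illustrative only -- worked example of the FEATURE()/FIELD_PREP() pair. */
    static_assert(FEATURE(ID_AA64PFR0_CSV2) == GENMASK_ULL(59, 56));
    /* FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), 1) == 1ULL << 56 */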
  /* Read a sanitised cpufeature ID register by sys_reg_desc */
  static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                struct sys_reg_desc const *r, bool raz)
  {
 -      u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
 -                       (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
 +      u32 id = reg_to_encoding(r);
        u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
  
-       if (id == SYS_ID_AA64PFR0_EL1) {
+       switch (id) {
+       case SYS_ID_AA64PFR0_EL1:
                if (!vcpu_has_sve(vcpu))
-                       val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
-               val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
-               val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT);
-               val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT);
-               val &= ~(0xfUL << ID_AA64PFR0_CSV3_SHIFT);
-               val |= ((u64)vcpu->kvm->arch.pfr0_csv3 << ID_AA64PFR0_CSV3_SHIFT);
-       } else if (id == SYS_ID_AA64PFR1_EL1) {
-               val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
-       } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
-               val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
-                        (0xfUL << ID_AA64ISAR1_API_SHIFT) |
-                        (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
-                        (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
-       } else if (id == SYS_ID_AA64DFR0_EL1) {
-               u64 cap = 0;
-               /* Limit guests to PMUv3 for ARMv8.1 */
-               if (kvm_vcpu_has_pmu(vcpu))
-                       cap = ID_AA64DFR0_PMUVER_8_1;
+                       val &= ~FEATURE(ID_AA64PFR0_SVE);
+               val &= ~FEATURE(ID_AA64PFR0_AMU);
+               val &= ~FEATURE(ID_AA64PFR0_CSV2);
+               val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
+               val &= ~FEATURE(ID_AA64PFR0_CSV3);
+               val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
+               break;
+       case SYS_ID_AA64PFR1_EL1:
+               val &= ~FEATURE(ID_AA64PFR1_MTE);
+               break;
+       case SYS_ID_AA64ISAR1_EL1:
+               if (!vcpu_has_ptrauth(vcpu))
+                       val &= ~(FEATURE(ID_AA64ISAR1_APA) |
+                                FEATURE(ID_AA64ISAR1_API) |
+                                FEATURE(ID_AA64ISAR1_GPA) |
+                                FEATURE(ID_AA64ISAR1_GPI));
+               break;
+       case SYS_ID_AA64DFR0_EL1:
+               /* Limit debug to ARMv8.0 */
+               val &= ~FEATURE(ID_AA64DFR0_DEBUGVER);
+               val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6);
+               /* Limit guests to PMUv3 for ARMv8.4 */
                val = cpuid_feature_cap_perfmon_field(val,
-                                               ID_AA64DFR0_PMUVER_SHIFT,
-                                               cap);
-       } else if (id == SYS_ID_DFR0_EL1) {
-               /* Limit guests to PMUv3 for ARMv8.1 */
+                                                     ID_AA64DFR0_PMUVER_SHIFT,
+                                                     kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
+               break;
+       case SYS_ID_DFR0_EL1:
+               /* Limit guests to PMUv3 for ARMv8.4 */
                val = cpuid_feature_cap_perfmon_field(val,
-                                               ID_DFR0_PERFMON_SHIFT,
-                                               ID_DFR0_PERFMON_8_1);
+                                                     ID_DFR0_PERFMON_SHIFT,
+                                                     kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
+               break;
        }
  
        return val;
  static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
                                  const struct sys_reg_desc *r)
  {
 -      u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
 -                       (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
 +      u32 id = reg_to_encoding(r);
  
        switch (id) {
        case SYS_ID_AA64ZFR0_EL1:
@@@ -1489,10 -1494,9 +1501,11 @@@ static const struct sys_reg_desc sys_re
        { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
        { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
  
 -      { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
 -      { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
 +      { PMU_SYS_REG(SYS_PMINTENSET_EL1),
 +        .access = access_pminten, .reg = PMINTENSET_EL1 },
 +      { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
 +        .access = access_pminten, .reg = PMINTENSET_EL1 },
+       { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
  
        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
        { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
        { SYS_DESC(SYS_CTR_EL0), access_ctr },
  
 -      { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
 -      { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
 -      { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
 -      { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
 -      { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
 -      { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
 -      { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
 -      { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
 -      { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
 -      { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
 -      { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
 +      { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
 +        .reset = reset_pmcr, .reg = PMCR_EL0 },
 +      { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
 +        .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
 +      { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
 +        .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
 +      { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
 +        .access = access_pmovs, .reg = PMOVSSET_EL0 },
 +      { PMU_SYS_REG(SYS_PMSWINC_EL0),
 +        .access = access_pmswinc, .reg = PMSWINC_EL0 },
 +      { PMU_SYS_REG(SYS_PMSELR_EL0),
 +        .access = access_pmselr, .reg = PMSELR_EL0 },
 +      { PMU_SYS_REG(SYS_PMCEID0_EL0),
 +        .access = access_pmceid, .reset = NULL },
 +      { PMU_SYS_REG(SYS_PMCEID1_EL0),
 +        .access = access_pmceid, .reset = NULL },
 +      { PMU_SYS_REG(SYS_PMCCNTR_EL0),
 +        .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
 +      { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
 +        .access = access_pmu_evtyper, .reset = NULL },
 +      { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
 +        .access = access_pmu_evcntr, .reset = NULL },
        /*
         * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
         * in 32bit mode. Here we choose to reset it as zero for consistency.
         */
 -      { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
 -      { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
 +      { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
 +        .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
 +      { PMU_SYS_REG(SYS_PMOVSSET_EL0),
 +        .access = access_pmovs, .reg = PMOVSSET_EL0 },
  
        { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
        { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
         * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
         * in 32bit mode. Here we choose to reset it as zero for consistency.
         */
 -      { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
 +      { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
 +        .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
  
        { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
        { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
        { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
  };
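Side note (illustrative, entry written out by hand, not part of the patch): because PMU_SYS_REG() supplies .reset = reset_unknown and .visibility = pmu_visibility as defaults, a later designated initialiser such as .reset = reset_pmcr simply overrides the default (with repeated designators, the last one wins). The PMCR_EL0 entry above is therefore equivalent to:

    { SYS_DESC(SYS_PMCR_EL0),
      .reset = reset_unknown,           /* default from PMU_SYS_REG()        */
      .visibility = pmu_visibility,     /* hidden (UNDEF) without a vCPU PMU */
      .access = access_pmcr,
      .reset = reset_pmcr,              /* last designator wins              */
      .reg = PMCR_EL0 },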
  
- static bool trap_dbgidr(struct kvm_vcpu *vcpu,
+ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
  {
                p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
                             (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
                             (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
-                            | (6 << 16) | (el3 << 14) | (el3 << 12));
+                            | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
                return true;
        }
  }
   * guest. Revisit this one day, would this principle change.
   */
  static const struct sys_reg_desc cp14_regs[] = {
-       /* DBGIDR */
-       { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
+       /* DBGDIDR */
+       { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
        /* DBGDTRRXext */
        { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
  
@@@ -1918,8 -1908,8 +1931,8 @@@ static const struct sys_reg_desc cp15_r
        { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
        { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
        { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
-       { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
-       { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
+       { AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
+       { AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
        { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
        { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
        { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
        { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
        { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
        { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
+       { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid },
+       { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid },
+       /* PMMIR */
+       { Op1( 0), CRn( 9), CRm(14), Op2( 6), trap_raz_wi },
  
        /* PRRR/MAIR0 */
        { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },