#define MSR_IA32_TSX_CTRL 0x122
#define MSR_IA32_TSCDEADLINE 0x6e0
#define MSR_IA32_PKRS 0x6e1
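+/* Architectural LBR (Arch LBR) MSRs, as defined in the Intel SDM. */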
+#define MSR_ARCH_LBR_CTL 0x000014ce
+#define MSR_ARCH_LBR_DEPTH 0x000014cf
+#define MSR_ARCH_LBR_FROM_0 0x00001500
+#define MSR_ARCH_LBR_TO_0 0x00001600
+#define MSR_ARCH_LBR_INFO_0 0x00001200
#define FEATURE_CONTROL_LOCKED (1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1ULL << 1)
    uint64_t msr_xfd;
    uint64_t msr_xfd_err;
+    /* Per-VCPU Arch LBR MSRs */
+    uint64_t msr_lbr_ctl;
+    uint64_t msr_lbr_depth;
+    LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];
+
    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
                              env->msr_xfd_err);
        }
+        if (kvm_enabled() && cpu->enable_pmu &&
+            (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
+            uint64_t depth;
+            int i, ret;
+
+            /*
+             * Only migrate Arch LBR state when: 1) Arch LBR is enabled
+             * for the migrated vCPU, and 2) the host Arch LBR depth
+             * matches the source guest's, to avoid a guest/host MSR
+             * configuration mismatch and the misbehavior it can cause.
+             */
+            ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
+
+            if (ret == 1 && (env->msr_lbr_ctl & 0x1) && !!depth &&
+                depth == env->msr_lbr_depth) {
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);
+
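+                /*
+                 * Records with a zero FROM address are unused slots;
+                 * only valid LBR entries are copied into the buffer.
+                 */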
+                for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
+                    if (!env->lbr_records[i].from) {
+                        continue;
+                    }
+                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i,
+                                      env->lbr_records[i].from);
+                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i,
+                                      env->lbr_records[i].to);
+                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i,
+                                      env->lbr_records[i].info);
+                }
+            }
+        }
+
        /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         *       kvm_put_msr_feature_control. */
    }
        kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
    }
+    if (kvm_enabled() && cpu->enable_pmu &&
+        (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
+        uint64_t ctl, depth;
+        int i, ret2;
+
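+        /*
+         * Request the Arch LBR MSRs from KVM only when Arch LBR is
+         * currently enabled and the reported depth matches the number
+         * of records tracked here, so unused record MSRs are never
+         * read back.
+         */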
+        ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_CTL, &ctl);
+        ret2 = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
+        if (ret == 1 && ret2 == 1 && (ctl & 0x1) &&
+            depth == ARCH_LBR_NR_ENTRIES) {
+            kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
+            kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);
+
+            for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0);
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0);
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0);
+            }
+        }
+    }
+
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
        case MSR_IA32_XFD_ERR:
            env->msr_xfd_err = msrs[i].data;
            break;
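+        /* Arch LBR state returned by KVM_GET_MSRS is stored in the vCPU env. */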
+        case MSR_ARCH_LBR_CTL:
+            env->msr_lbr_ctl = msrs[i].data;
+            break;
+        case MSR_ARCH_LBR_DEPTH:
+            env->msr_lbr_depth = msrs[i].data;
+            break;
+        case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+            env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data;
+            break;
+        case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+            env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data;
+            break;
+        case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
+            env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data;
+            break;
        }
    }