Git Repo - J-linux.git/commitdiff
KVM: selftests: Increase robustness of LLC cache misses in PMU counters test
author: Maxim Levitsky <[email protected]>
Fri, 28 Jun 2024 00:55:57 +0000 (17:55 -0700)
committer: Sean Christopherson <[email protected]>
Fri, 28 Jun 2024 15:44:03 +0000 (08:44 -0700)
Currently the PMU counters test does a single CLFLUSH{,OPT} on the loop's
code, but due to speculative execution this might not cause LLC misses
within the measured section.

Instead of doing a single flush before the loop, do a cache flush on each
iteration of the loop to confuse the prediction and ensure that at least
one cache miss occurs within the measured section.

Signed-off-by: Maxim Levitsky <[email protected]>
[sean: keep MFENCE, massage changelog]
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
tools/testing/selftests/kvm/x86_64/pmu_counters_test.c

index bb40d7c0f83e8433647f4ef11505cef5a6c110f0..698cb36989db1367f8fb0f75fbaa88d691ee2de2 100644 (file)
 /* Each iteration of the loop retires one branch instruction. */
 #define NUM_BRANCH_INSNS_RETIRED       (NUM_LOOPS)
 
-/* Number of instructions in each loop. */
-#define NUM_INSNS_PER_LOOP             1
+/*
+ * Number of instructions in each loop. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE,
+ * 1 LOOP.
+ */
+#define NUM_INSNS_PER_LOOP             3
 
 /*
  * Number of "extra" instructions that will be counted, i.e. the number of
  * instructions that are needed to set up the loop and then disable the
- * counter.  1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE, 2 MOV, 2 XOR, 1 WRMSR.
+ * counter.  2 MOV, 2 XOR, 1 WRMSR.
  */
-#define NUM_EXTRA_INSNS                        7
+#define NUM_EXTRA_INSNS                        5
 
 /* Total number of instructions retired within the measured section. */
 #define NUM_INSNS_RETIRED              (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)
@@ -144,8 +147,8 @@ sanity_checks:
  * before the end of the sequence.
  *
  * If CLFUSH{,OPT} is supported, flush the cacheline containing (at least) the
- * start of the loop to force LLC references and misses, i.e. to allow testing
- * that those events actually count.
+ * CLFUSH{,OPT} instruction on each loop iteration to force LLC references and
+ * misses, i.e. to allow testing that those events actually count.
  *
  * If forced emulation is enabled (and specified), force emulation on a subset
  * of the measured code to verify that KVM correctly emulates instructions and
@@ -155,10 +158,11 @@ sanity_checks:
 #define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP)                                \
 do {                                                                           \
        __asm__ __volatile__("wrmsr\n\t"                                        \
+                            " mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t"      \
+                            "1:\n\t"                                           \
                             clflush "\n\t"                                     \
                             "mfence\n\t"                                       \
-                            "1: mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t"    \
-                            FEP "loop .\n\t"                                   \
+                            FEP "loop 1b\n\t"                                  \
                             FEP "mov %%edi, %%ecx\n\t"                         \
                             FEP "xor %%eax, %%eax\n\t"                         \
                             FEP "xor %%edx, %%edx\n\t"                         \
@@ -173,9 +177,9 @@ do {                                                                                \
        wrmsr(pmc_msr, 0);                                                      \
                                                                                \
        if (this_cpu_has(X86_FEATURE_CLFLUSHOPT))                               \
-               GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt 1f", FEP);   \
+               GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt .", FEP);    \
        else if (this_cpu_has(X86_FEATURE_CLFLUSH))                             \
-               GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush 1f", FEP);      \
+               GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush .", FEP);       \
        else                                                                    \
                GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP);             \
                                                                                \
This page took 0.065591 seconds and 4 git commands to generate.