// SPDX-License-Identifier: GPL-2.0
/*
 * perf.c - performance monitor
 *
 * Copyright (C) 2021 Intel Corporation
 */
#include <linux/spinlock.h>

#include "iommu.h"
#include "perf.h"
/* Serializes allocation of iommu->perf_statistic and all updates to it. */
static DEFINE_SPINLOCK(latency_lock);
18 bool dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type)
20 struct latency_statistic *lstat = iommu->perf_statistic;
22 return lstat && lstat[type].enabled;
25 int dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type)
27 struct latency_statistic *lstat;
31 if (dmar_latency_enabled(iommu, type))
34 spin_lock_irqsave(&latency_lock, flags);
35 if (!iommu->perf_statistic) {
36 iommu->perf_statistic = kzalloc(sizeof(*lstat) * DMAR_LATENCY_NUM,
38 if (!iommu->perf_statistic) {
44 lstat = iommu->perf_statistic;
46 if (!lstat[type].enabled) {
47 lstat[type].enabled = true;
48 lstat[type].counter[COUNTS_MIN] = UINT_MAX;
52 spin_unlock_irqrestore(&latency_lock, flags);
57 void dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type)
59 struct latency_statistic *lstat = iommu->perf_statistic;
62 if (!dmar_latency_enabled(iommu, type))
65 spin_lock_irqsave(&latency_lock, flags);
66 memset(&lstat[type], 0, sizeof(*lstat) * DMAR_LATENCY_NUM);
67 spin_unlock_irqrestore(&latency_lock, flags);
70 void dmar_latency_update(struct intel_iommu *iommu, enum latency_type type, u64 latency)
72 struct latency_statistic *lstat = iommu->perf_statistic;
76 if (!dmar_latency_enabled(iommu, type))
79 spin_lock_irqsave(&latency_lock, flags);
81 lstat[type].counter[COUNTS_10e2]++;
82 else if (latency < 1000)
83 lstat[type].counter[COUNTS_10e3]++;
84 else if (latency < 10000)
85 lstat[type].counter[COUNTS_10e4]++;
86 else if (latency < 100000)
87 lstat[type].counter[COUNTS_10e5]++;
88 else if (latency < 1000000)
89 lstat[type].counter[COUNTS_10e6]++;
90 else if (latency < 10000000)
91 lstat[type].counter[COUNTS_10e7]++;
93 lstat[type].counter[COUNTS_10e8_plus]++;
95 min = lstat[type].counter[COUNTS_MIN];
96 max = lstat[type].counter[COUNTS_MAX];
97 lstat[type].counter[COUNTS_MIN] = min_t(u64, min, latency);
98 lstat[type].counter[COUNTS_MAX] = max_t(u64, max, latency);
99 lstat[type].counter[COUNTS_SUM] += latency;
100 lstat[type].samples++;
101 spin_unlock_irqrestore(&latency_lock, flags);
/*
 * Column headers for the snapshot, one per COUNTS_* slot, in enum order:
 * seven histogram buckets, then min/max/average (reported in us).
 * NOTE(review): the source extraction dropped the first (<0.1us) entry and
 * the closing brace; exact column padding should be confirmed against the
 * consumer of dmar_latency_snapshot().
 */
static char *latency_counter_names[] = {
	"          <0.1us",
	" 0.1us-1us", " 1us-10us", " 10us-100us",
	" 100us-1ms", " 1ms-10ms", " >=10ms",
	" min(us)", " max(us)", " average(us)"
};
/*
 * Row labels for the snapshot, one per DMAR_LATENCY_* type, in enum order.
 * NOTE(review): the source extraction dropped the last (svm_prq) entry and
 * the closing brace; the array must have DMAR_LATENCY_NUM entries.
 */
static char *latency_type_names[] = {
	" inv_iotlb", " inv_devtlb", " inv_iec",
	" svm_prq"
};
116 int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
118 struct latency_statistic *lstat = iommu->perf_statistic;
122 memset(str, 0, size);
124 for (i = 0; i < COUNTS_NUM; i++)
125 bytes += snprintf(str + bytes, size - bytes,
126 "%s", latency_counter_names[i]);
128 spin_lock_irqsave(&latency_lock, flags);
129 for (i = 0; i < DMAR_LATENCY_NUM; i++) {
130 if (!dmar_latency_enabled(iommu, i))
133 bytes += snprintf(str + bytes, size - bytes,
134 "\n%s", latency_type_names[i]);
136 for (j = 0; j < COUNTS_NUM; j++) {
137 u64 val = lstat[i].counter[j];
144 val = div_u64(val, 1000);
147 val = div_u64(val, 1000);
150 if (lstat[i].samples)
151 val = div_u64(val, (lstat[i].samples * 1000));
159 bytes += snprintf(str + bytes, size - bytes,
163 spin_unlock_irqrestore(&latency_lock, flags);