// SPDX-License-Identifier: MIT

/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/bitmap.h>
#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_regs.h"
#include "intel_sseu_debugfs.h"
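
/*
 * CHV: the power-gate signature registers report which subslices and EU
 * pairs are currently power-gated. A set *_PG_ENABLE bit means the unit
 * is gated off, so each clear pair bit contributes two enabled EUs.
 */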
static void cherryview_sseu_device_status(struct intel_gt *gt,
                                          struct sseu_dev_info *sseu)
{
#define SS_MAX 2
        struct intel_uncore *uncore = gt->uncore;
        const int ss_max = SS_MAX;
        u32 sig1[SS_MAX], sig2[SS_MAX];
        int ss;

        sig1[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG1);
        sig1[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG1);
        sig2[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG2);
        sig2[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG2);

        for (ss = 0; ss < ss_max; ss++) {
                unsigned int eu_cnt;

                if (sig1[ss] & CHV_SS_PG_ENABLE)
                        /* skip disabled subslice */
                        continue;

                sseu->slice_mask = BIT(0);
                sseu->subslice_mask.hsw[0] |= BIT(ss);
                eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
                         ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
                         ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
                         ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
                sseu->eu_total += eu_cnt;
                sseu->eu_per_subslice = max_t(unsigned int,
                                              sseu->eu_per_subslice, eu_cnt);
        }
#undef SS_MAX
}
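
/*
 * Gen11+: sample the slice/subslice/EU power-gating ACK registers and
 * fold them into @sseu. Each EU ACK bit represents a pair of EUs, hence
 * the 2 * hweight32() when counting.
 */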
static void gen11_sseu_device_status(struct intel_gt *gt,
                                     struct sseu_dev_info *sseu)
{
#define SS_MAX 8
        struct intel_uncore *uncore = gt->uncore;
        const struct intel_gt_info *info = &gt->info;
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;

        for (s = 0; s < info->sseu.max_slices; s++) {
                /*
                 * FIXME: Valid SS Mask respects the spec and reads only
                 * valid bits for those registers, excluding reserved,
                 * although this seems wrong because it would leave many
                 * subslices without an ACK.
                 */
                s_reg[s] = intel_uncore_read(uncore, GEN10_SLICE_PGCTL_ACK(s)) &
                           GEN10_PGCTL_VALID_SS_MASK(s);
                eu_reg[2 * s] = intel_uncore_read(uncore,
                                                  GEN10_SS01_EU_PGCTL_ACK(s));
                eu_reg[2 * s + 1] = intel_uncore_read(uncore,
                                                      GEN10_SS23_EU_PGCTL_ACK(s));
        }

        eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
                     GEN9_PGCTL_SSA_EU19_ACK |
                     GEN9_PGCTL_SSA_EU210_ACK |
                     GEN9_PGCTL_SSA_EU311_ACK;
        eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
                     GEN9_PGCTL_SSB_EU19_ACK |
                     GEN9_PGCTL_SSB_EU210_ACK |
                     GEN9_PGCTL_SSB_EU311_ACK;

        for (s = 0; s < info->sseu.max_slices; s++) {
                if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
                        /* skip disabled slice */
                        continue;

                sseu->slice_mask |= BIT(s);
                sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];

                for (ss = 0; ss < info->sseu.max_subslices; ss++) {
                        unsigned int eu_cnt;

                        if (info->sseu.has_subslice_pg &&
                            !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
                                /* skip disabled subslice */
                                continue;

                        eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
                                               eu_mask[ss % 2]);
                        sseu->eu_total += eu_cnt;
                        sseu->eu_per_subslice = max_t(unsigned int,
                                                      sseu->eu_per_subslice,
                                                      eu_cnt);
                }
        }
#undef SS_MAX
}
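
/*
 * Gen9: same ACK-register scheme as gen11. On GEN9_LP the enabled
 * subslice mask is rebuilt from the per-subslice ACK bits; on GEN9_BC
 * the fused subslice mask from gt->info is reported instead.
 */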
static void gen9_sseu_device_status(struct intel_gt *gt,
                                    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
        struct intel_uncore *uncore = gt->uncore;
        const struct intel_gt_info *info = &gt->info;
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;

        for (s = 0; s < info->sseu.max_slices; s++) {
                s_reg[s] = intel_uncore_read(uncore, GEN9_SLICE_PGCTL_ACK(s));
                eu_reg[2 * s] =
                        intel_uncore_read(uncore, GEN9_SS01_EU_PGCTL_ACK(s));
                eu_reg[2 * s + 1] =
                        intel_uncore_read(uncore, GEN9_SS23_EU_PGCTL_ACK(s));
        }

        eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
                     GEN9_PGCTL_SSA_EU19_ACK |
                     GEN9_PGCTL_SSA_EU210_ACK |
                     GEN9_PGCTL_SSA_EU311_ACK;
        eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
                     GEN9_PGCTL_SSB_EU19_ACK |
                     GEN9_PGCTL_SSB_EU210_ACK |
                     GEN9_PGCTL_SSB_EU311_ACK;

        for (s = 0; s < info->sseu.max_slices; s++) {
                if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
                        /* skip disabled slice */
                        continue;

                sseu->slice_mask |= BIT(s);

                if (IS_GEN9_BC(gt->i915))
                        sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];

                for (ss = 0; ss < info->sseu.max_subslices; ss++) {
                        unsigned int eu_cnt;

                        if (IS_GEN9_LP(gt->i915)) {
                                if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
                                        /* skip disabled subslice */
                                        continue;

                                sseu->subslice_mask.hsw[s] |= BIT(ss);
                        }

                        eu_cnt = eu_reg[2 * s + ss / 2] & eu_mask[ss % 2];
                        eu_cnt = 2 * hweight32(eu_cnt);

                        sseu->eu_total += eu_cnt;
                        sseu->eu_per_subslice = max_t(unsigned int,
                                                      sseu->eu_per_subslice,
                                                      eu_cnt);
                }
        }
#undef SS_MAX
}
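
/*
 * BDW: only slice-level status is exposed, so subslice and EU figures
 * are derived from the static fuse info, minus one EU for every
 * subslice that was fused down to 7 EUs.
 */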
static void bdw_sseu_device_status(struct intel_gt *gt,
                                   struct sseu_dev_info *sseu)
{
        const struct intel_gt_info *info = &gt->info;
        u32 slice_info = intel_uncore_read(gt->uncore, GEN8_GT_SLICE_INFO);
        int s;

        sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

        if (sseu->slice_mask) {
                sseu->eu_per_subslice = info->sseu.eu_per_subslice;
                for (s = 0; s < fls(sseu->slice_mask); s++)
                        sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
                sseu->eu_total = sseu->eu_per_subslice *
                                 intel_sseu_subslice_total(sseu);

                /* subtract fused-off EU(s) from enabled slice(s) */
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        u8 subslice_7eu = info->sseu.subslice_7eu[s];

                        sseu->eu_total -= hweight8(subslice_7eu);
                }
        }
}
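
/*
 * Print one sseu_dev_info block: either the static "Available" limits
 * from the fuses, or the "Enabled" state just sampled from the hardware.
 */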
static void i915_print_sseu_info(struct seq_file *m,
                                 bool is_available_info,
                                 bool has_pooled_eu,
                                 const struct sseu_dev_info *sseu)
{
        const char *type = is_available_info ? "Available" : "Enabled";

        seq_printf(m, "  %s Slice Mask: %04x\n", type,
                   sseu->slice_mask);
        seq_printf(m, "  %s Slice Total: %u\n", type,
                   hweight8(sseu->slice_mask));
        seq_printf(m, "  %s Subslice Total: %u\n", type,
                   intel_sseu_subslice_total(sseu));
        intel_sseu_print_ss_info(type, sseu, m);
        seq_printf(m, "  %s EU Total: %u\n", type,
                   sseu->eu_total);
        seq_printf(m, "  %s EU Per Subslice: %u\n", type,
                   sseu->eu_per_subslice);

        if (!is_available_info)
                return;

        seq_printf(m, "  Has Pooled EU: %s\n", str_yes_no(has_pooled_eu));
        if (has_pooled_eu)
                seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

        seq_printf(m, "  Has Slice Power Gating: %s\n",
                   str_yes_no(sseu->has_slice_pg));
        seq_printf(m, "  Has Subslice Power Gating: %s\n",
                   str_yes_no(sseu->has_subslice_pg));
        seq_printf(m, "  Has EU Power Gating: %s\n",
                   str_yes_no(sseu->has_eu_pg));
}

/*
 * This is called from top-level debugfs as well, so we can't get the gt
 * from m->private.
 */
int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        const struct intel_gt_info *info = &gt->info;
        struct sseu_dev_info *sseu;
        intel_wakeref_t wakeref;

        if (GRAPHICS_VER(i915) < 8)
                return -ENODEV;

        seq_puts(m, "SSEU Device Info\n");
        i915_print_sseu_info(m, true, HAS_POOLED_EU(i915), &info->sseu);

        seq_puts(m, "SSEU Device Status\n");

        sseu = kzalloc(sizeof(*sseu), GFP_KERNEL);
        if (!sseu)
                return -ENOMEM;

        intel_sseu_set_info(sseu, info->sseu.max_slices,
                            info->sseu.max_subslices,
                            info->sseu.max_eus_per_subslice);

        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                if (IS_CHERRYVIEW(i915))
                        cherryview_sseu_device_status(gt, sseu);
                else if (IS_BROADWELL(i915))
                        bdw_sseu_device_status(gt, sseu);
                else if (GRAPHICS_VER(i915) == 9)
                        gen9_sseu_device_status(gt, sseu);
                else if (GRAPHICS_VER(i915) >= 11)
                        gen11_sseu_device_status(gt, sseu);
        }

        i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), sseu);

        kfree(sseu);

        return 0;
}
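
/* debugfs show() callbacks; the gt pointer is stashed in m->private */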
static int sseu_status_show(struct seq_file *m, void *unused)
{
        struct intel_gt *gt = m->private;

        return intel_sseu_status(m, gt);
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_status);

static int sseu_topology_show(struct seq_file *m, void *unused)
{
        struct intel_gt *gt = m->private;
        struct drm_printer p = drm_seq_file_printer(m);

        intel_sseu_print_topology(gt->i915, &gt->info.sseu, &p);

        return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_topology);
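
/* Create the per-gt "sseu_status" and "sseu_topology" debugfs files. */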
void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
        static const struct intel_gt_debugfs_file files[] = {
                { "sseu_status", &sseu_status_fops, NULL },
                { "sseu_topology", &sseu_topology_fops, NULL },
        };

        intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}