/* drivers/gpu/drm/i915/gt/intel_engine_user.c */
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include <linux/list.h>
7 #include <linux/list_sort.h>
8 #include <linux/llist.h>
9
10 #include "i915_drv.h"
11 #include "intel_engine.h"
12 #include "intel_engine_user.h"
13 #include "intel_gt.h"
14 #include "uc/intel_guc_submission.h"
15
16 struct intel_engine_cs *
17 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
18 {
19         struct rb_node *p = i915->uabi_engines.rb_node;
20
21         while (p) {
22                 struct intel_engine_cs *it =
23                         rb_entry(p, typeof(*it), uabi_node);
24
25                 if (class < it->uabi_class)
26                         p = p->rb_left;
27                 else if (class > it->uabi_class ||
28                          instance > it->uabi_instance)
29                         p = p->rb_right;
30                 else if (instance < it->uabi_instance)
31                         p = p->rb_left;
32                 else
33                         return it;
34         }
35
36         return NULL;
37 }
38
39 void intel_engine_add_user(struct intel_engine_cs *engine)
40 {
41         llist_add((struct llist_node *)&engine->uabi_node,
42                   (struct llist_head *)&engine->i915->uabi_engines);
43 }
44
/* Map hardware engine class ids to the uabi engine class ids. */
static const u8 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
};
51
52 static int engine_cmp(void *priv, const struct list_head *A,
53                       const struct list_head *B)
54 {
55         const struct intel_engine_cs *a =
56                 container_of((struct rb_node *)A, typeof(*a), uabi_node);
57         const struct intel_engine_cs *b =
58                 container_of((struct rb_node *)B, typeof(*b), uabi_node);
59
60         if (uabi_classes[a->class] < uabi_classes[b->class])
61                 return -1;
62         if (uabi_classes[a->class] > uabi_classes[b->class])
63                 return 1;
64
65         if (a->instance < b->instance)
66                 return -1;
67         if (a->instance > b->instance)
68                 return 1;
69
70         return 0;
71 }
72
73 static struct llist_node *get_engines(struct drm_i915_private *i915)
74 {
75         return llist_del_all((struct llist_head *)&i915->uabi_engines);
76 }
77
78 static void sort_engines(struct drm_i915_private *i915,
79                          struct list_head *engines)
80 {
81         struct llist_node *pos, *next;
82
83         llist_for_each_safe(pos, next, get_engines(i915)) {
84                 struct intel_engine_cs *engine =
85                         container_of((struct rb_node *)pos, typeof(*engine),
86                                      uabi_node);
87                 list_add((struct list_head *)&engine->uabi_node, engines);
88         }
89         list_sort(NULL, engines, engine_cmp);
90 }
91
/*
 * Compute the common scheduler capability mask across all uabi engines
 * and store it in i915->caps.scheduler.
 *
 * Each engine contributes the caps it supports to @enabled and the caps
 * it lacks to @disabled; only caps supported by every engine survive the
 * final enabled & ~disabled mask.
 */
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
		/* Pair each engine-flag bit with its scheduler-cap bit. */
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->sched_engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		if (intel_uc_uses_guc_submission(&i915->gt.uc))
			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	/* Without the basic ENABLED cap, report no scheduler caps at all. */
	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
134
135 const char *intel_engine_class_repr(u8 class)
136 {
137         static const char * const uabi_names[] = {
138                 [RENDER_CLASS] = "rcs",
139                 [COPY_ENGINE_CLASS] = "bcs",
140                 [VIDEO_DECODE_CLASS] = "vcs",
141                 [VIDEO_ENHANCEMENT_CLASS] = "vecs",
142         };
143
144         if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
145                 return "xxx";
146
147         return uabi_names[class];
148 }
149
/*
 * Cursor used while assigning legacy execbuf ring indices: tracks the gt
 * and engine class currently being numbered, plus the next instance
 * within that class (see add_legacy_ring()).
 */
struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};
155
156 static int legacy_ring_idx(const struct legacy_ring *ring)
157 {
158         static const struct {
159                 u8 base, max;
160         } map[] = {
161                 [RENDER_CLASS] = { RCS0, 1 },
162                 [COPY_ENGINE_CLASS] = { BCS0, 1 },
163                 [VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
164                 [VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
165         };
166
167         if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
168                 return INVALID_ENGINE;
169
170         if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
171                 return INVALID_ENGINE;
172
173         return map[ring->class].base + ring->instance;
174 }
175
176 static void add_legacy_ring(struct legacy_ring *ring,
177                             struct intel_engine_cs *engine)
178 {
179         if (engine->gt != ring->gt || engine->class != ring->class) {
180                 ring->gt = engine->gt;
181                 ring->class = engine->class;
182                 ring->instance = 0;
183         }
184
185         engine->legacy_idx = legacy_ring_idx(ring);
186         if (engine->legacy_idx != INVALID_ENGINE)
187                 ring->instance++;
188 }
189
/*
 * Publish the engines to userspace: drain and sort the pending engine
 * list, assign each engine its final uabi (class, instance) id and name,
 * and build the i915->uabi_engines rbtree consumed by
 * intel_engine_lookup_user().
 */
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	struct legacy_ring ring = {};
	u8 uabi_instances[4] = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	LIST_HEAD(engines);

	sort_engines(i915, &engines);

	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		/* it aliases the rb_node inside uabi_node, cf. sort_engines() */
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)it, typeof(*engine),
				     uabi_node);
		char old[sizeof(engine->name)];

		if (intel_gt_has_unrecoverable_error(engine->gt))
			continue; /* ignore incomplete engines */

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];

		/* Instances number upwards per class, in sorted order. */
		GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances));
		engine->uabi_instance = uabi_instances[engine->uabi_class]++;

		/* Replace the internal name with the final user facing name */
		memcpy(old, engine->name, sizeof(engine->name));
		scnprintf(engine->name, sizeof(engine->name), "%s%u",
			  intel_engine_class_repr(engine->class),
			  engine->uabi_instance);
		DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);

		/*
		 * The input list is sorted, so each engine links in as the
		 * right child of the previously inserted node — no search
		 * from the root is required.
		 */
		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		/* Cross-check the rbtree against the ids assigned above. */
		for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) {
			for (inst = 0; inst < uabi_instances[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		/* On any inconsistency, expose no engines rather than bad ones. */
		if (drm_WARN(&i915->drm, errors,
			     "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}
292
293 unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
294 {
295         struct intel_engine_cs *engine;
296         unsigned int which;
297
298         which = 0;
299         for_each_uabi_engine(engine, i915)
300                 if (engine->default_state)
301                         which |= BIT(engine->uabi_class);
302
303         return which;
304 }