drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  */
25
26 #include <linux/firmware.h>
27 #include <linux/pm_runtime.h>
28
29 #include "amdgpu.h"
30 #include "amdgpu_gfx.h"
31 #include "amdgpu_rlc.h"
32 #include "amdgpu_ras.h"
33 #include "amdgpu_reset.h"
34 #include "amdgpu_xcp.h"
35 #include "amdgpu_xgmi.h"
36
37 /* delay 0.1 second to enable gfx off feature */
38 #define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
39
40 #define GFX_OFF_NO_DELAY 0
41
42 /*
43  * GPU GFX IP block helper functions.
44  */
45
46 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
47                                 int pipe, int queue)
48 {
49         int bit = 0;
50
51         bit += mec * adev->gfx.mec.num_pipe_per_mec
52                 * adev->gfx.mec.num_queue_per_pipe;
53         bit += pipe * adev->gfx.mec.num_queue_per_pipe;
54         bit += queue;
55
56         return bit;
57 }
58
59 void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
60                                  int *mec, int *pipe, int *queue)
61 {
62         *queue = bit % adev->gfx.mec.num_queue_per_pipe;
63         *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
64                 % adev->gfx.mec.num_pipe_per_mec;
65         *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
66                / adev->gfx.mec.num_pipe_per_mec;
67
68 }
69
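/*
 * Worked example (illustrative note, not driver code): the queue "bit" used
 * above is a mixed-radix encoding of (mec, pipe, queue).  Assuming a
 * hypothetical layout of 4 pipes per MEC and 8 queues per pipe, mec=1,
 * pipe=2, queue=3 encodes to 1*4*8 + 2*8 + 3 = 51, and the decode helper
 * above recovers the same triple from bit 51.
 */
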
70 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
71                                      int xcc_id, int mec, int pipe, int queue)
72 {
73         return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
74                         adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
75 }
76
77 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
78                                int me, int pipe, int queue)
79 {
80         int bit = 0;
81
82         bit += me * adev->gfx.me.num_pipe_per_me
83                 * adev->gfx.me.num_queue_per_pipe;
84         bit += pipe * adev->gfx.me.num_queue_per_pipe;
85         bit += queue;
86
87         return bit;
88 }
89
90 void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
91                                 int *me, int *pipe, int *queue)
92 {
93         *queue = bit % adev->gfx.me.num_queue_per_pipe;
94         *pipe = (bit / adev->gfx.me.num_queue_per_pipe)
95                 % adev->gfx.me.num_pipe_per_me;
96         *me = (bit / adev->gfx.me.num_queue_per_pipe)
97                 / adev->gfx.me.num_pipe_per_me;
98 }
99
100 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
101                                     int me, int pipe, int queue)
102 {
103         return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
104                         adev->gfx.me.queue_bitmap);
105 }
106
107 /**
108  * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
109  *
110  * @mask: array in which the per-shader array disable masks will be stored
111  * @max_se: number of SEs
112  * @max_sh: number of SHs
113  *
114  * The bitmask of CUs to be disabled in the shader array determined by se and
115  * sh is stored in mask[se * max_sh + sh].
116  */
117 void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
118 {
119         unsigned int se, sh, cu;
120         const char *p;
121
122         memset(mask, 0, sizeof(*mask) * max_se * max_sh);
123
124         if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
125                 return;
126
127         p = amdgpu_disable_cu;
128         for (;;) {
129                 char *next;
130                 int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
131
132                 if (ret < 3) {
133                         DRM_ERROR("amdgpu: could not parse disable_cu\n");
134                         return;
135                 }
136
137                 if (se < max_se && sh < max_sh && cu < 16) {
138                         DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
139                         mask[se * max_sh + sh] |= 1u << cu;
140                 } else {
141                         DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
142                                   se, sh, cu);
143                 }
144
145                 next = strchr(p, ',');
146                 if (!next)
147                         break;
148                 p = next + 1;
149         }
150 }
151
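/*
 * Usage sketch (assumed example, not taken from driver documentation): booting
 * with amdgpu.disable_cu=2.1.6,2.1.7 makes the parser above set bits 6 and 7
 * in mask[2 * max_sh + 1].  The helper below only restates that indexing for
 * illustration and is not referenced by the driver.
 */
static inline bool amdgpu_gfx_example_cu_disabled(const unsigned int *mask,
                                                  unsigned int max_sh,
                                                  unsigned int se,
                                                  unsigned int sh,
                                                  unsigned int cu)
{
        return mask[se * max_sh + sh] & (1u << cu);
}
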
152 static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
153 {
154         return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
155 }
156
157 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
158 {
159         if (amdgpu_compute_multipipe != -1) {
160                 DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
161                          amdgpu_compute_multipipe);
162                 return amdgpu_compute_multipipe == 1;
163         }
164
165         if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
166                 return true;
167
168         /* FIXME: spreading the queues across pipes causes perf regressions
169          * on POLARIS11 compute workloads */
170         if (adev->asic_type == CHIP_POLARIS11)
171                 return false;
172
173         return adev->gfx.mec.num_mec > 1;
174 }
175
176 bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
177                                                 struct amdgpu_ring *ring)
178 {
179         int queue = ring->queue;
180         int pipe = ring->pipe;
181
182         /* Policy: use pipe1 queue0 as high priority graphics queue if we
183          * have more than one gfx pipe.
184          */
185         if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
186             adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
187                 int me = ring->me;
188                 int bit;
189
190                 bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
191                 if (ring == &adev->gfx.gfx_ring[bit])
192                         return true;
193         }
194
195         return false;
196 }
197
198 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
199                                                struct amdgpu_ring *ring)
200 {
201         /* Policy: use 1st queue as high priority compute queue if we
202          * have more than one compute queue.
203          */
204         if (adev->gfx.num_compute_rings > 1 &&
205             ring == &adev->gfx.compute_ring[0])
206                 return true;
207
208         return false;
209 }
210
211 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
212 {
213         int i, j, queue, pipe;
214         bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
215         int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
216                                      adev->gfx.mec.num_queue_per_pipe,
217                                      adev->gfx.num_compute_rings);
218         int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
219
220         if (multipipe_policy) {
221                 /* policy: spread queues evenly across all pipes on MEC1 only;
222                  * for multiple XCCs, just reuse the original policy for simplicity */
223                 for (j = 0; j < num_xcc; j++) {
224                         for (i = 0; i < max_queues_per_mec; i++) {
225                                 pipe = i % adev->gfx.mec.num_pipe_per_mec;
226                                 queue = (i / adev->gfx.mec.num_pipe_per_mec) %
227                                          adev->gfx.mec.num_queue_per_pipe;
228
229                                 set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
230                                         adev->gfx.mec_bitmap[j].queue_bitmap);
231                         }
232                 }
233         } else {
234                 /* policy: amdgpu owns all queues in the given pipe */
235                 for (j = 0; j < num_xcc; j++) {
236                         for (i = 0; i < max_queues_per_mec; ++i)
237                                 set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
238                 }
239         }
240
241         for (j = 0; j < num_xcc; j++) {
242                 dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
243                         bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
244         }
245 }
246
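/*
 * Worked example (illustrative): with the multipipe policy, 4 pipes per MEC
 * and 8 compute rings, the loop above assigns ring i to
 * (pipe, queue) = (i % 4, i / 4), i.e. (0,0) (1,0) (2,0) (3,0) (0,1) (1,1)
 * (2,1) (3,1), so queues are spread evenly across the MEC1 pipes instead of
 * filling one pipe first.
 */
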
247 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
248 {
249         int i, queue, pipe;
250         bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
251         int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
252                                         adev->gfx.me.num_queue_per_pipe;
253
254         if (multipipe_policy) {
255                 /* policy: amdgpu owns the first queue per pipe at this stage;
256                  * this will be extended to multiple queues per pipe later */
257                 for (i = 0; i < max_queues_per_me; i++) {
258                         pipe = i % adev->gfx.me.num_pipe_per_me;
259                         queue = (i / adev->gfx.me.num_pipe_per_me) %
260                                 adev->gfx.me.num_queue_per_pipe;
261
262                         set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
263                                 adev->gfx.me.queue_bitmap);
264                 }
265         } else {
266                 for (i = 0; i < max_queues_per_me; ++i)
267                         set_bit(i, adev->gfx.me.queue_bitmap);
268         }
269
270         /* update the number of active graphics rings */
271         adev->gfx.num_gfx_rings =
272                 bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
273 }
274
275 static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
276                                   struct amdgpu_ring *ring, int xcc_id)
277 {
278         int queue_bit;
279         int mec, pipe, queue;
280
281         queue_bit = adev->gfx.mec.num_mec
282                     * adev->gfx.mec.num_pipe_per_mec
283                     * adev->gfx.mec.num_queue_per_pipe;
284
285         while (--queue_bit >= 0) {
286                 if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
287                         continue;
288
289                 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
290
291                 /*
292                  * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
293                  * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
294                  * can only be issued on queue 0.
295                  */
296                 if ((mec == 1 && pipe > 1) || queue != 0)
297                         continue;
298
299                 ring->me = mec + 1;
300                 ring->pipe = pipe;
301                 ring->queue = queue;
302
303                 return 0;
304         }
305
306         dev_err(adev->dev, "Failed to find a queue for KIQ\n");
307         return -EINVAL;
308 }
309
310 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id)
311 {
312         struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
313         struct amdgpu_irq_src *irq = &kiq->irq;
314         struct amdgpu_ring *ring = &kiq->ring;
315         int r = 0;
316
317         spin_lock_init(&kiq->ring_lock);
318
319         ring->adev = NULL;
320         ring->ring_obj = NULL;
321         ring->use_doorbell = true;
322         ring->xcc_id = xcc_id;
323         ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
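        /* doorbell_index entries count 64-bit doorbells, while a ring's
         * doorbell_index is a dword offset, hence the << 1 below (clarifying
         * note based on the amdgpu doorbell layout).
         */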
324         ring->doorbell_index =
325                 (adev->doorbell_index.kiq +
326                  xcc_id * adev->doorbell_index.xcc_doorbell_range)
327                 << 1;
328
329         r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
330         if (r)
331                 return r;
332
333         ring->eop_gpu_addr = kiq->eop_gpu_addr;
334         ring->no_scheduler = true;
335         snprintf(ring->name, sizeof(ring->name), "kiq_%hhu.%hhu.%hhu.%hhu",
336                  (unsigned char)xcc_id, (unsigned char)ring->me,
337                  (unsigned char)ring->pipe, (unsigned char)ring->queue);
338         r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
339                              AMDGPU_RING_PRIO_DEFAULT, NULL);
340         if (r)
341                 dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
342
343         return r;
344 }
345
346 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
347 {
348         amdgpu_ring_fini(ring);
349 }
350
351 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
352 {
353         struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
354
355         amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
356 }
357
358 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
359                         unsigned int hpd_size, int xcc_id)
360 {
361         int r;
362         u32 *hpd;
363         struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
364
365         r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
366                                     AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
367                                     &kiq->eop_gpu_addr, (void **)&hpd);
368         if (r) {
369                 dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
370                 return r;
371         }
372
373         memset(hpd, 0, hpd_size);
374
375         r = amdgpu_bo_reserve(kiq->eop_obj, true);
376         if (unlikely(r != 0))
377                 dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
378         amdgpu_bo_kunmap(kiq->eop_obj);
379         amdgpu_bo_unreserve(kiq->eop_obj);
380
381         return 0;
382 }
383
384 /* create MQD for each compute/gfx queue */
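/* The MQD (memory queue descriptor) holds the state the CP uses to initialize
 * and run a queue; a CPU-side backup copy is kept as well so the descriptor
 * can be restored later, e.g. across a GPU reset.
 */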
385 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
386                            unsigned int mqd_size, int xcc_id)
387 {
388         int r, i, j;
389         struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
390         struct amdgpu_ring *ring = &kiq->ring;
391         u32 domain = AMDGPU_GEM_DOMAIN_GTT;
392
393 #if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
394         /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
395         if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
396                 domain |= AMDGPU_GEM_DOMAIN_VRAM;
397 #endif
398
399         /* create MQD for KIQ */
400         if (!adev->enable_mes_kiq && !ring->mqd_obj) {
401                 /* Originally the KIQ MQD was placed in the GTT domain, but for SRIOV the VRAM domain
402                  * is a must; otherwise the hypervisor triggers a SAVE_VF failure after the driver is
403                  * unloaded, once the MQD has been deallocated and the GART unbound. To keep both paths
404                  * consistent, use the VRAM domain for the KIQ MQD on both SRIOV and bare metal.
405                  */
406                 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
407                                             AMDGPU_GEM_DOMAIN_VRAM |
408                                             AMDGPU_GEM_DOMAIN_GTT,
409                                             &ring->mqd_obj,
410                                             &ring->mqd_gpu_addr,
411                                             &ring->mqd_ptr);
412                 if (r) {
413                         dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
414                         return r;
415                 }
416
417                 /* prepare MQD backup */
418                 kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
419                 if (!kiq->mqd_backup) {
420                         dev_warn(adev->dev,
421                                  "no memory to create MQD backup for ring %s\n", ring->name);
422                         return -ENOMEM;
423                 }
424         }
425
426         if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
427                 /* create MQD for each KGQ */
428                 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
429                         ring = &adev->gfx.gfx_ring[i];
430                         if (!ring->mqd_obj) {
431                                 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
432                                                             domain, &ring->mqd_obj,
433                                                             &ring->mqd_gpu_addr, &ring->mqd_ptr);
434                                 if (r) {
435                                         dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
436                                         return r;
437                                 }
438
439                                 ring->mqd_size = mqd_size;
440                                 /* prepare MQD backup */
441                                 adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
442                                 if (!adev->gfx.me.mqd_backup[i]) {
443                                         dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
444                                         return -ENOMEM;
445                                 }
446                         }
447                 }
448         }
449
450         /* create MQD for each KCQ */
451         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
452                 j = i + xcc_id * adev->gfx.num_compute_rings;
453                 ring = &adev->gfx.compute_ring[j];
454                 if (!ring->mqd_obj) {
455                         r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
456                                                     domain, &ring->mqd_obj,
457                                                     &ring->mqd_gpu_addr, &ring->mqd_ptr);
458                         if (r) {
459                                 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
460                                 return r;
461                         }
462
463                         ring->mqd_size = mqd_size;
464                         /* prepare MQD backup */
465                         adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
466                         if (!adev->gfx.mec.mqd_backup[j]) {
467                                 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
468                                 return -ENOMEM;
469                         }
470                 }
471         }
472
473         return 0;
474 }
475
476 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
477 {
478         struct amdgpu_ring *ring = NULL;
479         int i, j;
480         struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
481
482         if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
483                 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
484                         ring = &adev->gfx.gfx_ring[i];
485                         kfree(adev->gfx.me.mqd_backup[i]);
486                         amdgpu_bo_free_kernel(&ring->mqd_obj,
487                                               &ring->mqd_gpu_addr,
488                                               &ring->mqd_ptr);
489                 }
490         }
491
492         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
493                 j = i + xcc_id * adev->gfx.num_compute_rings;
494                 ring = &adev->gfx.compute_ring[j];
495                 kfree(adev->gfx.mec.mqd_backup[j]);
496                 amdgpu_bo_free_kernel(&ring->mqd_obj,
497                                       &ring->mqd_gpu_addr,
498                                       &ring->mqd_ptr);
499         }
500
501         ring = &kiq->ring;
502         kfree(kiq->mqd_backup);
503         amdgpu_bo_free_kernel(&ring->mqd_obj,
504                               &ring->mqd_gpu_addr,
505                               &ring->mqd_ptr);
506 }
507
508 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
509 {
510         struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
511         struct amdgpu_ring *kiq_ring = &kiq->ring;
512         int i, r = 0;
513         int j;
514
515         if (adev->enable_mes) {
516                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
517                         j = i + xcc_id * adev->gfx.num_compute_rings;
518                         amdgpu_mes_unmap_legacy_queue(adev,
519                                                    &adev->gfx.compute_ring[j],
520                                                    RESET_QUEUES, 0, 0);
521                 }
522                 return 0;
523         }
524
525         if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
526                 return -EINVAL;
527
528         spin_lock(&kiq->ring_lock);
529         if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
530                                         adev->gfx.num_compute_rings)) {
531                 spin_unlock(&kiq->ring_lock);
532                 return -ENOMEM;
533         }
534
535         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
536                 j = i + xcc_id * adev->gfx.num_compute_rings;
537                 kiq->pmf->kiq_unmap_queues(kiq_ring,
538                                            &adev->gfx.compute_ring[j],
539                                            RESET_QUEUES, 0, 0);
540         }
541
542         /*
543          * This is a workaround: only skip the kiq_ring test
544          * during RAS recovery in the suspend stage for gfx 9.4.3/9.4.4.
545          */
546         if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
547             amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
548             amdgpu_ras_in_recovery(adev)) {
549                 spin_unlock(&kiq->ring_lock);
550                 return 0;
551         }
552
553         if (kiq_ring->sched.ready && !adev->job_hang)
554                 r = amdgpu_ring_test_helper(kiq_ring);
555         spin_unlock(&kiq->ring_lock);
556
557         return r;
558 }
559
560 int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
561 {
562         struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
563         struct amdgpu_ring *kiq_ring = &kiq->ring;
564         int i, r = 0;
565         int j;
566
567         if (adev->enable_mes) {
568                 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
569                         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
570                                 j = i + xcc_id * adev->gfx.num_gfx_rings;
571                                 amdgpu_mes_unmap_legacy_queue(adev,
572                                                       &adev->gfx.gfx_ring[j],
573                                                       PREEMPT_QUEUES, 0, 0);
574                         }
575                 }
576                 return 0;
577         }
578
579         if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
580                 return -EINVAL;
581
582         spin_lock(&kiq->ring_lock);
583         if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
584                 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
585                                                 adev->gfx.num_gfx_rings)) {
586                         spin_unlock(&kiq->ring_lock);
587                         return -ENOMEM;
588                 }
589
590                 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
591                         j = i + xcc_id * adev->gfx.num_gfx_rings;
592                         kiq->pmf->kiq_unmap_queues(kiq_ring,
593                                                    &adev->gfx.gfx_ring[j],
594                                                    PREEMPT_QUEUES, 0, 0);
595                 }
596         }
597
598         if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
599                 r = amdgpu_ring_test_helper(kiq_ring);
600         spin_unlock(&kiq->ring_lock);
601
602         return r;
603 }
604
605 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
606                                         int queue_bit)
607 {
608         int mec, pipe, queue;
609         int set_resource_bit = 0;
610
611         amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
612
613         set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
614
615         return set_resource_bit;
616 }
617
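/*
 * Worked example (illustrative): the SET_RESOURCES mask above assumes a fixed
 * layout of 4 pipes per MEC and 8 queues per pipe, so a queue bit that decodes
 * to mec=1, pipe=2, queue=3 maps to resource bit 1*4*8 + 2*8 + 3 = 51,
 * regardless of the num_pipe_per_mec/num_queue_per_pipe values used when the
 * queue bitmap was built.
 */
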
618 static int amdgpu_gfx_mes_enable_kcq(struct amdgpu_device *adev, int xcc_id)
619 {
620         struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
621         struct amdgpu_ring *kiq_ring = &kiq->ring;
622         uint64_t queue_mask = ~0ULL;
623         int r, i, j;
624
625         amdgpu_device_flush_hdp(adev, NULL);
626
627         if (!adev->enable_uni_mes) {
628                 spin_lock(&kiq->ring_lock);
629                 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->set_resources_size);
630                 if (r) {
631                         dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
632                         spin_unlock(&kiq->ring_lock);
633                         return r;
634                 }
635
636                 kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
637                 r = amdgpu_ring_test_helper(kiq_ring);
638                 spin_unlock(&kiq->ring_lock);
639                 if (r)
640                         dev_err(adev->dev, "KIQ failed to set resources\n");
641         }
642
643         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
644                 j = i + xcc_id * adev->gfx.num_compute_rings;
645                 r = amdgpu_mes_map_legacy_queue(adev,
646                                                 &adev->gfx.compute_ring[j]);
647                 if (r) {
648                         dev_err(adev->dev, "failed to map compute queue\n");
649                         return r;
650                 }
651         }
652
653         return 0;
654 }
655
656 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
657 {
658         struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
659         struct amdgpu_ring *kiq_ring = &kiq->ring;
660         uint64_t queue_mask = 0;
661         int r, i, j;
662
663         if (adev->mes.enable_legacy_queue_map)
664                 return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
665
666         if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
667                 return -EINVAL;
668
669         for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
670                 if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
671                         continue;
672
673                 /* This situation may be hit in the future if a new HW
674                  * generation exposes more than 64 queues. If so, the
675                  * definition of queue_mask needs updating */
676                 if (WARN_ON(i > (sizeof(queue_mask)*8))) {
677                         DRM_ERROR("Invalid KCQ enabled: %d\n", i);
678                         break;
679                 }
680
681                 queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
682         }
683
684         amdgpu_device_flush_hdp(adev, NULL);
685
686         DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
687                  kiq_ring->queue);
688
689         spin_lock(&kiq->ring_lock);
690         r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
691                                         adev->gfx.num_compute_rings +
692                                         kiq->pmf->set_resources_size);
693         if (r) {
694                 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
695                 spin_unlock(&kiq->ring_lock);
696                 return r;
697         }
698
699         kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
700         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
701                 j = i + xcc_id * adev->gfx.num_compute_rings;
702                 kiq->pmf->kiq_map_queues(kiq_ring,
703                                          &adev->gfx.compute_ring[j]);
704         }
705
706         r = amdgpu_ring_test_helper(kiq_ring);
707         spin_unlock(&kiq->ring_lock);
708         if (r)
709                 DRM_ERROR("KCQ enable failed\n");
710
711         return r;
712 }
713
714 int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
715 {
716         struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
717         struct amdgpu_ring *kiq_ring = &kiq->ring;
718         int r, i, j;
719
720         if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
721                 return -EINVAL;
722
723         amdgpu_device_flush_hdp(adev, NULL);
724
725         if (adev->mes.enable_legacy_queue_map) {
726                 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
727                         j = i + xcc_id * adev->gfx.num_gfx_rings;
728                         r = amdgpu_mes_map_legacy_queue(adev,
729                                                         &adev->gfx.gfx_ring[j]);
730                         if (r) {
731                                 DRM_ERROR("failed to map gfx queue\n");
732                                 return r;
733                         }
734                 }
735
736                 return 0;
737         }
738
739         spin_lock(&kiq->ring_lock);
740         /* No need to map kgq on the slave */
741         if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
742                 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
743                                                 adev->gfx.num_gfx_rings);
744                 if (r) {
745                         DRM_ERROR("Failed to lock KIQ (%d).\n", r);
746                         spin_unlock(&kiq->ring_lock);
747                         return r;
748                 }
749
750                 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
751                         j = i + xcc_id * adev->gfx.num_gfx_rings;
752                         kiq->pmf->kiq_map_queues(kiq_ring,
753                                                  &adev->gfx.gfx_ring[j]);
754                 }
755         }
756
757         r = amdgpu_ring_test_helper(kiq_ring);
758         spin_unlock(&kiq->ring_lock);
759         if (r)
760                 DRM_ERROR("KGQ enable failed\n");
761
762         return r;
763 }
764
765 /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
766  *
767  * @adev: amdgpu_device pointer
768  * @enable: true to enable the gfx off feature, false to disable it
769  *
770  * 1. The gfx off feature will be enabled by the gfx IP after gfx cg/pg is enabled.
771  * 2. Other clients can send a request to disable the gfx off feature; the request should be honored.
772  * 3. Other clients can cancel their request to disable the gfx off feature.
773  * 4. Other clients should not request enabling the gfx off feature before they have disabled it.
774  */
775
776 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
777 {
778         unsigned long delay = GFX_OFF_DELAY_ENABLE;
779
780         if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
781                 return;
782
783         mutex_lock(&adev->gfx.gfx_off_mutex);
784
785         if (enable) {
786                 /* If the count is already 0, it means there's an imbalance bug somewhere.
787                  * Note that the bug may be in a different caller than the one which triggers the
788                  * WARN_ON_ONCE.
789                  */
790                 if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
791                         goto unlock;
792
793                 adev->gfx.gfx_off_req_count--;
794
795                 if (adev->gfx.gfx_off_req_count == 0 &&
796                     !adev->gfx.gfx_off_state) {
797                         /* If going to s2idle, no need to wait */
798                         if (adev->in_s0ix) {
799                                 if (!amdgpu_dpm_set_powergating_by_smu(adev,
800                                                 AMD_IP_BLOCK_TYPE_GFX, true))
801                                         adev->gfx.gfx_off_state = true;
802                         } else {
803                                 schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
804                                               delay);
805                         }
806                 }
807         } else {
808                 if (adev->gfx.gfx_off_req_count == 0) {
809                         cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
810
811                         if (adev->gfx.gfx_off_state &&
812                             !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
813                                 adev->gfx.gfx_off_state = false;
814
815                                 if (adev->gfx.funcs->init_spm_golden) {
816                                         dev_dbg(adev->dev,
817                                                 "GFXOFF is disabled, re-init SPM golden settings\n");
818                                         amdgpu_gfx_init_spm_golden(adev);
819                                 }
820                         }
821                 }
822
823                 adev->gfx.gfx_off_req_count++;
824         }
825
826 unlock:
827         mutex_unlock(&adev->gfx.gfx_off_mutex);
828 }
829
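/*
 * Usage sketch (hypothetical caller, not part of this file): code that needs
 * the GFX block to stay powered brackets its register access with a
 * disable/enable pair, which maps onto the request counter handled above.
 */
static inline void amdgpu_gfx_off_usage_example(struct amdgpu_device *adev)
{
        amdgpu_gfx_off_ctrl(adev, false);       /* request that GFX off stays disabled */
        /* ... access GFX registers safely here ... */
        amdgpu_gfx_off_ctrl(adev, true);        /* drop the request; GFXOFF may re-arm after the delay */
}
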
830 int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
831 {
832         int r = 0;
833
834         mutex_lock(&adev->gfx.gfx_off_mutex);
835
836         r = amdgpu_dpm_set_residency_gfxoff(adev, value);
837
838         mutex_unlock(&adev->gfx.gfx_off_mutex);
839
840         return r;
841 }
842
843 int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
844 {
845         int r = 0;
846
847         mutex_lock(&adev->gfx.gfx_off_mutex);
848
849         r = amdgpu_dpm_get_residency_gfxoff(adev, value);
850
851         mutex_unlock(&adev->gfx.gfx_off_mutex);
852
853         return r;
854 }
855
856 int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
857 {
858         int r = 0;
859
860         mutex_lock(&adev->gfx.gfx_off_mutex);
861
862         r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
863
864         mutex_unlock(&adev->gfx.gfx_off_mutex);
865
866         return r;
867 }
868
869 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
870 {
871
872         int r = 0;
873
874         mutex_lock(&adev->gfx.gfx_off_mutex);
875
876         r = amdgpu_dpm_get_status_gfxoff(adev, value);
877
878         mutex_unlock(&adev->gfx.gfx_off_mutex);
879
880         return r;
881 }
882
883 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
884 {
885         int r;
886
887         if (amdgpu_ras_is_supported(adev, ras_block->block)) {
888                 if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
889                         r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
890                         if (r)
891                                 return r;
892                 }
893
894                 r = amdgpu_ras_block_late_init(adev, ras_block);
895                 if (r)
896                         return r;
897
898                 if (adev->gfx.cp_ecc_error_irq.funcs) {
899                         r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
900                         if (r)
901                                 goto late_fini;
902                 }
903         } else {
904                 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
905         }
906
907         return 0;
908 late_fini:
909         amdgpu_ras_block_late_fini(adev, ras_block);
910         return r;
911 }
912
913 int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
914 {
915         int err = 0;
916         struct amdgpu_gfx_ras *ras = NULL;
917
918         /* adev->gfx.ras being NULL means gfx does not
919          * support RAS, so do nothing here.
920          */
921         if (!adev->gfx.ras)
922                 return 0;
923
924         ras = adev->gfx.ras;
925
926         err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
927         if (err) {
928                 dev_err(adev->dev, "Failed to register gfx ras block!\n");
929                 return err;
930         }
931
932         strcpy(ras->ras_block.ras_comm.name, "gfx");
933         ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
934         ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
935         adev->gfx.ras_if = &ras->ras_block.ras_comm;
936
937         /* If no special ras_late_init function is defined, use the default gfx ras_late_init */
938         if (!ras->ras_block.ras_late_init)
939                 ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
940
941         /* If no special ras_cb function is defined, use the default ras_cb */
942         if (!ras->ras_block.ras_cb)
943                 ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
944
945         return 0;
946 }
947
948 int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
949                                                 struct amdgpu_iv_entry *entry)
950 {
951         if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
952                 return adev->gfx.ras->poison_consumption_handler(adev, entry);
953
954         return 0;
955 }
956
957 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
958                 void *err_data,
959                 struct amdgpu_iv_entry *entry)
960 {
961         /* TODO: a UE (uncorrectable error) will trigger an interrupt.
962          *
963          * When "Full RAS" is enabled, the per-IP interrupt sources should
964          * be disabled and the driver should only look for the aggregated
965          * interrupt via sync flood.
966          */
967         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
968                 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
969                 if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
970                     adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
971                         adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
972                 amdgpu_ras_reset_gpu(adev);
973         }
974         return AMDGPU_RAS_SUCCESS;
975 }
976
977 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
978                                   struct amdgpu_irq_src *source,
979                                   struct amdgpu_iv_entry *entry)
980 {
981         struct ras_common_if *ras_if = adev->gfx.ras_if;
982         struct ras_dispatch_if ih_data = {
983                 .entry = entry,
984         };
985
986         if (!ras_if)
987                 return 0;
988
989         ih_data.head = *ras_if;
990
991         DRM_ERROR("CP ECC ERROR IRQ\n");
992         amdgpu_ras_interrupt_dispatch(adev, &ih_data);
993         return 0;
994 }
995
996 void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
997                 void *ras_error_status,
998                 void (*func)(struct amdgpu_device *adev, void *ras_error_status,
999                                 int xcc_id))
1000 {
1001         int i;
1002         int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
1003         uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
1004         struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
1005
1006         if (err_data) {
1007                 err_data->ue_count = 0;
1008                 err_data->ce_count = 0;
1009         }
1010
1011         for_each_inst(i, xcc_mask)
1012                 func(adev, ras_error_status, i);
1013 }
1014
1015 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
1016 {
1017         signed long r, cnt = 0;
1018         unsigned long flags;
1019         uint32_t seq, reg_val_offs = 0, value = 0;
1020         struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1021         struct amdgpu_ring *ring = &kiq->ring;
1022
1023         if (amdgpu_device_skip_hw_access(adev))
1024                 return 0;
1025
1026         if (adev->mes.ring[0].sched.ready)
1027                 return amdgpu_mes_rreg(adev, reg);
1028
1029         BUG_ON(!ring->funcs->emit_rreg);
1030
1031         spin_lock_irqsave(&kiq->ring_lock, flags);
1032         if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
1033                 pr_err("critical bug! too many kiq readers\n");
1034                 goto failed_unlock;
1035         }
1036         r = amdgpu_ring_alloc(ring, 32);
1037         if (r)
1038                 goto failed_unlock;
1039
1040         amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
1041         r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1042         if (r)
1043                 goto failed_undo;
1044
1045         amdgpu_ring_commit(ring);
1046         spin_unlock_irqrestore(&kiq->ring_lock, flags);
1047
1048         r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1049
1050         /* Don't wait any longer in the GPU reset case because doing so may
1051          * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
1052          * is triggered from TTM and ttm_bo_lock_delayed_workqueue() will
1053          * never return if we keep waiting in virt_kiq_rreg, which causes
1054          * gpu_recover() to hang there.
1055          *
1056          * Also don't wait any longer when called from IRQ context.
1057          */
1058         if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1059                 goto failed_kiq_read;
1060
1061         might_sleep();
1062         while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1063                 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1064                 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1065         }
1066
1067         if (cnt > MAX_KIQ_REG_TRY)
1068                 goto failed_kiq_read;
1069
1070         mb();
1071         value = adev->wb.wb[reg_val_offs];
1072         amdgpu_device_wb_free(adev, reg_val_offs);
1073         return value;
1074
1075 failed_undo:
1076         amdgpu_ring_undo(ring);
1077 failed_unlock:
1078         spin_unlock_irqrestore(&kiq->ring_lock, flags);
1079 failed_kiq_read:
1080         if (reg_val_offs)
1081                 amdgpu_device_wb_free(adev, reg_val_offs);
1082         dev_err(adev->dev, "failed to read reg:%x\n", reg);
1083         return ~0;
1084 }
1085
1086 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
1087 {
1088         signed long r, cnt = 0;
1089         unsigned long flags;
1090         uint32_t seq;
1091         struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1092         struct amdgpu_ring *ring = &kiq->ring;
1093
1094         BUG_ON(!ring->funcs->emit_wreg);
1095
1096         if (amdgpu_device_skip_hw_access(adev))
1097                 return;
1098
1099         if (adev->mes.ring[0].sched.ready) {
1100                 amdgpu_mes_wreg(adev, reg, v);
1101                 return;
1102         }
1103
1104         spin_lock_irqsave(&kiq->ring_lock, flags);
1105         r = amdgpu_ring_alloc(ring, 32);
1106         if (r)
1107                 goto failed_unlock;
1108
1109         amdgpu_ring_emit_wreg(ring, reg, v);
1110         r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1111         if (r)
1112                 goto failed_undo;
1113
1114         amdgpu_ring_commit(ring);
1115         spin_unlock_irqrestore(&kiq->ring_lock, flags);
1116
1117         r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1118
1119         /* Don't wait any longer in the GPU reset case because doing so may
1120          * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
1121          * is triggered from TTM and ttm_bo_lock_delayed_workqueue() will
1122          * never return if we keep waiting in virt_kiq_rreg, which causes
1123          * gpu_recover() to hang there.
1124          *
1125          * Also don't wait any longer when called from IRQ context.
1126          */
1127         if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1128                 goto failed_kiq_write;
1129
1130         might_sleep();
1131         while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1132
1133                 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1134                 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1135         }
1136
1137         if (cnt > MAX_KIQ_REG_TRY)
1138                 goto failed_kiq_write;
1139
1140         return;
1141
1142 failed_undo:
1143         amdgpu_ring_undo(ring);
1144 failed_unlock:
1145         spin_unlock_irqrestore(&kiq->ring_lock, flags);
1146 failed_kiq_write:
1147         dev_err(adev->dev, "failed to write reg:%x\n", reg);
1148 }
1149
1150 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
1151 {
1152         if (amdgpu_num_kcq == -1) {
1153                 return 8;
1154         } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
1155                 dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
1156                 return 8;
1157         }
1158         return amdgpu_num_kcq;
1159 }
1160
1161 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
1162                                   uint32_t ucode_id)
1163 {
1164         const struct gfx_firmware_header_v1_0 *cp_hdr;
1165         const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
1166         struct amdgpu_firmware_info *info = NULL;
1167         const struct firmware *ucode_fw;
1168         unsigned int fw_size;
1169
1170         switch (ucode_id) {
1171         case AMDGPU_UCODE_ID_CP_PFP:
1172                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1173                         adev->gfx.pfp_fw->data;
1174                 adev->gfx.pfp_fw_version =
1175                         le32_to_cpu(cp_hdr->header.ucode_version);
1176                 adev->gfx.pfp_feature_version =
1177                         le32_to_cpu(cp_hdr->ucode_feature_version);
1178                 ucode_fw = adev->gfx.pfp_fw;
1179                 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1180                 break;
1181         case AMDGPU_UCODE_ID_CP_RS64_PFP:
1182                 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1183                         adev->gfx.pfp_fw->data;
1184                 adev->gfx.pfp_fw_version =
1185                         le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1186                 adev->gfx.pfp_feature_version =
1187                         le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1188                 ucode_fw = adev->gfx.pfp_fw;
1189                 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1190                 break;
1191         case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
1192         case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
1193                 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1194                         adev->gfx.pfp_fw->data;
1195                 ucode_fw = adev->gfx.pfp_fw;
1196                 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1197                 break;
1198         case AMDGPU_UCODE_ID_CP_ME:
1199                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1200                         adev->gfx.me_fw->data;
1201                 adev->gfx.me_fw_version =
1202                         le32_to_cpu(cp_hdr->header.ucode_version);
1203                 adev->gfx.me_feature_version =
1204                         le32_to_cpu(cp_hdr->ucode_feature_version);
1205                 ucode_fw = adev->gfx.me_fw;
1206                 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1207                 break;
1208         case AMDGPU_UCODE_ID_CP_RS64_ME:
1209                 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1210                         adev->gfx.me_fw->data;
1211                 adev->gfx.me_fw_version =
1212                         le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1213                 adev->gfx.me_feature_version =
1214                         le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1215                 ucode_fw = adev->gfx.me_fw;
1216                 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1217                 break;
1218         case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
1219         case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
1220                 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1221                         adev->gfx.me_fw->data;
1222                 ucode_fw = adev->gfx.me_fw;
1223                 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1224                 break;
1225         case AMDGPU_UCODE_ID_CP_CE:
1226                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1227                         adev->gfx.ce_fw->data;
1228                 adev->gfx.ce_fw_version =
1229                         le32_to_cpu(cp_hdr->header.ucode_version);
1230                 adev->gfx.ce_feature_version =
1231                         le32_to_cpu(cp_hdr->ucode_feature_version);
1232                 ucode_fw = adev->gfx.ce_fw;
1233                 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1234                 break;
1235         case AMDGPU_UCODE_ID_CP_MEC1:
1236                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1237                         adev->gfx.mec_fw->data;
1238                 adev->gfx.mec_fw_version =
1239                         le32_to_cpu(cp_hdr->header.ucode_version);
1240                 adev->gfx.mec_feature_version =
1241                         le32_to_cpu(cp_hdr->ucode_feature_version);
1242                 ucode_fw = adev->gfx.mec_fw;
1243                 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1244                           le32_to_cpu(cp_hdr->jt_size) * 4;
1245                 break;
1246         case AMDGPU_UCODE_ID_CP_MEC1_JT:
1247                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1248                         adev->gfx.mec_fw->data;
1249                 ucode_fw = adev->gfx.mec_fw;
1250                 fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1251                 break;
1252         case AMDGPU_UCODE_ID_CP_MEC2:
1253                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1254                         adev->gfx.mec2_fw->data;
1255                 adev->gfx.mec2_fw_version =
1256                         le32_to_cpu(cp_hdr->header.ucode_version);
1257                 adev->gfx.mec2_feature_version =
1258                         le32_to_cpu(cp_hdr->ucode_feature_version);
1259                 ucode_fw = adev->gfx.mec2_fw;
1260                 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1261                           le32_to_cpu(cp_hdr->jt_size) * 4;
1262                 break;
1263         case AMDGPU_UCODE_ID_CP_MEC2_JT:
1264                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1265                         adev->gfx.mec2_fw->data;
1266                 ucode_fw = adev->gfx.mec2_fw;
1267                 fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1268                 break;
1269         case AMDGPU_UCODE_ID_CP_RS64_MEC:
1270                 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1271                         adev->gfx.mec_fw->data;
1272                 adev->gfx.mec_fw_version =
1273                         le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1274                 adev->gfx.mec_feature_version =
1275                         le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1276                 ucode_fw = adev->gfx.mec_fw;
1277                 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1278                 break;
1279         case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
1280         case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
1281         case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
1282         case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
1283                 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1284                         adev->gfx.mec_fw->data;
1285                 ucode_fw = adev->gfx.mec_fw;
1286                 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1287                 break;
1288         default:
1289                 dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
1290                 return;
1291         }
1292
1293         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1294                 info = &adev->firmware.ucode[ucode_id];
1295                 info->ucode_id = ucode_id;
1296                 info->fw = ucode_fw;
1297                 adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
1298         }
1299 }
1300
1301 bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
1302 {
1303         return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
1304                         adev->gfx.num_xcc_per_xcp : 1));
1305 }
1306
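/*
 * Worked example (illustrative): with num_xcc_per_xcp == 2, XCCs 0, 2, 4, ...
 * are partition masters, and only those instances map/unmap the KGQs in the
 * helpers above.
 */
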
1307 static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
1308                                                 struct device_attribute *addr,
1309                                                 char *buf)
1310 {
1311         struct drm_device *ddev = dev_get_drvdata(dev);
1312         struct amdgpu_device *adev = drm_to_adev(ddev);
1313         int mode;
1314
1315         mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
1316                                                AMDGPU_XCP_FL_NONE);
1317
1318         return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
1319 }
1320
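/*
 * Note on the mode names parsed below (descriptive, based on how these modes
 * are used elsewhere in amdgpu): SPX keeps all XCCs in one partition,
 * DPX/TPX/QPX split them into two/three/four partitions, and CPX gives each
 * XCC its own partition.
 */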
1321 static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
1322                                                 struct device_attribute *addr,
1323                                                 const char *buf, size_t count)
1324 {
1325         struct drm_device *ddev = dev_get_drvdata(dev);
1326         struct amdgpu_device *adev = drm_to_adev(ddev);
1327         enum amdgpu_gfx_partition mode;
1328         int ret = 0, num_xcc;
1329
1330         num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1331         if (num_xcc % 2 != 0)
1332                 return -EINVAL;
1333
1334         if (!strncasecmp("SPX", buf, strlen("SPX"))) {
1335                 mode = AMDGPU_SPX_PARTITION_MODE;
1336         } else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
1337                 /*
1338                  * DPX mode needs the number of AIDs to be a multiple of 2.
1339                  * Each AID connects 2 XCCs.
1340                  */
1341                 if (num_xcc % 4)
1342                         return -EINVAL;
1343                 mode = AMDGPU_DPX_PARTITION_MODE;
1344         } else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
1345                 if (num_xcc != 6)
1346                         return -EINVAL;
1347                 mode = AMDGPU_TPX_PARTITION_MODE;
1348         } else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
1349                 if (num_xcc != 8)
1350                         return -EINVAL;
1351                 mode = AMDGPU_QPX_PARTITION_MODE;
1352         } else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
1353                 mode = AMDGPU_CPX_PARTITION_MODE;
1354         } else {
1355                 return -EINVAL;
1356         }
1357
1358         ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
1359
1360         if (ret)
1361                 return ret;
1362
1363         return count;
1364 }
1365
1366 static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
1367                                                 struct device_attribute *attr,
1368                                                 char *buf)
1369 {
1370         struct drm_device *ddev = dev_get_drvdata(dev);
1371         struct amdgpu_device *adev = drm_to_adev(ddev);
1372         char *supported_partition;
1373
1374         /* TBD */
1375         switch (NUM_XCC(adev->gfx.xcc_mask)) {
1376         case 8:
1377                 supported_partition = "SPX, DPX, QPX, CPX";
1378                 break;
1379         case 6:
1380                 supported_partition = "SPX, TPX, CPX";
1381                 break;
1382         case 4:
1383                 supported_partition = "SPX, DPX, CPX";
1384                 break;
1385         /* this configuration seems to exist only in the emulation phase */
1386         case 2:
1387                 supported_partition = "SPX, CPX";
1388                 break;
1389         default:
1390                 supported_partition = "Not supported";
1391                 break;
1392         }
1393
1394         return sysfs_emit(buf, "%s\n", supported_partition);
1395 }
1396
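/**
 * amdgpu_gfx_run_cleaner_shader_job - submit a cleaner shader job on a ring
 * @ring: the compute ring to submit the job to
 *
 * Creates a temporary scheduler entity on @ring, submits a NOP-filled IB with
 * job->enforce_isolation set (so the cleaner shader is emitted as part of the
 * submission), waits for the job's fence and then tears the entity down again.
 *
 * Returns 0 on success, a negative error code otherwise.
 */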
1397 static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
1398 {
1399         struct amdgpu_device *adev = ring->adev;
1400         struct drm_gpu_scheduler *sched = &ring->sched;
1401         struct drm_sched_entity entity;
1402         struct dma_fence *f;
1403         struct amdgpu_job *job;
1404         struct amdgpu_ib *ib;
1405         int i, r;
1406
1407         /* Initialize the scheduler entity */
1408         r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
1409                                   &sched, 1, NULL);
1410         if (r) {
1411                 dev_err(adev->dev, "Failed to set up GFX kernel entity.\n");
1412                 return r;
1413         }
1414
1415         r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
1416                                      64, 0,
1417                                      &job);
1418         if (r)
1419                 goto err;
1420
1421         job->enforce_isolation = true;
1422
1423         ib = &job->ibs[0];
1424         for (i = 0; i <= ring->funcs->align_mask; ++i)
1425                 ib->ptr[i] = ring->funcs->nop;
1426         ib->length_dw = ring->funcs->align_mask + 1;
1427
1428         f = amdgpu_job_submit(job);
1429
1430         r = dma_fence_wait(f, false);
1431         dma_fence_put(f);
1432         if (r)
1433                 goto err;
1434
1435         /* Clean up the scheduler entity */
1436         drm_sched_entity_destroy(&entity);
1437         return 0;
1438
1439 err:
1440         drm_sched_entity_destroy(&entity);
1441         return r;
1442 }
1443
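/**
 * amdgpu_gfx_run_cleaner_shader - run the cleaner shader on a compute partition
 * @adev: amdgpu_device pointer
 * @xcp_id: the partition to run the cleaner shader on
 *
 * Submits one cleaner shader job on a ready compute ring of every XCC that
 * belongs to @xcp_id.
 *
 * Returns 0 on success, -ENOENT if no suitable ring was found for one of the
 * XCCs, or the error returned by the job submission.
 */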
1444 static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
1445 {
1446         int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1447         struct amdgpu_ring *ring;
1448         int num_xcc_to_clear;
1449         int i, r, xcc_id;
1450
1451         if (adev->gfx.num_xcc_per_xcp)
1452                 num_xcc_to_clear = adev->gfx.num_xcc_per_xcp;
1453         else
1454                 num_xcc_to_clear = 1;
1455
1456         for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1457                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1458                         ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
1459                         if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
1460                                 r = amdgpu_gfx_run_cleaner_shader_job(ring);
1461                                 if (r)
1462                                         return r;
1463                                 num_xcc_to_clear--;
1464                                 break;
1465                         }
1466                 }
1467         }
1468
1469         if (num_xcc_to_clear)
1470                 return -ENOENT;
1471
1472         return 0;
1473 }
1474
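/**
 * amdgpu_gfx_set_run_cleaner_shader - store callback for manually running the cleaner shader
 * @dev: device pointer
 * @attr: device attribute (unused)
 * @buf: partition index to run the cleaner shader on
 * @count: size of @buf
 *
 * Parses the requested partition index, validates it against the number of
 * partitions, takes a runtime PM reference and runs the cleaner shader on
 * that partition. Illustrative usage on a single-partition device (the card
 * index depends on the system):
 *
 *   echo 0 > /sys/class/drm/card0/device/run_cleaner_shader
 *
 * Returns @count on success, a negative error code otherwise.
 */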
1475 static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
1476                                                  struct device_attribute *attr,
1477                                                  const char *buf,
1478                                                  size_t count)
1479 {
1480         struct drm_device *ddev = dev_get_drvdata(dev);
1481         struct amdgpu_device *adev = drm_to_adev(ddev);
1482         int ret;
1483         long value;
1484
1485         if (amdgpu_in_reset(adev))
1486                 return -EPERM;
1487         if (adev->in_suspend && !adev->in_runpm)
1488                 return -EPERM;
1489
1490         ret = kstrtol(buf, 0, &value);
1491
1492         if (ret)
1493                 return -EINVAL;
1494
1495         if (value < 0)
1496                 return -EINVAL;
1497
1498         if (adev->xcp_mgr) {
1499                 if (value >= adev->xcp_mgr->num_xcps)
1500                         return -EINVAL;
1501         } else {
1502                 if (value > 1)
1503                         return -EINVAL;
1504         }
1505
1506         ret = pm_runtime_get_sync(ddev->dev);
1507         if (ret < 0) {
1508                 pm_runtime_put_autosuspend(ddev->dev);
1509                 return ret;
1510         }
1511
1512         ret = amdgpu_gfx_run_cleaner_shader(adev, value);
1513
1514         pm_runtime_mark_last_busy(ddev->dev);
1515         pm_runtime_put_autosuspend(ddev->dev);
1516
1517         if (ret)
1518                 return ret;
1519
1520         return count;
1521 }
1522
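/**
 * amdgpu_gfx_get_enforce_isolation - show the current enforce_isolation settings
 * @dev: device pointer
 * @attr: device attribute (unused)
 * @buf: output buffer
 *
 * Sysfs show callback for the enforce_isolation attribute. Prints one value
 * (0 or 1) per partition, separated by spaces, or a single value on devices
 * without an XCP manager.
 *
 * Returns the number of bytes written to @buf.
 */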
1523 static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
1524                                                 struct device_attribute *attr,
1525                                                 char *buf)
1526 {
1527         struct drm_device *ddev = dev_get_drvdata(dev);
1528         struct amdgpu_device *adev = drm_to_adev(ddev);
1529         int i;
1530         ssize_t size = 0;
1531
1532         if (adev->xcp_mgr) {
1533                 for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
1534                         size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
1535                         if (i < (adev->xcp_mgr->num_xcps - 1))
1536                                 size += sysfs_emit_at(buf, size, " ");
1537                 }
1538                 buf[size++] = '\n';
1539         } else {
1540                 size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
1541         }
1542
1543         return size;
1544 }
1545
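/**
 * amdgpu_gfx_set_enforce_isolation - store callback for the enforce_isolation setting
 * @dev: device pointer
 * @attr: device attribute (unused)
 * @buf: space-separated list of 0/1 values, one per partition
 * @count: size of @buf
 *
 * Parses one value per partition and enables or disables process isolation
 * accordingly, reserving (or releasing) a VMID for each partition whose
 * setting changes. The number of values must match the number of partitions.
 * Illustrative usage on a device with four partitions (the card index depends
 * on the system):
 *
 *   echo "1 0 1 0" > /sys/class/drm/card0/device/enforce_isolation
 *
 * Returns @count on success, -EINVAL on malformed input.
 */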
1546 static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
1547                                                 struct device_attribute *attr,
1548                                                 const char *buf, size_t count)
1549 {
1550         struct drm_device *ddev = dev_get_drvdata(dev);
1551         struct amdgpu_device *adev = drm_to_adev(ddev);
1552         long partition_values[MAX_XCP] = {0};
1553         int ret, i, num_partitions;
1554         const char *input_buf = buf;
1555
1556         for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
1557                 ret = sscanf(input_buf, "%ld", &partition_values[i]);
1558                 if (ret <= 0)
1559                         break;
1560
1561                 /* Move the pointer to the next value in the string */
1562                 input_buf = strchr(input_buf, ' ');
1563                 if (input_buf) {
1564                         input_buf++;
1565                 } else {
1566                         i++;
1567                         break;
1568                 }
1569         }
1570         num_partitions = i;
1571
1572         if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
1573                 return -EINVAL;
1574
1575         if (!adev->xcp_mgr && num_partitions != 1)
1576                 return -EINVAL;
1577
1578         for (i = 0; i < num_partitions; i++) {
1579                 if (partition_values[i] != 0 && partition_values[i] != 1)
1580                         return -EINVAL;
1581         }
1582
1583         mutex_lock(&adev->enforce_isolation_mutex);
1584
1585         for (i = 0; i < num_partitions; i++) {
1586                 if (adev->enforce_isolation[i] && !partition_values[i]) {
1587                         /* Going from enabled to disabled */
1588                         amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
1589                 } else if (!adev->enforce_isolation[i] && partition_values[i]) {
1590                         /* Going from disabled to enabled */
1591                         amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
1592                 }
1593                 adev->enforce_isolation[i] = partition_values[i];
1594         }
1595
1596         mutex_unlock(&adev->enforce_isolation_mutex);
1597
1598         return count;
1599 }
1600
1601 static DEVICE_ATTR(run_cleaner_shader, 0200,
1602                    NULL, amdgpu_gfx_set_run_cleaner_shader);
1603
1604 static DEVICE_ATTR(enforce_isolation, 0644,
1605                    amdgpu_gfx_get_enforce_isolation,
1606                    amdgpu_gfx_set_enforce_isolation);
1607
1608 static DEVICE_ATTR(current_compute_partition, 0644,
1609                    amdgpu_gfx_get_current_compute_partition,
1610                    amdgpu_gfx_set_compute_partition);
1611
1612 static DEVICE_ATTR(available_compute_partition, 0444,
1613                    amdgpu_gfx_get_available_compute_partition, NULL);
1614
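/**
 * amdgpu_gfx_sysfs_init - create the compute partition sysfs attributes
 * @adev: amdgpu_device pointer
 *
 * Creates the current_compute_partition and available_compute_partition
 * device attributes.
 *
 * Returns 0 on success, a negative error code otherwise.
 */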
1615 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
1616 {
1617         int r;
1618
1619         r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
1620         if (r)
1621                 return r;
1622
1623         r = device_create_file(adev->dev, &dev_attr_available_compute_partition);
1624
1625         return r;
1626 }
1627
1628 void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
1629 {
1630         device_remove_file(adev->dev, &dev_attr_current_compute_partition);
1631         device_remove_file(adev->dev, &dev_attr_available_compute_partition);
1632 }
1633
1634 int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
1635 {
1636         int r;
1637
1638         if (!amdgpu_sriov_vf(adev)) {
1639                 r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
1640                 if (r)
1641                         return r;
1642         }
1643
1644         r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
1645         if (r)
1646                 return r;
1647
1648         return 0;
1649 }
1650
1651 void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
1652 {
1653         if (!amdgpu_sriov_vf(adev))
1654                 device_remove_file(adev->dev, &dev_attr_enforce_isolation);
1655         device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
1656 }
1657
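/**
 * amdgpu_gfx_cleaner_shader_sw_init - allocate the cleaner shader buffer object
 * @adev: amdgpu_device pointer
 * @cleaner_shader_size: size of the cleaner shader binary in bytes
 *
 * Allocates a kernel buffer object in VRAM or GTT to hold the cleaner shader
 * and maps it for CPU access.
 *
 * Returns 0 on success, -EOPNOTSUPP if the cleaner shader is not enabled for
 * this device, or the error returned by the allocation.
 */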
1658 int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
1659                                       unsigned int cleaner_shader_size)
1660 {
1661         if (!adev->gfx.enable_cleaner_shader)
1662                 return -EOPNOTSUPP;
1663
1664         return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
1665                                        AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
1666                                        &adev->gfx.cleaner_shader_obj,
1667                                        &adev->gfx.cleaner_shader_gpu_addr,
1668                                        (void **)&adev->gfx.cleaner_shader_cpu_ptr);
1669 }
1670
1671 void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
1672 {
1673         if (!adev->gfx.enable_cleaner_shader)
1674                 return;
1675
1676         amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj,
1677                               &adev->gfx.cleaner_shader_gpu_addr,
1678                               (void **)&adev->gfx.cleaner_shader_cpu_ptr);
1679 }
1680
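/**
 * amdgpu_gfx_cleaner_shader_init - upload the cleaner shader binary
 * @adev: amdgpu_device pointer
 * @cleaner_shader_size: size of the cleaner shader binary in bytes
 * @cleaner_shader_ptr: pointer to the cleaner shader binary
 *
 * Copies the cleaner shader binary into the buffer object allocated by
 * amdgpu_gfx_cleaner_shader_sw_init().
 */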
1681 void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
1682                                     unsigned int cleaner_shader_size,
1683                                     const void *cleaner_shader_ptr)
1684 {
1685         if (!adev->gfx.enable_cleaner_shader)
1686                 return;
1687
1688         if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
1689                 memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
1690                             cleaner_shader_size);
1691 }
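
/*
 * Illustrative call sequence (not part of this file): a GFX IP block that
 * supports the cleaner shader would typically wire the helpers above into
 * its sw_init callback, roughly as sketched below. The symbol
 * gfx_vX_0_cleaner_shader_hex is a placeholder for the per-ASIC binary.
 *
 *   adev->gfx.enable_cleaner_shader = true;
 *   r = amdgpu_gfx_cleaner_shader_sw_init(adev,
 *                                         sizeof(gfx_vX_0_cleaner_shader_hex));
 *   if (r)
 *           adev->gfx.enable_cleaner_shader = false;
 *   else
 *           amdgpu_gfx_cleaner_shader_init(adev,
 *                                          sizeof(gfx_vX_0_cleaner_shader_hex),
 *                                          gfx_vX_0_cleaner_shader_hex);
 */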
1692
1693 /**
1694  * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver)
1695  * @adev: amdgpu_device pointer
1696  * @idx: Index of the scheduler to control
1697  * @enable: Whether to enable or disable the KFD scheduler
1698  *
1699  * This function is used to control the KFD (Kernel Fusion Driver) scheduler
1700  * from the KGD. It is part of the cleaner shader feature. This function plays
1701  * a key role in enforcing process isolation on the GPU.
1702  *
1703  * The function uses a reference count mechanism (kfd_sch_req_count) to keep
1704  * track of the number of requests to enable the KFD scheduler. When a request
1705  * to enable the KFD scheduler is made, the reference count is decremented.
1706  * When the reference count reaches zero and the KFD scheduler is currently
1707  * inactive, the delayed isolation work is scheduled to run after GFX_SLICE_PERIOD.
1708  *
1709  * When a request to disable the KFD scheduler is made, the function first
1710  * checks if the reference count is zero. If it is, it cancels the delayed work
1711  * for enforcing isolation and checks if the KFD scheduler is active. If the
1712  * KFD scheduler is active, it sends a request to stop the KFD scheduler and
1713  * sets the KFD scheduler state to inactive. Then, it increments the reference
1714  * count.
1715  *
1716  * The function is synchronized using the kfd_sch_mutex to ensure that the KFD
1717  * scheduler state and reference count are updated atomically.
1718  *
1719  * Note: If the reference count is already zero when a request to enable the
1720  * KFD scheduler is made, it means there's an imbalance bug somewhere. The
1721  * function triggers a warning in this case.
1722  */
1723 static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
1724                                     bool enable)
1725 {
1726         mutex_lock(&adev->gfx.kfd_sch_mutex);
1727
1728         if (enable) {
1729                 /* If the count is already 0, it means there's an imbalance bug somewhere.
1730                  * Note that the bug may be in a different caller than the one which triggers the
1731                  * WARN_ON_ONCE.
1732                  */
1733                 if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
1734                         dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
1735                         goto unlock;
1736                 }
1737
1738                 adev->gfx.kfd_sch_req_count[idx]--;
1739
1740                 if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
1741                     adev->gfx.kfd_sch_inactive[idx]) {
1742                         schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
1743                                               GFX_SLICE_PERIOD);
1744                 }
1745         } else {
1746                 if (adev->gfx.kfd_sch_req_count[idx] == 0) {
1747                         cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
1748                         if (!adev->gfx.kfd_sch_inactive[idx]) {
1749                                 amdgpu_amdkfd_stop_sched(adev, idx);
1750                                 adev->gfx.kfd_sch_inactive[idx] = true;
1751                         }
1752                 }
1753
1754                 adev->gfx.kfd_sch_req_count[idx]++;
1755         }
1756
1757 unlock:
1758         mutex_unlock(&adev->gfx.kfd_sch_mutex);
1759 }
1760
1761 /**
1762  * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
1763  *
1764  * @work: work_struct.
1765  *
1766  * This function is the work handler for enforcing shader isolation on AMD GPUs.
1767  * It counts the fences still outstanding on the GFX and compute rings that
1768  * belong to the partition. If any fences remain, it reschedules itself to run
1769  * again after a delay of GFX_SLICE_PERIOD. If no fences remain, it signals the
1770  * Kernel Fusion Driver (KFD) to resume its runqueue. The function is
1771  * synchronized using the enforce_isolation_mutex.
1772  */
1773 void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
1774 {
1775         struct amdgpu_isolation_work *isolation_work =
1776                 container_of(work, struct amdgpu_isolation_work, work.work);
1777         struct amdgpu_device *adev = isolation_work->adev;
1778         u32 i, idx, fences = 0;
1779
1780         if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)
1781                 idx = 0;
1782         else
1783                 idx = isolation_work->xcp_id;
1784
1785         if (idx >= MAX_XCP)
1786                 return;
1787
1788         mutex_lock(&adev->enforce_isolation_mutex);
1789         for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
1790                 if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
1791                         fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
1792         }
1793         for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
1794                 if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
1795                         fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
1796         }
1797         if (fences) {
1798                 schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
1799                                       GFX_SLICE_PERIOD);
1800         } else {
1801                 /* Tell KFD to resume the runqueue */
1802                 if (adev->kfd.init_complete) {
1803                         WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
1804                         WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
1805                         amdgpu_amdkfd_start_sched(adev, idx);
1806                         adev->gfx.kfd_sch_inactive[idx] = false;
1807                 }
1808         }
1809         mutex_unlock(&adev->enforce_isolation_mutex);
1810 }
1811
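/**
 * amdgpu_gfx_enforce_isolation_ring_begin_use - KGD hook before a ring is used
 * @ring: the ring that is about to be used
 *
 * If process isolation is enforced for the partition the ring belongs to,
 * ask the KFD to stop its scheduler (via amdgpu_gfx_kfd_sch_ctrl) before the
 * KGD submits work to the ring.
 */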
1812 void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
1813 {
1814         struct amdgpu_device *adev = ring->adev;
1815         u32 idx;
1816
1817         if (!adev->gfx.enable_cleaner_shader)
1818                 return;
1819
1820         if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
1821                 idx = 0;
1822         else
1823                 idx = ring->xcp_id;
1824
1825         if (idx >= MAX_XCP)
1826                 return;
1827
1828         mutex_lock(&adev->enforce_isolation_mutex);
1829         if (adev->enforce_isolation[idx]) {
1830                 if (adev->kfd.init_complete)
1831                         amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
1832         }
1833         mutex_unlock(&adev->enforce_isolation_mutex);
1834 }
1835
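/**
 * amdgpu_gfx_enforce_isolation_ring_end_use - KGD hook after a ring has been used
 * @ring: the ring that was used
 *
 * Drops the KFD scheduler stop request taken in
 * amdgpu_gfx_enforce_isolation_ring_begin_use(). Once all requests are
 * dropped, the delayed isolation work re-enables the KFD scheduler.
 */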
1836 void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
1837 {
1838         struct amdgpu_device *adev = ring->adev;
1839         u32 idx;
1840
1841         if (!adev->gfx.enable_cleaner_shader)
1842                 return;
1843
1844         if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
1845                 idx = 0;
1846         else
1847                 idx = ring->xcp_id;
1848
1849         if (idx >= MAX_XCP)
1850                 return;
1851
1852         mutex_lock(&adev->enforce_isolation_mutex);
1853         if (adev->enforce_isolation[idx]) {
1854                 if (adev->kfd.init_complete)
1855                         amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
1856         }
1857         mutex_unlock(&adev->enforce_isolation_mutex);
1858 }