2 * Copyright 2014 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/firmware.h>
27 #include <linux/pm_runtime.h>
30 #include "amdgpu_gfx.h"
31 #include "amdgpu_rlc.h"
32 #include "amdgpu_ras.h"
33 #include "amdgpu_reset.h"
34 #include "amdgpu_xcp.h"
35 #include "amdgpu_xgmi.h"
37 /* delay 0.1 second to enable gfx off feature */
38 #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
40 #define GFX_OFF_NO_DELAY 0
43 * GPU GFX IP block helper functions.
46 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
51 bit += mec * adev->gfx.mec.num_pipe_per_mec
52 * adev->gfx.mec.num_queue_per_pipe;
53 bit += pipe * adev->gfx.mec.num_queue_per_pipe;
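/*
 * Worked example of the (mec, pipe, queue) -> bit flattening above, assuming
 * 4 pipes per MEC and 8 queues per pipe (illustrative values only, not
 * guaranteed for every ASIC):
 *
 *   bit = mec * (4 * 8) + pipe * 8 + queue
 *   (mec 1, pipe 2, queue 3) -> 1 * 32 + 2 * 8 + 3 = bit 51
 *
 * amdgpu_queue_mask_bit_to_mec_queue() below is the exact inverse of this
 * mapping.
 */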
59 void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
60 int *mec, int *pipe, int *queue)
62 *queue = bit % adev->gfx.mec.num_queue_per_pipe;
63 *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
64 % adev->gfx.mec.num_pipe_per_mec;
65 *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
66 / adev->gfx.mec.num_pipe_per_mec;
70 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
71 int xcc_id, int mec, int pipe, int queue)
73 return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
74 adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
77 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
78 int me, int pipe, int queue)
82 bit += me * adev->gfx.me.num_pipe_per_me
83 * adev->gfx.me.num_queue_per_pipe;
84 bit += pipe * adev->gfx.me.num_queue_per_pipe;
90 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
91 int me, int pipe, int queue)
93 return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
94 adev->gfx.me.queue_bitmap);
98 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
100 * @mask: array in which the per-shader array disable masks will be stored
101 * @max_se: number of SEs
102 * @max_sh: number of SHs
104 * The bitmask of CUs to be disabled in the shader array determined by se and
105 * sh is stored in mask[se * max_sh + sh].
107 void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
109 unsigned int se, sh, cu;
112 memset(mask, 0, sizeof(*mask) * max_se * max_sh);
114 if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
117 p = amdgpu_disable_cu;
120 int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
123 DRM_ERROR("amdgpu: could not parse disable_cu\n");
127 if (se < max_se && sh < max_sh && cu < 16) {
128 DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
129 mask[se * max_sh + sh] |= 1u << cu;
131 DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
135 next = strchr(p, ',');
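/*
 * Example of the expected parameter format, derived from the parsing above
 * (values are illustrative): amdgpu.disable_cu="0.0.4,1.1.7" sets bit 4 in
 * mask[0 * max_sh + 0] and bit 7 in mask[1 * max_sh + 1], i.e. it disables
 * CU 4 in SE0/SH0 and CU 7 in SE1/SH1.
 */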
142 static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
144 return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
147 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
149 if (amdgpu_compute_multipipe != -1) {
150 DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
151 amdgpu_compute_multipipe);
152 return amdgpu_compute_multipipe == 1;
155 if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
158 /* FIXME: spreading the queues across pipes causes perf regressions
159 * on POLARIS11 compute workloads */
160 if (adev->asic_type == CHIP_POLARIS11)
163 return adev->gfx.mec.num_mec > 1;
166 bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
167 struct amdgpu_ring *ring)
169 int queue = ring->queue;
170 int pipe = ring->pipe;
172 /* Policy: use pipe1 queue0 as high priority graphics queue if we
173 * have more than one gfx pipe.
175 if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
176 adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
180 bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
181 if (ring == &adev->gfx.gfx_ring[bit])
188 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
189 struct amdgpu_ring *ring)
191 /* Policy: use 1st queue as high priority compute queue if we
192 * have more than one compute queue.
194 if (adev->gfx.num_compute_rings > 1 &&
195 ring == &adev->gfx.compute_ring[0])
201 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
203 int i, j, queue, pipe;
204 bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
205 int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
206 adev->gfx.mec.num_queue_per_pipe,
207 adev->gfx.num_compute_rings);
208 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
210 if (multipipe_policy) {
211 /* policy: spread queues evenly across all pipes on MEC1 only;
212 * for multiple XCCs, just use the original policy for simplicity */
213 for (j = 0; j < num_xcc; j++) {
214 for (i = 0; i < max_queues_per_mec; i++) {
215 pipe = i % adev->gfx.mec.num_pipe_per_mec;
216 queue = (i / adev->gfx.mec.num_pipe_per_mec) %
217 adev->gfx.mec.num_queue_per_pipe;
219 set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
220 adev->gfx.mec_bitmap[j].queue_bitmap);
224 /* policy: amdgpu owns all queues in the given pipe */
225 for (j = 0; j < num_xcc; j++) {
226 for (i = 0; i < max_queues_per_mec; ++i)
227 set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
231 for (j = 0; j < num_xcc; j++) {
232 dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
233 bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
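/*
 * Illustration of the multipipe policy above, assuming 4 pipes per MEC and
 * 8 compute rings (example numbers only): queue index i is placed at
 * pipe = i % 4, queue = i / 4, so indices 0..7 land on
 * (pipe 0, q 0), (pipe 1, q 0), (pipe 2, q 0), (pipe 3, q 0),
 * (pipe 0, q 1), ... which spreads the kernel compute queues evenly
 * across the pipes of MEC1.
 */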
237 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
240 bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
241 int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
242 adev->gfx.me.num_queue_per_pipe;
244 if (multipipe_policy) {
245 /* policy: amdgpu owns the first queue per pipe at this stage;
246 * this will be extended to multiple queues per pipe later */
247 for (i = 0; i < max_queues_per_me; i++) {
248 pipe = i % adev->gfx.me.num_pipe_per_me;
249 queue = (i / adev->gfx.me.num_pipe_per_me) %
250 adev->gfx.me.num_queue_per_pipe;
252 set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
253 adev->gfx.me.queue_bitmap);
256 for (i = 0; i < max_queues_per_me; ++i)
257 set_bit(i, adev->gfx.me.queue_bitmap);
260 /* update the number of active graphics rings */
261 adev->gfx.num_gfx_rings =
262 bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
265 static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
266 struct amdgpu_ring *ring, int xcc_id)
269 int mec, pipe, queue;
271 queue_bit = adev->gfx.mec.num_mec
272 * adev->gfx.mec.num_pipe_per_mec
273 * adev->gfx.mec.num_queue_per_pipe;
275 while (--queue_bit >= 0) {
276 if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
279 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
282 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
283 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
284 * can only be issued on queue 0.
286 if ((mec == 1 && pipe > 1) || queue != 0)
296 dev_err(adev->dev, "Failed to find a queue for KIQ\n");
300 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id)
302 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
303 struct amdgpu_irq_src *irq = &kiq->irq;
304 struct amdgpu_ring *ring = &kiq->ring;
307 spin_lock_init(&kiq->ring_lock);
310 ring->ring_obj = NULL;
311 ring->use_doorbell = true;
312 ring->xcc_id = xcc_id;
313 ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
314 ring->doorbell_index =
315 (adev->doorbell_index.kiq +
316 xcc_id * adev->doorbell_index.xcc_doorbell_range)
319 r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
323 ring->eop_gpu_addr = kiq->eop_gpu_addr;
324 ring->no_scheduler = true;
325 snprintf(ring->name, sizeof(ring->name), "kiq_%hhu.%hhu.%hhu.%hhu",
326 (unsigned char)xcc_id, (unsigned char)ring->me,
327 (unsigned char)ring->pipe, (unsigned char)ring->queue);
328 r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
329 AMDGPU_RING_PRIO_DEFAULT, NULL);
331 dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
336 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
338 amdgpu_ring_fini(ring);
341 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
343 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
345 amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
348 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
349 unsigned int hpd_size, int xcc_id)
353 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
355 r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
356 AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
357 &kiq->eop_gpu_addr, (void **)&hpd);
359 dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
363 memset(hpd, 0, hpd_size);
365 r = amdgpu_bo_reserve(kiq->eop_obj, true);
366 if (unlikely(r != 0))
367 dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
368 amdgpu_bo_kunmap(kiq->eop_obj);
369 amdgpu_bo_unreserve(kiq->eop_obj);
374 /* create MQD for each compute/gfx queue */
375 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
376 unsigned int mqd_size, int xcc_id)
379 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
380 struct amdgpu_ring *ring = &kiq->ring;
381 u32 domain = AMDGPU_GEM_DOMAIN_GTT;
383 #if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
384 /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
385 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
386 domain |= AMDGPU_GEM_DOMAIN_VRAM;
389 /* create MQD for KIQ */
390 if (!adev->enable_mes_kiq && !ring->mqd_obj) {
391 /* Originally the KIQ MQD was placed in the GTT domain, but for SRIOV the VRAM
392 * domain is a must; otherwise the hypervisor triggers a SAVE_VF failure after the
393 * driver is unloaded, because the MQD has been deallocated and GART-unbound. To
394 * avoid that divergence, use the VRAM domain for the KIQ MQD on both SRIOV and bare-metal.
396 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
397 AMDGPU_GEM_DOMAIN_VRAM |
398 AMDGPU_GEM_DOMAIN_GTT,
403 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
407 /* prepare MQD backup */
408 kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL);
409 if (!kiq->mqd_backup) {
411 "no memory to create MQD backup for ring %s\n", ring->name);
416 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
417 /* create MQD for each KGQ */
418 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
419 ring = &adev->gfx.gfx_ring[i];
420 if (!ring->mqd_obj) {
421 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
422 domain, &ring->mqd_obj,
423 &ring->mqd_gpu_addr, &ring->mqd_ptr);
425 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
429 ring->mqd_size = mqd_size;
430 /* prepare MQD backup */
431 adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL);
432 if (!adev->gfx.me.mqd_backup[i]) {
433 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
440 /* create MQD for each KCQ */
441 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
442 j = i + xcc_id * adev->gfx.num_compute_rings;
443 ring = &adev->gfx.compute_ring[j];
444 if (!ring->mqd_obj) {
445 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
446 domain, &ring->mqd_obj,
447 &ring->mqd_gpu_addr, &ring->mqd_ptr);
449 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
453 ring->mqd_size = mqd_size;
454 /* prepare MQD backup */
455 adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL);
456 if (!adev->gfx.mec.mqd_backup[j]) {
457 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
466 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
468 struct amdgpu_ring *ring = NULL;
470 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
472 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
473 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
474 ring = &adev->gfx.gfx_ring[i];
475 kfree(adev->gfx.me.mqd_backup[i]);
476 amdgpu_bo_free_kernel(&ring->mqd_obj,
482 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
483 j = i + xcc_id * adev->gfx.num_compute_rings;
484 ring = &adev->gfx.compute_ring[j];
485 kfree(adev->gfx.mec.mqd_backup[j]);
486 amdgpu_bo_free_kernel(&ring->mqd_obj,
492 kfree(kiq->mqd_backup);
493 amdgpu_bo_free_kernel(&ring->mqd_obj,
498 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
500 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
501 struct amdgpu_ring *kiq_ring = &kiq->ring;
505 if (adev->enable_mes) {
506 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
507 j = i + xcc_id * adev->gfx.num_compute_rings;
508 amdgpu_mes_unmap_legacy_queue(adev,
509 &adev->gfx.compute_ring[j],
515 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
518 if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
521 spin_lock(&kiq->ring_lock);
522 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
523 adev->gfx.num_compute_rings)) {
524 spin_unlock(&kiq->ring_lock);
528 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
529 j = i + xcc_id * adev->gfx.num_compute_rings;
530 kiq->pmf->kiq_unmap_queues(kiq_ring,
531 &adev->gfx.compute_ring[j],
534 /* Submit unmap queue packet */
535 amdgpu_ring_commit(kiq_ring);
537 * The ring test does a basic scratch register change check. Run it here
538 * to ensure that the unmap-queues packets submitted above have been
539 * processed successfully before returning.
541 r = amdgpu_ring_test_helper(kiq_ring);
543 spin_unlock(&kiq->ring_lock);
548 int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
550 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
551 struct amdgpu_ring *kiq_ring = &kiq->ring;
555 if (adev->enable_mes) {
556 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
557 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
558 j = i + xcc_id * adev->gfx.num_gfx_rings;
559 amdgpu_mes_unmap_legacy_queue(adev,
560 &adev->gfx.gfx_ring[j],
561 PREEMPT_QUEUES, 0, 0);
567 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
570 if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
573 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
574 spin_lock(&kiq->ring_lock);
575 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
576 adev->gfx.num_gfx_rings)) {
577 spin_unlock(&kiq->ring_lock);
581 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
582 j = i + xcc_id * adev->gfx.num_gfx_rings;
583 kiq->pmf->kiq_unmap_queues(kiq_ring,
584 &adev->gfx.gfx_ring[j],
585 PREEMPT_QUEUES, 0, 0);
587 /* Submit unmap queue packet */
588 amdgpu_ring_commit(kiq_ring);
591 * The ring test does a basic scratch register change check.
592 * Run it here to ensure that the unmap-queues packets submitted
593 * above have been processed successfully before returning.
595 r = amdgpu_ring_test_helper(kiq_ring);
596 spin_unlock(&kiq->ring_lock);
602 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
605 int mec, pipe, queue;
606 int set_resource_bit = 0;
608 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
610 set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
612 return set_resource_bit;
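/*
 * The SET_RESOURCES queue mask assumes a fixed layout of 4 pipes per MEC and
 * 8 queues per pipe, hence the mec * 4 * 8 + pipe * 8 + queue formula above.
 * For example, (mec 0, pipe 1, queue 2) maps to bit 0 * 32 + 1 * 8 + 2 = 10.
 */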
615 static int amdgpu_gfx_mes_enable_kcq(struct amdgpu_device *adev, int xcc_id)
617 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
618 struct amdgpu_ring *kiq_ring = &kiq->ring;
619 uint64_t queue_mask = ~0ULL;
622 amdgpu_device_flush_hdp(adev, NULL);
624 if (!adev->enable_uni_mes) {
625 spin_lock(&kiq->ring_lock);
626 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->set_resources_size);
628 dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
629 spin_unlock(&kiq->ring_lock);
633 kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
634 r = amdgpu_ring_test_helper(kiq_ring);
635 spin_unlock(&kiq->ring_lock);
637 dev_err(adev->dev, "KIQ failed to set resources\n");
640 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
641 j = i + xcc_id * adev->gfx.num_compute_rings;
642 r = amdgpu_mes_map_legacy_queue(adev,
643 &adev->gfx.compute_ring[j]);
645 dev_err(adev->dev, "failed to map compute queue\n");
653 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
655 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
656 struct amdgpu_ring *kiq_ring = &kiq->ring;
657 uint64_t queue_mask = 0;
660 if (adev->mes.enable_legacy_queue_map)
661 return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
663 if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
666 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
667 if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
670 /* This situation may be hit in the future if a new HW
671 * generation exposes more than 64 queues. If so, the
672 * definition of queue_mask needs updating */
673 if (WARN_ON(i > (sizeof(queue_mask)*8))) {
674 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
678 queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
681 amdgpu_device_flush_hdp(adev, NULL);
683 DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
686 spin_lock(&kiq->ring_lock);
687 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
688 adev->gfx.num_compute_rings +
689 kiq->pmf->set_resources_size);
691 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
692 spin_unlock(&kiq->ring_lock);
696 kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
697 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
698 j = i + xcc_id * adev->gfx.num_compute_rings;
699 kiq->pmf->kiq_map_queues(kiq_ring,
700 &adev->gfx.compute_ring[j]);
702 /* Submit map queue packet */
703 amdgpu_ring_commit(kiq_ring);
705 * The ring test does a basic scratch register change check. Run it here
706 * to ensure that the map-queues packets submitted above have been
707 * processed successfully before returning.
709 r = amdgpu_ring_test_helper(kiq_ring);
710 spin_unlock(&kiq->ring_lock);
712 DRM_ERROR("KCQ enable failed\n");
717 int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
719 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
720 struct amdgpu_ring *kiq_ring = &kiq->ring;
723 if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
726 amdgpu_device_flush_hdp(adev, NULL);
728 if (adev->mes.enable_legacy_queue_map) {
729 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
730 j = i + xcc_id * adev->gfx.num_gfx_rings;
731 r = amdgpu_mes_map_legacy_queue(adev,
732 &adev->gfx.gfx_ring[j]);
734 DRM_ERROR("failed to map gfx queue\n");
742 spin_lock(&kiq->ring_lock);
743 /* No need to map KGQs on a slave XCC */
744 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
745 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
746 adev->gfx.num_gfx_rings);
748 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
749 spin_unlock(&kiq->ring_lock);
753 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
754 j = i + xcc_id * adev->gfx.num_gfx_rings;
755 kiq->pmf->kiq_map_queues(kiq_ring,
756 &adev->gfx.gfx_ring[j]);
759 /* Submit map queue packet */
760 amdgpu_ring_commit(kiq_ring);
762 * The ring test does a basic scratch register change check. Run it here
763 * to ensure that the map-queues packets submitted above have been
764 * processed successfully before returning.
766 r = amdgpu_ring_test_helper(kiq_ring);
767 spin_unlock(&kiq->ring_lock);
769 DRM_ERROR("KGQ enable failed\n");
774 /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
776 * @adev: amdgpu_device pointer
777 * @enable: true: enable the gfx off feature, false: disable the gfx off feature
779 * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
780 * 2. Other clients can send requests to disable the gfx off feature; such requests should be honored.
781 * 3. Other clients can cancel their requests to disable the gfx off feature.
782 * 4. Other clients should not request enabling the gfx off feature before having requested disabling it.
785 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
787 unsigned long delay = GFX_OFF_DELAY_ENABLE;
789 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
792 mutex_lock(&adev->gfx.gfx_off_mutex);
795 /* If the count is already 0, it means there's an imbalance bug somewhere.
796 * Note that the bug may be in a different caller than the one which triggers the
799 if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
802 adev->gfx.gfx_off_req_count--;
804 if (adev->gfx.gfx_off_req_count == 0 &&
805 !adev->gfx.gfx_off_state) {
806 /* If going to s2idle, no need to wait */
808 if (!amdgpu_dpm_set_powergating_by_smu(adev,
809 AMD_IP_BLOCK_TYPE_GFX, true, 0))
810 adev->gfx.gfx_off_state = true;
812 schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
817 if (adev->gfx.gfx_off_req_count == 0) {
818 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
820 if (adev->gfx.gfx_off_state &&
821 !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) {
822 adev->gfx.gfx_off_state = false;
824 if (adev->gfx.funcs->init_spm_golden) {
826 "GFXOFF is disabled, re-init SPM golden settings\n");
827 amdgpu_gfx_init_spm_golden(adev);
832 adev->gfx.gfx_off_req_count++;
836 mutex_unlock(&adev->gfx.gfx_off_mutex);
839 int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
843 mutex_lock(&adev->gfx.gfx_off_mutex);
845 r = amdgpu_dpm_set_residency_gfxoff(adev, value);
847 mutex_unlock(&adev->gfx.gfx_off_mutex);
852 int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
856 mutex_lock(&adev->gfx.gfx_off_mutex);
858 r = amdgpu_dpm_get_residency_gfxoff(adev, value);
860 mutex_unlock(&adev->gfx.gfx_off_mutex);
865 int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
869 mutex_lock(&adev->gfx.gfx_off_mutex);
871 r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
873 mutex_unlock(&adev->gfx.gfx_off_mutex);
878 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
883 mutex_lock(&adev->gfx.gfx_off_mutex);
885 r = amdgpu_dpm_get_status_gfxoff(adev, value);
887 mutex_unlock(&adev->gfx.gfx_off_mutex);
892 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
896 if (amdgpu_ras_is_supported(adev, ras_block->block)) {
897 if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
898 r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
903 r = amdgpu_ras_block_late_init(adev, ras_block);
907 if (amdgpu_sriov_vf(adev))
910 if (adev->gfx.cp_ecc_error_irq.funcs) {
911 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
916 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
921 amdgpu_ras_block_late_fini(adev, ras_block);
925 int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
928 struct amdgpu_gfx_ras *ras = NULL;
930 /* If adev->gfx.ras is NULL, gfx does not support the
931 * RAS function, so do nothing here.
938 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
940 dev_err(adev->dev, "Failed to register gfx ras block!\n");
944 strcpy(ras->ras_block.ras_comm.name, "gfx");
945 ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
946 ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
947 adev->gfx.ras_if = &ras->ras_block.ras_comm;
949 /* If no IP-specific ras_late_init function is defined, use the default gfx ras_late_init */
950 if (!ras->ras_block.ras_late_init)
951 ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
953 /* If no IP-specific ras_cb function is defined, use the default ras_cb */
954 if (!ras->ras_block.ras_cb)
955 ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
960 int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
961 struct amdgpu_iv_entry *entry)
963 if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
964 return adev->gfx.ras->poison_consumption_handler(adev, entry);
969 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
971 struct amdgpu_iv_entry *entry)
973 /* TODO: a UE (uncorrectable error) will trigger an interrupt.
975 * When "Full RAS" is enabled, the per-IP interrupt sources should
976 * be disabled and the driver should only look for the aggregated
977 * interrupt via sync flood
979 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
980 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
981 if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
982 adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
983 adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
984 amdgpu_ras_reset_gpu(adev);
986 return AMDGPU_RAS_SUCCESS;
989 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
990 struct amdgpu_irq_src *source,
991 struct amdgpu_iv_entry *entry)
993 struct ras_common_if *ras_if = adev->gfx.ras_if;
994 struct ras_dispatch_if ih_data = {
1001 ih_data.head = *ras_if;
1003 DRM_ERROR("CP ECC ERROR IRQ\n");
1004 amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1008 void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
1009 void *ras_error_status,
1010 void (*func)(struct amdgpu_device *adev, void *ras_error_status,
1014 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
1015 uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
1016 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
1019 err_data->ue_count = 0;
1020 err_data->ce_count = 0;
1023 for_each_inst(i, xcc_mask)
1024 func(adev, ras_error_status, i);
1027 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
1029 signed long r, cnt = 0;
1030 unsigned long flags;
1031 uint32_t seq, reg_val_offs = 0, value = 0;
1032 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1033 struct amdgpu_ring *ring = &kiq->ring;
1035 if (amdgpu_device_skip_hw_access(adev))
1038 if (adev->mes.ring[0].sched.ready)
1039 return amdgpu_mes_rreg(adev, reg);
1041 BUG_ON(!ring->funcs->emit_rreg);
1043 spin_lock_irqsave(&kiq->ring_lock, flags);
1044 if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
1045 pr_err("critical bug! too many kiq readers\n");
1048 r = amdgpu_ring_alloc(ring, 32);
1052 amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
1053 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1057 amdgpu_ring_commit(ring);
1058 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1060 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1062 /* Don't keep waiting in the GPU reset case, because doing so may
1063 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
1064 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() would
1065 * never return if we kept waiting in virt_kiq_rreg, which causes
1066 * gpu_recover() to hang there.
1068 * Also don't keep waiting when in IRQ context.
1070 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1071 goto failed_kiq_read;
1074 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1075 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1076 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1079 if (cnt > MAX_KIQ_REG_TRY)
1080 goto failed_kiq_read;
1083 value = adev->wb.wb[reg_val_offs];
1084 amdgpu_device_wb_free(adev, reg_val_offs);
1088 amdgpu_ring_undo(ring);
1090 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1093 amdgpu_device_wb_free(adev, reg_val_offs);
1094 dev_err(adev->dev, "failed to read reg:%x\n", reg);
1098 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
1100 signed long r, cnt = 0;
1101 unsigned long flags;
1103 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1104 struct amdgpu_ring *ring = &kiq->ring;
1106 BUG_ON(!ring->funcs->emit_wreg);
1108 if (amdgpu_device_skip_hw_access(adev))
1111 if (adev->mes.ring[0].sched.ready) {
1112 amdgpu_mes_wreg(adev, reg, v);
1116 spin_lock_irqsave(&kiq->ring_lock, flags);
1117 r = amdgpu_ring_alloc(ring, 32);
1121 amdgpu_ring_emit_wreg(ring, reg, v);
1122 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1126 amdgpu_ring_commit(ring);
1127 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1129 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1131 /* Don't keep waiting in the GPU reset case, because doing so may
1132 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
1133 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() would
1134 * never return if we kept waiting in virt_kiq_rreg, which causes
1135 * gpu_recover() to hang there.
1137 * Also don't keep waiting when in IRQ context.
1139 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1140 goto failed_kiq_write;
1143 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1145 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1146 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1149 if (cnt > MAX_KIQ_REG_TRY)
1150 goto failed_kiq_write;
1155 amdgpu_ring_undo(ring);
1157 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1159 dev_err(adev->dev, "failed to write reg:%x\n", reg);
1162 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
1164 if (amdgpu_num_kcq == -1) {
1166 } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
1167 dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
1170 return amdgpu_num_kcq;
1173 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
1176 const struct gfx_firmware_header_v1_0 *cp_hdr;
1177 const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
1178 struct amdgpu_firmware_info *info = NULL;
1179 const struct firmware *ucode_fw;
1180 unsigned int fw_size;
1183 case AMDGPU_UCODE_ID_CP_PFP:
1184 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1185 adev->gfx.pfp_fw->data;
1186 adev->gfx.pfp_fw_version =
1187 le32_to_cpu(cp_hdr->header.ucode_version);
1188 adev->gfx.pfp_feature_version =
1189 le32_to_cpu(cp_hdr->ucode_feature_version);
1190 ucode_fw = adev->gfx.pfp_fw;
1191 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1193 case AMDGPU_UCODE_ID_CP_RS64_PFP:
1194 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1195 adev->gfx.pfp_fw->data;
1196 adev->gfx.pfp_fw_version =
1197 le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1198 adev->gfx.pfp_feature_version =
1199 le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1200 ucode_fw = adev->gfx.pfp_fw;
1201 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1203 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
1204 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
1205 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1206 adev->gfx.pfp_fw->data;
1207 ucode_fw = adev->gfx.pfp_fw;
1208 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1210 case AMDGPU_UCODE_ID_CP_ME:
1211 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1212 adev->gfx.me_fw->data;
1213 adev->gfx.me_fw_version =
1214 le32_to_cpu(cp_hdr->header.ucode_version);
1215 adev->gfx.me_feature_version =
1216 le32_to_cpu(cp_hdr->ucode_feature_version);
1217 ucode_fw = adev->gfx.me_fw;
1218 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1220 case AMDGPU_UCODE_ID_CP_RS64_ME:
1221 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1222 adev->gfx.me_fw->data;
1223 adev->gfx.me_fw_version =
1224 le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1225 adev->gfx.me_feature_version =
1226 le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1227 ucode_fw = adev->gfx.me_fw;
1228 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1230 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
1231 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
1232 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1233 adev->gfx.me_fw->data;
1234 ucode_fw = adev->gfx.me_fw;
1235 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1237 case AMDGPU_UCODE_ID_CP_CE:
1238 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1239 adev->gfx.ce_fw->data;
1240 adev->gfx.ce_fw_version =
1241 le32_to_cpu(cp_hdr->header.ucode_version);
1242 adev->gfx.ce_feature_version =
1243 le32_to_cpu(cp_hdr->ucode_feature_version);
1244 ucode_fw = adev->gfx.ce_fw;
1245 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1247 case AMDGPU_UCODE_ID_CP_MEC1:
1248 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1249 adev->gfx.mec_fw->data;
1250 adev->gfx.mec_fw_version =
1251 le32_to_cpu(cp_hdr->header.ucode_version);
1252 adev->gfx.mec_feature_version =
1253 le32_to_cpu(cp_hdr->ucode_feature_version);
1254 ucode_fw = adev->gfx.mec_fw;
1255 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1256 le32_to_cpu(cp_hdr->jt_size) * 4;
1258 case AMDGPU_UCODE_ID_CP_MEC1_JT:
1259 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1260 adev->gfx.mec_fw->data;
1261 ucode_fw = adev->gfx.mec_fw;
1262 fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1264 case AMDGPU_UCODE_ID_CP_MEC2:
1265 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1266 adev->gfx.mec2_fw->data;
1267 adev->gfx.mec2_fw_version =
1268 le32_to_cpu(cp_hdr->header.ucode_version);
1269 adev->gfx.mec2_feature_version =
1270 le32_to_cpu(cp_hdr->ucode_feature_version);
1271 ucode_fw = adev->gfx.mec2_fw;
1272 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1273 le32_to_cpu(cp_hdr->jt_size) * 4;
1275 case AMDGPU_UCODE_ID_CP_MEC2_JT:
1276 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1277 adev->gfx.mec2_fw->data;
1278 ucode_fw = adev->gfx.mec2_fw;
1279 fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1281 case AMDGPU_UCODE_ID_CP_RS64_MEC:
1282 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1283 adev->gfx.mec_fw->data;
1284 adev->gfx.mec_fw_version =
1285 le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1286 adev->gfx.mec_feature_version =
1287 le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1288 ucode_fw = adev->gfx.mec_fw;
1289 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1291 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
1292 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
1293 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
1294 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
1295 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1296 adev->gfx.mec_fw->data;
1297 ucode_fw = adev->gfx.mec_fw;
1298 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1301 dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
1305 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1306 info = &adev->firmware.ucode[ucode_id];
1307 info->ucode_id = ucode_id;
1308 info->fw = ucode_fw;
1309 adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
1313 bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
1315 return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
1316 adev->gfx.num_xcc_per_xcp : 1));
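/*
 * Example: with num_xcc_per_xcp == 2, XCCs 0, 2, 4, ... are the "master"
 * XCCs of their partitions and XCCs 1, 3, 5, ... are slaves; with
 * num_xcc_per_xcp of 0 or 1 every XCC counts as a master.
 */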
1319 static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
1320 struct device_attribute *addr,
1323 struct drm_device *ddev = dev_get_drvdata(dev);
1324 struct amdgpu_device *adev = drm_to_adev(ddev);
1327 mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
1328 AMDGPU_XCP_FL_NONE);
1330 return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
1333 static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
1334 struct device_attribute *addr,
1335 const char *buf, size_t count)
1337 struct drm_device *ddev = dev_get_drvdata(dev);
1338 struct amdgpu_device *adev = drm_to_adev(ddev);
1339 enum amdgpu_gfx_partition mode;
1340 int ret = 0, num_xcc;
1342 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1343 if (num_xcc % 2 != 0)
1346 if (!strncasecmp("SPX", buf, strlen("SPX"))) {
1347 mode = AMDGPU_SPX_PARTITION_MODE;
1348 } else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
1350 * DPX mode needs the number of AIDs to be a multiple of 2.
1351 * Each AID connects 2 XCCs.
1355 mode = AMDGPU_DPX_PARTITION_MODE;
1356 } else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
1359 mode = AMDGPU_TPX_PARTITION_MODE;
1360 } else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
1363 mode = AMDGPU_QPX_PARTITION_MODE;
1364 } else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
1365 mode = AMDGPU_CPX_PARTITION_MODE;
1370 ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
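/*
 * Usage sketch for the two attributes above (paths are illustrative):
 * reading current_compute_partition prints the active mode (e.g. "SPX"),
 * while writing one of SPX/DPX/TPX/QPX/CPX requests a mode switch, which
 * only succeeds when partition switching is supported and the requested
 * mode is valid for the device's XCC count.
 */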
1378 static const char *xcp_desc[] = {
1379 [AMDGPU_SPX_PARTITION_MODE] = "SPX",
1380 [AMDGPU_DPX_PARTITION_MODE] = "DPX",
1381 [AMDGPU_TPX_PARTITION_MODE] = "TPX",
1382 [AMDGPU_QPX_PARTITION_MODE] = "QPX",
1383 [AMDGPU_CPX_PARTITION_MODE] = "CPX",
1386 static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
1387 struct device_attribute *addr,
1390 struct drm_device *ddev = dev_get_drvdata(dev);
1391 struct amdgpu_device *adev = drm_to_adev(ddev);
1392 struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1396 if (!xcp_mgr || !xcp_mgr->avail_xcp_modes)
1397 return sysfs_emit(buf, "Not supported\n");
1399 for_each_inst(mode, xcp_mgr->avail_xcp_modes) {
1400 size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
1404 size += sysfs_emit_at(buf, size, "\n");
1409 static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
1411 struct amdgpu_device *adev = ring->adev;
1412 struct drm_gpu_scheduler *sched = &ring->sched;
1413 struct drm_sched_entity entity;
1414 struct dma_fence *f;
1415 struct amdgpu_job *job;
1416 struct amdgpu_ib *ib;
1419 /* Initialize the scheduler entity */
1420 r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
1423 dev_err(adev->dev, "Failed setting up GFX kernel entity.\n");
1427 r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
1433 job->enforce_isolation = true;
1436 for (i = 0; i <= ring->funcs->align_mask; ++i)
1437 ib->ptr[i] = ring->funcs->nop;
1438 ib->length_dw = ring->funcs->align_mask + 1;
1440 f = amdgpu_job_submit(job);
1442 r = dma_fence_wait(f, false);
1448 /* Clean up the scheduler entity */
1449 drm_sched_entity_destroy(&entity);
1456 static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
1458 int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1459 struct amdgpu_ring *ring;
1460 int num_xcc_to_clear;
1463 if (adev->gfx.num_xcc_per_xcp)
1464 num_xcc_to_clear = adev->gfx.num_xcc_per_xcp;
1466 num_xcc_to_clear = 1;
1468 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1469 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1470 ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
1471 if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
1472 r = amdgpu_gfx_run_cleaner_shader_job(ring);
1481 if (num_xcc_to_clear)
1488 * amdgpu_gfx_set_run_cleaner_shader - Execute the AMDGPU GFX Cleaner Shader
1489 * @dev: The device structure
1490 * @attr: The device attribute structure
1491 * @buf: The buffer containing the input data
1492 * @count: The size of the input data
1494 * Provides the sysfs interface to manually run a cleaner shader, which is
1495 * used to clear the GPU state between different tasks. Writing a value to the
1496 * 'run_cleaner_shader' sysfs file triggers the cleaner shader execution.
1497 * The value written corresponds to the partition index on multi-partition
1498 * devices. On single-partition devices, the value should be '0'.
1500 * The cleaner shader clears the Local Data Store (LDS) and General Purpose
1501 * Registers (GPRs) to ensure data isolation between GPU workloads.
1503 * Return: The number of bytes written to the sysfs file.
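 *
 * A minimal usage sketch (the exact sysfs path depends on the card index and
 * is only an example):
 *
 *   echo 0 > /sys/class/drm/card0/device/run_cleaner_shader
 *
 * runs the cleaner shader on partition 0, or on the whole GPU for
 * single-partition devices.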
1505 static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
1506 struct device_attribute *attr,
1510 struct drm_device *ddev = dev_get_drvdata(dev);
1511 struct amdgpu_device *adev = drm_to_adev(ddev);
1515 if (amdgpu_in_reset(adev))
1517 if (adev->in_suspend && !adev->in_runpm)
1520 ret = kstrtol(buf, 0, &value);
1528 if (adev->xcp_mgr) {
1529 if (value >= adev->xcp_mgr->num_xcps)
1536 ret = pm_runtime_get_sync(ddev->dev);
1538 pm_runtime_put_autosuspend(ddev->dev);
1542 ret = amdgpu_gfx_run_cleaner_shader(adev, value);
1544 pm_runtime_mark_last_busy(ddev->dev);
1545 pm_runtime_put_autosuspend(ddev->dev);
1554 * amdgpu_gfx_get_enforce_isolation - Query AMDGPU GFX Enforce Isolation Settings
1555 * @dev: The device structure
1556 * @attr: The device attribute structure
1557 * @buf: The buffer to store the output data
1559 * Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
1560 * feature for each GPU partition. Reading from the 'enforce_isolation'
1561 * sysfs file returns the isolation settings for all partitions, where '0'
1562 * indicates disabled and '1' indicates enabled.
1564 * Return: The number of bytes read from the sysfs file.
1566 static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
1567 struct device_attribute *attr,
1570 struct drm_device *ddev = dev_get_drvdata(dev);
1571 struct amdgpu_device *adev = drm_to_adev(ddev);
1575 if (adev->xcp_mgr) {
1576 for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
1577 size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
1578 if (i < (adev->xcp_mgr->num_xcps - 1))
1579 size += sysfs_emit_at(buf, size, " ");
1583 size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
1590 * amdgpu_gfx_set_enforce_isolation - Control AMDGPU GFX Enforce Isolation
1591 * @dev: The device structure
1592 * @attr: The device attribute structure
1593 * @buf: The buffer containing the input data
1594 * @count: The size of the input data
1596 * This function allows control over the 'enforce_isolation' feature, which
1597 * serializes access to the graphics engine. Writing '1' or '0' to the
1598 * 'enforce_isolation' sysfs file enables or disables process isolation for
1599 * each partition. The input should specify the setting for all partitions.
1601 * Return: The number of bytes written to the sysfs file.
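 *
 * A usage sketch (path and partition count are illustrative only): on a
 * device exposing 4 partitions,
 *
 *   echo "1 0 0 1" > /sys/class/drm/card0/device/enforce_isolation
 *
 * enables isolation on partitions 0 and 3 and disables it on 1 and 2; a
 * single-partition device expects a single "0" or "1".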
1603 static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
1604 struct device_attribute *attr,
1605 const char *buf, size_t count)
1607 struct drm_device *ddev = dev_get_drvdata(dev);
1608 struct amdgpu_device *adev = drm_to_adev(ddev);
1609 long partition_values[MAX_XCP] = {0};
1610 int ret, i, num_partitions;
1611 const char *input_buf = buf;
1613 for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
1614 ret = sscanf(input_buf, "%ld", &partition_values[i]);
1618 /* Move the pointer to the next value in the string */
1619 input_buf = strchr(input_buf, ' ');
1629 if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
1632 if (!adev->xcp_mgr && num_partitions != 1)
1635 for (i = 0; i < num_partitions; i++) {
1636 if (partition_values[i] != 0 && partition_values[i] != 1)
1640 mutex_lock(&adev->enforce_isolation_mutex);
1642 for (i = 0; i < num_partitions; i++) {
1643 if (adev->enforce_isolation[i] && !partition_values[i]) {
1644 /* Going from enabled to disabled */
1645 amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
1646 amdgpu_mes_set_enforce_isolation(adev, i, false);
1647 } else if (!adev->enforce_isolation[i] && partition_values[i]) {
1648 /* Going from disabled to enabled */
1649 amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
1650 amdgpu_mes_set_enforce_isolation(adev, i, true);
1652 adev->enforce_isolation[i] = partition_values[i];
1655 mutex_unlock(&adev->enforce_isolation_mutex);
1660 static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev,
1661 struct device_attribute *attr,
1664 struct drm_device *ddev = dev_get_drvdata(dev);
1665 struct amdgpu_device *adev = drm_to_adev(ddev);
1670 return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
1673 static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev,
1674 struct device_attribute *attr,
1677 struct drm_device *ddev = dev_get_drvdata(dev);
1678 struct amdgpu_device *adev = drm_to_adev(ddev);
1683 return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset);
1686 static DEVICE_ATTR(run_cleaner_shader, 0200,
1687 NULL, amdgpu_gfx_set_run_cleaner_shader);
1689 static DEVICE_ATTR(enforce_isolation, 0644,
1690 amdgpu_gfx_get_enforce_isolation,
1691 amdgpu_gfx_set_enforce_isolation);
1693 static DEVICE_ATTR(current_compute_partition, 0644,
1694 amdgpu_gfx_get_current_compute_partition,
1695 amdgpu_gfx_set_compute_partition);
1697 static DEVICE_ATTR(available_compute_partition, 0444,
1698 amdgpu_gfx_get_available_compute_partition, NULL);
1699 static DEVICE_ATTR(gfx_reset_mask, 0444,
1700 amdgpu_gfx_get_gfx_reset_mask, NULL);
1702 static DEVICE_ATTR(compute_reset_mask, 0444,
1703 amdgpu_gfx_get_compute_reset_mask, NULL);
1705 static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev)
1707 struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1708 bool xcp_switch_supported;
1714 xcp_switch_supported =
1715 (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
1717 if (!xcp_switch_supported)
1718 dev_attr_current_compute_partition.attr.mode &=
1719 ~(S_IWUSR | S_IWGRP | S_IWOTH);
1721 r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
1725 if (xcp_switch_supported)
1726 r = device_create_file(adev->dev,
1727 &dev_attr_available_compute_partition);
1732 static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev)
1734 struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1735 bool xcp_switch_supported;
1740 xcp_switch_supported =
1741 (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
1742 device_remove_file(adev->dev, &dev_attr_current_compute_partition);
1744 if (xcp_switch_supported)
1745 device_remove_file(adev->dev,
1746 &dev_attr_available_compute_partition);
1749 static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
1753 r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
1756 if (adev->gfx.enable_cleaner_shader)
1757 r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
1762 static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
1764 device_remove_file(adev->dev, &dev_attr_enforce_isolation);
1765 if (adev->gfx.enable_cleaner_shader)
1766 device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
1769 static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev)
1773 if (!amdgpu_gpu_recovery)
1776 if (adev->gfx.num_gfx_rings) {
1777 r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask);
1782 if (adev->gfx.num_compute_rings) {
1783 r = device_create_file(adev->dev, &dev_attr_compute_reset_mask);
1791 static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev)
1793 if (!amdgpu_gpu_recovery)
1796 if (adev->gfx.num_gfx_rings)
1797 device_remove_file(adev->dev, &dev_attr_gfx_reset_mask);
1799 if (adev->gfx.num_compute_rings)
1800 device_remove_file(adev->dev, &dev_attr_compute_reset_mask);
1803 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
1807 r = amdgpu_gfx_sysfs_xcp_init(adev);
1809 dev_err(adev->dev, "failed to create xcp sysfs files");
1813 r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
1815 dev_err(adev->dev, "failed to create isolation sysfs files");
1817 r = amdgpu_gfx_sysfs_reset_mask_init(adev);
1819 dev_err(adev->dev, "failed to create reset mask sysfs files");
1824 void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
1826 if (adev->dev->kobj.sd) {
1827 amdgpu_gfx_sysfs_xcp_fini(adev);
1828 amdgpu_gfx_sysfs_isolation_shader_fini(adev);
1829 amdgpu_gfx_sysfs_reset_mask_fini(adev);
1833 int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
1834 unsigned int cleaner_shader_size)
1836 if (!adev->gfx.enable_cleaner_shader)
1839 return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
1840 AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
1841 &adev->gfx.cleaner_shader_obj,
1842 &adev->gfx.cleaner_shader_gpu_addr,
1843 (void **)&adev->gfx.cleaner_shader_cpu_ptr);
1846 void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
1848 if (!adev->gfx.enable_cleaner_shader)
1851 amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj,
1852 &adev->gfx.cleaner_shader_gpu_addr,
1853 (void **)&adev->gfx.cleaner_shader_cpu_ptr);
1856 void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
1857 unsigned int cleaner_shader_size,
1858 const void *cleaner_shader_ptr)
1860 if (!adev->gfx.enable_cleaner_shader)
1863 if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
1864 memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
1865 cleaner_shader_size);
1869 * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver)
1870 * @adev: amdgpu_device pointer
1871 * @idx: Index of the scheduler to control
1872 * @enable: Whether to enable or disable the KFD scheduler
1874 * This function is used to control the KFD (Kernel Fusion Driver) scheduler
1875 * from the KGD. It is part of the cleaner shader feature. This function plays
1876 * a key role in enforcing process isolation on the GPU.
1878 * The function uses a reference count mechanism (kfd_sch_req_count) to keep
1879 * track of the number of requests to enable the KFD scheduler. When a request
1880 * to enable the KFD scheduler is made, the reference count is decremented.
1881 * When the reference count reaches zero, a delayed work is scheduled to
1882 * enforce isolation after a delay of GFX_SLICE_PERIOD_MS.
1884 * When a request to disable the KFD scheduler is made, the function first
1885 * checks if the reference count is zero. If it is, it cancels the delayed work
1886 * for enforcing isolation and checks if the KFD scheduler is active. If the
1887 * KFD scheduler is active, it sends a request to stop the KFD scheduler and
1888 * sets the KFD scheduler state to inactive. Then, it increments the reference
1891 * The function is synchronized using the kfd_sch_mutex to ensure that the KFD
1892 * scheduler state and reference count are updated atomically.
1894 * Note: If the reference count is already zero when a request to enable the
1895 * KFD scheduler is made, it means there's an imbalance bug somewhere. The
1896 * function triggers a warning in this case.
1898 static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
1901 mutex_lock(&adev->gfx.kfd_sch_mutex);
1904 /* If the count is already 0, it means there's an imbalance bug somewhere.
1905 * Note that the bug may be in a different caller than the one which triggers the
1908 if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
1909 dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
1913 adev->gfx.kfd_sch_req_count[idx]--;
1915 if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
1916 adev->gfx.kfd_sch_inactive[idx]) {
1917 schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
1918 msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
1921 if (adev->gfx.kfd_sch_req_count[idx] == 0) {
1922 cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
1923 if (!adev->gfx.kfd_sch_inactive[idx]) {
1924 amdgpu_amdkfd_stop_sched(adev, idx);
1925 adev->gfx.kfd_sch_inactive[idx] = true;
1929 adev->gfx.kfd_sch_req_count[idx]++;
1933 mutex_unlock(&adev->gfx.kfd_sch_mutex);
1937 * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
1939 * @work: work_struct.
1941 * This function is the work handler for enforcing shader isolation on AMD GPUs.
1942 * It counts the number of emitted fences for each GFX and compute ring. If there
1943 * are any fences, it schedules the `enforce_isolation_work` to be run after a
1944 * delay of `GFX_SLICE_PERIOD`. If there are no fences, it signals the Kernel Fusion
1945 * Driver (KFD) to resume the runqueue. The function is synchronized using the
1946 * `enforce_isolation_mutex`.
1948 void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
1950 struct amdgpu_isolation_work *isolation_work =
1951 container_of(work, struct amdgpu_isolation_work, work.work);
1952 struct amdgpu_device *adev = isolation_work->adev;
1953 u32 i, idx, fences = 0;
1955 if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)
1958 idx = isolation_work->xcp_id;
1963 mutex_lock(&adev->enforce_isolation_mutex);
1964 for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
1965 if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
1966 fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
1968 for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
1969 if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
1970 fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
1973 /* we've already had our timeslice, so let's wrap this up */
1974 schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
1975 msecs_to_jiffies(1));
1977 /* Tell KFD to resume the runqueue */
1978 if (adev->kfd.init_complete) {
1979 WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
1980 WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
1981 amdgpu_amdkfd_start_sched(adev, idx);
1982 adev->gfx.kfd_sch_inactive[idx] = false;
1985 mutex_unlock(&adev->enforce_isolation_mutex);
1989 * amdgpu_gfx_enforce_isolation_wait_for_kfd - Manage KFD wait period for process isolation
1990 * @adev: amdgpu_device pointer
1991 * @idx: Index of the GPU partition
1993 * When kernel submissions come in, the jobs are given a time slice and once
1994 * that time slice is up, if there are KFD user queues active, kernel
1995 * submissions are blocked until KFD has had its time slice. Once the KFD time
1996 * slice is up, KFD user queues are preempted and kernel submissions are
1997 * unblocked and allowed to run again.
2000 amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
2003 unsigned long cjiffies;
2006 mutex_lock(&adev->enforce_isolation_mutex);
2007 if (adev->enforce_isolation[idx]) {
2008 /* set the initial values if nothing is set */
2009 if (!adev->gfx.enforce_isolation_jiffies[idx]) {
2010 adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2011 adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2013 /* Make sure KFD gets a chance to run */
2014 if (amdgpu_amdkfd_compute_active(adev, idx)) {
2016 if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) {
2017 cjiffies -= adev->gfx.enforce_isolation_jiffies[idx];
2018 if ((jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS)) {
2019 /* if our time is up, let KGD work drain before scheduling more */
2021 /* reset the timer period */
2022 adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2024 /* set the timer period to what's left in our time slice */
2025 adev->gfx.enforce_isolation_time[idx] =
2026 GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies);
2029 /* if jiffies wrap around we will just wait a little longer */
2030 adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2033 /* if there is no KFD work, then set the full slice period */
2034 adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2035 adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2038 mutex_unlock(&adev->enforce_isolation_mutex);
2041 msleep(GFX_SLICE_PERIOD_MS);
2045 * amdgpu_gfx_enforce_isolation_ring_begin_use - Begin use of a ring with enforced isolation
2046 * @ring: Pointer to the amdgpu_ring structure
2048 * Ring begin_use helper implementation for gfx which serializes access to the
2049 * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
2050 * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
2051 * each get a time slice when both are active.
2053 void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
2055 struct amdgpu_device *adev = ring->adev;
2057 bool sched_work = false;
2059 if (!adev->gfx.enable_cleaner_shader)
2062 if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
2070 /* Don't submit more work until KFD has had some time */
2071 amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
2073 mutex_lock(&adev->enforce_isolation_mutex);
2074 if (adev->enforce_isolation[idx]) {
2075 if (adev->kfd.init_complete)
2078 mutex_unlock(&adev->enforce_isolation_mutex);
2081 amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
2085 * amdgpu_gfx_enforce_isolation_ring_end_use - End use of a ring with enforced isolation
2086 * @ring: Pointer to the amdgpu_ring structure
2088 * Ring end_use helper implementation for gfx which serializes access to the
2089 * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
2090 * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
2091 * each get a time slice when both are active.
2093 void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
2095 struct amdgpu_device *adev = ring->adev;
2097 bool sched_work = false;
2099 if (!adev->gfx.enable_cleaner_shader)
2102 if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
2110 mutex_lock(&adev->enforce_isolation_mutex);
2111 if (adev->enforce_isolation[idx]) {
2112 if (adev->kfd.init_complete)
2115 mutex_unlock(&adev->enforce_isolation_mutex);
2118 amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
2122 * debugfs to enable/disable gfx job submission to a specific core.
2124 #if defined(CONFIG_DEBUG_FS)
2125 static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
2127 struct amdgpu_device *adev = (struct amdgpu_device *)data;
2130 struct amdgpu_ring *ring;
2135 mask = (1ULL << adev->gfx.num_gfx_rings) - 1;
2136 if ((val & mask) == 0)
2139 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
2140 ring = &adev->gfx.gfx_ring[i];
2142 ring->sched.ready = true;
2144 ring->sched.ready = false;
2146 /* make the sched.ready flag update visible immediately across all CPUs */
2151 static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
2153 struct amdgpu_device *adev = (struct amdgpu_device *)data;
2156 struct amdgpu_ring *ring;
2160 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
2161 ring = &adev->gfx.gfx_ring[i];
2162 if (ring->sched.ready)
2170 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops,
2171 amdgpu_debugfs_gfx_sched_mask_get,
2172 amdgpu_debugfs_gfx_sched_mask_set, "%llx\n");
2176 void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev)
2178 #if defined(CONFIG_DEBUG_FS)
2179 struct drm_minor *minor = adev_to_drm(adev)->primary;
2180 struct dentry *root = minor->debugfs_root;
2183 if (!(adev->gfx.num_gfx_rings > 1))
2185 sprintf(name, "amdgpu_gfx_sched_mask");
2186 debugfs_create_file(name, 0600, root, adev,
2187 &amdgpu_debugfs_gfx_sched_mask_fops);
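/*
 * Usage sketch for the mask above (the debugfs path follows the usual DRM
 * layout and is only an example): with two gfx rings,
 *
 *   echo 0x1 > /sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask
 *
 * keeps ring 0 schedulable and marks ring 1 as not ready, so new gfx jobs are
 * only submitted to ring 0. Reading the file returns the current mask in hex.
 */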
2192 * debugfs to enable/disable compute job submission to a specific core.
2194 #if defined(CONFIG_DEBUG_FS)
2195 static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
2197 struct amdgpu_device *adev = (struct amdgpu_device *)data;
2200 struct amdgpu_ring *ring;
2205 mask = (1ULL << adev->gfx.num_compute_rings) - 1;
2206 if ((val & mask) == 0)
2209 for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
2210 ring = &adev->gfx.compute_ring[i];
2212 ring->sched.ready = true;
2214 ring->sched.ready = false;
2217 /* make the sched.ready flag update visible immediately across all CPUs */
2222 static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
2224 struct amdgpu_device *adev = (struct amdgpu_device *)data;
2227 struct amdgpu_ring *ring;
2231 for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
2232 ring = &adev->gfx.compute_ring[i];
2233 if (ring->sched.ready)
2241 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops,
2242 amdgpu_debugfs_compute_sched_mask_get,
2243 amdgpu_debugfs_compute_sched_mask_set, "%llx\n");
2247 void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
2249 #if defined(CONFIG_DEBUG_FS)
2250 struct drm_minor *minor = adev_to_drm(adev)->primary;
2251 struct dentry *root = minor->debugfs_root;
2254 if (!(adev->gfx.num_compute_rings > 1))
2256 sprintf(name, "amdgpu_compute_sched_mask");
2257 debugfs_create_file(name, 0600, root, adev,
2258 &amdgpu_debugfs_compute_sched_mask_fops);