2 * Copyright 2014 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/firmware.h>
28 #include "amdgpu_gfx.h"
29 #include "amdgpu_rlc.h"
30 #include "amdgpu_ras.h"
31 #include "amdgpu_xcp.h"
33 /* delay 0.1 second before enabling the gfx off feature */
34 #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
36 #define GFX_OFF_NO_DELAY 0
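/* request that the gfx off feature be enabled immediately (no delay) */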
39 * GPU GFX IP block helper functions.
42 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
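/* Flatten (mec, pipe, queue) into a single queue bit:
 * bit = (mec * num_pipe_per_mec + pipe) * num_queue_per_pipe + queue
 */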
47 bit += mec * adev->gfx.mec.num_pipe_per_mec
48 * adev->gfx.mec.num_queue_per_pipe;
49 bit += pipe * adev->gfx.mec.num_queue_per_pipe;
55 void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
56 int *mec, int *pipe, int *queue)
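/* Inverse of amdgpu_gfx_mec_queue_to_bit(): recover (mec, pipe, queue) from a flat queue bit. */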
58 *queue = bit % adev->gfx.mec.num_queue_per_pipe;
59 *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
60 % adev->gfx.mec.num_pipe_per_mec;
61 *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
62 / adev->gfx.mec.num_pipe_per_mec;
66 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
67 int xcc_id, int mec, int pipe, int queue)
69 return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
70 adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
73 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
74 int me, int pipe, int queue)
78 bit += me * adev->gfx.me.num_pipe_per_me
79 * adev->gfx.me.num_queue_per_pipe;
80 bit += pipe * adev->gfx.me.num_queue_per_pipe;
86 void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
87 int *me, int *pipe, int *queue)
89 *queue = bit % adev->gfx.me.num_queue_per_pipe;
90 *pipe = (bit / adev->gfx.me.num_queue_per_pipe)
91 % adev->gfx.me.num_pipe_per_me;
92 *me = (bit / adev->gfx.me.num_queue_per_pipe)
93 / adev->gfx.me.num_pipe_per_me;
96 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
97 int me, int pipe, int queue)
99 return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
100 adev->gfx.me.queue_bitmap);
104 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
106 * @mask: array in which the per-shader array disable masks will be stored
107 * @max_se: number of shader engines (SEs)
108 * @max_sh: number of shader arrays (SHs) per SE
110 * The bitmask of CUs to be disabled in the shader array determined by se and
111 * sh is stored in mask[se * max_sh + sh].
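 *
 * For example, a module parameter of "1.0.3" (illustrative value) disables CU 3
 * in SE 1 / SH 0, i.e. it sets bit 3 in mask[1 * max_sh + 0]; multiple entries
 * are separated by commas.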
113 void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
115 unsigned int se, sh, cu;
118 memset(mask, 0, sizeof(*mask) * max_se * max_sh);
120 if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
123 p = amdgpu_disable_cu;
126 int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
129 DRM_ERROR("amdgpu: could not parse disable_cu\n");
133 if (se < max_se && sh < max_sh && cu < 16) {
134 DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
135 mask[se * max_sh + sh] |= 1u << cu;
137 DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
141 next = strchr(p, ',');
148 static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
150 return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
153 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
155 if (amdgpu_compute_multipipe != -1) {
156 DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
157 amdgpu_compute_multipipe);
158 return amdgpu_compute_multipipe == 1;
161 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
164 /* FIXME: spreading the queues across pipes causes perf regressions
165 * on POLARIS11 compute workloads */
166 if (adev->asic_type == CHIP_POLARIS11)
169 return adev->gfx.mec.num_mec > 1;
172 bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
173 struct amdgpu_ring *ring)
175 int queue = ring->queue;
176 int pipe = ring->pipe;
178 /* Policy: use pipe1 queue0 as high priority graphics queue if we
179 * have more than one gfx pipe.
181 if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
182 adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
186 bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
187 if (ring == &adev->gfx.gfx_ring[bit])
194 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
195 struct amdgpu_ring *ring)
197 /* Policy: use 1st queue as high priority compute queue if we
198 * have more than one compute queue.
200 if (adev->gfx.num_compute_rings > 1 &&
201 ring == &adev->gfx.compute_ring[0])
207 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
209 int i, j, queue, pipe;
210 bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
211 int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
212 adev->gfx.mec.num_queue_per_pipe,
213 adev->gfx.num_compute_rings);
214 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
216 if (multipipe_policy) {
217 /* policy: spread queues evenly across all pipes on MEC1 only;
218 * for multiple XCCs, just reuse the original policy for simplicity */
219 for (j = 0; j < num_xcc; j++) {
220 for (i = 0; i < max_queues_per_mec; i++) {
221 pipe = i % adev->gfx.mec.num_pipe_per_mec;
222 queue = (i / adev->gfx.mec.num_pipe_per_mec) %
223 adev->gfx.mec.num_queue_per_pipe;
225 set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
226 adev->gfx.mec_bitmap[j].queue_bitmap);
230 /* policy: amdgpu owns all queues in the given pipe */
231 for (j = 0; j < num_xcc; j++) {
232 for (i = 0; i < max_queues_per_mec; ++i)
233 set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
237 for (j = 0; j < num_xcc; j++) {
238 dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
239 bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
243 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
246 bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
247 int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
248 adev->gfx.me.num_queue_per_pipe;
250 if (multipipe_policy) {
251 /* policy: amdgpu owns the first queue per pipe at this stage;
252 * this will be extended to multiple queues per pipe later */
253 for (i = 0; i < max_queues_per_me; i++) {
254 pipe = i % adev->gfx.me.num_pipe_per_me;
255 queue = (i / adev->gfx.me.num_pipe_per_me) %
256 adev->gfx.me.num_queue_per_pipe;
258 set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
259 adev->gfx.me.queue_bitmap);
262 for (i = 0; i < max_queues_per_me; ++i)
263 set_bit(i, adev->gfx.me.queue_bitmap);
266 /* update the number of active graphics rings */
267 adev->gfx.num_gfx_rings =
268 bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
271 static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
272 struct amdgpu_ring *ring, int xcc_id)
275 int mec, pipe, queue;
277 queue_bit = adev->gfx.mec.num_mec
278 * adev->gfx.mec.num_pipe_per_mec
279 * adev->gfx.mec.num_queue_per_pipe;
281 while (--queue_bit >= 0) {
282 if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
285 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
288 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
289 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
290 * can only be issued on queue 0.
292 if ((mec == 1 && pipe > 1) || queue != 0)
302 dev_err(adev->dev, "Failed to find a queue for KIQ\n");
306 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
307 struct amdgpu_ring *ring,
308 struct amdgpu_irq_src *irq, int xcc_id)
310 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
313 spin_lock_init(&kiq->ring_lock);
316 ring->ring_obj = NULL;
317 ring->use_doorbell = true;
318 ring->xcc_id = xcc_id;
319 ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
320 ring->doorbell_index =
321 (adev->doorbell_index.kiq +
322 xcc_id * adev->doorbell_index.xcc_doorbell_range)
325 r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
329 ring->eop_gpu_addr = kiq->eop_gpu_addr;
330 ring->no_scheduler = true;
331 sprintf(ring->name, "kiq_%d.%d.%d.%d", xcc_id, ring->me, ring->pipe, ring->queue);
332 r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
333 AMDGPU_RING_PRIO_DEFAULT, NULL);
335 dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
340 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
342 amdgpu_ring_fini(ring);
345 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
347 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
349 amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
352 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
353 unsigned int hpd_size, int xcc_id)
357 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
359 r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
360 AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
361 &kiq->eop_gpu_addr, (void **)&hpd);
363 dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
367 memset(hpd, 0, hpd_size);
369 r = amdgpu_bo_reserve(kiq->eop_obj, true);
370 if (unlikely(r != 0))
371 dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
372 amdgpu_bo_kunmap(kiq->eop_obj);
373 amdgpu_bo_unreserve(kiq->eop_obj);
378 /* create MQD for each compute/gfx queue */
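/* (MQD: Memory Queue Descriptor, the per-queue state block the CP reads when a queue is mapped) */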
379 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
380 unsigned int mqd_size, int xcc_id)
383 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
384 struct amdgpu_ring *ring = &kiq->ring;
385 u32 domain = AMDGPU_GEM_DOMAIN_GTT;
387 /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
388 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
389 domain |= AMDGPU_GEM_DOMAIN_VRAM;
391 /* create MQD for KIQ */
392 if (!adev->enable_mes_kiq && !ring->mqd_obj) {
393 /* Originally the KIQ MQD was placed in the GTT domain, but for SR-IOV the VRAM
394 * domain is a must: otherwise the hypervisor's SAVE_VF fails after the driver is
395 * unloaded, since the MQD has been deallocated and GART-unbound. To avoid this
396 * divergence, use the VRAM domain for the KIQ MQD on both SR-IOV and bare metal.
398 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
399 AMDGPU_GEM_DOMAIN_VRAM |
400 AMDGPU_GEM_DOMAIN_GTT,
405 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
409 /* prepare MQD backup */
410 kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
411 if (!kiq->mqd_backup) {
413 "no memory to create MQD backup for ring %s\n", ring->name);
418 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
419 /* create MQD for each KGQ */
420 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
421 ring = &adev->gfx.gfx_ring[i];
422 if (!ring->mqd_obj) {
423 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
424 domain, &ring->mqd_obj,
425 &ring->mqd_gpu_addr, &ring->mqd_ptr);
427 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
431 ring->mqd_size = mqd_size;
432 /* prepare MQD backup */
433 adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
434 if (!adev->gfx.me.mqd_backup[i]) {
435 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
442 /* create MQD for each KCQ */
443 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
444 j = i + xcc_id * adev->gfx.num_compute_rings;
445 ring = &adev->gfx.compute_ring[j];
446 if (!ring->mqd_obj) {
447 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
448 domain, &ring->mqd_obj,
449 &ring->mqd_gpu_addr, &ring->mqd_ptr);
451 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
455 ring->mqd_size = mqd_size;
456 /* prepare MQD backup */
457 adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
458 if (!adev->gfx.mec.mqd_backup[j]) {
459 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
468 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
470 struct amdgpu_ring *ring = NULL;
472 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
474 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
475 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
476 ring = &adev->gfx.gfx_ring[i];
477 kfree(adev->gfx.me.mqd_backup[i]);
478 amdgpu_bo_free_kernel(&ring->mqd_obj,
484 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
485 j = i + xcc_id * adev->gfx.num_compute_rings;
486 ring = &adev->gfx.compute_ring[j];
487 kfree(adev->gfx.mec.mqd_backup[j]);
488 amdgpu_bo_free_kernel(&ring->mqd_obj,
494 kfree(kiq->mqd_backup);
495 amdgpu_bo_free_kernel(&ring->mqd_obj,
500 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
502 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
503 struct amdgpu_ring *kiq_ring = &kiq->ring;
507 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
510 spin_lock(&kiq->ring_lock);
511 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
512 adev->gfx.num_compute_rings)) {
513 spin_unlock(&kiq->ring_lock);
517 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
518 j = i + xcc_id * adev->gfx.num_compute_rings;
519 kiq->pmf->kiq_unmap_queues(kiq_ring,
520 &adev->gfx.compute_ring[j],
524 if (kiq_ring->sched.ready && !adev->job_hang)
525 r = amdgpu_ring_test_helper(kiq_ring);
526 spin_unlock(&kiq->ring_lock);
531 int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
533 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
534 struct amdgpu_ring *kiq_ring = &kiq->ring;
538 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
541 spin_lock(&kiq->ring_lock);
542 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
543 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
544 adev->gfx.num_gfx_rings)) {
545 spin_unlock(&kiq->ring_lock);
549 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
550 j = i + xcc_id * adev->gfx.num_gfx_rings;
551 kiq->pmf->kiq_unmap_queues(kiq_ring,
552 &adev->gfx.gfx_ring[j],
553 PREEMPT_QUEUES, 0, 0);
557 if (kiq_ring->sched.ready && !adev->job_hang)
558 r = amdgpu_ring_test_helper(kiq_ring);
559 spin_unlock(&kiq->ring_lock);
564 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
567 int mec, pipe, queue;
568 int set_resource_bit = 0;
570 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
572 set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
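/* the SET_RESOURCES queue numbering assumes 4 pipes per MEC and 8 queues per pipe */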
574 return set_resource_bit;
577 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
579 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
580 struct amdgpu_ring *kiq_ring = &kiq->ring;
581 uint64_t queue_mask = 0;
584 if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
587 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
588 if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
591 /* This situation may be hit in the future if a new HW
592 * generation exposes more than 64 queues. If so, the
593 * definition of queue_mask needs updating */
594 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
595 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
599 queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
602 DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
604 amdgpu_device_flush_hdp(adev, NULL);
606 spin_lock(&kiq->ring_lock);
607 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
608 adev->gfx.num_compute_rings +
609 kiq->pmf->set_resources_size);
611 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
612 spin_unlock(&kiq->ring_lock);
616 if (adev->enable_mes)
619 kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
620 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
621 j = i + xcc_id * adev->gfx.num_compute_rings;
622 kiq->pmf->kiq_map_queues(kiq_ring,
623 &adev->gfx.compute_ring[j]);
626 r = amdgpu_ring_test_helper(kiq_ring);
627 spin_unlock(&kiq->ring_lock);
629 DRM_ERROR("KCQ enable failed\n");
634 int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
636 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
637 struct amdgpu_ring *kiq_ring = &kiq->ring;
640 if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
643 amdgpu_device_flush_hdp(adev, NULL);
645 spin_lock(&kiq->ring_lock);
646 /* No need to map kgq on the slave */
647 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
648 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
649 adev->gfx.num_gfx_rings);
651 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
652 spin_unlock(&kiq->ring_lock);
656 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
657 j = i + xcc_id * adev->gfx.num_gfx_rings;
658 kiq->pmf->kiq_map_queues(kiq_ring,
659 &adev->gfx.gfx_ring[j]);
663 r = amdgpu_ring_test_helper(kiq_ring);
664 spin_unlock(&kiq->ring_lock);
666 DRM_ERROR("KGQ enable failed\n");
671 /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
673 * @adev: amdgpu_device pointer
674 * @enable: true to enable the gfx off feature, false to disable it
676 * 1. The gfx off feature will be enabled by the gfx ip after gfx cg/pg is enabled.
677 * 2. Other clients can send requests to disable the gfx off feature; such requests should be honored.
678 * 3. Other clients can cancel their requests to disable the gfx off feature.
679 * 4. Other clients should not request enabling the gfx off feature before having requested to disable it.
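 *
 * A typical caller disables GFXOFF with amdgpu_gfx_off_ctrl(adev, false) before
 * touching GFX registers and re-enables it with amdgpu_gfx_off_ctrl(adev, true)
 * when done; the requests are reference counted via gfx_off_req_count below.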
682 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
684 unsigned long delay = GFX_OFF_DELAY_ENABLE;
686 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
689 mutex_lock(&adev->gfx.gfx_off_mutex);
692 /* If the count is already 0, it means there's an imbalance bug somewhere.
693 * Note that the bug may be in a different caller than the one which triggers the warning.
696 if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
699 adev->gfx.gfx_off_req_count--;
701 if (adev->gfx.gfx_off_req_count == 0 &&
702 !adev->gfx.gfx_off_state) {
703 /* If going to s2idle, no need to wait */
705 if (!amdgpu_dpm_set_powergating_by_smu(adev,
706 AMD_IP_BLOCK_TYPE_GFX, true))
707 adev->gfx.gfx_off_state = true;
709 schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
714 if (adev->gfx.gfx_off_req_count == 0) {
715 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
717 if (adev->gfx.gfx_off_state &&
718 !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
719 adev->gfx.gfx_off_state = false;
721 if (adev->gfx.funcs->init_spm_golden) {
723 "GFXOFF is disabled, re-init SPM golden settings\n");
724 amdgpu_gfx_init_spm_golden(adev);
729 adev->gfx.gfx_off_req_count++;
733 mutex_unlock(&adev->gfx.gfx_off_mutex);
736 int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
740 mutex_lock(&adev->gfx.gfx_off_mutex);
742 r = amdgpu_dpm_set_residency_gfxoff(adev, value);
744 mutex_unlock(&adev->gfx.gfx_off_mutex);
749 int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
753 mutex_lock(&adev->gfx.gfx_off_mutex);
755 r = amdgpu_dpm_get_residency_gfxoff(adev, value);
757 mutex_unlock(&adev->gfx.gfx_off_mutex);
762 int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
766 mutex_lock(&adev->gfx.gfx_off_mutex);
768 r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
770 mutex_unlock(&adev->gfx.gfx_off_mutex);
775 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
780 mutex_lock(&adev->gfx.gfx_off_mutex);
782 r = amdgpu_dpm_get_status_gfxoff(adev, value);
784 mutex_unlock(&adev->gfx.gfx_off_mutex);
789 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
793 if (amdgpu_ras_is_supported(adev, ras_block->block)) {
794 if (!amdgpu_persistent_edc_harvesting_supported(adev))
795 amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
797 r = amdgpu_ras_block_late_init(adev, ras_block);
801 if (adev->gfx.cp_ecc_error_irq.funcs) {
802 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
807 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
812 amdgpu_ras_block_late_fini(adev, ras_block);
816 int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
819 struct amdgpu_gfx_ras *ras = NULL;
821 /* If adev->gfx.ras is NULL, gfx does not support
822 * the ras function, so do nothing here.
829 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
831 dev_err(adev->dev, "Failed to register gfx ras block!\n");
835 strcpy(ras->ras_block.ras_comm.name, "gfx");
836 ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
837 ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
838 adev->gfx.ras_if = &ras->ras_block.ras_comm;
840 /* If no special ras_late_init function is defined, use the default gfx ras_late_init */
841 if (!ras->ras_block.ras_late_init)
842 ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
844 /* If no special ras_cb function is defined, use the default ras_cb */
845 if (!ras->ras_block.ras_cb)
846 ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
851 int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
852 struct amdgpu_iv_entry *entry)
854 if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
855 return adev->gfx.ras->poison_consumption_handler(adev, entry);
860 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
862 struct amdgpu_iv_entry *entry)
864 /* TODO: a UE will trigger an interrupt.
866 * When "Full RAS" is enabled, the per-IP interrupt sources should
867 * be disabled and the driver should only look for the aggregated
868 * interrupt via sync flood.
870 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
871 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
872 if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
873 adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
874 adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
875 amdgpu_ras_reset_gpu(adev);
877 return AMDGPU_RAS_SUCCESS;
880 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
881 struct amdgpu_irq_src *source,
882 struct amdgpu_iv_entry *entry)
884 struct ras_common_if *ras_if = adev->gfx.ras_if;
885 struct ras_dispatch_if ih_data = {
892 ih_data.head = *ras_if;
894 DRM_ERROR("CP ECC ERROR IRQ\n");
895 amdgpu_ras_interrupt_dispatch(adev, &ih_data);
899 void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
900 void *ras_error_status,
901 void (*func)(struct amdgpu_device *adev, void *ras_error_status,
905 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
906 uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
907 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
910 err_data->ue_count = 0;
911 err_data->ce_count = 0;
914 for_each_inst(i, xcc_mask)
915 func(adev, ras_error_status, i);
918 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
920 signed long r, cnt = 0;
922 uint32_t seq, reg_val_offs = 0, value = 0;
923 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
924 struct amdgpu_ring *ring = &kiq->ring;
926 if (amdgpu_device_skip_hw_access(adev))
929 if (adev->mes.ring.sched.ready)
930 return amdgpu_mes_rreg(adev, reg);
932 BUG_ON(!ring->funcs->emit_rreg);
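/* read sequence: reserve a write-back slot, have the KIQ copy the register
 * value into it, then poll the fence until the copy has completed */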
934 spin_lock_irqsave(&kiq->ring_lock, flags);
935 if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
936 pr_err("critical bug! too many kiq readers\n");
939 amdgpu_ring_alloc(ring, 32);
940 amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
941 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
945 amdgpu_ring_commit(ring);
946 spin_unlock_irqrestore(&kiq->ring_lock, flags);
948 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
950 /* don't wait any longer in the gpu reset case, because doing so may
951 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
952 * is triggered from TTM and ttm_bo_lock_delayed_workqueue() will
953 * never return if we keep waiting in virt_kiq_rreg, which causes
954 * gpu_recover() to hang there.
956 * also don't wait any longer when in IRQ context
958 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
959 goto failed_kiq_read;
962 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
963 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
964 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
967 if (cnt > MAX_KIQ_REG_TRY)
968 goto failed_kiq_read;
971 value = adev->wb.wb[reg_val_offs];
972 amdgpu_device_wb_free(adev, reg_val_offs);
976 amdgpu_ring_undo(ring);
978 spin_unlock_irqrestore(&kiq->ring_lock, flags);
981 amdgpu_device_wb_free(adev, reg_val_offs);
982 dev_err(adev->dev, "failed to read reg:%x\n", reg);
986 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
988 signed long r, cnt = 0;
991 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
992 struct amdgpu_ring *ring = &kiq->ring;
994 BUG_ON(!ring->funcs->emit_wreg);
996 if (amdgpu_device_skip_hw_access(adev))
999 if (adev->mes.ring.sched.ready) {
1000 amdgpu_mes_wreg(adev, reg, v);
1004 spin_lock_irqsave(&kiq->ring_lock, flags);
1005 amdgpu_ring_alloc(ring, 32);
1006 amdgpu_ring_emit_wreg(ring, reg, v);
1007 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1011 amdgpu_ring_commit(ring);
1012 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1014 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1016 /* don't wait any longer in the gpu reset case, because doing so may
1017 * block the gpu_recover() routine forever; e.g. this virt_kiq_wreg
1018 * is triggered from TTM and ttm_bo_lock_delayed_workqueue() will
1019 * never return if we keep waiting in virt_kiq_wreg, which causes
1020 * gpu_recover() to hang there.
1022 * also don't wait any longer when in IRQ context
1024 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1025 goto failed_kiq_write;
1028 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1030 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1031 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1034 if (cnt > MAX_KIQ_REG_TRY)
1035 goto failed_kiq_write;
1040 amdgpu_ring_undo(ring);
1041 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1043 dev_err(adev->dev, "failed to write reg:%x\n", reg);
1046 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
1048 if (amdgpu_num_kcq == -1) {
1050 } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
1051 dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
1054 return amdgpu_num_kcq;
1057 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
1060 const struct gfx_firmware_header_v1_0 *cp_hdr;
1061 const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
1062 struct amdgpu_firmware_info *info = NULL;
1063 const struct firmware *ucode_fw;
1064 unsigned int fw_size;
1067 case AMDGPU_UCODE_ID_CP_PFP:
1068 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1069 adev->gfx.pfp_fw->data;
1070 adev->gfx.pfp_fw_version =
1071 le32_to_cpu(cp_hdr->header.ucode_version);
1072 adev->gfx.pfp_feature_version =
1073 le32_to_cpu(cp_hdr->ucode_feature_version);
1074 ucode_fw = adev->gfx.pfp_fw;
1075 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1077 case AMDGPU_UCODE_ID_CP_RS64_PFP:
1078 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1079 adev->gfx.pfp_fw->data;
1080 adev->gfx.pfp_fw_version =
1081 le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1082 adev->gfx.pfp_feature_version =
1083 le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1084 ucode_fw = adev->gfx.pfp_fw;
1085 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1087 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
1088 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
1089 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1090 adev->gfx.pfp_fw->data;
1091 ucode_fw = adev->gfx.pfp_fw;
1092 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1094 case AMDGPU_UCODE_ID_CP_ME:
1095 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1096 adev->gfx.me_fw->data;
1097 adev->gfx.me_fw_version =
1098 le32_to_cpu(cp_hdr->header.ucode_version);
1099 adev->gfx.me_feature_version =
1100 le32_to_cpu(cp_hdr->ucode_feature_version);
1101 ucode_fw = adev->gfx.me_fw;
1102 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1104 case AMDGPU_UCODE_ID_CP_RS64_ME:
1105 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1106 adev->gfx.me_fw->data;
1107 adev->gfx.me_fw_version =
1108 le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1109 adev->gfx.me_feature_version =
1110 le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1111 ucode_fw = adev->gfx.me_fw;
1112 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1114 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
1115 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
1116 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1117 adev->gfx.me_fw->data;
1118 ucode_fw = adev->gfx.me_fw;
1119 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1121 case AMDGPU_UCODE_ID_CP_CE:
1122 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1123 adev->gfx.ce_fw->data;
1124 adev->gfx.ce_fw_version =
1125 le32_to_cpu(cp_hdr->header.ucode_version);
1126 adev->gfx.ce_feature_version =
1127 le32_to_cpu(cp_hdr->ucode_feature_version);
1128 ucode_fw = adev->gfx.ce_fw;
1129 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1131 case AMDGPU_UCODE_ID_CP_MEC1:
1132 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1133 adev->gfx.mec_fw->data;
1134 adev->gfx.mec_fw_version =
1135 le32_to_cpu(cp_hdr->header.ucode_version);
1136 adev->gfx.mec_feature_version =
1137 le32_to_cpu(cp_hdr->ucode_feature_version);
1138 ucode_fw = adev->gfx.mec_fw;
1139 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1140 le32_to_cpu(cp_hdr->jt_size) * 4;
1142 case AMDGPU_UCODE_ID_CP_MEC1_JT:
1143 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1144 adev->gfx.mec_fw->data;
1145 ucode_fw = adev->gfx.mec_fw;
1146 fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1148 case AMDGPU_UCODE_ID_CP_MEC2:
1149 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1150 adev->gfx.mec2_fw->data;
1151 adev->gfx.mec2_fw_version =
1152 le32_to_cpu(cp_hdr->header.ucode_version);
1153 adev->gfx.mec2_feature_version =
1154 le32_to_cpu(cp_hdr->ucode_feature_version);
1155 ucode_fw = adev->gfx.mec2_fw;
1156 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1157 le32_to_cpu(cp_hdr->jt_size) * 4;
1159 case AMDGPU_UCODE_ID_CP_MEC2_JT:
1160 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1161 adev->gfx.mec2_fw->data;
1162 ucode_fw = adev->gfx.mec2_fw;
1163 fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1165 case AMDGPU_UCODE_ID_CP_RS64_MEC:
1166 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1167 adev->gfx.mec_fw->data;
1168 adev->gfx.mec_fw_version =
1169 le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1170 adev->gfx.mec_feature_version =
1171 le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1172 ucode_fw = adev->gfx.mec_fw;
1173 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1175 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
1176 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
1177 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
1178 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
1179 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1180 adev->gfx.mec_fw->data;
1181 ucode_fw = adev->gfx.mec_fw;
1182 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1188 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1189 info = &adev->firmware.ucode[ucode_id];
1190 info->ucode_id = ucode_id;
1191 info->fw = ucode_fw;
1192 adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
1196 bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
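/* the first XCC of each partition (xcc_id being a multiple of num_xcc_per_xcp) acts as the master */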
1198 return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
1199 adev->gfx.num_xcc_per_xcp : 1));
1202 static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
1203 struct device_attribute *addr,
1206 struct drm_device *ddev = dev_get_drvdata(dev);
1207 struct amdgpu_device *adev = drm_to_adev(ddev);
1210 mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
1211 AMDGPU_XCP_FL_NONE);
1213 return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
1216 static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
1217 struct device_attribute *addr,
1218 const char *buf, size_t count)
1220 struct drm_device *ddev = dev_get_drvdata(dev);
1221 struct amdgpu_device *adev = drm_to_adev(ddev);
1222 enum amdgpu_gfx_partition mode;
1223 int ret = 0, num_xcc;
1225 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1226 if (num_xcc % 2 != 0)
1229 if (!strncasecmp("SPX", buf, strlen("SPX"))) {
1230 mode = AMDGPU_SPX_PARTITION_MODE;
1231 } else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
1233 * DPX mode needs the number of AIDs to be a multiple of 2;
1234 * each AID connects 2 XCCs.
1238 mode = AMDGPU_DPX_PARTITION_MODE;
1239 } else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
1242 mode = AMDGPU_TPX_PARTITION_MODE;
1243 } else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
1246 mode = AMDGPU_QPX_PARTITION_MODE;
1247 } else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
1248 mode = AMDGPU_CPX_PARTITION_MODE;
1253 ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
1261 static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
1262 struct device_attribute *addr,
1265 struct drm_device *ddev = dev_get_drvdata(dev);
1266 struct amdgpu_device *adev = drm_to_adev(ddev);
1267 char *supported_partition;
1270 switch (NUM_XCC(adev->gfx.xcc_mask)) {
1272 supported_partition = "SPX, DPX, QPX, CPX";
1275 supported_partition = "SPX, TPX, CPX";
1278 supported_partition = "SPX, DPX, CPX";
1280 /* this seems to exist only in the emulation phase */
1282 supported_partition = "SPX, CPX";
1285 supported_partition = "Not supported";
1289 return sysfs_emit(buf, "%s\n", supported_partition);
1292 static DEVICE_ATTR(current_compute_partition, 0644,
1293 amdgpu_gfx_get_current_compute_partition,
1294 amdgpu_gfx_set_compute_partition);
1296 static DEVICE_ATTR(available_compute_partition, 0444,
1297 amdgpu_gfx_get_available_compute_partition, NULL);
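/* Illustrative usage: 'cat available_compute_partition' lists the supported modes,
 * and 'echo CPX > current_compute_partition' requests a switch to CPX mode. */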
1299 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
1303 r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
1307 r = device_create_file(adev->dev, &dev_attr_available_compute_partition);
1312 void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
1314 device_remove_file(adev->dev, &dev_attr_current_compute_partition);
1315 device_remove_file(adev->dev, &dev_attr_available_compute_partition);