Merge tag 'drm-next-2020-10-19' of git://anongit.freedesktop.org/drm/drm
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 78d37f92c7be53eb82f006f4d0da95519d3dcecf..8c9bacfdbc300c751e7fe3713548acd12e984882 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -202,40 +202,29 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
 
 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
 {
-       int i, queue, pipe, mec;
+       int i, queue, pipe;
        bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
-
-       /* policy for amdgpu compute queue ownership */
-       for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
-               queue = i % adev->gfx.mec.num_queue_per_pipe;
-               pipe = (i / adev->gfx.mec.num_queue_per_pipe)
-                       % adev->gfx.mec.num_pipe_per_mec;
-               mec = (i / adev->gfx.mec.num_queue_per_pipe)
-                       / adev->gfx.mec.num_pipe_per_mec;
-
-               /* we've run out of HW */
-               if (mec >= adev->gfx.mec.num_mec)
-                       break;
-
-               if (multipipe_policy) {
-                       /* policy: amdgpu owns the first two queues of the first MEC */
-                       if (mec == 0 && queue < 2)
-                               set_bit(i, adev->gfx.mec.queue_bitmap);
-               } else {
-                       /* policy: amdgpu owns all queues in the first pipe */
-                       if (mec == 0 && pipe == 0)
-                               set_bit(i, adev->gfx.mec.queue_bitmap);
+       int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
+                                    adev->gfx.mec.num_queue_per_pipe,
+                                    adev->gfx.num_compute_rings);
+
+       if (multipipe_policy) {
+               /* policy: make queues evenly cross all pipes on MEC1 only */
+               for (i = 0; i < max_queues_per_mec; i++) {
+                       pipe = i % adev->gfx.mec.num_pipe_per_mec;
+                       queue = (i / adev->gfx.mec.num_pipe_per_mec) %
+                               adev->gfx.mec.num_queue_per_pipe;
+
+                       set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
+                                       adev->gfx.mec.queue_bitmap);
                }
+       } else {
+               /* policy: amdgpu owns all queues in the given pipe */
+               for (i = 0; i < max_queues_per_mec; ++i)
+                       set_bit(i, adev->gfx.mec.queue_bitmap);
        }
 
-       /* update the number of active compute rings */
-       adev->gfx.num_compute_rings =
-               bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
-
-       /* If you hit this case and edited the policy, you probably just
-        * need to increase AMDGPU_MAX_COMPUTE_RINGS */
-       if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
-               adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+       dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
 }
 
 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
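As a worked illustration of the new acquisition policy (a standalone sketch, not driver code): the pipe/queue counts below are made-up example values, 4 pipes per MEC with 8 queues per pipe and 8 enabled compute rings, rather than anything read from a real adev, but the index arithmetic mirrors the multipipe branch above.

#include <stdio.h>

/* Example values only; the driver reads these from adev->gfx.mec
 * and adev->gfx.num_compute_rings. */
#define NUM_PIPE_PER_MEC	4
#define NUM_QUEUE_PER_PIPE	8
#define NUM_COMPUTE_RINGS	8

int main(void)
{
	int max_queues_per_mec = NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE;
	int i, pipe, queue;

	if (NUM_COMPUTE_RINGS < max_queues_per_mec)
		max_queues_per_mec = NUM_COMPUTE_RINGS;

	/* multipipe policy: spread the rings round-robin across the
	 * pipes of MEC1, one queue per pipe before wrapping around */
	for (i = 0; i < max_queues_per_mec; i++) {
		pipe = i % NUM_PIPE_PER_MEC;
		queue = (i / NUM_PIPE_PER_MEC) % NUM_QUEUE_PER_PIPE;
		printf("ring %d -> pipe %d queue %d (bitmap bit %d)\n",
		       i, pipe, queue, pipe * NUM_QUEUE_PER_PIPE + queue);
	}
	return 0;
}

With these numbers the eight rings land on bitmap bits 0, 8, 16, 24, 1, 9, 17, 25 (queue 0 of every pipe first, then queue 1), whereas the single-pipe branch would simply set bits 0..7 on pipe 0.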
@@ -571,8 +560,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
        if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
                schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
        } else if (!enable && adev->gfx.gfx_off_state) {
-               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false))
+               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
                        adev->gfx.gfx_off_state = false;
+
+                       if (adev->gfx.funcs->init_spm_golden) {
+                               dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
+                               amdgpu_gfx_init_spm_golden(adev);
+                       }
+               }
        }
 
        mutex_unlock(&adev->gfx.gfx_off_mutex);
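The NULL check on adev->gfx.funcs->init_spm_golden keeps ASICs that do not provide the callback from dereferencing a null pointer; amdgpu_gfx_init_spm_golden() is assumed here to be the usual thin wrapper around that function pointer, roughly of the following shape (a sketch, not copied from amdgpu_gfx.h):

/* Presumed wrapper used by the hunk above; the real definition may differ. */
#define amdgpu_gfx_init_spm_golden(adev) \
	(adev)->gfx.funcs->init_spm_golden((adev))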
@@ -698,6 +693,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
 
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        BUG_ON(!ring->funcs->emit_rreg);
 
        spin_lock_irqsave(&kiq->ring_lock, flags);
@@ -724,7 +722,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
         *
         * also don't wait anymore for IRQ context
         * */
-       if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+       if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
                goto failed_kiq_read;
 
        might_sleep();
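This hunk and the matching one in amdgpu_kiq_wreg() switch from reading adev->in_gpu_reset directly to the amdgpu_in_reset() accessor; a minimal sketch of that helper, assuming the field has become an atomic_t as in the surrounding series, would look like:

/* Sketch of the assumed accessor; the real helper lives in amdgpu.h. */
static inline bool amdgpu_in_reset(struct amdgpu_device *adev)
{
	return atomic_read(&adev->in_gpu_reset) ? true : false;
}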
@@ -748,7 +746,7 @@ failed_unlock:
 failed_kiq_read:
        if (reg_val_offs)
                amdgpu_device_wb_free(adev, reg_val_offs);
-       pr_err("failed to read reg:%x\n", reg);
+       dev_err(adev->dev, "failed to read reg:%x\n", reg);
        return ~0;
 }
 
@@ -762,6 +760,9 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 
        BUG_ON(!ring->funcs->emit_wreg);
 
+       if (adev->in_pci_err_recovery)
+               return;
+
        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
@@ -782,7 +783,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
         *
         * also don't wait anymore for IRQ context
         * */
-       if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+       if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
                goto failed_kiq_write;
 
        might_sleep();
@@ -801,5 +802,5 @@ failed_undo:
        amdgpu_ring_undo(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
 failed_kiq_write:
-       pr_err("failed to write reg:%x\n", reg);
+       dev_err(adev->dev, "failed to write reg:%x\n", reg);
 }