Merge branch 'drm-next-5.1' of git://people.freedesktop.org/~agd5f/linux into drm...
author Dave Airlie <[email protected]>
Tue, 12 Mar 2019 05:11:40 +0000 (15:11 +1000)
committer Dave Airlie <[email protected]>
Tue, 12 Mar 2019 05:11:50 +0000 (15:11 +1000)
Fixes for 5.1:
- Powerplay fixes
- DC fixes
- Fix locking around indirect register access in some cases
- KFD MQD fix
- Disable BACO for vega20 for now (fixes pending)

Signed-off-by: Dave Airlie <[email protected]>
From: Alex Deucher <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

index ead851413c0aa054c6c8e6a74d23a7cc739a5fa9,12d51d96491e0b71e28fe97017ab601c0d794c8d..bfa9062ce6b9fed957a5e52c592dc57ff257a02e
@@@ -652,14 -652,12 +652,14 @@@ void amdgpu_vm_move_to_lru_tail(struct 
        struct ttm_bo_global *glob = adev->mman.bdev.glob;
        struct amdgpu_vm_bo_base *bo_base;
  
 +#if 0
        if (vm->bulk_moveable) {
                spin_lock(&glob->lru_lock);
                ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
                spin_unlock(&glob->lru_lock);
                return;
        }
 +#endif
  
        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
  
@@@ -947,10 -945,6 +947,6 @@@ int amdgpu_vm_alloc_pts(struct amdgpu_d
                if (r)
                        return r;
  
-               r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
-               if (r)
-                       goto error_free_pt;
                if (vm->use_cpu_for_update) {
                        r = amdgpu_bo_kmap(pt, NULL);
                        if (r)
                pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
  
                amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+               r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
+               if (r)
+                       goto error_free_pt;
        }
  
        return 0;
@@@ -3033,13 -3031,14 +3033,14 @@@ int amdgpu_vm_init(struct amdgpu_devic
        if (r)
                goto error_unreserve;
  
+       amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
        r = amdgpu_vm_clear_bo(adev, vm, root,
                               adev->vm_manager.root_level,
                               vm->pte_support_ats);
        if (r)
                goto error_unreserve;
  
-       amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
        amdgpu_bo_unreserve(vm->root.base.bo);
  
        if (pasid) {
index 2f26581b93ff5c4bacf77f3ca9d5619a2d979e32,2e7f4d2ae73a24e2df6334485a85ede2e17ff3c9..fb27783d7a542d565e1c002d03fc051d055039be
@@@ -303,11 -303,12 +303,11 @@@ static void dm_pflip_high_irq(void *int
                return;
        }
  
 +      /* Update to correct count(s) if racing with vblank irq */
 +      amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
  
        /* wake up userspace */
        if (amdgpu_crtc->event) {
 -              /* Update to correct count(s) if racing with vblank irq */
 -              drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
 -
                drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
  
                /* page flip completed. clean up */
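For context on the dm_pflip_high_irq change above: the accurate vblank count is now captured into last_flip_vblank on every completed flip, not only when a userspace event is pending, so the flip scheduler later in this diff always has a baseline to throttle against. A minimal standalone sketch of that pattern; the struct and field names below are illustrative, not the driver's actual types, and locking is elided:

#include <stdint.h>

/* Illustrative stand-in for the per-CRTC state; not the amdgpu struct. */
struct crtc_state_sketch {
	uint64_t last_flip_vblank;   /* vblank count at last completed flip */
	int      event_pending;      /* userspace completion event requested? */
};

/* Flip-completion handler: record the count unconditionally, then send the
 * event if one was requested.  Before this patch the accurate count was only
 * taken inside the event branch, so flips submitted without an event left no
 * baseline for later flip scheduling. */
void flip_done_sketch(struct crtc_state_sketch *c, uint64_t accurate_vblank)
{
	c->last_flip_vblank = accurate_vblank;   /* always update */

	if (c->event_pending) {
		/* send the vblank/flip event to userspace here */
		c->event_pending = 0;
	}
}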
@@@ -785,13 -786,12 +785,13 @@@ static int dm_suspend(void *handle
        struct amdgpu_display_manager *dm = &adev->dm;
        int ret = 0;
  
 +      WARN_ON(adev->dm.cached_state);
 +      adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
 +
        s3_handle_mst(adev->ddev, true);
  
        amdgpu_dm_irq_suspend(adev);
  
 -      WARN_ON(adev->dm.cached_state);
 -      adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
  
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
  
@@@ -886,6 -886,7 +886,7 @@@ static void emulated_link_detect(struc
                return;
        }
  
+       /* dc_sink_create returns a new reference */
        link->local_sink = sink;
  
        edid_status = dm_helpers_read_local_edid(
@@@ -952,6 -953,8 +953,8 @@@ static int dm_resume(void *handle
                if (aconnector->fake_enable && aconnector->dc_link->local_sink)
                        aconnector->fake_enable = false;
  
+               if (aconnector->dc_sink)
+                       dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                amdgpu_dm_update_connector_after_detect(aconnector);
                mutex_unlock(&aconnector->hpd_lock);
@@@ -1061,6 -1064,8 +1064,8 @@@ amdgpu_dm_update_connector_after_detect
  
  
        sink = aconnector->dc_link->local_sink;
+       if (sink)
+               dc_sink_retain(sink);
  
        /*
         * Edid mgmt connector gets first update only in mode_valid hook and then
                                 * to it anymore after disconnect, so on next crtc to connector
                                 * reshuffle by UMD we will get into unwanted dc_sink release
                                 */
-                               if (aconnector->dc_sink != aconnector->dc_em_sink)
-                                       dc_sink_release(aconnector->dc_sink);
+                               dc_sink_release(aconnector->dc_sink);
                        }
                        aconnector->dc_sink = sink;
+                       dc_sink_retain(aconnector->dc_sink);
                        amdgpu_dm_update_freesync_caps(connector,
                                        aconnector->edid);
                } else {
                        amdgpu_dm_update_freesync_caps(connector, NULL);
-                       if (!aconnector->dc_sink)
+                       if (!aconnector->dc_sink) {
                                aconnector->dc_sink = aconnector->dc_em_sink;
-                       else if (aconnector->dc_sink != aconnector->dc_em_sink)
                                dc_sink_retain(aconnector->dc_sink);
+                       }
                }
  
                mutex_unlock(&dev->mode_config.mutex);
+               if (sink)
+                       dc_sink_release(sink);
                return;
        }
  
         * TODO: temporary guard to look for proper fix
         * if this sink is MST sink, we should not do anything
         */
-       if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+       if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+               dc_sink_release(sink);
                return;
+       }
  
        if (aconnector->dc_sink == sink) {
                /*
                 */
                DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
                                aconnector->connector_id);
+               if (sink)
+                       dc_sink_release(sink);
                return;
        }
  
                        amdgpu_dm_update_freesync_caps(connector, NULL);
  
                aconnector->dc_sink = sink;
+               dc_sink_retain(aconnector->dc_sink);
                if (sink->dc_edid.length == 0) {
                        aconnector->edid = NULL;
                        drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
                amdgpu_dm_update_freesync_caps(connector, NULL);
                drm_connector_update_edid_property(connector, NULL);
                aconnector->num_modes = 0;
+               dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                aconnector->edid = NULL;
        }
  
        mutex_unlock(&dev->mode_config.mutex);
+       if (sink)
+               dc_sink_release(sink);
  }
  
  static void handle_hpd_irq(void *param)
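Most of the amdgpu_dm.c hunks in this diff follow one reference-counting rule: detection hands back a new reference (see the "dc_sink_create returns a new reference" comment above), every pointer stored in aconnector->dc_sink takes its own reference with dc_sink_retain(), and every path that drops or overwrites a stored pointer, including early returns, calls dc_sink_release(). A minimal standalone sketch of that discipline, using an illustrative refcounted type rather than the real dc_sink:

#include <stdlib.h>

/* Illustrative refcounted sink; the real dc_sink has its own refcounting. */
struct sink_sketch {
	int refcount;
};

struct sink_sketch *sink_create(void)       /* returns a new reference */
{
	struct sink_sketch *s = calloc(1, sizeof(*s));
	if (s)
		s->refcount = 1;
	return s;
}

void sink_retain(struct sink_sketch *s)  { s->refcount++; }
void sink_release(struct sink_sketch *s)
{
	if (--s->refcount == 0)
		free(s);
}

struct connector_sketch {
	struct sink_sketch *sink;   /* stored pointer owns one reference */
};

/* Mirrors the retain/release pattern of amdgpu_dm_update_connector_after_detect:
 * take a temporary reference on the detected sink, retain again when storing
 * it, and drop the temporary reference on every exit path. */
void update_after_detect_sketch(struct connector_sketch *c,
				struct sink_sketch *local_sink)
{
	struct sink_sketch *sink = local_sink;

	if (sink)
		sink_retain(sink);              /* temporary reference */

	if (c->sink == sink) {
		if (sink)
			sink_release(sink);     /* early return drops only the temp ref */
		return;
	}

	if (c->sink)
		sink_release(c->sink);          /* drop the old stored reference */
	c->sink = sink;
	if (c->sink)
		sink_retain(c->sink);           /* stored pointer takes its own reference */

	if (sink)
		sink_release(sink);             /* drop the temporary reference */
}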
@@@ -2977,6 -2994,7 +2994,7 @@@ create_stream_for_sink(struct amdgpu_dm
                        return stream;
        } else {
                sink = aconnector->dc_sink;
+               dc_sink_retain(sink);
        }
  
        stream = dc_create_stream_for_sink(sink);
        update_stream_signal(stream, sink);
  
  finish:
-       if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
-               dc_sink_release(sink);
+       dc_sink_release(sink);
  
        return stream;
  }
@@@ -3301,6 -3318,14 +3318,14 @@@ static void amdgpu_dm_connector_destroy
                dm->backlight_dev = NULL;
        }
  #endif
+       if (aconnector->dc_em_sink)
+               dc_sink_release(aconnector->dc_em_sink);
+       aconnector->dc_em_sink = NULL;
+       if (aconnector->dc_sink)
+               dc_sink_release(aconnector->dc_sink);
+       aconnector->dc_sink = NULL;
        drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
@@@ -3398,10 -3423,12 +3423,12 @@@ static void create_eml_sink(struct amdg
                (edid->extensions + 1) * EDID_LENGTH,
                &init_params);
  
-       if (aconnector->base.force == DRM_FORCE_ON)
+       if (aconnector->base.force == DRM_FORCE_ON) {
                aconnector->dc_sink = aconnector->dc_link->local_sink ?
                aconnector->dc_link->local_sink :
                aconnector->dc_em_sink;
+               dc_sink_retain(aconnector->dc_sink);
+       }
  }
  
  static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
@@@ -4645,8 -4672,6 +4672,8 @@@ static void amdgpu_dm_commit_planes(str
        struct amdgpu_bo *abo;
        uint64_t tiling_flags, dcc_address;
        uint32_t target, target_vblank;
 +      uint64_t last_flip_vblank;
 +      bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
  
        struct {
                struct dc_surface_update surface_updates[MAX_SURFACES];
         * hopefully eliminating dc_*_update structs in their entirety.
         */
        if (flip_count) {
 -              target = (uint32_t)drm_crtc_vblank_count(pcrtc) + *wait_for_vblank;
 +              if (!vrr_active) {
 +                      /* Use old throttling in non-vrr fixed refresh rate mode
 +                       * to keep flip scheduling based on target vblank counts
 +                       * working in a backwards compatible way, e.g., for
 +                       * clients using the GLX_OML_sync_control extension or
 +                       * DRI3/Present extension with defined target_msc.
 +                       */
 +                      last_flip_vblank = drm_crtc_vblank_count(pcrtc);
 +              }
 +              else {
 +                      /* For variable refresh rate mode only:
 +                       * Get vblank of last completed flip to avoid > 1 vrr
 +                       * flips per video frame by use of throttling, but allow
 +                       * flip programming anywhere in the possibly large
 +                       * variable vrr vblank interval for fine-grained flip
 +                       * timing control and more opportunity to avoid stutter
 +                       * on late submission of flips.
 +                       */
 +                      spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
 +                      last_flip_vblank = acrtc_attach->last_flip_vblank;
 +                      spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 +              }
 +
 +              target = (uint32_t)last_flip_vblank + *wait_for_vblank;
 +
                /* Prepare wait for target vblank early - before the fence-waits */
                target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) +
                                amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id);
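To make the throttling arithmetic at the end of this hunk concrete: the baseline is last_flip_vblank (the current DRM software count for fixed refresh, or the count at the last completed flip for VRR), the target is baseline + wait_for_vblank, and that target is then translated into the hardware counter domain, presumably because the software and hardware counters do not share an origin; the unsigned 32-bit subtraction keeps the offset correct across wraparound. A small example with made-up counter values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative counter values, not taken from real hardware. */
	uint64_t last_flip_vblank = 1000;   /* baseline, as chosen in the diff      */
	int      wait_for_vblank  = 1;      /* throttle: wait one vblank            */
	uint32_t sw_vblank_count  = 1000;   /* drm_crtc_vblank_count() right now    */
	uint32_t hw_vblank_count  = 52344;  /* hardware counter, different origin   */

	/* target in the DRM software counter domain */
	uint32_t target = (uint32_t)last_flip_vblank + wait_for_vblank;

	/* same instant expressed in the hardware counter domain; the unsigned
	 * difference (target - sw_vblank_count) is the number of vblanks still
	 * to wait and stays correct across 32-bit wraparound */
	uint32_t target_vblank = target - sw_vblank_count + hw_vblank_count;

	printf("wait until hw vblank %u (currently %u)\n",
	       target_vblank, hw_vblank_count);
	return 0;
}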