Merge tag 'amd-drm-next-6.11-2024-06-22' of https://gitlab.freedesktop.org/agd5f...
author Dave Airlie <[email protected]>
Thu, 27 Jun 2024 07:18:49 +0000 (17:18 +1000)
committer Dave Airlie <[email protected]>
Thu, 27 Jun 2024 07:18:49 +0000 (17:18 +1000)
amd-drm-next-6.11-2024-06-22:

amdgpu:
- HPD fixes
- PSR fixes
- DCC updates
- DCN 4.0.1 fixes
- FAMS fixes
- Misc code cleanups
- SR-IOV fixes
- GPUVM TLB flush cleanups
- Make VCN less verbose
- ACPI backlight fixes
- MES fixes
- Firmware loading cleanups
- Replay fixes
- LTTPR fixes
- Trap handler fixes
- Cursor and overlay fixes
- Primary plane zpos fixes
- DML 2.1 fixes
- RAS updates
- USB4 fixes
- MALL fixes
- Reserved VMID fix
- Silence UBSAN warnings

amdkfd:
- Misc code cleanups

From: Alex Deucher <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
Signed-off-by: Dave Airlie <[email protected]>
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c

index 8de2d05d0735bc7ab3027f912ebe9ed5bc4b0da9,9a92dd3c9fb83081f30126aba1c09432e057d99c..58906bf7448e6164a3151279aa100096a7576219
@@@ -295,8 -295,8 +295,8 @@@ int amdgpu_ttm_copy_mem_to_mem(struct a
        struct amdgpu_res_cursor src_mm, dst_mm;
        struct dma_fence *fence = NULL;
        int r = 0;
        uint32_t copy_flags = 0;
+       struct amdgpu_bo *abo_src, *abo_dst;
  
        if (!adev->mman.buffer_funcs_enabled) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                if (r)
                        goto error;
  
+               abo_src = ttm_to_amdgpu_bo(src->bo);
+               abo_dst = ttm_to_amdgpu_bo(dst->bo);
                if (tmz)
                        copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
+               if (abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+                       copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
+               if (abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+                       copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
  
                r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
                                       &next, false, true, copy_flags);
@@@ -383,12 -389,11 +389,12 @@@ static int amdgpu_move_blit(struct ttm_
            (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
  
 -              r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence,
 -                                      false);
 +              r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
 +                                     false);
                if (r) {
                        goto error;
                } else if (wipe_fence) {
 +                      amdgpu_vram_mgr_set_cleared(bo->resource);
                        dma_fence_put(fence);
                        fence = wipe_fence;
                }
@@@ -2236,71 -2241,6 +2242,71 @@@ static int amdgpu_ttm_fill_mem(struct a
        return 0;
  }
  
 +/**
 + * amdgpu_ttm_clear_buffer - clear memory buffers
 + * @bo: amdgpu buffer object
 + * @resv: reservation object
 + * @fence: dma_fence associated with the operation
 + *
 + * Clear the memory buffer resource.
 + *
 + * Returns:
 + * 0 for success or a negative error code on failure.
 + */
 +int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
 +                          struct dma_resv *resv,
 +                          struct dma_fence **fence)
 +{
 +      struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 +      struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 +      struct amdgpu_res_cursor cursor;
 +      u64 addr;
 +      int r = 0;
 +
 +      if (!adev->mman.buffer_funcs_enabled)
 +              return -EINVAL;
 +
 +      if (!fence)
 +              return -EINVAL;
 +
 +      *fence = dma_fence_get_stub();
 +
 +      amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
 +
 +      mutex_lock(&adev->mman.gtt_window_lock);
 +      while (cursor.remaining) {
 +              struct dma_fence *next = NULL;
 +              u64 size;
 +
 +              if (amdgpu_res_cleared(&cursor)) {
 +                      amdgpu_res_next(&cursor, cursor.size);
 +                      continue;
 +              }
 +
 +              /* Never clear more than 256MiB at once to avoid timeouts */
 +              size = min(cursor.size, 256ULL << 20);
 +
 +              r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
 +                                        1, ring, false, &size, &addr);
 +              if (r)
 +                      goto err;
 +
 +              r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
 +                                      &next, true, true);
 +              if (r)
 +                      goto err;
 +
 +              dma_fence_put(*fence);
 +              *fence = next;
 +
 +              amdgpu_res_next(&cursor, size);
 +      }
 +err:
 +      mutex_unlock(&adev->mman.gtt_window_lock);
 +
 +      return r;
 +}
 +
  int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        uint32_t src_data,
                        struct dma_resv *resv,
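
The new clear path zero-fills only ranges not already tracked as cleared, in
chunks of at most 256MiB, chaining the fills into the single fence returned to
the caller. A minimal sketch of driving amdgpu_ttm_clear_buffer() synchronously
(example_clear_bo_sync is a hypothetical helper, not part of this diff):

	/* Hypothetical: clear a reserved BO and wait for the SDMA fills. */
	static int example_clear_bo_sync(struct amdgpu_bo *bo)
	{
		struct dma_fence *fence = NULL;
		int r;

		r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
		if (r)
			return r;

		/* The returned fence signals once the last fill completes. */
		r = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		return r;
	}
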
index b6f53129dea3011f4cf74d4f632d35ef6a8d988e,7c903a6c9ddb414b42e673bd354de15428cc2a53..f2eb1cf364c512594c8b2695edaebf6047f4bf71
@@@ -38,6 -38,8 +38,6 @@@
  #define AMDGPU_GTT_MAX_TRANSFER_SIZE  512
  #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS       2
  
 -#define AMDGPU_POISON 0xd0bed0be
 -
  extern const struct attribute_group amdgpu_vram_mgr_attr_group;
  extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
  
@@@ -110,6 -112,8 +110,8 @@@ struct amdgpu_copy_mem 
  };
  
  #define AMDGPU_COPY_FLAGS_TMZ         (1 << 0)
+ #define AMDGPU_COPY_FLAGS_READ_DECOMPRESSED   (1 << 1)
+ #define AMDGPU_COPY_FLAGS_WRITE_COMPRESSED    (1 << 2)
  
  int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
  void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
@@@ -146,7 -150,6 +148,6 @@@ int amdgpu_ttm_init(struct amdgpu_devic
  void amdgpu_ttm_fini(struct amdgpu_device *adev);
  void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
                                        bool enable);
  int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct dma_resv *resv,
@@@ -158,9 -161,6 +159,9 @@@ int amdgpu_ttm_copy_mem_to_mem(struct a
                               uint64_t size, bool tmz,
                               struct dma_resv *resv,
                               struct dma_fence **f);
 +int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
 +                          struct dma_resv *resv,
 +                          struct dma_fence **fence);
  int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        uint32_t src_data,
                        struct dma_resv *resv,
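
The two new copy flags let amdgpu_ttm_copy_mem_to_mem() request GFX12 DCC
handling from the copy engine: reads of a DCC surface go through decompression
and writes are re-compressed. A sketch of deriving the flags for a buffer pair,
mirroring the amdgpu_ttm.c hunk above (example_copy_flags is hypothetical):

	/* Hypothetical: derive SDMA copy flags for a src/dst BO pair. */
	static uint32_t example_copy_flags(struct amdgpu_bo *src,
					   struct amdgpu_bo *dst, bool tmz)
	{
		uint32_t flags = 0;

		if (tmz)
			flags |= AMDGPU_COPY_FLAGS_TMZ;
		/* GFX12 DCC buffers are stored compressed in VRAM. */
		if (src->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
			flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
		if (dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
			flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
		return flags;
	}
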
index ac18cddeb1912c1cecc52a46b9b6aae4810131e5,60404385d4d0e6eea49612682c903d0c5156e8da..c96407379224d5467596dcc059a5aa9d43105602
  #include <linux/types.h>
  #include <linux/pm_runtime.h>
  #include <linux/pci.h>
+ #include <linux/power_supply.h>
  #include <linux/firmware.h>
  #include <linux/component.h>
  #include <linux/dmi.h>
+ #include <linux/sort.h>
  
  #include <drm/display/drm_dp_mst_helper.h>
  #include <drm/display/drm_hdmi_helper.h>
@@@ -375,6 -377,20 +377,20 @@@ static inline void reverse_planes_order
                swap(array_of_surface_update[i], array_of_surface_update[j]);
  }
  
+ /*
+  * DC will program planes with their z-order determined by their ordering
+  * in the dc_surface_updates array. This comparator is used to sort them
+  * by descending zpos.
+  */
+ static int dm_plane_layer_index_cmp(const void *a, const void *b)
+ {
+       const struct dc_surface_update *sa = (struct dc_surface_update *)a;
+       const struct dc_surface_update *sb = (struct dc_surface_update *)b;
+       /* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
+       return sb->surface->layer_index - sa->surface->layer_index;
+ }
+
  /**
   * update_planes_and_stream_adapter() - Send planes to be updated in DC
   *
@@@ -399,7 -415,8 +415,8 @@@ static inline bool update_planes_and_st
                                                    struct dc_stream_update *stream_update,
                                                    struct dc_surface_update *array_of_surface_update)
  {
-       reverse_planes_order(array_of_surface_update, planes_count);
+       sort(array_of_surface_update, planes_count,
+            sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);
  
        /*
         * Previous frame finished and HW is ready for optimization.
@@@ -534,7 -551,7 +551,7 @@@ static void dm_vupdate_high_irq(void *i
        if (acrtc) {
                vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
 -              vblank = &drm_dev->vblank[acrtc->base.index];
 +              vblank = drm_crtc_vblank_crtc(&acrtc->base);
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;
  
@@@ -774,9 -791,9 +791,9 @@@ static void dmub_hpd_callback(struct am
                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        if (notify->type == DMUB_NOTIFICATION_HPD)
-                               DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
-                       else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                                DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+                       else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+                               DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
                        else
                                DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
                                                notify->type, link_index);
        drm_connector_list_iter_end(&iter);
  
        if (hpd_aconnector) {
-               if (notify->type == DMUB_NOTIFICATION_HPD)
+               if (notify->type == DMUB_NOTIFICATION_HPD) {
+                       if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
+                               DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index);
                        handle_hpd_irq_helper(hpd_aconnector);
-               else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+               } else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
                        handle_hpd_rx_irq(hpd_aconnector);
+               }
        }
  }
  
@@@ -859,7 -879,31 +879,31 @@@ static void dm_dmub_outbox1_low_irq(voi
        struct dmcub_trace_buf_entry entry = { 0 };
        u32 count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
-       struct dc_link *plink = NULL;
+       static const char *const event_type[] = {
+               "NO_DATA",
+               "AUX_REPLY",
+               "HPD",
+               "HPD_IRQ",
+               "SET_CONFIGC_REPLY",
+               "DPIA_NOTIFICATION",
+       };
+       do {
+               if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+                       trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+                                                       entry.param0, entry.param1);
+                       DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+                                entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+               } else
+                       break;
+               count++;
+       } while (count <= DMUB_TRACE_MAX_READ);
+       if (count > DMUB_TRACE_MAX_READ)
+               DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
  
        if (dc_enable_dmub_notifications(adev->dm.dc) &&
                irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
-                               DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
+                               DRM_WARN("DMUB notification skipped due to no handler: type=%s\n",
+                                       event_type[notify.type]);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type] == true) {
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                dmub_hpd_wrk->adev = adev;
-                               if (notify.type == DMUB_NOTIFICATION_HPD) {
-                                       plink = adev->dm.dc->links[notify.link_index];
-                                       if (plink) {
-                                               plink->hpd_status =
-                                                       notify.hpd_status == DP_HPD_PLUG;
-                                       }
-                               }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }
-       do {
-               if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
-                       trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
-                                                       entry.param0, entry.param1);
-                       DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
-                                entry.trace_code, entry.tick_count, entry.param0, entry.param1);
-               } else
-                       break;
-               count++;
-       } while (count <= DMUB_TRACE_MAX_READ);
-       if (count > DMUB_TRACE_MAX_READ)
-               DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
  }
  
  static int dm_set_clockgating_state(void *handle,
@@@ -957,8 -977,8 +977,8 @@@ static void amdgpu_dm_fbc_init(struct d
  
  
        list_for_each_entry(mode, &connector->modes, head) {
-               if (max_size < mode->htotal * mode->vtotal)
-                       max_size = mode->htotal * mode->vtotal;
+               if (max_size < (unsigned long) mode->htotal * mode->vtotal)
+                       max_size = (unsigned long) mode->htotal * mode->vtotal;
        }
  
        if (max_size) {
@@@ -2855,7 -2875,8 +2875,8 @@@ static int dm_suspend(void *handle
  
                dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
  
-               dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+               if (dm->cached_dc_state)
+                       dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
  
                amdgpu_dm_commit_zero_streams(dm->dc);
  
@@@ -3159,7 -3180,7 +3180,7 @@@ static int dm_resume(void *handle
                 * this is the case when traversing through already created end sink
                 * MST connectors, should be skipped
                 */
-               if (aconnector && aconnector->mst_root)
+               if (aconnector->mst_root)
                        continue;
  
                mutex_lock(&aconnector->hpd_lock);
@@@ -4571,6 -4592,7 +4592,7 @@@ amdgpu_dm_register_backlight_device(str
        struct drm_device *drm = aconnector->base.dev;
        struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
        struct backlight_properties props = { 0 };
+       struct amdgpu_dm_backlight_caps caps = { 0 };
        char bl_name[16];
  
        if (aconnector->bl_idx == -1)
                return;
        }
  
+       amdgpu_acpi_get_backlight_caps(&caps);
+       if (caps.caps_valid) {
+               if (power_supply_is_system_supplied() > 0)
+                       props.brightness = caps.ac_level;
+               else
+                       props.brightness = caps.dc_level;
+       } else
+               props.brightness = AMDGPU_MAX_BL_LEVEL;
        props.max_brightness = AMDGPU_MAX_BL_LEVEL;
-       props.brightness = AMDGPU_MAX_BL_LEVEL;
        props.type = BACKLIGHT_RAW;
  
        snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@@ -6392,13 -6422,13 +6422,13 @@@ static void apply_dsc_policy_for_stream
        dc_dsc_policy_set_enable_dsc_when_not_needed(
                aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
  
-       if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
+       if (sink->sink_signal == SIGNAL_TYPE_EDP &&
            !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
            dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
  
                apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
  
-       } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+       } else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
                if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
                        if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
                                                dsc_caps,
@@@ -7073,7 -7103,8 +7103,8 @@@ static void create_eml_sink(struct amdg
                aconnector->dc_sink = aconnector->dc_link->local_sink ?
                aconnector->dc_link->local_sink :
                aconnector->dc_em_sink;
-               dc_sink_retain(aconnector->dc_sink);
+               if (aconnector->dc_sink)
+                       dc_sink_retain(aconnector->dc_sink);
        }
  }
  
@@@ -7900,7 -7931,8 +7931,8 @@@ static int amdgpu_dm_connector_get_mode
                                drm_add_modes_noedid(connector, 1920, 1080);
        } else {
                amdgpu_dm_connector_ddc_get_modes(connector, edid);
-               amdgpu_dm_connector_add_common_modes(encoder, connector);
+               if (encoder)
+                       amdgpu_dm_connector_add_common_modes(encoder, connector);
                amdgpu_dm_connector_add_freesync_modes(connector, edid);
        }
        amdgpu_dm_fbc_init(connector);
@@@ -8745,8 -8777,24 +8777,24 @@@ static void amdgpu_dm_commit_planes(str
         * Disable the cursor first if we're disabling all the planes.
         * It'll remain on the screen after the planes are re-enabled
         * if we don't.
+        *
+        * If the cursor is transitioning from native to overlay mode, the
+        * native cursor needs to be disabled first.
         */
-       if (acrtc_state->active_planes == 0)
+       if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE &&
+           dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
+               struct dc_cursor_position cursor_position = {0};
+               if (!dc_stream_set_cursor_position(acrtc_state->stream,
+                                                  &cursor_position))
+                       drm_err(dev, "DC failed to disable native cursor\n");
+               bundle->stream_update.cursor_position =
+                               &acrtc_state->stream->cursor_position;
+       }
+       if (acrtc_state->active_planes == 0 &&
+           dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
                amdgpu_dm_commit_cursors(state);
  
        /* update planes when needed */
                struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
  
                /* Cursor plane is handled after stream updates */
-               if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+               if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+                   acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
                        if ((fb && crtc == pcrtc) ||
                            (old_plane_state->fb && old_plane_state->crtc == pcrtc)) {
                                cursor_update = true;
         * to be disabling a single plane - those pipes are being disabled.
         */
        if (acrtc_state->active_planes &&
-           (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0))
+           (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) &&
+           acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
                amdgpu_dm_commit_cursors(state);
  
  cleanup:
@@@ -9744,6 -9794,8 +9794,8 @@@ static void amdgpu_dm_atomic_commit_tai
                for (j = 0; j < status->plane_count; j++)
                        dummy_updates[j].surface = status->plane_states[0];
  
+               sort(dummy_updates, status->plane_count,
+                    sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL);
  
                mutex_lock(&dm->dc_lock);
                dc_exit_ips_for_hw_access(dm->dc);
@@@ -10434,7 -10486,8 +10486,8 @@@ static bool should_reset_plane(struct d
  {
        struct drm_plane *other;
        struct drm_plane_state *old_other_state, *new_other_state;
-       struct drm_crtc_state *new_crtc_state;
+       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+       struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state;
        struct amdgpu_device *adev = drm_to_adev(plane->dev);
        int i;
  
  
        new_crtc_state =
                drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+       old_crtc_state =
+               drm_atomic_get_old_crtc_state(state, old_plane_state->crtc);
  
        if (!new_crtc_state)
                return true;
  
+       /*
+        * A change in cursor mode means a new dc pipe needs to be acquired or
+        * released from the state
+        */
+       old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
+       new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
+       if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+           old_dm_crtc_state != NULL &&
+           old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) {
+               return true;
+       }
        /* CRTC Degamma changes currently require us to recreate planes. */
        if (new_crtc_state->color_mgmt_changed)
                return true;
  
+       /*
+        * On zpos change, planes need to be reordered by removing and re-adding
+        * them one by one to the dc state, in order of descending zpos.
+        *
+        * TODO: We can likely skip bandwidth validation if the only thing that
+        * changed about the plane was its z-ordering.
+        */
+       if (new_crtc_state->zpos_changed)
+               return true;
        if (drm_atomic_crtc_needs_modeset(new_crtc_state))
                return true;
  
@@@ -10611,6 -10688,68 +10688,68 @@@ static int dm_check_cursor_fb(struct am
        return 0;
  }
  
+ /*
+  * Helper function for checking the cursor in native mode
+  */
+ static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc,
+                                       struct drm_plane *plane,
+                                       struct drm_plane_state *new_plane_state,
+                                       bool enable)
+ {
+       struct amdgpu_crtc *new_acrtc;
+       int ret;
+       if (!enable || !new_plane_crtc ||
+           drm_atomic_plane_disabling(plane->state, new_plane_state))
+               return 0;
+       new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+       if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
+               DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
+               return -EINVAL;
+       }
+       if (new_plane_state->fb) {
+               ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
+                                               new_plane_state->fb);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+ }
+
+ static bool dm_should_update_native_cursor(struct drm_atomic_state *state,
+                                          struct drm_crtc *old_plane_crtc,
+                                          struct drm_crtc *new_plane_crtc,
+                                          bool enable)
+ {
+       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+       struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+       if (!enable) {
+               if (old_plane_crtc == NULL)
+                       return true;
+               old_crtc_state = drm_atomic_get_old_crtc_state(
+                       state, old_plane_crtc);
+               dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+               return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE;
+       } else {
+               if (new_plane_crtc == NULL)
+                       return true;
+               new_crtc_state = drm_atomic_get_new_crtc_state(
+                       state, new_plane_crtc);
+               dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+               return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE;
+       }
+ }
+
  static int dm_update_plane_state(struct dc *dc,
                                 struct drm_atomic_state *state,
                                 struct drm_plane *plane,
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
        struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
-       struct amdgpu_crtc *new_acrtc;
-       bool needs_reset;
+       bool needs_reset, update_native_cursor;
        int ret = 0;
  
  
        dm_new_plane_state = to_dm_plane_state(new_plane_state);
        dm_old_plane_state = to_dm_plane_state(old_plane_state);
  
-       if (plane->type == DRM_PLANE_TYPE_CURSOR) {
-               if (!enable || !new_plane_crtc ||
-                       drm_atomic_plane_disabling(plane->state, new_plane_state))
-                       return 0;
-               new_acrtc = to_amdgpu_crtc(new_plane_crtc);
-               if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
-                       DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
-                       return -EINVAL;
-               }
+       update_native_cursor = dm_should_update_native_cursor(state,
+                                                             old_plane_crtc,
+                                                             new_plane_crtc,
+                                                             enable);
  
-               if (new_plane_state->fb) {
-                       ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
-                                                new_plane_state->fb);
-                       if (ret)
-                               return ret;
-               }
+       if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) {
+               ret = dm_check_native_cursor_state(new_plane_crtc, plane,
+                                                   new_plane_state, enable);
+               if (ret)
+                       return ret;
  
                return 0;
        }
  
                ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
                if (ret)
-                       return ret;
+                       goto out;
  
                WARN_ON(dm_new_plane_state->dc_state);
  
                dc_new_plane_state = dc_create_plane_state(dc);
-               if (!dc_new_plane_state)
-                       return -ENOMEM;
-               /* Block top most plane from being a video plane */
-               if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
-                       if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
-                               return -EINVAL;
-                       *is_top_most_overlay = false;
+               if (!dc_new_plane_state) {
+                       ret = -ENOMEM;
+                       goto out;
                }
  
                DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
                        new_crtc_state);
                if (ret) {
                        dc_plane_state_release(dc_new_plane_state);
-                       return ret;
+                       goto out;
                }
  
                ret = dm_atomic_get_state(state, &dm_state);
                if (ret) {
                        dc_plane_state_release(dc_new_plane_state);
-                       return ret;
+                       goto out;
                }
  
                /*
                                dm_state->context)) {
  
                        dc_plane_state_release(dc_new_plane_state);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto out;
                }
  
                dm_new_plane_state->dc_state = dc_new_plane_state;
                *lock_and_validation_needed = true;
        }
  
+ out:
+       /* If enabling cursor overlay failed, attempt fallback to native mode */
+       if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) {
+               ret = dm_check_native_cursor_state(new_plane_crtc, plane,
+                                                   new_plane_state, enable);
+               if (ret)
+                       return ret;
+               dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE;
+       }
  
        return ret;
  }
@@@ -10816,99 -10951,64 +10951,64 @@@ dm_get_plane_scale(struct drm_plane_sta
        *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
  }
  
- static int dm_check_crtc_cursor(struct drm_atomic_state *state,
-                               struct drm_crtc *crtc,
-                               struct drm_crtc_state *new_crtc_state)
+ /*
+  * The normalized_zpos value cannot be used by this iterator directly. It's only
+  * calculated for enabled planes, potentially causing normalized_zpos collisions
+  * between enabled/disabled planes in the atomic state. We need a unique value
+  * so that the iterator will not generate the same object twice, or loop
+  * indefinitely.
+  */
+ static inline struct __drm_planes_state *__get_next_zpos(
+       struct drm_atomic_state *state,
+       struct __drm_planes_state *prev)
  {
-       struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
-       struct drm_plane_state *old_plane_state, *new_plane_state;
-       struct drm_plane_state *new_cursor_state, *new_underlying_state;
-       int i;
-       int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
-       bool any_relevant_change = false;
-       /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
-        * cursor per pipe but it's going to inherit the scaling and
-        * positioning from the underlying pipe. Check the cursor plane's
-        * blending properties match the underlying planes'.
-        */
-       /* If no plane was enabled or changed scaling, no need to check again */
-       for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
-               int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
-               if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
-                       continue;
-               if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
-                       any_relevant_change = true;
-                       break;
-               }
-               if (new_plane_state->fb == old_plane_state->fb &&
-                   new_plane_state->crtc_w == old_plane_state->crtc_w &&
-                   new_plane_state->crtc_h == old_plane_state->crtc_h)
-                       continue;
-               dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
-               dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+       unsigned int highest_zpos = 0, prev_zpos = 256;
+       uint32_t highest_id = 0, prev_id = UINT_MAX;
+       struct drm_plane_state *new_plane_state;
+       struct drm_plane *plane;
+       int i, highest_i = -1;
  
-               if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
-                       any_relevant_change = true;
-                       break;
-               }
+       if (prev != NULL) {
+               prev_zpos = prev->new_state->zpos;
+               prev_id = prev->ptr->base.id;
        }
  
-       if (!any_relevant_change)
-               return 0;
-       new_cursor_state = drm_atomic_get_plane_state(state, cursor);
-       if (IS_ERR(new_cursor_state))
-               return PTR_ERR(new_cursor_state);
-       if (!new_cursor_state->fb)
-               return 0;
-       dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
-       /* Need to check all enabled planes, even if this commit doesn't change
-        * their state
-        */
-       i = drm_atomic_add_affected_planes(state, crtc);
-       if (i)
-               return i;
-       for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
-               /* Narrow down to non-cursor planes on the same CRTC as the cursor */
-               if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
+       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+               /* Skip planes with higher zpos than the previously returned */
+               if (new_plane_state->zpos > prev_zpos ||
+                   (new_plane_state->zpos == prev_zpos &&
+                    plane->base.id >= prev_id))
                        continue;
  
-               /* Ignore disabled planes */
-               if (!new_underlying_state->fb)
-                       continue;
-               dm_get_plane_scale(new_underlying_state,
-                                  &underlying_scale_w, &underlying_scale_h);
-               if (cursor_scale_w != underlying_scale_w ||
-                   cursor_scale_h != underlying_scale_h) {
-                       drm_dbg_atomic(crtc->dev,
-                                      "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
-                                      cursor->base.id, cursor->name, underlying->base.id, underlying->name);
-                       return -EINVAL;
+               /* Save the index of the plane with highest zpos */
+               if (new_plane_state->zpos > highest_zpos ||
+                   (new_plane_state->zpos == highest_zpos &&
+                    plane->base.id > highest_id)) {
+                       highest_zpos = new_plane_state->zpos;
+                       highest_id = plane->base.id;
+                       highest_i = i;
                }
-               /* If this plane covers the whole CRTC, no need to check planes underneath */
-               if (new_underlying_state->crtc_x <= 0 &&
-                   new_underlying_state->crtc_y <= 0 &&
-                   new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
-                   new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
-                       break;
        }
  
-       return 0;
+       if (highest_i < 0)
+               return NULL;
+       return &state->planes[highest_i];
  }
  
+ /*
+  * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate
+  * by descending zpos, as read from the new plane state. This is the same
+  * ordering as defined by drm_atomic_normalize_zpos().
+  */
+ #define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \
+       for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \
+            __i != NULL; __i = __get_next_zpos((__state), __i))                \
+               for_each_if(((plane) = __i->ptr,                                \
+                            (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
+                            (old_plane_state) = __i->old_state,                \
+                            (new_plane_state) = __i->new_state, 1))
+
  static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
  {
        struct drm_connector *connector;
        return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
  }
  
+ /**
+  * DOC: Cursor Modes - Native vs Overlay
+  *
+  * In native mode, the cursor uses an integrated cursor pipe within each DCN hw
+  * plane. It does not require a dedicated hw plane to enable, but it is
+  * subject to the same z-order and scaling as the hw plane. It also has format
+  * restrictions: an RGB cursor in native mode cannot be enabled within a non-RGB
+  * hw plane.
+  *
+  * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its
+  * own scaling and z-pos. It also has no blending restrictions, giving cursor
+  * behavior more akin to a DRM client's expectations. However, it does
+  * occupy an extra DCN plane, and therefore will only be used if a DCN plane is
+  * available.
+  */
+
+ /**
+  * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc
+  * @adev: amdgpu device
+  * @state: DRM atomic state
+  * @dm_crtc_state: amdgpu state for the CRTC containing the cursor
+  * @cursor_mode: Returns the required cursor mode on dm_crtc_state
+  *
+  * Get whether the cursor should be enabled in native mode, or overlay mode, on
+  * the dm_crtc_state.
+  *
+  * The cursor should be enabled in overlay mode if there exists an underlying
+  * plane - on which the cursor may be blended - that is either YUV formatted, or
+  * scaled differently from the cursor.
+  *
+  * Since zpos info is required, drm_atomic_normalize_zpos must be called before
+  * calling this function.
+  *
+  * Return: 0 on success, or an error code if getting the cursor plane state
+  * failed.
+  */
+ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
+                                  struct drm_atomic_state *state,
+                                  struct dm_crtc_state *dm_crtc_state,
+                                  enum amdgpu_dm_cursor_mode *cursor_mode)
+ {
+       struct drm_plane_state *old_plane_state, *plane_state, *cursor_state;
+       struct drm_crtc_state *crtc_state = &dm_crtc_state->base;
+       struct drm_plane *plane;
+       bool consider_mode_change = false;
+       bool entire_crtc_covered = false;
+       bool cursor_changed = false;
+       int underlying_scale_w, underlying_scale_h;
+       int cursor_scale_w, cursor_scale_h;
+       int i;
+       /* Overlay cursor not supported on HW before DCN */
+       if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0) {
+               *cursor_mode = DM_CURSOR_NATIVE_MODE;
+               return 0;
+       }
+       /* Init cursor_mode to be the same as current */
+       *cursor_mode = dm_crtc_state->cursor_mode;
+       /*
+        * Cursor mode can change if a plane's format changes, scale changes, is
+        * enabled/disabled, or z-order changes.
+        */
+       for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) {
+               int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
+               /* Only care about planes on this CRTC */
+               if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0)
+                       continue;
+               if (plane->type == DRM_PLANE_TYPE_CURSOR)
+                       cursor_changed = true;
+               if (drm_atomic_plane_enabling(old_plane_state, plane_state) ||
+                   drm_atomic_plane_disabling(old_plane_state, plane_state) ||
+                   old_plane_state->fb->format != plane_state->fb->format) {
+                       consider_mode_change = true;
+                       break;
+               }
+               dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h);
+               dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+               if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
+                       consider_mode_change = true;
+                       break;
+               }
+       }
+       if (!consider_mode_change && !crtc_state->zpos_changed)
+               return 0;
+       /*
+        * If no cursor change on this CRTC, and not enabled on this CRTC, then
+        * no need to set cursor mode. This avoids needlessly locking the cursor
+        * state.
+        */
+       if (!cursor_changed &&
+           !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) {
+               return 0;
+       }
+       cursor_state = drm_atomic_get_plane_state(state,
+                                                 crtc_state->crtc->cursor);
+       if (IS_ERR(cursor_state))
+               return PTR_ERR(cursor_state);
+       /* Cursor is disabled */
+       if (!cursor_state->fb)
+               return 0;
+       /* For all planes in descending z-order (all of which are below cursor
+        * as per zpos definitions), check their scaling and format
+        */
+       for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) {
+               /* Only care about non-cursor planes on this CRTC */
+               if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 ||
+                   plane->type == DRM_PLANE_TYPE_CURSOR)
+                       continue;
+               /* Underlying plane is YUV format - use overlay cursor */
+               if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) {
+                       *cursor_mode = DM_CURSOR_OVERLAY_MODE;
+                       return 0;
+               }
+               dm_get_plane_scale(plane_state,
+                                  &underlying_scale_w, &underlying_scale_h);
+               dm_get_plane_scale(cursor_state,
+                                  &cursor_scale_w, &cursor_scale_h);
+               /* Underlying plane has different scale - use overlay cursor */
+               if (cursor_scale_w != underlying_scale_w ||
+                   cursor_scale_h != underlying_scale_h) {
+                       *cursor_mode = DM_CURSOR_OVERLAY_MODE;
+                       return 0;
+               }
+               /* If this plane covers the whole CRTC, no need to check planes underneath */
+               if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 &&
+                   plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay &&
+                   plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) {
+                       entire_crtc_covered = true;
+                       break;
+               }
+       }
+       /* If planes do not cover the entire CRTC, use overlay mode to enable
+        * cursor over holes
+        */
+       if (entire_crtc_covered)
+               *cursor_mode = DM_CURSOR_NATIVE_MODE;
+       else
+               *cursor_mode = DM_CURSOR_OVERLAY_MODE;
+       return 0;
+ }
+
  /**
   * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
   *
@@@ -11108,8 -11367,23 +11367,23 @@@ static int amdgpu_dm_atomic_check(struc
                goto fail;
        }
  
+       /*
+        * Determine whether cursors on each CRTC should be enabled in native or
+        * overlay mode.
+        */
+       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+               dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+               ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
+                                             &dm_new_crtc_state->cursor_mode);
+               if (ret) {
+                       drm_dbg(dev, "Failed to determine cursor mode\n");
+                       goto fail;
+               }
+       }
+
        /* Remove exiting planes if they are modified */
-       for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+       for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
                if (old_plane_state->fb && new_plane_state->fb &&
                    get_mem_type(old_plane_state->fb) !=
                    get_mem_type(new_plane_state->fb))
        }
  
        /* Add new/modified planes */
-       for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+       for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
                ret = dm_update_plane_state(dc, state, plane,
                                            old_plane_state,
                                            new_plane_state,
                        drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc);
        }
  
-       /* Check cursor planes scaling */
+       /* Check cursor plane restrictions */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-               ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
+               enum amdgpu_dm_cursor_mode required_cursor_mode;
+               /* Overlay cursor not subject to native cursor restrictions */
+               dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+               if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
+                       continue;
+               /* If HW can only do native cursor, check restrictions again */
+               ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
+                                             &required_cursor_mode);
                if (ret) {
-                       drm_dbg_atomic(dev, "dm_check_crtc_cursor() failed\n");
+                       drm_dbg_driver(crtc->dev,
+                                      "[CRTC:%d:%s] Checking cursor mode failed\n",
+                                      crtc->base.id, crtc->name);
+                       goto fail;
+               } else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) {
+                       drm_dbg_driver(crtc->dev,
+                                      "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n",
+                                      crtc->base.id, crtc->name);
+                       ret = -EINVAL;
                        goto fail;
                }
        }
@@@ -11806,6 -12098,12 +12098,12 @@@ void amdgpu_dm_trigger_timing_sync(stru
        mutex_unlock(&adev->dm.dc_lock);
  }
  
+ static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
+ {
+       if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
+               dc_exit_ips_for_hw_access(dc);
+ }
+
  void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
                       u32 value, const char *func_name)
  {
                return;
        }
  #endif
+       amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
        cgs_write_register(ctx->cgs_device, address, value);
        trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
  }
@@@ -11839,6 -12139,8 +12139,8 @@@ uint32_t dm_read_reg_func(const struct 
                return 0;
        }
  
+       amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
        value = cgs_read_register(ctx->cgs_device, address);
  
        trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
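
The cursor-mode policy added in this file reduces to: pick the overlay cursor
whenever an underlying plane is YUV, is scaled differently from the cursor, or
the planes leave part of the CRTC uncovered; otherwise stay native. A condensed,
illustrative restatement of the dm_crtc_get_cursor_mode() decision (not the
driver code; the inputs summarize the per-plane walk in descending zpos):

	static enum amdgpu_dm_cursor_mode
	example_pick_cursor_mode(bool any_yuv_underlying,
				 bool any_scale_mismatch,
				 bool entire_crtc_covered)
	{
		if (any_yuv_underlying || any_scale_mismatch ||
		    !entire_crtc_covered)
			return DM_CURSOR_OVERLAY_MODE;
		return DM_CURSOR_NATIVE_MODE;
	}
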
index 2648d2b5be3ebd25079f7b782dec151d1b7fe780,659dd67be1baa126b094fdd368daa390222938cc..adbf560d6a74c4b03413301f7bd3f3293006f96c
@@@ -364,7 -364,7 +364,7 @@@ void dm_helpers_dp_mst_send_payload_all
        mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
        new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
  
 -      ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
 +      ret = drm_dp_add_payload_part2(mst_mgr, new_payload);
  
        if (ret) {
                amdgpu_dm_set_mst_status(&aconnector->mst_status,
@@@ -807,9 -807,6 +807,6 @@@ bool dm_helpers_dp_write_dsc_enable
        uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
        uint8_t ret = 0;
  
-       if (!stream)
-               return false;
        if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
                if (!aconnector->dsc_aux)
                        return false;
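
The conflict resolved in the first hunk tracks an upstream API change:
drm_dp_add_payload_part2() no longer takes the atomic state. The two-step MST
payload sequence now reads roughly as below (a sketch, reusing mst_mgr,
mst_state and new_payload from the hunk):

	ret = drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
	if (!ret)
		ret = drm_dp_add_payload_part2(mst_mgr, new_payload);
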