Merge tag 'amd-drm-next-5.20-2022-07-05' of https://gitlab.freedesktop.org/agd5f...
author Dave Airlie <[email protected]>
Tue, 12 Jul 2022 01:07:30 +0000 (11:07 +1000)
committer Dave Airlie <[email protected]>
Tue, 12 Jul 2022 01:07:32 +0000 (11:07 +1000)
amd-drm-next-5.20-2022-07-05:

amdgpu:
- Various spelling and grammar fixes
- Various eDP fixes
- Various DMCUB fixes
- VCN fixes
- GMC 11 fixes
- RAS fixes
- TMZ support for GC 10.3.7
- GPUVM TLB flush fixes
- SMU 13.0.x updates
- DCN 3.2 Support
- DCN 3.2.1 Support
- MES updates
- GFX11 modifiers support (see the sketch after this list)
- USB-C fixes
- MMHUB 3.0.1 support
- SDMA 6.0 doorbell fixes
- Initial devcoredump support
- Enable high priority gfx queue on asics which support it
- Enable GPU reset for SMU 13.0.4
- OLED display fixes
- MPO fixes
- DC frame size fixes
- ASPM support for PCIE 7.4/7.6
- GPU reset support for SMU 13.0.0
- GFX11 updates
- VCN JPEG fix
- BACO support for SMU 13.0.7
- VCN instance handling fix
- GFX8 GPUVM TLB flush fix
- GPU reset rework
- VCN 4.0.2 support
- GTT size fixes
- DP link training fixes
- LSDMA 6.0.1 support
- Various backlight fixes
- Color encoding fixes
- Backlight config cleanup
- VCN 4.x unified queue cleanup
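
For the GFX11 modifiers item above, a minimal sketch of how userspace might
compose and decode one of the new modifiers with the uapi macros from
include/uapi/drm/drm_fourcc.h (updated by this pull; see add_gfx11_modifiers()
in the diff below). The PIPE_XOR_BITS and PACKERS values are invented for
illustration; real modifiers come from the plane's IN_FORMATS property:

#include <stdio.h>
#include <drm_fourcc.h>   /* libdrm copy of include/uapi/drm/drm_fourcc.h */

int main(void)
{
	__u64 mod = AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX11_256K_R_X) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, 4) | /* example: 16 pipes */
		    AMD_FMT_MOD_SET(PACKERS, 3);        /* example: 8 packers */

	/* Decode it again with the matching GET/IS helpers. */
	if (IS_AMD_FMT_MOD(mod))
		printf("tile version %u, swizzle mode %u\n",
		       (unsigned int)AMD_FMT_MOD_GET(TILE_VERSION, mod),
		       (unsigned int)AMD_FMT_MOD_GET(TILE, mod));
	return 0;
}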

amdkfd:
- MMU notifier fixes
- Updates for GC 10.3.6 and 10.3.7
- P2P DMA support using dma-buf
- Add available memory IOCTL
- SDMA 6.0.1 fix
- MES fixes
- HMM profiler support

radeon:
- License fix
- Backlight config cleanup

UAPI:
- Add available memory IOCTL to amdkfd (see the usage sketch after this list)
  Proposed userspace: https://www.mail-archive.com/[email protected]/msg75743.html
- HMM profiler support for amdkfd
  Proposed userspace: https://lists.freedesktop.org/archives/amd-gfx/2022-June/080805.html
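
For the available memory IOCTL above, a minimal userspace sketch; names are
assumed to follow the kfd_ioctl.h uapi additions in this series, the gpu_id is
a placeholder (real IDs come from /sys/class/kfd/kfd/topology/nodes/), and
error handling is kept minimal:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	/* gpu_id 0x1234 is a placeholder, not a real KFD gpu_id. */
	struct kfd_ioctl_get_available_memory_args args = { .gpu_id = 0x1234 };
	int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return 1;
	if (ioctl(fd, AMDKFD_IOC_AVAILABLE_MEMORY, &args) == 0)
		printf("GPU 0x%x: %llu bytes available\n", args.gpu_id,
		       (unsigned long long)args.available);
	close(fd);
	return 0;
}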

Signed-off-by: Dave Airlie <[email protected]>
From: Alex Deucher <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
include/linux/workqueue.h
include/uapi/drm/drm_fourcc.h
kernel/workqueue.c

index 450d32ccd69dfe1fb3377a3ac54ea2af484df1f8,dbe2904e015b4a575ea9f5c09eae02f5d0dbc145..d788a00043a5caa4bfc54906450299bfc8941e87
@@@ -37,7 -37,6 +37,7 @@@
  #include <drm/drm_fixed.h>
  #include <drm/drm_crtc_helper.h>
  #include <drm/drm_fb_helper.h>
 +#include <drm/drm_framebuffer.h>
  #include <drm/drm_plane_helper.h>
  #include <drm/drm_probe_helper.h>
  #include <linux/i2c.h>
@@@ -350,15 -349,11 +350,11 @@@ struct amdgpu_mode_info 
  
  #define AMDGPU_MAX_BL_LEVEL 0xFF
  
- #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
  struct amdgpu_backlight_privdata {
        struct amdgpu_encoder *encoder;
        uint8_t negative;
  };
  
- #endif
  struct amdgpu_atom_ss {
        uint16_t percentage;
        uint16_t percentage_divider;
index 2f4422d4c8a4f1e82037c887bc008c76e59be749,1bd65b41ba4d94267eaa5727941e3a7508c23dc5..41e4774abdb0cc504f71bd895ee600442858711c
  #include <drm/drm_atomic.h>
  #include <drm/drm_atomic_uapi.h>
  #include <drm/drm_atomic_helper.h>
 +#include <drm/drm_blend.h>
  #include <drm/drm_fb_helper.h>
  #include <drm/drm_fourcc.h>
  #include <drm/drm_edid.h>
  #include <drm/drm_vblank.h>
  #include <drm/drm_audio_component.h>
 +#include <drm/drm_gem_atomic_helper.h>
  
  #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
  
  #include "dcn/dcn_1_0_offset.h"
  #include "dcn/dcn_1_0_sh_mask.h"
  #include "soc15_hw_ip.h"
+ #include "soc15_common.h"
  #include "vega10_ip_offset.h"
  
  #include "soc15_common.h"
  
+ #include "gc/gc_11_0_0_offset.h"
+ #include "gc/gc_11_0_0_sh_mask.h"
  #include "modules/inc/mod_freesync.h"
  #include "modules/power/power_helpers.h"
  #include "modules/inc/mod_info_packet.h"
@@@ -120,6 -122,11 +124,11 @@@ MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB)
  #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
  MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
  
+ #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
+ MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
+ #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
+ MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
  #define FIRMWARE_RAVEN_DMCU           "amdgpu/raven_dmcu.bin"
  MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
  
@@@ -1258,10 -1265,20 +1267,20 @@@ static void vblank_control_worker(struc
  
        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
  
-       /* Control PSR based on vblank requirements from OS */
+       /*
+        * Control PSR based on vblank requirements from OS
+        *
+        * If panel supports PSR SU, there's no need to disable PSR when OS is
+        * submitting fast atomic commits (we infer this by whether the OS
+        * requests vblank events). Fast atomic commits will simply trigger a
+        * full-frame-update (FFU); a specific case of selective-update (SU)
+        * where the SU region is the full hactive*vactive region. See
+        * fill_dc_dirty_rects().
+        */
        if (vblank_work->stream && vblank_work->stream->link) {
                if (vblank_work->enable) {
-                       if (vblank_work->stream->link->psr_settings.psr_allow_active)
+                       if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
+                           vblank_work->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_disable(vblank_work->stream);
                } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
                           !vblank_work->stream->link->psr_settings.psr_allow_active &&
@@@ -1509,6 -1526,8 +1528,8 @@@ static int amdgpu_dm_init(struct amdgpu
                DRM_INFO("Seamless boot condition check passed\n");
        }
  
+       init_data.flags.enable_mipi_converter_optimization = true;
        INIT_LIST_HEAD(&adev->dm.da_list);
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);
@@@ -1803,6 -1822,8 +1824,8 @@@ static int load_dmcu_fw(struct amdgpu_d
                case IP_VERSION(3, 1, 3):
                case IP_VERSION(3, 1, 5):
                case IP_VERSION(3, 1, 6):
+               case IP_VERSION(3, 2, 0):
+               case IP_VERSION(3, 2, 1):
                        return 0;
                default:
                        break;
@@@ -1926,6 -1947,14 +1949,14 @@@ static int dm_dmub_sw_init(struct amdgp
                dmub_asic = DMUB_ASIC_DCN316;
                fw_name_dmub = FIRMWARE_DCN316_DMUB;
                break;
+       case IP_VERSION(3, 2, 0):
+               dmub_asic = DMUB_ASIC_DCN32;
+               fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+               break;
+       case IP_VERSION(3, 2, 1):
+               dmub_asic = DMUB_ASIC_DCN321;
+               fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+               break;
        default:
                /* ASIC doesn't support DMUB. */
                return 0;
@@@ -2172,7 -2201,8 +2203,8 @@@ static void s3_handle_mst(struct drm_de
                } else {
                        ret = drm_dp_mst_topology_mgr_resume(mgr, true);
                        if (ret < 0) {
-                               drm_dp_mst_topology_mgr_set_mst(mgr, false);
+                               dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+                                       aconnector->dc_link);
                                need_hotplug = true;
                        }
                }
@@@ -2554,34 -2584,6 +2586,6 @@@ cleanup
        return;
  }
  
- static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
- {
-       struct dc_stream_state *stream_state;
-       struct amdgpu_dm_connector *aconnector = link->priv;
-       struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
-       struct dc_stream_update stream_update;
-       bool dpms_off = true;
-       memset(&stream_update, 0, sizeof(stream_update));
-       stream_update.dpms_off = &dpms_off;
-       mutex_lock(&adev->dm.dc_lock);
-       stream_state = dc_stream_find_from_link(link);
-       if (stream_state == NULL) {
-               DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
-               mutex_unlock(&adev->dm.dc_lock);
-               return;
-       }
-       stream_update.stream = stream_state;
-       acrtc_state->force_dpms_off = true;
-       dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
-                                    stream_state, &stream_update,
-                                    stream_state->ctx->dc->current_state);
-       mutex_unlock(&adev->dm.dc_lock);
- }
  static int dm_resume(void *handle)
  {
        struct amdgpu_device *adev = handle;
@@@ -2814,7 -2816,7 +2818,7 @@@ static struct drm_mode_config_helper_fu
  
  static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
  {
-       u32 max_cll, min_cll, max, min, q, r;
+       u32 max_avg, min_cll, max, min, q, r;
        struct amdgpu_dm_backlight_caps *caps;
        struct amdgpu_display_manager *dm;
        struct drm_connector *conn_base;
        caps = &dm->backlight_caps[i];
        caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
        caps->aux_support = false;
-       max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+       max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
        min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
  
        if (caps->ext_caps->bits.oled == 1 /*||
         * The results of the above expressions can be verified at
         * pre_computed_values.
         */
-       q = max_cll >> 5;
-       r = max_cll % 32;
+       q = max_avg >> 5;
+       r = max_avg % 32;
        max = (1 << q) * pre_computed_values[r];
  
        // min luminance: maxLum * (CV/255)^2 / 100
@@@ -3032,16 -3034,13 +3036,13 @@@ static void handle_hpd_irq_helper(struc
        struct drm_device *dev = connector->dev;
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct amdgpu_device *adev = drm_to_adev(dev);
+ #ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
-       struct dm_crtc_state *dm_crtc_state = NULL;
+ #endif
  
        if (adev->dm.disable_hpd_irq)
                return;
  
-       if (dm_con_state->base.state && dm_con_state->base.crtc)
-               dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
-                                       dm_con_state->base.state,
-                                       dm_con_state->base.crtc));
        /*
         * In case of failure or MST no need to update connector status or notify the OS
         * since (for MST case) MST does this in its own context.
                        drm_kms_helper_connector_hotplug_event(connector);
  
        } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
-               if (new_connection_type == dc_connection_none &&
-                   aconnector->dc_link->type == dc_connection_none &&
-                   dm_crtc_state)
-                       dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
                amdgpu_dm_update_connector_after_detect(aconnector);
  
                drm_modeset_lock_all(dev);
@@@ -3868,9 -3862,6 +3864,6 @@@ static int amdgpu_dm_mode_config_init(s
  #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
  #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
  
- #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
-       defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
  static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
                                            int bl_idx)
  {
@@@ -4074,7 -4065,6 +4067,6 @@@ amdgpu_dm_register_backlight_device(str
        else
                DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
  }
- #endif
  
  static int initialize_plane(struct amdgpu_display_manager *dm,
                            struct amdgpu_mode_info *mode_info, int plane_id,
  static void register_backlight_device(struct amdgpu_display_manager *dm,
                                      struct dc_link *link)
  {
- #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
-       defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
        if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
            link->type != dc_connection_none) {
                /*
                        dm->num_of_edps++;
                }
        }
- #endif
  }
  
  
@@@ -4235,6 -4221,8 +4223,8 @@@ static int amdgpu_dm_initialize_drm_dev
        case IP_VERSION(3, 1, 3):
        case IP_VERSION(3, 1, 5):
        case IP_VERSION(3, 1, 6):
+       case IP_VERSION(3, 2, 0):
+       case IP_VERSION(3, 2, 1):
        case IP_VERSION(2, 1, 0):
                if (register_outbox_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                case IP_VERSION(3, 1, 3):
                case IP_VERSION(3, 1, 5):
                case IP_VERSION(3, 1, 6):
+               case IP_VERSION(3, 2, 0):
+               case IP_VERSION(3, 2, 1):
                        psr_feature_enabled = true;
                        break;
                default:
                }
        }
  
-       /* Disable vblank IRQs aggressively for power-saving. */
-       adev_to_drm(adev)->vblank_disable_immediate = true;
        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {
                struct dc_link *link = NULL;
                case IP_VERSION(3, 1, 3):
                case IP_VERSION(3, 1, 5):
                case IP_VERSION(3, 1, 6):
+               case IP_VERSION(3, 2, 0):
+               case IP_VERSION(3, 2, 1):
                        if (dcn10_register_irq_handlers(dm->adev)) {
                                DRM_ERROR("DM: Failed to initialize IRQ\n");
                                goto fail;
@@@ -4556,6 -4545,8 +4547,8 @@@ static int dm_early_init(void *handle
                case IP_VERSION(3, 1, 3):
                case IP_VERSION(3, 1, 5):
                case IP_VERSION(3, 1, 6):
+               case IP_VERSION(3, 2, 0):
+               case IP_VERSION(3, 2, 1):
                        adev->mode_info.num_crtc = 4;
                        adev->mode_info.num_hpd = 4;
                        adev->mode_info.num_dig = 4;
@@@ -4865,7 -4856,9 +4858,9 @@@ fill_gfx9_tiling_info_from_modifier(con
        unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
        unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
        unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
-       unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
+       unsigned int pipes_log2;
+       pipes_log2 = min(5u, mod_pipe_xor_bits);
  
        fill_gfx9_tiling_info_from_device(adev, tiling_info);
  
@@@ -5201,8 -5194,73 +5196,73 @@@ add_gfx10_3_modifiers(const struct amdg
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
  }
  
+ static void
+ add_gfx11_modifiers(struct amdgpu_device *adev,
+                     uint64_t **mods, uint64_t *size, uint64_t *capacity)
+ {
+       int num_pipes = 0;
+       int pipe_xor_bits = 0;
+       int num_pkrs = 0;
+       int pkrs = 0;
+       u32 gb_addr_config;
+       u8 i = 0;
+       unsigned swizzle_r_x;
+       uint64_t modifier_r_x;
+       uint64_t modifier_dcc_best;
+       uint64_t modifier_dcc_4k;
+       /* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
+        * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */
+       gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
+       ASSERT(gb_addr_config != 0);
+       num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
+       pkrs = ilog2(num_pkrs);
+       num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
+       pipe_xor_bits = ilog2(num_pipes);
+       for (i = 0; i < 2; i++) {
+               /* Insert the best one first. */
+               /* R_X swizzle modes are the best for rendering and DCC requires them. */
+               if (num_pipes > 16)
+                       swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
+               else
+                       swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
+               modifier_r_x = AMD_FMT_MOD |
+                              AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
+                              AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+                              AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
+                              AMD_FMT_MOD_SET(PACKERS, pkrs);
+               /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
+               modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
+                                   AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
+                                   AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+                                   AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
+               /* DCC settings for 4K and greater resolutions. (required by display hw) */
+               modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
+                                 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+                                 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+                                 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
+               add_modifier(mods, size, capacity, modifier_dcc_best);
+               add_modifier(mods, size, capacity, modifier_dcc_4k);
+               add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
+               add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
+               add_modifier(mods, size, capacity, modifier_r_x);
+       }
+       add_modifier(mods, size, capacity, AMD_FMT_MOD |
+              AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
+              AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
+ }
  static int
- get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
+ get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
  {
        uint64_t size = 0, capacity = 128;
        *mods = NULL;
                else
                        add_gfx10_1_modifiers(adev, mods, &size, &capacity);
                break;
+       case AMDGPU_FAMILY_GC_11_0_0:
+               add_gfx11_modifiers(adev, mods, &size, &capacity);
+               break;
        }
  
        add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
@@@ -5272,7 -5333,7 +5335,7 @@@ fill_gfx9_plane_attributes_from_modifie
                dcc->enable = 1;
                dcc->meta_pitch = afb->base.pitches[1];
                dcc->independent_64b_blks = independent_64b_blks;
-               if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
+               if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
                        if (independent_64b_blks && independent_128b_blks)
                                dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
                        else if (independent_128b_blks)
@@@ -5640,6 -5701,117 +5703,117 @@@ static int fill_dc_plane_attributes(str
        return 0;
  }
  
+ /**
+  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
+  *
+  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
+  *         remote fb
+  * @old_plane_state: Old state of @plane
+  * @new_plane_state: New state of @plane
+  * @crtc_state: New state of CRTC connected to the @plane
+  * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
+  *
+  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
+  * (referred to as "damage clips" in DRM nomenclature) that require updating on
+  * the eDP remote buffer. The responsibility of specifying the dirty regions is
+  * amdgpu_dm's.
+  *
+  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
+  * plane with regions that require flushing to the eDP remote buffer. In
+  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
+  * implicitly provide damage clips without any client support via the plane
+  * bounds.
+  *
+  * Today, amdgpu_dm only supports the MPO and cursor use cases.
+  *
+  * TODO: Also enable for FB_DAMAGE_CLIPS
+  */
+ static void fill_dc_dirty_rects(struct drm_plane *plane,
+                               struct drm_plane_state *old_plane_state,
+                               struct drm_plane_state *new_plane_state,
+                               struct drm_crtc_state *crtc_state,
+                               struct dc_flip_addrs *flip_addrs)
+ {
+       struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+       struct rect *dirty_rects = flip_addrs->dirty_rects;
+       uint32_t num_clips;
+       bool bb_changed;
+       bool fb_changed;
+       uint32_t i = 0;
+       flip_addrs->dirty_rect_count = 0;
+       /*
+        * Cursor plane has its own dirty rect update interface. See
+        * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
+        */
+       if (plane->type == DRM_PLANE_TYPE_CURSOR)
+               return;
+       /*
+        * Today, we only consider MPO use-case for PSR SU. If MPO not
+        * requested, and there is a plane update, do FFU.
+        */
+       if (!dm_crtc_state->mpo_requested) {
+               dirty_rects[0].x = 0;
+               dirty_rects[0].y = 0;
+               dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
+               dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
+               flip_addrs->dirty_rect_count = 1;
+               DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+                                new_plane_state->plane->base.id,
+                                dm_crtc_state->base.mode.crtc_hdisplay,
+                                dm_crtc_state->base.mode.crtc_vdisplay);
+               return;
+       }
+       /*
+        * MPO is requested. Add entire plane bounding box to dirty rects if
+        * flipped to or damaged.
+        *
+        * If plane is moved or resized, also add old bounding box to dirty
+        * rects.
+        */
+       num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+       fb_changed = old_plane_state->fb->base.id !=
+                    new_plane_state->fb->base.id;
+       bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
+                     old_plane_state->crtc_y != new_plane_state->crtc_y ||
+                     old_plane_state->crtc_w != new_plane_state->crtc_w ||
+                     old_plane_state->crtc_h != new_plane_state->crtc_h);
+       DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
+                        new_plane_state->plane->base.id,
+                        bb_changed, fb_changed, num_clips);
+       if (num_clips || fb_changed || bb_changed) {
+               dirty_rects[i].x = new_plane_state->crtc_x;
+               dirty_rects[i].y = new_plane_state->crtc_y;
+               dirty_rects[i].width = new_plane_state->crtc_w;
+               dirty_rects[i].height = new_plane_state->crtc_h;
+               DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
+                                new_plane_state->plane->base.id,
+                                dirty_rects[i].x, dirty_rects[i].y,
+                                dirty_rects[i].width, dirty_rects[i].height);
+               i += 1;
+       }
+       /* Add old plane bounding-box if plane is moved or resized */
+       if (bb_changed) {
+               dirty_rects[i].x = old_plane_state->crtc_x;
+               dirty_rects[i].y = old_plane_state->crtc_y;
+               dirty_rects[i].width = old_plane_state->crtc_w;
+               dirty_rects[i].height = old_plane_state->crtc_h;
+               DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
+                               old_plane_state->plane->base.id,
+                               dirty_rects[i].x, dirty_rects[i].y,
+                               dirty_rects[i].width, dirty_rects[i].height);
+               i += 1;
+       }
+       flip_addrs->dirty_rect_count = i;
+ }
  static void update_stream_scaling_settings(const struct drm_display_mode *mode,
                                           const struct dm_connector_state *dm_state,
                                           struct dc_stream_state *stream)
@@@ -6587,13 -6759,13 +6761,13 @@@ dm_crtc_duplicate_state(struct drm_crt
        state->freesync_config = cur->freesync_config;
        state->cm_has_degamma = cur->cm_has_degamma;
        state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
-       state->force_dpms_off = cur->force_dpms_off;
+       state->mpo_requested = cur->mpo_requested;
        /* TODO Duplicate dc_stream after objects are stream object is flattened */
  
        return &state->base;
  }
  
 -#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
 +#ifdef CONFIG_DEBUG_FS
  static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
  {
        crtc_debugfs_init(crtc);
@@@ -6679,7 -6851,7 +6853,7 @@@ static void dm_disable_vblank(struct dr
        dm_set_vblank(crtc, false);
  }
  
- /* Implemented only the options currently availible for the driver */
+ /* Implemented only the options currently available for the driver */
  static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
        .reset = dm_crtc_reset_state,
        .destroy = amdgpu_dm_crtc_destroy,
        .enable_vblank = dm_enable_vblank,
        .disable_vblank = dm_disable_vblank,
        .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 +#if defined(CONFIG_DEBUG_FS)
        .late_register = amdgpu_dm_crtc_late_register,
  #endif
  };
@@@ -6846,15 -7018,12 +7020,12 @@@ static void amdgpu_dm_connector_destroy
        if (aconnector->mst_mgr.dev)
                drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
  
- #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
-       defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
        for (i = 0; i < dm->num_of_edps; i++) {
                if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
                        backlight_device_unregister(dm->backlight_dev[i]);
                        dm->backlight_dev[i] = NULL;
                }
        }
- #endif
  
        if (aconnector->dc_em_sink)
                dc_sink_release(aconnector->dc_em_sink);
@@@ -7042,7 -7211,11 +7213,11 @@@ create_validate_stream_for_sink(struct 
                        break;
                }
  
-               dc_result = dc_validate_stream(adev->dm.dc, stream);
+               if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+                       dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
+               if (dc_result == DC_OK)
+                       dc_result = dc_validate_stream(adev->dm.dc, stream);
  
                if (dc_result != DC_OK) {
                        DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
@@@ -7342,7 -7515,7 +7517,7 @@@ static void dm_encoder_helper_disable(s
  
  }
  
- static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
+ int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
  {
        switch (display_color_depth) {
                case COLOR_DEPTH_666:
@@@ -7600,10 -7773,6 +7775,10 @@@ static int dm_plane_helper_prepare_fb(s
                goto error_unpin;
        }
  
 +      r = drm_gem_plane_helper_prepare_fb(plane, new_state);
 +      if (unlikely(r != 0))
 +              goto error_unpin;
 +
        amdgpu_bo_unreserve(rbo);
  
        afb->address = amdgpu_bo_gpu_offset(rbo);
@@@ -9138,7 -9307,9 +9313,7 @@@ static void amdgpu_dm_commit_planes(str
        struct dm_crtc_state *dm_old_crtc_state =
                        to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
        int planes_count = 0, vpos, hpos;
 -      long r;
        unsigned long flags;
 -      struct amdgpu_bo *abo;
        uint32_t target_vblank, last_flip_vblank;
        bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
        bool pflip_present = false;
                        continue;
                }
  
 -              abo = gem_to_amdgpu_bo(fb->obj[0]);
 -
 -              /*
 -               * Wait for all fences on this FB. Do limited wait to avoid
 -               * deadlock during GPU reset when this fence will not signal
 -               * but we hold reservation lock for the BO.
 -               */
 -              r = dma_resv_wait_timeout(abo->tbo.base.resv,
 -                                        DMA_RESV_USAGE_WRITE, false,
 -                                        msecs_to_jiffies(5000));
 -              if (unlikely(r <= 0))
 -                      DRM_ERROR("Waiting for fences timed out!");
 -
                fill_dc_plane_info_and_addr(
                        dm->adev, new_plane_state,
                        afb->tiling_flags,
                bundle->surface_updates[planes_count].plane_info =
                        &bundle->plane_infos[planes_count];
  
+               fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
+                                   new_crtc_state,
+                                   &bundle->flip_addrs[planes_count]);
                /*
                 * Only allow immediate flips for fast updates that don't
                 * change FB pitch, DCC state, rotation or mirroring.
                 * and rely on sending it from software.
                 */
                if (acrtc_attach->base.state->event &&
-                   acrtc_state->active_planes > 0 &&
-                   !acrtc_state->force_dpms_off) {
+                   acrtc_state->active_planes > 0) {
                        drm_crtc_vblank_get(pcrtc);
  
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
  
                        /* Allow PSR when skip count is 0. */
                        acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
+                       /*
+                        * If sink supports PSR SU, there is no need to rely on
+                        * a vblank event disable request to enable PSR. PSR SU
+                        * can be enabled immediately once OS demonstrates an
+                        * adequate number of fast atomic commits to notify KMD
+                        * of update events. See `vblank_control_worker()`.
+                        */
+                       if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+                           acrtc_attach->dm_irq_params.allow_psr_entry &&
+                           !acrtc_state->stream->link->psr_settings.psr_allow_active)
+                               amdgpu_dm_psr_enable(acrtc_state->stream);
                } else {
                        acrtc_attach->dm_irq_params.allow_psr_entry = false;
                }
@@@ -9552,14 -9751,9 +9742,14 @@@ static void amdgpu_dm_atomic_commit_tai
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        int crtc_disable_count = 0;
        bool mode_set_reset_required = false;
 +      int r;
  
        trace_amdgpu_dm_atomic_commit_tail_begin(state);
  
 +      r = drm_atomic_helper_wait_for_fences(dev, state, false);
 +      if (unlikely(r))
 +              DRM_ERROR("Waiting for fences timed out!");
 +
        drm_atomic_helper_update_legacy_modeset_state(dev, state);
  
        dm_state = dm_atomic_get_new_state(state);
        /* Update audio instances for each connector. */
        amdgpu_dm_commit_audio(dev, state);
  
- #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||         \
-       defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
        /* restore the backlight level */
        for (i = 0; i < dm->num_of_edps; i++) {
                if (dm->backlight_dev[i] &&
                    (dm->actual_brightness[i] != dm->brightness[i]))
                        amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
        }
- #endif
        /*
         * send vblank event on all events not handled in flip and
         * mark consumed event for drm_atomic_helper_commit_hw_done
@@@ -10368,7 -10560,7 +10556,7 @@@ static int dm_update_crtc_state(struct 
                 * added MST connectors not found in existing crtc_state in the chained mode
                 * TODO: need to dig out the root cause of that
                 */
-               if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
+               if (!aconnector)
                        goto skip_modeset;
  
                if (modereset_required(new_crtc_state))
@@@ -10979,7 -11171,10 +11167,10 @@@ static int amdgpu_dm_atomic_check(struc
                                }
                        }
                }
-               pre_validate_dsc(state, &dm_state, vars);
+               if (!pre_validate_dsc(state, &dm_state, vars)) {
+                       ret = -EINVAL;
+                       goto fail;
+               }
        }
  #endif
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
  #if defined(CONFIG_DRM_AMD_DC_DCN)
                if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
                        DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+                       ret = -EINVAL;
                        goto fail;
                }
  
index 2a78ae0075216194d923801c8ed0d79756a33b5e,4d5824edafaadb7499944fedcc287400574471a3..b64507f294ca86143c257af140233aca28aca28e
@@@ -540,11 -540,11 +540,11 @@@ static ssize_t dp_phy_settings_write(st
  
        /* apply phy settings from user */
        for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
-               link_lane_settings.lane_settings[r].VOLTAGE_SWING =
+               link_lane_settings.hw_lane_settings[r].VOLTAGE_SWING =
                                (enum dc_voltage_swing) (param[0]);
-               link_lane_settings.lane_settings[r].PRE_EMPHASIS =
+               link_lane_settings.hw_lane_settings[r].PRE_EMPHASIS =
                                (enum dc_pre_emphasis) (param[1]);
-               link_lane_settings.lane_settings[r].POST_CURSOR2 =
+               link_lane_settings.hw_lane_settings[r].POST_CURSOR2 =
                                (enum dc_post_cursor2) (param[2]);
        }
  
@@@ -738,7 -738,7 +738,7 @@@ static ssize_t dp_phy_test_pattern_debu
        }
  
        for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
-               link_training_settings.lane_settings[i] = link->cur_lane_setting[i];
+               link_training_settings.hw_lane_settings[i] = link->cur_lane_setting[i];
  
        dc_link_set_test_pattern(
                link,
@@@ -871,18 -871,28 +871,18 @@@ static int psr_capability_show(struct s
  }
  
  /*
 - * Returns the current and maximum output bpc for the connector.
 - * Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
 + * Returns the current bpc for the crtc.
 + * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_bpc
   */
 -static int output_bpc_show(struct seq_file *m, void *data)
 +static int amdgpu_current_bpc_show(struct seq_file *m, void *data)
  {
 -      struct drm_connector *connector = m->private;
 -      struct drm_device *dev = connector->dev;
 -      struct drm_crtc *crtc = NULL;
 +      struct drm_crtc *crtc = m->private;
 +      struct drm_device *dev = crtc->dev;
        struct dm_crtc_state *dm_crtc_state = NULL;
        int res = -ENODEV;
        unsigned int bpc;
  
        mutex_lock(&dev->mode_config.mutex);
 -      drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 -
 -      if (connector->state == NULL)
 -              goto unlock;
 -
 -      crtc = connector->state->crtc;
 -      if (crtc == NULL)
 -              goto unlock;
 -
        drm_modeset_lock(&crtc->mutex, NULL);
        if (crtc->state == NULL)
                goto unlock;
        }
  
        seq_printf(m, "Current: %u\n", bpc);
 -      seq_printf(m, "Maximum: %u\n", connector->display_info.bpc);
        res = 0;
  
  unlock:
 -      if (crtc)
 -              drm_modeset_unlock(&crtc->mutex);
 -
 -      drm_modeset_unlock(&dev->mode_config.connection_mutex);
 +      drm_modeset_unlock(&crtc->mutex);
        mutex_unlock(&dev->mode_config.mutex);
  
        return res;
  }
 +DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc);
  
  /*
   * Example usage:
@@@ -2526,6 -2539,7 +2526,6 @@@ static int target_backlight_show(struc
  DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
  DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
  DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
 -DEFINE_SHOW_ATTRIBUTE(output_bpc);
  DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
  #ifdef CONFIG_DRM_AMD_DC_HDCP
  DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
@@@ -2772,6 -2786,7 +2772,6 @@@ static const struct 
        const struct file_operations *fops;
  } connector_debugfs_entries[] = {
                {"force_yuv420_output", &force_yuv420_output_fops},
 -              {"output_bpc", &output_bpc_fops},
                {"trigger_hotplug", &trigger_hotplug_debugfs_fops},
                {"internal_display", &internal_display_fops}
  };
@@@ -3155,10 -3170,9 +3155,10 @@@ static int crc_win_update_get(void *dat
  
  DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get,
                         crc_win_update_set, "%llu\n");
 -
 +#endif
  void crtc_debugfs_init(struct drm_crtc *crtc)
  {
 +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
        struct dentry *dir = debugfs_lookup("crc", crtc->debugfs_entry);
  
        if (!dir)
                                   &crc_win_y_end_fops);
        debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
                                   &crc_win_update_fops);
 -
 -}
  #endif
 +      debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
 +                          crtc, &amdgpu_current_bpc_fops);
 +}
 +
  /*
   * Writes DTN log state to the user supplied buffer.
   * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
index cbc47aecd00f5eb937fab394ae91305e08efa80c,1982ec0b55d4bfccbe71dcd0bc22796d6c99419f..710797b2f0df592f296a40e8501d8562f4b0ac70
@@@ -329,51 -329,6 +329,6 @@@ static uint8_t get_dpcd_link_rate(cons
        return link_rate;
  }
  
- static void vendor_specific_lttpr_wa_one_start(struct dc_link *link)
- {
-       const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0xff};
-       const uint8_t offset = dp_convert_to_count(
-                       link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
-       uint32_t vendor_lttpr_write_address = 0xF004F;
-       if (offset != 0xFF)
-               vendor_lttpr_write_address +=
-                               ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
-       /* W/A for certain LTTPR to reset their lane settings, part one of two */
-       core_link_write_dpcd(
-                       link,
-                       vendor_lttpr_write_address,
-                       &vendor_lttpr_write_data[0],
-                       sizeof(vendor_lttpr_write_data));
- }
- static void vendor_specific_lttpr_wa_one_two(
-       struct dc_link *link,
-       const uint8_t rate)
- {
-       if (link->apply_vendor_specific_lttpr_link_rate_wa) {
-               uint8_t toggle_rate = 0x0;
-               if (rate == 0x6)
-                       toggle_rate = 0xA;
-               else
-                       toggle_rate = 0x6;
-               if (link->vendor_specific_lttpr_link_rate_wa == rate) {
-                       /* W/A for certain LTTPR to reset internal state for link training */
-                       core_link_write_dpcd(
-                                       link,
-                                       DP_LINK_BW_SET,
-                                       &toggle_rate,
-                                       1);
-               }
-               /* Store the last attempted link rate for this link */
-               link->vendor_specific_lttpr_link_rate_wa = rate;
-       }
- }
  static void dp_fixed_vs_pe_read_lane_adjust(
        struct dc_link *link,
        union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX])
        }
  }
  
- static void vendor_specific_lttpr_wa_four(
-       struct dc_link *link,
-       bool apply_wa)
- {
-       const uint8_t vendor_lttpr_write_data_one[4] = {0x1, 0x55, 0x63, 0x8};
-       const uint8_t vendor_lttpr_write_data_two[4] = {0x1, 0x55, 0x63, 0x0};
-       const uint8_t offset = dp_convert_to_count(
-                       link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
-       uint32_t vendor_lttpr_write_address = 0xF004F;
-       uint8_t sink_status = 0;
-       uint8_t i;
-       if (offset != 0xFF)
-               vendor_lttpr_write_address +=
-                               ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
-       /* W/A to pass through DPCD write of TPS=0 to DPRX */
-       if (apply_wa) {
-               core_link_write_dpcd(
-                               link,
-                               vendor_lttpr_write_address,
-                               &vendor_lttpr_write_data_one[0],
-                               sizeof(vendor_lttpr_write_data_one));
-       }
-       /* clear training pattern set */
-       dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
-       if (apply_wa) {
-               core_link_write_dpcd(
-                               link,
-                               vendor_lttpr_write_address,
-                               &vendor_lttpr_write_data_two[0],
-                               sizeof(vendor_lttpr_write_data_two));
-       }
-       /* poll for intra-hop disable */
-       for (i = 0; i < 10; i++) {
-               if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
-                               (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
-                       break;
-               udelay(1000);
-       }
- }
  static void dp_fixed_vs_pe_set_retimer_lane_settings(
        struct dc_link *link,
        const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX],
@@@ -561,14 -471,6 +471,6 @@@ enum dc_status dpcd_set_link_settings
                                &lt_settings->link_settings.link_rate_set, 1);
        } else {
                rate = get_dpcd_link_rate(&lt_settings->link_settings);
-               if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
-                                       (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
-                                       link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
-                       vendor_specific_lttpr_wa_one_start(link);
-               if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
-                                       (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
-                       vendor_specific_lttpr_wa_one_two(link, rate);
  
                status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
        }
@@@ -638,7 -540,7 +540,7 @@@ static void dpcd_set_lt_pattern_and_lan
        uint32_t dpcd_base_lt_offset;
  
        uint8_t dpcd_lt_buffer[5] = {0};
-       union dpcd_training_pattern dpcd_pattern = { 0 };
+       union dpcd_training_pattern dpcd_pattern = {0};
        uint32_t size_in_bytes;
        bool edp_workaround = false; /* TODO link_prop.INTERNAL */
        dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
@@@ -793,7 -695,7 +695,7 @@@ bool dp_is_interlane_aligned(union lane
  void dp_hw_to_dpcd_lane_settings(
                const struct link_training_settings *lt_settings,
                const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
 -              union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
 +              union dpcd_training_lane dpcd_lane_settings[])
  {
        uint8_t lane = 0;
  
@@@ -823,7 -725,7 +725,7 @@@ void dp_decide_lane_settings
                const struct link_training_settings *lt_settings,
                const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
                struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
 -              union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
 +              union dpcd_training_lane dpcd_lane_settings[])
  {
        uint32_t lane;
  
@@@ -944,7 -846,7 +846,7 @@@ static void override_lane_settings(cons
  
                return;
  
-       for (lane = 1; lane < LANE_COUNT_DP_MAX; lane++) {
+       for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
                if (lt_settings->voltage_swing)
                        lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing;
                if (lt_settings->pre_emphasis)
@@@ -1011,19 -913,10 +913,10 @@@ enum dc_status dp_get_lane_status_and_l
                        offset,
                        lane01_status_address, dpcd_buf[0],
                        lane01_status_address + 1, dpcd_buf[1]);
-       } else {
-               DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
-                       __func__,
-                       lane01_status_address, dpcd_buf[0],
-                       lane01_status_address + 1, dpcd_buf[1]);
-       }
-       lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1;
  
-       if (is_repeater(link, offset))
                lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 +
                                ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
  
-       if (is_repeater(link, offset)) {
                DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
                                " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
                                        __func__,
                                        lane01_adjust_address + 1,
                                        dpcd_buf[lane_adjust_offset + 1]);
        } else {
+               DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
+                       __func__,
+                       lane01_status_address, dpcd_buf[0],
+                       lane01_status_address + 1, dpcd_buf[1]);
+               lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1;
                DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
                        __func__,
                        lane01_adjust_address,
@@@ -1303,12 -1203,6 +1203,6 @@@ static enum link_training_result perfor
                                        dp_translate_training_aux_read_interval(
                                                link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
  
-               if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
-                               (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
-                               link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
-                       wait_time_microsec = 16000;
-               }
                dp_wait_for_training_aux_rd_interval(
                                link,
                                wait_time_microsec);
  
                /* 5. check CR done*/
                if (!dp_is_cr_done(lane_count, dpcd_lane_status))
-                       return LINK_TRAINING_EQ_FAIL_CR;
+                       return dpcd_lane_status[0].bits.CR_DONE_0 ?
+                                       LINK_TRAINING_EQ_FAIL_CR_PARTIAL :
+                                       LINK_TRAINING_EQ_FAIL_CR;
  
                /* 6. check CHEQ done*/
                if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
@@@ -1367,7 -1263,7 +1263,7 @@@ static enum link_training_result perfor
        enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
        union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
        union lane_align_status_updated dpcd_lane_status_updated;
-       union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+       union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
  
        retries_cr = 0;
        retry_count = 0;
                /* 3. wait receiver to lock-on*/
                wait_time_microsec = lt_settings->cr_pattern_time;
  
-               if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
-                               (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) {
-                       wait_time_microsec = 16000;
-               }
                dp_wait_for_training_aux_rd_interval(
                                link,
                                wait_time_microsec);
@@@ -1658,22 -1549,23 +1549,23 @@@ static void override_training_settings
                lt_settings->always_match_dpcd_with_hw_lane_settings = false;
        }
        for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
-               lt_settings->lane_settings[lane].VOLTAGE_SWING =
+               lt_settings->hw_lane_settings[lane].VOLTAGE_SWING =
                        lt_settings->voltage_swing != NULL ?
                        *lt_settings->voltage_swing :
                        VOLTAGE_SWING_LEVEL0;
-               lt_settings->lane_settings[lane].PRE_EMPHASIS =
+               lt_settings->hw_lane_settings[lane].PRE_EMPHASIS =
                        lt_settings->pre_emphasis != NULL ?
                        *lt_settings->pre_emphasis
                        : PRE_EMPHASIS_DISABLED;
-               lt_settings->lane_settings[lane].POST_CURSOR2 =
+               lt_settings->hw_lane_settings[lane].POST_CURSOR2 =
                        lt_settings->post_cursor2 != NULL ?
                        *lt_settings->post_cursor2
                        : POST_CURSOR2_DISABLED;
        }
  
-       dp_hw_to_dpcd_lane_settings(lt_settings,
-                       lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+       if (lt_settings->always_match_dpcd_with_hw_lane_settings)
+               dp_hw_to_dpcd_lane_settings(lt_settings,
+                               lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
  
        /* Initialize training timings */
        if (overrides->cr_pattern_time != NULL)
@@@ -1882,6 -1774,9 +1774,9 @@@ static void print_status_message
        case LINK_TRAINING_EQ_FAIL_CR:
                lt_result = "CR failed in EQ";
                break;
+       case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
+               lt_result = "CR failed in EQ partially";
+               break;
        case LINK_TRAINING_EQ_FAIL_EQ:
                lt_result = "EQ failed";
                break;
                                link_rate,
                                lt_settings->link_settings.lane_count,
                                lt_result,
-                               lt_settings->lane_settings[0].VOLTAGE_SWING,
-                               lt_settings->lane_settings[0].PRE_EMPHASIS,
+                               lt_settings->hw_lane_settings[0].VOLTAGE_SWING,
+                               lt_settings->hw_lane_settings[0].PRE_EMPHASIS,
                                lt_spread);
  }
  
@@@ -2074,7 -1969,8 +1969,8 @@@ static enum link_training_result dp_per
        uint32_t wait_time = 0;
        union lane_align_status_updated dpcd_lane_status_updated = {0};
        union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
-       enum link_training_result status = LINK_TRAINING_SUCCESS;
+       enum dc_status status = DC_OK;
+       enum link_training_result result = LINK_TRAINING_SUCCESS;
        union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
  
        /* Transmit 128b/132b_TPS1 over Main-Link */
                        lt_settings->pattern_for_eq, DPRX);
  
        /* poll for channel EQ done */
-       while (status == LINK_TRAINING_SUCCESS) {
+       while (result == LINK_TRAINING_SUCCESS) {
                dp_wait_for_training_aux_rd_interval(link, aux_rd_interval);
                wait_time += aux_rd_interval;
-               dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+               status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
                                &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
                dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
                        lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
                dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
-               if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
+               if (status != DC_OK) {
+                       result = LINK_TRAINING_ABORT;
+               } else if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
                                dpcd_lane_status)) {
                        /* pass */
                        break;
                } else if (loop_count >= lt_settings->eq_loop_count_limit) {
-                       status = DP_128b_132b_MAX_LOOP_COUNT_REACHED;
+                       result = DP_128b_132b_MAX_LOOP_COUNT_REACHED;
                } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
-                       status = DP_128b_132b_LT_FAILED;
+                       result = DP_128b_132b_LT_FAILED;
                } else {
                        dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
                        dpcd_set_lane_settings(link, lt_settings, DPRX);
        }
  
        /* poll for EQ interlane align done */
-       while (status == LINK_TRAINING_SUCCESS) {
-               if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) {
+       while (result == LINK_TRAINING_SUCCESS) {
+               if (status != DC_OK) {
+                       result = LINK_TRAINING_ABORT;
+               } else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) {
                        /* pass */
                        break;
                } else if (wait_time >= lt_settings->eq_wait_time_limit) {
-                       status = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT;
+                       result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT;
                } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
-                       status = DP_128b_132b_LT_FAILED;
+                       result = DP_128b_132b_LT_FAILED;
                } else {
                        dp_wait_for_training_aux_rd_interval(link,
                                        lt_settings->eq_pattern_time);
                        wait_time += lt_settings->eq_pattern_time;
-                       dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+                       status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
                                        &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
                }
        }
  
-       return status;
+       return result;
  }
  
  static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
                struct link_training_settings *lt_settings)
  {
        /* Assumption: assume hardware has transmitted eq pattern */
-       enum link_training_result status = LINK_TRAINING_SUCCESS;
+       enum dc_status status = DC_OK;
+       enum link_training_result result = LINK_TRAINING_SUCCESS;
        union lane_align_status_updated dpcd_lane_status_updated = {0};
        union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
-       union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+       union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
        uint32_t wait_time = 0;
  
        /* initiate CDS done sequence */
        dpcd_set_training_pattern(link, lt_settings->pattern_for_cds);
  
        /* poll for CDS interlane align done and symbol lock */
-       while (status == LINK_TRAINING_SUCCESS) {
+       while (result == LINK_TRAINING_SUCCESS) {
                dp_wait_for_training_aux_rd_interval(link,
                                lt_settings->cds_pattern_time);
                wait_time += lt_settings->cds_pattern_time;
-               dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+               status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
                                                &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
-               if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
+               if (status != DC_OK) {
+                       result = LINK_TRAINING_ABORT;
+               } else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
                                dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) {
                        /* pass */
                        break;
                } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
-                       status = DP_128b_132b_LT_FAILED;
+                       result = DP_128b_132b_LT_FAILED;
                } else if (wait_time >= lt_settings->cds_wait_time_limit) {
-                       status = DP_128b_132b_CDS_DONE_TIMEOUT;
+                       result = DP_128b_132b_CDS_DONE_TIMEOUT;
                }
        }
  
-       return status;
+       return result;
  }
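
The pattern repeated across these 128b/132b training sequences is worth calling out: the DPCD transaction status (enum dc_status) is now tracked separately from the training outcome (enum link_training_result), so a failed AUX read aborts the polling loop instead of being misread as stale lane status. A minimal sketch of the shared shape, where sequence_done() is a hypothetical stand-in for the per-phase check (dp_is_ch_eq_done(), interlane-align, or symbol-lock):

    enum dc_status status = DC_OK;                  /* AUX transport status */
    enum link_training_result result = LINK_TRAINING_SUCCESS;
    union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
    union lane_align_status_updated dpcd_lane_status_updated = {0};
    union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};

    while (result == LINK_TRAINING_SUCCESS) {
            status = dp_get_lane_status_and_lane_adjust(link, lt_settings,
                            dpcd_lane_status, &dpcd_lane_status_updated,
                            dpcd_lane_adjust, DPRX);
            if (status != DC_OK)
                    result = LINK_TRAINING_ABORT;   /* AUX failed: stop polling */
            else if (sequence_done(dpcd_lane_status))
                    break;                          /* pass */
            /* else fall through to the per-phase timeout/retry handling */
    }
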
  
  static enum link_training_result dp_perform_8b_10b_link_training(
@@@ -2370,6 -2273,7 +2273,7 @@@ static enum link_training_result dp_per
                        link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
        const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
        const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
+       uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
        uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
        uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
        uint32_t vendor_lttpr_write_address = 0xF004F;
        if (offset != 0xFF) {
                vendor_lttpr_write_address +=
                                ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+               /* Certain display and cable configurations require extra delay */
+               if (offset > 2)
+                       pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
        }
  
        /* Vendor specific: Reset lane settings */
  
        /* Perform Clock Recovery Sequence */
        if (status == LINK_TRAINING_SUCCESS) {
+               const uint8_t max_vendor_dpcd_retries = 10;
                uint32_t retries_cr;
                uint32_t retry_count;
                uint32_t wait_time_microsec;
                union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
                union lane_align_status_updated dpcd_lane_status_updated;
                union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+               enum dc_status dpcd_status = DC_OK;
+               uint8_t i = 0;
  
                retries_cr = 0;
                retry_count = 0;
                                                lt_settings->pattern_for_cr,
                                                0);
                                /* Vendor specific: Disable intercept */
-                               core_link_write_dpcd(
-                                               link,
-                                               vendor_lttpr_write_address,
-                                               &vendor_lttpr_write_data_intercept_dis[0],
-                                               sizeof(vendor_lttpr_write_data_intercept_dis));
+                               for (i = 0; i < max_vendor_dpcd_retries; i++) {
+                                       msleep(pre_disable_intercept_delay_ms);
+                                       dpcd_status = core_link_write_dpcd(
+                                                       link,
+                                                       vendor_lttpr_write_address,
+                                                       &vendor_lttpr_write_data_intercept_dis[0],
+                                                       sizeof(vendor_lttpr_write_data_intercept_dis));
+                                       if (dpcd_status == DC_OK)
+                                               break;
+                                       core_link_write_dpcd(
+                                                       link,
+                                                       vendor_lttpr_write_address,
+                                                       &vendor_lttpr_write_data_intercept_en[0],
+                                                       sizeof(vendor_lttpr_write_data_intercept_en));
+                               }
                        } else {
                                vendor_lttpr_write_data_vs[3] = 0;
                                vendor_lttpr_write_data_pe[3] = 0;
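
The intercept-disable write above is retried a bounded number of times because it can fail on some retimer paths; between attempts the code re-enables the intercept so the device is back in a known state before the next try. A condensed sketch of this retry-with-rollback shape, using the constants and buffers named in the diff:

    for (i = 0; i < max_vendor_dpcd_retries; i++) {
            msleep(pre_disable_intercept_delay_ms); /* config-tuned settle time */
            if (core_link_write_dpcd(link, vendor_lttpr_write_address,
                            vendor_lttpr_write_data_intercept_dis,
                            sizeof(vendor_lttpr_write_data_intercept_dis)) == DC_OK)
                    break;  /* disable took effect */
            /* rollback: re-enable intercept, then retry */
            core_link_write_dpcd(link, vendor_lttpr_write_address,
                            vendor_lttpr_write_data_intercept_en,
                            sizeof(vendor_lttpr_write_data_intercept_en));
    }
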
@@@ -2718,14 -2641,7 +2641,7 @@@ enum link_training_result dc_link_dp_pe
                        &lt_settings);
  
        /* reset previous training states */
-       if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
-                       (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
-                       link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
-               link->apply_vendor_specific_lttpr_link_rate_wa = true;
-               vendor_specific_lttpr_wa_four(link, true);
-       } else {
-               dpcd_exit_training_mode(link);
-       }
+       dpcd_exit_training_mode(link);
  
        /* configure link prior to entering training mode */
        dpcd_configure_lttpr_mode(link, &lt_settings);
                ASSERT(0);
  
        /* exit training mode */
-       if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
-                       (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
-                       link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
-               link->apply_vendor_specific_lttpr_link_rate_wa = false;
-               vendor_specific_lttpr_wa_four(link, (status != LINK_TRAINING_SUCCESS));
-       } else {
-               dpcd_exit_training_mode(link);
-       }
+       dpcd_exit_training_mode(link);
  
        /* switch to video idle */
        if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
@@@ -2804,8 -2713,8 +2713,8 @@@ bool perform_link_training_with_retries
        j = 0;
        while (j < attempts && fail_count < (attempts * 10)) {
  
-               DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d @ rate(%d) x lane(%d)\n",
-                       __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+               DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x lane(%d)\n",
+                       __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
                        cur_link_settings.lane_count);
  
                dp_enable_link_phy(
  
                fail_count++;
                dp_trace_lt_fail_count_update(link, fail_count, false);
-               /* latest link training still fail, skip delay and keep PHY on
-                */
-               if (j == (attempts - 1) && link->ep_type == DISPLAY_ENDPOINT_PHY)
-                       break;
+               if (link->ep_type == DISPLAY_ENDPOINT_PHY) {
+                       /* latest link training still fails or link training is
+                        * aborted; skip delay and keep PHY on
+                        */
+                       if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT))
+                               break;
+               }
  
-               DC_LOG_WARNING("%s: Link training attempt %u of %d failed @ rate(%d) x lane(%d)\n",
-                       __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
-                       cur_link_settings.lane_count);
+               DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) : fail reason:(%d)\n",
+                       __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+                       cur_link_settings.lane_count, status);
  
                dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
  
                         */
                        req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
                        link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
-                       if (req_bw > link_bw)
-                               break;
+                       is_link_bw_low = (req_bw > link_bw);
+                       is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+                               (cur_link_settings.lane_count <= LANE_COUNT_ONE));
+                       if (is_link_bw_low)
+                               DC_LOG_WARNING(
+                                       "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
+                                       __func__, link->link_index, req_bw, link_bw);
                }
  
                msleep(delay_between_attempts);
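
Rather than breaking out as soon as the requested bandwidth exceeds the fallen-back link bandwidth, the loop now records two flags and keeps retrying until the settings bottom out. A hedged condensation of the stop conditions, reconstructed from the visible hunks (treat the exact composition as an assumption, not a copy of the driver's logic):

    static bool should_stop_retrying(bool last_attempt,
                    enum link_training_result status, bool is_phy_endpoint,
                    bool is_link_bw_low, bool is_link_bw_min)
    {
            /* PHY endpoints: keep the PHY on and skip further delays on the
             * last attempt or when training was aborted (e.g. AUX failure) */
            if (is_phy_endpoint &&
                (last_attempt || status == LINK_TRAINING_ABORT))
                    return true;
            /* nothing left to fall back to: bandwidth too low while already
             * at the minimum rate and lane count */
            return is_link_bw_low && is_link_bw_min;
    }
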
@@@ -3596,11 -3513,6 +3513,6 @@@ static bool decide_fallback_link_settin
                struct dc_link_settings *cur,
                enum link_training_result training_result)
  {
-       if (!cur)
-               return false;
-       if (!max)
-               return false;
        if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING ||
                        link->dc->debug.force_dp2_lt_fallback_method)
                return decide_fallback_link_setting_max_bw_policy(link, max, cur,
                break;
        }
        case LINK_TRAINING_EQ_FAIL_EQ:
+       case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
        {
                if (!reached_minimum_lane_count(cur->lane_count)) {
                        cur->lane_count = reduce_lane_count(cur->lane_count);
@@@ -4186,8 -4099,7 +4099,7 @@@ static void dp_test_send_phy_test_patte
                        &dpcd_lane_adjustment[0].raw,
                        sizeof(dpcd_lane_adjustment));
  
-       if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
-                       (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+       if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
                        link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
                dp_fixed_vs_pe_read_lane_adjust(
                                link,
@@@ -5118,16 -5030,13 +5030,13 @@@ static bool dpcd_read_sink_ext_caps(str
        return true;
  }
  
- bool dp_retrieve_lttpr_cap(struct dc_link *link)
+ /* Logic to determine LTTPR mode */
+ static void determine_lttpr_mode(struct dc_link *link)
  {
-       uint8_t lttpr_dpcd_data[8];
        bool allow_lttpr_non_transparent_mode = 0;
        bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
        bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
-       enum dc_status status = DC_ERROR_UNEXPECTED;
-       bool is_lttpr_present = false;
  
-       memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
  
        if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
                        link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
                allow_lttpr_non_transparent_mode = 1;
        }
  
-       /*
-        * Logic to determine LTTPR mode
-        */
        link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
        if (vbios_lttpr_enable && vbios_lttpr_interop)
                link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
            link->dc->debug.dpia_debug.bits.force_non_lttpr)
                link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
  #endif
+ }
+
+ bool dp_retrieve_lttpr_cap(struct dc_link *link)
+ {
+       uint8_t lttpr_dpcd_data[8];
+       enum dc_status status = DC_ERROR_UNEXPECTED;
+       bool is_lttpr_present = false;
+
+       memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
+
+       /* Logic to determine LTTPR mode */
+       determine_lttpr_mode(link);
  
        if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+               if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+                               !link->dc->debug.disable_fixed_vs_aux_timeout_wa) {
+                       /* Fixed VS workaround for AUX timeout */
+                       const uint32_t fixed_vs_address = 0xF004F;
+                       const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
+                       core_link_write_dpcd(
+                                       link,
+                                       fixed_vs_address,
+                                       fixed_vs_data,
+                                       sizeof(fixed_vs_data));
+               }
                /* By reading LTTPR capability, RX assumes that we will enable
                 * LTTPR extended aux timeout if LTTPR is present.
                 */
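
Factoring the mode decision into determine_lttpr_mode() is what lets wa_try_to_wake_dprx() (below) pick a safe first AUX transaction before the LTTPR capabilities have been read. The branches between the ones visible in this hunk are elided by the diff, so the following condensation of the precedence is an assumption for illustration only:

    /* Hypothetical condensed precedence for link->lttpr_mode */
    static enum lttpr_mode pick_lttpr_mode(bool vbios_enable, bool vbios_aware,
                    bool allow_non_transparent, bool force_non_lttpr)
    {
            enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR;

            if (vbios_enable && vbios_aware)
                    mode = LTTPR_MODE_NON_TRANSPARENT;
            else if (allow_non_transparent)
                    mode = LTTPR_MODE_NON_TRANSPARENT;  /* assumed branch */
            else if (vbios_aware)
                    mode = LTTPR_MODE_TRANSPARENT;      /* assumed branch */

            if (force_non_lttpr)                        /* DPIA debug override */
                    mode = LTTPR_MODE_NON_LTTPR;
            return mode;
    }
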
@@@ -5276,11 -5207,23 +5207,23 @@@ static enum dc_status wa_try_to_wake_dp
        uint64_t time_taken_ms = 0;
        enum dc_connection_type type = dc_connection_none;
  
-       status = core_link_read_dpcd(
-                       link,
-                       DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
-                       &dpcd_data,
-                       sizeof(dpcd_data));
+       determine_lttpr_mode(link);
+       /* Issue an AUX read to test DPRX responsiveness. If LTTPR is supported, the first
+        * read is expected to be the one that determines LTTPR capabilities. Otherwise,
+        * reading the power state should be an innocuous AUX read.
+        */
+       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+               status = core_link_read_dpcd(
+                               link,
+                               DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+                               &dpcd_data,
+                               sizeof(dpcd_data));
+       else
+               status = core_link_read_dpcd(
+                               link,
+                               DP_SET_POWER,
+                               &dpcd_data,
+                               sizeof(dpcd_data));
  
        if (status != DC_OK) {
                DC_LOG_WARNING("%s: Read DPCD LTTPR_CAP failed - try to toggle DPCD SET_POWER for %lld ms.",
@@@ -5794,6 -5737,7 +5737,7 @@@ void detect_edp_sink_caps(struct dc_lin
        uint32_t link_rate_in_khz;
        enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
        uint8_t backlight_adj_cap;
+       uint8_t general_edp_cap;
  
        retrieve_link_cap(link);
        link->dpcd_caps.edp_supported_link_rates_count = 0;
        link->dpcd_caps.dynamic_backlight_capable_edp =
                                (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false;
  
+       core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1,
+                                               &general_edp_cap, sizeof(general_edp_cap));
+       link->dpcd_caps.set_power_state_capable_edp =
+                               (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false;
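
The new set_power_state_capable_edp flag follows the same read-byte-then-test-bit shape as the backlight capability above it. As an aside, if this pattern is reused elsewhere, the `!!` idiom is the more common kernel spelling than `? true:false` for folding a masked bit into a bool; a sketch using the same DPCD defines:

    uint8_t general_edp_cap = 0;

    core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1,
                        &general_edp_cap, sizeof(general_edp_cap));
    link->dpcd_caps.set_power_state_capable_edp =
                    !!(general_edp_cap & DP_EDP_SET_POWER_CAP);
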
        dc_link_set_default_brightness_aux(link);
  
        core_link_read_dpcd(link, DP_EDP_DPCD_REV,
@@@ -6119,8 -6069,7 +6069,7 @@@ bool dc_link_dp_set_test_pattern
        if (is_dp_phy_pattern(test_pattern)) {
                /* Set DPCD Lane Settings before running test pattern */
                if (p_link_settings != NULL) {
-                       if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
-                                       (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+                       if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
                                        link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
                                dp_fixed_vs_pe_set_retimer_lane_settings(
                                                link,
@@@ -6861,10 -6810,21 +6810,21 @@@ bool dpcd_write_128b_132b_sst_payload_a
        if (allocate)   {
                avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
                req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+               /// Validation should filter out modes that exceed link BW
+               ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT);
+               if (req_slot_count > MAX_MTP_SLOT_COUNT)
+                       return false;
        } else {
                /// Leave req_slot_count = 0 if allocate is false.
        }
  
+       proposed_table->stream_count = 1; /// Always 1 stream for SST
+       proposed_table->stream_allocations[0].slot_count = req_slot_count;
+       proposed_table->stream_allocations[0].vcp_id = vc_id;
+       if (link->aux_access_disabled)
+               return true;
        /// Write DPCD 2C0 = 1 to start updating
        update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1;
        core_link_write_dpcd(
                        &start_time_slot,
                        1);
  
-       ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); /// Validation should filter out modes that exceed link BW
        core_link_write_dpcd(
                        link,
                        DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT,
                // TODO - DP2.0 Payload: Read and log the payload table from downstream branch
        }
  
-       proposed_table->stream_count = 1; /// Always 1 stream for SST
-       proposed_table->stream_allocations[0].slot_count = req_slot_count;
-       proposed_table->stream_allocations[0].vcp_id = vc_id;
        return result;
  }
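
The net effect of these payload-table changes is an ordering fix: validate the slot count before any DPCD traffic, publish the proposed table unconditionally, and only then program the sink, returning early when AUX access is disabled so callers still see a consistent table. A sketch of the reordered flow, with names taken from the diff:

    avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
    req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
    if (req_slot_count > MAX_MTP_SLOT_COUNT)
            return false;                   /* mode exceeds link bandwidth */

    proposed_table->stream_count = 1;       /* always 1 stream for SST */
    proposed_table->stream_allocations[0].slot_count = req_slot_count;
    proposed_table->stream_allocations[0].vcp_id = vc_id;

    if (link->aux_access_disabled)
            return true;                    /* table filled; skip DPCD writes */
    /* ... DPCD 2C0 payload-table update sequence follows ... */
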
  
@@@ -6952,6 -6907,8 +6907,8 @@@ bool dpcd_poll_for_allocation_change_tr
        union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
        union lane_align_status_updated lane_status_updated;
  
+       if (link->aux_access_disabled)
+               return true;
        for (i = 0; i < act_retries; i++) {
                get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated);
  
@@@ -7099,7 -7056,8 +7056,8 @@@ void dp_enable_link_phy
        unsigned int i;
  
        if (link->connector_signal == SIGNAL_TYPE_EDP) {
-               link->dc->hwss.edp_power_control(link, true);
+               if (!link->dc->config.edp_no_power_sequencing)
+                       link->dc->hwss.edp_power_control(link, true);
                link->dc->hwss.edp_wait_for_hpd_ready(link, true);
        }
  
index 5f8809f6990dd12a323fbedc3f810848f5ea9bb9,974b8fe1dbb6b0dceebe9c11fbf9e7407d254c42..8f828ec76c355136b0c7fd0f0940ded5f2ed0876
@@@ -781,7 -781,7 +781,7 @@@ int smu_v11_0_set_allowed_mask(struct s
                goto failed;
        }
  
 -      bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
 +      bitmap_to_arr32(feature_mask, feature->allowed, 64);
  
        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
                                          feature_mask[1], NULL);
@@@ -2197,3 -2197,12 +2197,12 @@@ int smu_v11_0_restore_user_od_settings(
  
        return ret;
  }
+ void smu_v11_0_set_smu_mailbox_registers(struct smu_context *smu)
+ {
+       struct amdgpu_device *adev = smu->adev;
+
+       smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+       smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+       smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ }
index ef9b56de143bbe620eaa4460762d2f5f44bd2591,0e59ab2192bf4fb14a455e5f5d571b037c224e78..8342703ce7d6f3c175d4e134ed609d6df63ea02a
@@@ -60,6 -60,15 +60,15 @@@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.b
  MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
  MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
  
+ #define mmMP1_SMN_C2PMSG_66                                                                            0x0282
+ #define mmMP1_SMN_C2PMSG_66_BASE_IDX                                                                   0
+ #define mmMP1_SMN_C2PMSG_82                                                                            0x0292
+ #define mmMP1_SMN_C2PMSG_82_BASE_IDX                                                                   0
+ #define mmMP1_SMN_C2PMSG_90                                                                            0x029a
+ #define mmMP1_SMN_C2PMSG_90_BASE_IDX                                                                   0
+
  #define SMU13_VOLTAGE_SCALE 4
  
  #define LINK_WIDTH_MAX                                6
@@@ -264,8 -273,16 +273,16 @@@ int smu_v13_0_check_fw_status(struct sm
        struct amdgpu_device *adev = smu->adev;
        uint32_t mp1_fw_flags;
  
-       mp1_fw_flags = RREG32_PCIE(MP1_Public |
-                                  (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+       switch (adev->ip_versions[MP1_HWIP][0]) {
+       case IP_VERSION(13, 0, 4):
+               mp1_fw_flags = RREG32_PCIE(MP1_Public |
+                                          (smnMP1_V13_0_4_FIRMWARE_FLAGS & 0xffffffff));
+               break;
+       default:
+               mp1_fw_flags = RREG32_PCIE(MP1_Public |
+                                          (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+               break;
+       }
  
        if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
            MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
@@@ -714,6 -731,8 +731,8 @@@ int smu_v13_0_get_vbios_bootup_values(s
                        smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
                        smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
                        smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
+               } else if ((frev == 3) && (crev == 1)) {
+                       return 0;
                } else if ((frev == 4) && (crev == 0)) {
                        smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;
  
@@@ -837,7 -856,7 +856,7 @@@ int smu_v13_0_set_allowed_mask(struct s
            feature->feature_num < 64)
                return -EINVAL;
  
 -      bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
 +      bitmap_to_arr32(feature_mask, feature->allowed, 64);
  
        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
                                              feature_mask[1], NULL);
@@@ -1067,10 -1086,7 +1086,7 @@@ int smu_v13_0_set_power_limit(struct sm
  
  int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
  {
-       if (smu->smu_table.thermal_controller_type)
-               return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
-       return 0;
+       return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
  }
  
  int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
@@@ -2257,7 -2273,8 +2273,8 @@@ int smu_v13_0_baco_set_state(struct smu
        if (state == SMU_BACO_STATE_ENTER) {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_EnterBaco,
-                                                     0,
+                                                     smu_baco->maco_support ?
+                                                     BACO_SEQ_BAMACO : BACO_SEQ_BACO,
                                                      NULL);
        } else {
                ret = smu_cmn_send_smc_msg(smu,
@@@ -2297,6 -2314,16 +2314,16 @@@ int smu_v13_0_baco_exit(struct smu_cont
                                        SMU_BACO_STATE_EXIT);
  }
  
+ int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
+ {
+       uint16_t index;
+
+       index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+                                              SMU_MSG_EnableGfxImu);
+
+       return smu_cmn_send_msg_without_waiting(smu, index, 0);
+ }
+
  int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
                                enum PP_OD_DPM_TABLE_COMMAND type,
                                long input[], uint32_t size)
@@@ -2386,3 -2413,23 +2413,23 @@@ int smu_v13_0_set_default_dpm_tables(st
        return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
                                    smu_table->clocks_table, false);
  }
+ void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu)
+ {
+       struct amdgpu_device *adev = smu->adev;
+
+       smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+       smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+       smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ }
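
Caching the per-IP mailbox register offsets on the smu context lets the shared messaging code in smu_cmn stay generic across SMU v11 and v13. A rough sketch of how the cached registers are consumed; the real send/poll loop lives in smu_cmn.c, so the WREG32/RREG32 usage here is illustrative:

    struct amdgpu_device *adev = smu->adev; /* WREG32() expects adev in scope */

    smu_v13_0_set_smu_mailbox_registers(smu);
    WREG32(smu->resp_reg, 0);           /* C2PMSG_90: clear previous response */
    WREG32(smu->param_reg, param);      /* C2PMSG_82: message argument */
    WREG32(smu->msg_reg, msg_index);    /* C2PMSG_66: message id */
    /* ... then poll RREG32(smu->resp_reg) until the firmware responds ... */
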
+ int smu_v13_0_mode1_reset(struct smu_context *smu)
+ {
+       int ret;
+
+       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+       if (!ret)
+               msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
+       return ret;
+ }
index 62e75dd40d9a634648b6beab951de0cb624da161,9e41e122619388552e8dc7e26cc60c579d02bc05..a0143dd244300769d66190f1e56fdf72802acafd
@@@ -406,7 -406,7 +406,7 @@@ alloc_workqueue(const char *fmt, unsign
   * alloc_ordered_workqueue - allocate an ordered workqueue
   * @fmt: printf format for the name of the workqueue
   * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 - * @args...: args for @fmt
 + * @args: args for @fmt
   *
   * Allocate an ordered workqueue.  An ordered workqueue executes at
   * most one work item at any given time in the queued order.  They are
@@@ -445,7 -445,7 +445,7 @@@ extern bool mod_delayed_work_on(int cpu
                        struct delayed_work *dwork, unsigned long delay);
  extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
  
 -extern void flush_workqueue(struct workqueue_struct *wq);
 +extern void __flush_workqueue(struct workqueue_struct *wq);
  extern void drain_workqueue(struct workqueue_struct *wq);
  
  extern int schedule_on_each_cpu(work_func_t func);
  int execute_in_process_context(work_func_t fn, struct execute_work *);
  
  extern bool flush_work(struct work_struct *work);
+ extern bool cancel_work(struct work_struct *work);
  extern bool cancel_work_sync(struct work_struct *work);
  
  extern bool flush_delayed_work(struct delayed_work *dwork);
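
The new cancel_work() complements cancel_work_sync(): it only removes a pending item and, like cancel_delayed_work(), does not wait for a callback that has already started executing. A short usage sketch (the demo names are hypothetical):

    static void demo_fn(struct work_struct *work) { /* ... */ }
    static DECLARE_WORK(demo_work, demo_fn);

    static void demo(void)
    {
            schedule_work(&demo_work);
            cancel_work(&demo_work);        /* drop if still queued; no wait */
            cancel_work_sync(&demo_work);   /* also waits if already running */
    }
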
@@@ -563,23 -564,15 +564,23 @@@ static inline bool schedule_work(struc
        return queue_work(system_wq, work);
  }
  
 +/*
 + * Detect attempt to flush system-wide workqueues at compile time when possible.
 + *
 + * See https://lkml.kernel.org/r/[email protected]
 + * for reasons and steps for converting system-wide workqueues into local workqueues.
 + */
 +extern void __warn_flushing_systemwide_wq(void)
 +      __compiletime_warning("Please avoid flushing system-wide workqueues.");
 +
  /**
   * flush_scheduled_work - ensure that any scheduled work has run to completion.
   *
   * Forces execution of the kernel-global workqueue and blocks until its
   * completion.
   *
 - * Think twice before calling this function!  It's very easy to get into
 - * trouble if you don't take great care.  Either of the following situations
 - * will lead to deadlock:
 + * It's very easy to get into trouble if you don't take great care.
 + * Either of the following situations will lead to deadlock:
   *
   *    One of the work items currently on the workqueue needs to acquire
   *    a lock held by your code or its caller.
   * need to know that a particular work item isn't queued and isn't running.
   * In such cases you should use cancel_delayed_work_sync() or
   * cancel_work_sync() instead.
 + *
 + * Please stop calling this function! A conversion to stop flushing system-wide
 + * workqueues is in progress. This function will be removed once all in-tree
 + * users have stopped calling it.
   */
 -static inline void flush_scheduled_work(void)
 -{
 -      flush_workqueue(system_wq);
 -}
 +/*
 + * The background of commit 771c035372a036f8 ("deprecate the
 + * '__deprecated' attribute warnings entirely and for good") is that,
 + * since Linus builds all modules between every single pull he does,
 + * the standard kernel build needs to be _clean_ in order to be able to
 + * notice when new problems happen. Therefore, don't emit warning while
 + * there are in-tree users.
 + */
 +#define flush_scheduled_work()                                                \
 +({                                                                    \
 +      if (0)                                                          \
 +              __warn_flushing_systemwide_wq();                        \
 +      __flush_workqueue(system_wq);                                   \
 +})
 +
 +/*
 + * Although there is no longer in-tree caller, for now just emit warning
 + * in order to give out-of-tree callers time to update.
 + */
 +#define flush_workqueue(wq)                                           \
 +({                                                                    \
 +      struct workqueue_struct *_wq = (wq);                            \
 +                                                                      \
 +      if ((__builtin_constant_p(_wq == system_wq) &&                  \
 +           _wq == system_wq) ||                                       \
 +          (__builtin_constant_p(_wq == system_highpri_wq) &&          \
 +           _wq == system_highpri_wq) ||                               \
 +          (__builtin_constant_p(_wq == system_long_wq) &&             \
 +           _wq == system_long_wq) ||                                  \
 +          (__builtin_constant_p(_wq == system_unbound_wq) &&          \
 +           _wq == system_unbound_wq) ||                               \
 +          (__builtin_constant_p(_wq == system_freezable_wq) &&        \
 +           _wq == system_freezable_wq) ||                             \
 +          (__builtin_constant_p(_wq == system_power_efficient_wq) &&  \
 +           _wq == system_power_efficient_wq) ||                       \
 +          (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
 +           _wq == system_freezable_power_efficient_wq))               \
 +              __warn_flushing_systemwide_wq();                        \
 +      __flush_workqueue(_wq);                                         \
 +})
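
The __builtin_constant_p() guards make the warning fire only when the compiler can prove at compile time that the argument is one of the system-wide workqueues; flushing through a pointer it cannot fold stays silent and compiles to a plain __flush_workqueue() call. For illustration (hypothetical module code):

    static struct workqueue_struct *my_wq;

    static void demo(void)
    {
            flush_workqueue(system_wq);     /* provably system-wide: warns */
            flush_workqueue(my_wq);         /* comparison is not a compile-time
                                             * constant: no warning */
    }
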
  
  /**
   * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
index c1b4cfda7507521c2ec262b952ddff95afd9a71a,65af4a724b671aaeaaef599a441145234e2ab80d..0206f812c56995d2c608a921457195b2568dc0d2
@@@ -559,7 -559,7 +559,7 @@@ extern "C" 
   *
   * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
   * and at index 1. The clear color is stored at index 2, and the pitch should
 - * be ignored. The clear color structure is 256 bits. The first 128 bits
 + * be 64-byte aligned. The clear color structure is 256 bits. The first 128 bits
   * represents Raw Clear Color Red, Green, Blue and Alpha color each represented
   * by 32 bits. The raw clear color is consumed by the 3d engine and generates
   * the converted clear color of size 64 bits. The first 32 bits store the Lower
   * outside of the GEM object in a reserved memory area dedicated for the
   * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
   * main surface pitch is required to be a multiple of four Tile 4 widths. The
 - * clear color is stored at plane index 1 and the pitch should be ignored. The
 - * format of the 256 bits of clear color data matches the one used for the
 - * I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description
 + * clear color is stored at plane index 1 and the pitch should be 64-byte
 + * aligned. The format of the 256 bits of clear color data matches the one used
 + * for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description
   * for details.
   */
  #define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
@@@ -1363,6 -1363,7 +1363,7 @@@ drm_fourcc_canonicalize_nvidia_format_m
  #define AMD_FMT_MOD_TILE_VER_GFX9 1
  #define AMD_FMT_MOD_TILE_VER_GFX10 2
  #define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
+ #define AMD_FMT_MOD_TILE_VER_GFX11 4
  
  /*
   * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
  #define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
  #define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26
  #define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
+ #define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
  
  #define AMD_FMT_MOD_DCC_BLOCK_64B 0
  #define AMD_FMT_MOD_DCC_BLOCK_128B 1
  #define AMD_FMT_MOD_PIPE_MASK 0x7
  
  #define AMD_FMT_MOD_SET(field, value) \
-       ((uint64_t)(value) << AMD_FMT_MOD_##field##_SHIFT)
+       ((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT)
  #define AMD_FMT_MOD_GET(field, value) \
        (((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
  #define AMD_FMT_MOD_CLEAR(field) \
-       (~((uint64_t)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
+       (~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
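
The uint64_t to __u64 switch matters because this is a UAPI header: userspace may include it without <stdint.h>, so the kernel's __u64 type must be used. As a usage illustration, a GFX11 modifier can be composed from the new tokens roughly as follows (whether a given ASIC accepts this exact combination is driver-dependent, and the additional DCC/pipe fields are omitted here):

    __u64 modifier = AMD_FMT_MOD |
            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX11_256K_R_X);
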
  
  #if defined(__cplusplus)
  }
diff --combined kernel/workqueue.c
index 1ea50f6be843698965a14a13a7498834fc510e36,20d226d5bbc2689eea4f3d54080f092748f470fe..04200f341b5a64902f542f8cd006dd2fb096599b
@@@ -2788,13 -2788,13 +2788,13 @@@ static bool flush_workqueue_prep_pwqs(s
  }
  
  /**
 - * flush_workqueue - ensure that any scheduled work has run to completion.
 + * __flush_workqueue - ensure that any scheduled work has run to completion.
   * @wq: workqueue to flush
   *
   * This function sleeps until all work items which were queued on entry
   * have finished execution, but it is not livelocked by new incoming ones.
   */
 -void flush_workqueue(struct workqueue_struct *wq)
 +void __flush_workqueue(struct workqueue_struct *wq)
  {
        struct wq_flusher this_flusher = {
                .list = LIST_HEAD_INIT(this_flusher.list),
  out_unlock:
        mutex_unlock(&wq->mutex);
  }
 -EXPORT_SYMBOL(flush_workqueue);
 +EXPORT_SYMBOL(__flush_workqueue);
  
  /**
   * drain_workqueue - drain a workqueue
@@@ -2971,7 -2971,7 +2971,7 @@@ void drain_workqueue(struct workqueue_s
                wq->flags |= __WQ_DRAINING;
        mutex_unlock(&wq->mutex);
  reflush:
 -      flush_workqueue(wq);
 +      __flush_workqueue(wq);
  
        mutex_lock(&wq->mutex);
  
@@@ -3258,6 -3258,15 +3258,15 @@@ static bool __cancel_work(struct work_s
        return ret;
  }
  
+ /*
+  * See cancel_delayed_work()
+  */
+ bool cancel_work(struct work_struct *work)
+ {
+       return __cancel_work(work, false);
+ }
+ EXPORT_SYMBOL(cancel_work);
+
  /**
   * cancel_delayed_work - cancel a delayed work
   * @dwork: delayed_work to cancel
@@@ -5001,7 -5010,7 +5010,7 @@@ static void unbind_workers(int cpu
  
                for_each_pool_worker(worker, pool) {
                        kthread_set_per_cpu(worker->task, -1);
 -                      WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
 +                      WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
                }
  
                mutex_unlock(&wq_pool_attach_mutex);
@@@ -6111,11 -6120,3 +6120,11 @@@ void __init workqueue_init(void
        wq_online = true;
        wq_watchdog_init();
  }
 +
 +/*
 + * Despite the naming, this is a no-op function which is here only to avoid
 + * a link error. Since the compile-time warning may fail to catch all cases,
 + * we will need to emit a run-time warning from __flush_workqueue().
 + */
 +void __warn_flushing_systemwide_wq(void) { }
 +EXPORT_SYMBOL(__warn_flushing_systemwide_wq);