Git Repo - J-linux.git/commitdiff
Merge v6.8-rc6 into drm-next
authorDaniel Vetter <[email protected]>
Mon, 26 Feb 2024 10:41:07 +0000 (11:41 +0100)
committerDaniel Vetter <[email protected]>
Mon, 26 Feb 2024 10:41:07 +0000 (11:41 +0100)
Thomas Zimmermann asked to backmerge -rc6 for drm-misc branches;
there are a few same-area-changed conflicts (xe and amdgpu mostly) that
are getting a bit too annoying.

Signed-off-by: Daniel Vetter <[email protected]>
20 files changed:
1  2 
MAINTAINERS
drivers/accel/ivpu/ivpu_drv.c
drivers/accel/ivpu/ivpu_fw.c
drivers/accel/ivpu/ivpu_hw_37xx.c
drivers/accel/ivpu/ivpu_hw_40xx.c
drivers/accel/ivpu/ivpu_job.c
drivers/accel/ivpu/ivpu_pm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_sdvo.c
drivers/gpu/drm/i915/display/intel_tv.c
drivers/gpu/drm/meson/meson_encoder_cvbs.c
drivers/gpu/drm/meson/meson_encoder_dsi.c
drivers/gpu/drm/meson/meson_encoder_hdmi.c
drivers/gpu/drm/nouveau/nouveau_svm.c
drivers/gpu/drm/scheduler/sched_main.c
include/uapi/drm/nouveau_drm.h

diff --combined MAINTAINERS
index 3527a2ece6cdea300f7599a65d3bbc57536a704d,2ecaaec6a6bf40b1cd3cccaef0f8db81c2e627b5..7e7e7c378913c630fb0a1da2392c223cec75d917
@@@ -4169,14 -4169,14 +4169,14 @@@ F:   drivers/firmware/broadcom/tee_bnxt_f
  F:    drivers/net/ethernet/broadcom/bnxt/
  F:    include/linux/firmware/broadcom/tee_bnxt_fw.h
  
- BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
- M:    Arend van Spriel <[email protected]>
- M:    Franky Lin <[email protected]>
- M:    Hante Meuleman <[email protected]>
+ BROADCOM BRCM80211 IEEE802.11 WIRELESS DRIVERS
+ M:    Arend van Spriel <[email protected]>
  L:    [email protected]
+ L:    [email protected]
  L:    [email protected]
  S:    Supported
  F:    drivers/net/wireless/broadcom/brcm80211/
+ F:    include/linux/platform_data/brcmfmac.h
  
  BROADCOM BRCMSTB GPIO DRIVER
  M:    Doug Berger <[email protected]>
@@@ -5378,7 -5378,7 +5378,7 @@@ CONTROL GROUP - MEMORY RESOURCE CONTROL
  M:    Johannes Weiner <[email protected]>
  M:    Michal Hocko <[email protected]>
  M:    Roman Gushchin <[email protected]>
- M:    Shakeel Butt <shakeel[email protected]>
+ M:    Shakeel Butt <shakeel[email protected]>
  R:    Muchun Song <[email protected]>
  L:    [email protected]
  L:    [email protected]
@@@ -5610,6 -5610,11 +5610,11 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml
  F:    drivers/net/can/ctucanfd/
  
+ CVE ASSIGNMENT CONTACT
+ M:    CVE Assignment Team <[email protected]>
+ S:    Maintained
+ F:    Documentation/process/cve.rst
  CW1200 WLAN driver
  S:    Orphan
  F:    drivers/net/wireless/st/cw1200/
@@@ -7025,7 -7030,7 +7030,7 @@@ X:      drivers/gpu/drm/mediatek
  X:    drivers/gpu/drm/msm/
  X:    drivers/gpu/drm/nouveau/
  X:    drivers/gpu/drm/radeon/
 -X:    drivers/gpu/drm/renesas/
 +X:    drivers/gpu/drm/renesas/rcar-du/
  X:    drivers/gpu/drm/tegra/
  
  DRM DRIVERS FOR ALLWINNER A10
@@@ -7193,22 -7198,12 +7198,22 @@@ F:   Documentation/devicetree/bindings/di
  F:    Documentation/devicetree/bindings/display/renesas,du.yaml
  F:    drivers/gpu/drm/renesas/rcar-du/
  
 +DRM DRIVERS FOR RENESAS RZ
 +M:    Biju Das <[email protected]>
 +L:    [email protected]
 +L:    [email protected]
 +S:    Maintained
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +F:    Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
 +F:    drivers/gpu/drm/renesas/rz-du/
 +
  DRM DRIVERS FOR RENESAS SHMOBILE
  M:    Laurent Pinchart <[email protected]>
  M:    Geert Uytterhoeven <[email protected]>
  L:    [email protected]
  L:    [email protected]
  S:    Supported
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
  F:    Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml
  F:    drivers/gpu/drm/renesas/shmobile/
  F:    include/linux/platform_data/shmob_drm.h
@@@ -7318,7 -7313,6 +7323,7 @@@ F:      drivers/gpu/drm/xlnx
  
  DRM GPU SCHEDULER
  M:    Luben Tuikov <[email protected]>
 +M:    Matthew Brost <[email protected]>
  L:    [email protected]
  S:    Maintained
  T:    git git://anongit.freedesktop.org/drm/drm-misc
@@@ -10478,6 -10472,7 +10483,6 @@@ F:   drivers/media/rc/img-ir
  
  IMGTEC POWERVR DRM DRIVER
  M:    Frank Binns <[email protected]>
 -M:    Donald Robson <[email protected]>
  M:    Matt Coster <[email protected]>
  S:    Supported
  T:    git git://anongit.freedesktop.org/drm/drm-misc
@@@ -10811,11 -10806,11 +10816,11 @@@ F:        drivers/gpio/gpio-tangier.
  
  INTEL GVT-g DRIVERS (Intel GPU Virtualization)
  M:    Zhenyu Wang <[email protected]>
- M:    Zhi Wang <zhi.a.wang@intel.com>
+ M:    Zhi Wang <zhi.wang.linux@gmail.com>
  L:    [email protected]
  L:    [email protected]
  S:    Supported
- W:    https://01.org/igvt-g
+ W:    https://github.com/intel/gvt-linux/wiki
  T:    git https://github.com/intel/gvt-linux.git
  F:    drivers/gpu/drm/i915/gvt/
  
@@@ -11137,7 -11132,6 +11142,6 @@@ S:   Supporte
  F:    drivers/net/wireless/intel/iwlegacy/
  
  INTEL WIRELESS WIFI LINK (iwlwifi)
- M:    Gregory Greenman <[email protected]>
  M:    Miri Korenblit <[email protected]>
  L:    [email protected]
  S:    Supported
@@@ -15248,6 -15242,8 +15252,8 @@@ F:   Documentation/networking
  F:    Documentation/networking/net_cachelines/
  F:    Documentation/process/maintainer-netdev.rst
  F:    Documentation/userspace-api/netlink/
+ F:    include/linux/framer/framer-provider.h
+ F:    include/linux/framer/framer.h
  F:    include/linux/in.h
  F:    include/linux/indirect_call_wrapper.h
  F:    include/linux/net.h
@@@ -15335,7 -15331,7 +15341,7 @@@ K:   \bmdo
  NETWORKING [MPTCP]
  M:    Matthieu Baerts <[email protected]>
  M:    Mat Martineau <[email protected]>
- R:    Geliang Tang <geliang[email protected]>
+ R:    Geliang Tang <geliang@kernel.org>
  L:    [email protected]
  L:    [email protected]
  S:    Maintained
@@@ -16848,6 -16844,7 +16854,7 @@@ F:   drivers/pci/controller/dwc/*designwa
  
  PCI DRIVER FOR TI DRA7XX/J721E
  M:    Vignesh Raghavendra <[email protected]>
+ R:    Siddharth Vadapalli <[email protected]>
  L:    [email protected]
  L:    [email protected]
  L:    [email protected] (moderated for non-subscribers)
@@@ -17193,7 -17190,7 +17200,7 @@@ R:   John Garry <[email protected]
  R:    Will Deacon <[email protected]>
  R:    James Clark <[email protected]>
  R:    Mike Leach <[email protected]>
- R:    Leo Yan <leo.yan@linaro.org>
+ R:    Leo Yan <leo.yan@linux.dev>
  L:    [email protected] (moderated for non-subscribers)
  S:    Supported
  F:    tools/build/feature/test-libopencsd.c
@@@ -18442,7 -18439,7 +18449,7 @@@ S:   Supporte
  F:    drivers/infiniband/sw/rdmavt
  
  RDS - RELIABLE DATAGRAM SOCKETS
- M:    Santosh Shilimkar <santosh.shilimkar@oracle.com>
+ M:    Allison Henderson <allison.henderson@oracle.com>
  L:    [email protected]
  L:    [email protected]
  L:    [email protected] (moderated for non-subscribers)
@@@ -22020,6 -22017,14 +22027,14 @@@ F: Documentation/devicetree/bindings/me
  F:    drivers/media/i2c/ds90*
  F:    include/media/i2c/ds90*
  
+ TI HDC302X HUMIDITY DRIVER
+ M:    Javier Carrasco <[email protected]>
+ M:    Li peiyu <[email protected]>
+ L:    [email protected]
+ S:    Maintained
+ F:    Documentation/devicetree/bindings/iio/humidity/ti,hdc3020.yaml
+ F:    drivers/iio/humidity/hdc3020.c
  TI ICSSG ETHERNET DRIVER (ICSSG)
  R:    MD Danish Anwar <[email protected]>
  R:    Roger Quadros <[email protected]>
@@@ -22875,9 -22880,8 +22890,8 @@@ S:   Maintaine
  F:    drivers/usb/typec/mux/pi3usb30532.c
  
  USB TYPEC PORT CONTROLLER DRIVERS
- M:    Guenter Roeck <[email protected]>
  L:    [email protected]
- S:    Maintained
+ S:    Orphan
  F:    drivers/usb/typec/tcpm/
  
  USB UHCI DRIVER
index 3f2439117582c1278aa1e74af015f5159707ed97,4b06402269869335c324770fc572a57bea7316f7..39f6d1b98fd6a50d5d9df2defe305a23b36f9bcf
@@@ -45,11 -45,11 +45,11 @@@ MODULE_PARM_DESC(test_mode, "Test mode 
  
  u8 ivpu_pll_min_ratio;
  module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
 -MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency");
 +MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");
  
  u8 ivpu_pll_max_ratio = U8_MAX;
  module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
 -MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");
 +MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
  
  bool ivpu_disable_mmu_cont_pages;
  module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
@@@ -328,13 -328,13 +328,13 @@@ static int ivpu_wait_for_ready(struct i
        ivpu_ipc_consumer_del(vdev, &cons);
  
        if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
 -              ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
 +              ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
                         ipc_hdr.data_addr);
                return -EIO;
        }
  
        if (!ret)
 -              ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
 +              ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");
  
        return ret;
  }
@@@ -480,9 -480,8 +480,8 @@@ static int ivpu_pci_init(struct ivpu_de
        /* Clear any pending errors */
        pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
  
-       /* VPU 37XX does not require 10m D3hot delay */
-       if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
-               pdev->d3hot_delay = 0;
+       /* NPU does not require 10m D3hot delay */
+       pdev->d3hot_delay = 0;
  
        ret = pcim_enable_device(pdev);
        if (ret) {
@@@ -533,7 -532,6 +532,7 @@@ static int ivpu_dev_init(struct ivpu_de
        atomic64_set(&vdev->unique_id_counter, 0);
        xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
        xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
 +      xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
        lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
        INIT_LIST_HEAD(&vdev->bo_list);
  
@@@ -607,7 -605,6 +606,7 @@@ err_power_down
        if (IVPU_WA(d3hot_after_power_off))
                pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
  err_xa_destroy:
 +      xa_destroy(&vdev->db_xa);
        xa_destroy(&vdev->submitted_jobs_xa);
        xa_destroy(&vdev->context_xa);
        return ret;
@@@ -643,8 -640,6 +642,8 @@@ static void ivpu_dev_fini(struct ivpu_d
        ivpu_mmu_reserved_context_fini(vdev);
        ivpu_mmu_global_context_fini(vdev);
  
 +      drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
 +      xa_destroy(&vdev->db_xa);
        drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
        xa_destroy(&vdev->submitted_jobs_xa);
        drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
index dfa91d48f901f031dcfdf32d80771fcaf9b5b620,5fa8bd4603d5be6f1fba8c43ba058e6a9b4f3676..1457300828bf15ae3ca466a1a77fd457b8a2a3c5
  
  static char *ivpu_firmware;
  module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
 -MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
 +MODULE_PARM_DESC(firmware, "NPU firmware binary in /lib/firmware/..");
  
 -/* TODO: Remove mtl_vpu.bin from names after transition to generation based FW names */
  static struct {
        int gen;
        const char *name;
  } fw_names[] = {
        { IVPU_HW_37XX, "vpu_37xx.bin" },
 -      { IVPU_HW_37XX, "mtl_vpu.bin" },
        { IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
        { IVPU_HW_40XX, "vpu_40xx.bin" },
        { IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
@@@ -220,7 -222,6 +220,6 @@@ ivpu_fw_init_wa(struct ivpu_device *vde
        const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data;
  
        if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) ||
-           (ivpu_hw_gen(vdev) > IVPU_HW_37XX) ||
            (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE))
                vdev->wa.disable_d0i3_msg = true;
  
@@@ -249,7 -250,6 +248,7 @@@ static int ivpu_fw_update_global_range(
  static int ivpu_fw_mem_init(struct ivpu_device *vdev)
  {
        struct ivpu_fw_info *fw = vdev->fw;
 +      struct ivpu_addr_range fw_range;
        int log_verb_size;
        int ret;
  
        if (ret)
                return ret;
  
 -      fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
 +      fw_range.start = fw->runtime_addr;
 +      fw_range.end = fw->runtime_addr + fw->runtime_size;
 +      fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size,
 +                               DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
        if (!fw->mem) {
 -              ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
 +              ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n");
                return -ENOMEM;
        }
  
 -      fw->mem_log_crit = ivpu_bo_alloc_internal(vdev, 0, IVPU_FW_CRITICAL_BUFFER_SIZE,
 -                                                DRM_IVPU_BO_CACHED);
 +      fw->mem_log_crit = ivpu_bo_create_global(vdev, IVPU_FW_CRITICAL_BUFFER_SIZE,
 +                                               DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
        if (!fw->mem_log_crit) {
 -              ivpu_err(vdev, "Failed to allocate critical log buffer\n");
 +              ivpu_err(vdev, "Failed to create critical log buffer\n");
                ret = -ENOMEM;
                goto err_free_fw_mem;
        }
        else
                log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;
  
 -      fw->mem_log_verb = ivpu_bo_alloc_internal(vdev, 0, log_verb_size, DRM_IVPU_BO_CACHED);
 +      fw->mem_log_verb = ivpu_bo_create_global(vdev, log_verb_size,
 +                                               DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
        if (!fw->mem_log_verb) {
 -              ivpu_err(vdev, "Failed to allocate verbose log buffer\n");
 +              ivpu_err(vdev, "Failed to create verbose log buffer\n");
                ret = -ENOMEM;
                goto err_free_log_crit;
        }
  
        if (fw->shave_nn_size) {
 -              fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
 -                                                        fw->shave_nn_size, DRM_IVPU_BO_WC);
 +              fw->mem_shave_nn = ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.shave,
 +                                                fw->shave_nn_size, DRM_IVPU_BO_WC);
                if (!fw->mem_shave_nn) {
 -                      ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
 +                      ivpu_err(vdev, "Failed to create shavenn buffer\n");
                        ret = -ENOMEM;
                        goto err_free_log_verb;
                }
        return 0;
  
  err_free_log_verb:
 -      ivpu_bo_free_internal(fw->mem_log_verb);
 +      ivpu_bo_free(fw->mem_log_verb);
  err_free_log_crit:
 -      ivpu_bo_free_internal(fw->mem_log_crit);
 +      ivpu_bo_free(fw->mem_log_crit);
  err_free_fw_mem:
 -      ivpu_bo_free_internal(fw->mem);
 +      ivpu_bo_free(fw->mem);
        return ret;
  }
  
@@@ -313,13 -309,13 +312,13 @@@ static void ivpu_fw_mem_fini(struct ivp
        struct ivpu_fw_info *fw = vdev->fw;
  
        if (fw->mem_shave_nn) {
 -              ivpu_bo_free_internal(fw->mem_shave_nn);
 +              ivpu_bo_free(fw->mem_shave_nn);
                fw->mem_shave_nn = NULL;
        }
  
 -      ivpu_bo_free_internal(fw->mem_log_verb);
 -      ivpu_bo_free_internal(fw->mem_log_crit);
 -      ivpu_bo_free_internal(fw->mem);
 +      ivpu_bo_free(fw->mem_log_verb);
 +      ivpu_bo_free(fw->mem_log_crit);
 +      ivpu_bo_free(fw->mem);
  
        fw->mem_log_verb = NULL;
        fw->mem_log_crit = NULL;
@@@ -473,8 -469,6 +472,8 @@@ static void ivpu_fw_boot_params_print(s
                 boot_params->d0i3_residency_time_us);
        ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n",
                 boot_params->d0i3_entry_vpu_ts);
 +      ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
 +               boot_params->system_time_us);
  }
  
  void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
                boot_params->d0i3_residency_time_us =
                        ktime_us_delta(ktime_get_boottime(), vdev->hw->d0i3_entry_host_ts);
                boot_params->d0i3_entry_vpu_ts = vdev->hw->d0i3_entry_vpu_ts;
 +              boot_params->system_time_us = ktime_to_us(ktime_get_real());
  
                ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_residency_time_us = %lld\n",
                         boot_params->d0i3_residency_time_us);
                ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n",
                         boot_params->d0i3_entry_vpu_ts);
 +              ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
 +                       boot_params->system_time_us);
  
                boot_params->save_restore_ret_address = 0;
                vdev->pm->is_warmboot = true;
        boot_params->d0i3_residency_time_us = 0;
        boot_params->d0i3_entry_vpu_ts = 0;
  
 +      boot_params->system_time_us = ktime_to_us(ktime_get_real());
        wmb(); /* Flush WC buffers after writing bootparams */
  
        ivpu_fw_boot_params_print(vdev, boot_params);
index be91c6744b129449c00cfdf1f41f62dc8cad08e9,89af1006df5587ba560415a7d669251648f3c3e8..9a0c9498baba293cece13e9584f21f7b2067c681
@@@ -13,7 -13,7 +13,7 @@@
  #include "ivpu_pm.h"
  
  #define TILE_FUSE_ENABLE_BOTH        0x0
 -#define TILE_SKU_BOTH_MTL            0x3630
 +#define TILE_SKU_BOTH                0x3630
  
  /* Work point configuration values */
  #define CONFIG_1_TILE                0x01
@@@ -228,7 -228,7 +228,7 @@@ static int ivpu_pll_drive(struct ivpu_d
  
                ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
                if (ret) {
 -                      ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
 +                      ivpu_err(vdev, "Timed out waiting for NPU IP bar\n");
                        return ret;
                }
        }
@@@ -510,22 -510,12 +510,12 @@@ static int ivpu_boot_pwr_domain_enable(
        return ret;
  }
  
- static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
- {
-       ivpu_boot_dpu_active_drive(vdev, false);
-       ivpu_boot_pwr_island_isolation_drive(vdev, true);
-       ivpu_boot_pwr_island_trickle_drive(vdev, false);
-       ivpu_boot_pwr_island_drive(vdev, false);
-       return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
- }
  static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
  {
        u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
  
        val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
-       val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+       val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
        val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
  
        REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
@@@ -599,7 -589,7 +589,7 @@@ static int ivpu_hw_37xx_info_init(struc
        struct ivpu_hw_info *hw = vdev->hw;
  
        hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
 -      hw->sku = TILE_SKU_BOTH_MTL;
 +      hw->sku = TILE_SKU_BOTH;
        hw->config = WP_CONFIG_2_TILE_4_3_RATIO;
  
        ivpu_pll_init_frequency_ratios(vdev);
        return 0;
  }
  
+ static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev)
+ {
+       int ret;
+       u32 val;
+       if (IVPU_WA(punit_disabled))
+               return 0;
+       ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+       if (ret) {
+               ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
+               return ret;
+       }
+       val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
+       val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+       REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
+       ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+       if (ret)
+               ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+       return ret;
+ }
  static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
  {
        int ret = 0;
  
-       if (ivpu_boot_pwr_domain_disable(vdev)) {
-               ivpu_err(vdev, "Failed to disable power domain\n");
+       if (ivpu_hw_37xx_ip_reset(vdev)) {
+               ivpu_err(vdev, "Failed to reset NPU\n");
                ret = -EIO;
        }
  
@@@ -661,6 -676,11 +676,11 @@@ static int ivpu_hw_37xx_power_up(struc
  {
        int ret;
  
+       /* PLL requests may fail when powering down, so issue WP 0 here */
+       ret = ivpu_pll_disable(vdev);
+       if (ret)
+               ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret);
        ret = ivpu_hw_37xx_d0i3_disable(vdev);
        if (ret)
                ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@@@ -742,10 -762,10 +762,10 @@@ static int ivpu_hw_37xx_power_down(stru
        ivpu_hw_37xx_save_d0i3_entry_timestamp(vdev);
  
        if (!ivpu_hw_37xx_is_idle(vdev))
 -              ivpu_warn(vdev, "VPU not idle during power down\n");
 +              ivpu_warn(vdev, "NPU not idle during power down\n");
  
        if (ivpu_hw_37xx_reset(vdev)) {
 -              ivpu_err(vdev, "Failed to reset VPU\n");
 +              ivpu_err(vdev, "Failed to reset NPU\n");
                ret = -EIO;
        }
  
index d4663a42416b7ad3ae24cf68bef1983616ae948d,a1523d0b1ef3660709ae087003a703fb4f8237bd..e4eddbf5d11c250bb8ddd2a27843242166896217
@@@ -24,7 -24,7 +24,7 @@@
  #define SKU_HW_ID_SHIFT              16u
  #define SKU_HW_ID_MASK               0xffff0000u
  
- #define PLL_CONFIG_DEFAULT           0x1
+ #define PLL_CONFIG_DEFAULT           0x0
  #define PLL_CDYN_DEFAULT             0x80
  #define PLL_EPP_DEFAULT              0x80
  #define PLL_REF_CLK_FREQ           (50 * 1000000)
@@@ -80,11 -80,11 +80,11 @@@ static char *ivpu_platform_to_str(u32 p
  {
        switch (platform) {
        case IVPU_PLATFORM_SILICON:
 -              return "IVPU_PLATFORM_SILICON";
 +              return "SILICON";
        case IVPU_PLATFORM_SIMICS:
 -              return "IVPU_PLATFORM_SIMICS";
 +              return "SIMICS";
        case IVPU_PLATFORM_FPGA:
 -              return "IVPU_PLATFORM_FPGA";
 +              return "FPGA";
        default:
                return "Invalid platform";
        }
@@@ -530,7 -530,7 +530,7 @@@ static void ivpu_boot_no_snoop_enable(s
        u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
  
        val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
-       val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
+       val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
        val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
  
        REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
@@@ -704,7 -704,6 +704,6 @@@ static int ivpu_hw_40xx_info_init(struc
  {
        struct ivpu_hw_info *hw = vdev->hw;
        u32 tile_disable;
-       u32 tile_enable;
        u32 fuse;
  
        fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
        else
                ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
  
-       tile_enable = (~tile_disable) & TILE_MAX_MASK;
-       hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
-       hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
        hw->tile_fuse = tile_disable;
        hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
  
@@@ -773,7 -768,7 +768,7 @@@ static int ivpu_hw_40xx_reset(struct iv
        int ret = 0;
  
        if (ivpu_hw_40xx_ip_reset(vdev)) {
 -              ivpu_err(vdev, "Failed to reset VPU IP\n");
 +              ivpu_err(vdev, "Failed to reset NPU IP\n");
                ret = -EIO;
        }
  
@@@ -931,7 -926,7 +926,7 @@@ static int ivpu_hw_40xx_power_down(stru
        ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev);
  
        if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_ip_reset(vdev))
 -              ivpu_warn(vdev, "Failed to reset the VPU\n");
 +              ivpu_warn(vdev, "Failed to reset the NPU\n");
  
        if (ivpu_pll_disable(vdev)) {
                ivpu_err(vdev, "Failed to disable PLL\n");
index a9f2fe1bddea718dd5cde63c4dc37471c8ee6d0b,e70cfb8593390e489e9f9868fb6c2420733ae241..a49bc9105ed0c98fce6571737096977c6fc4d51b
@@@ -30,26 -30,19 +30,26 @@@ static void ivpu_cmdq_ring_db(struct iv
  
  static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
  {
 +      struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
        struct ivpu_device *vdev = file_priv->vdev;
        struct vpu_job_queue_header *jobq_header;
        struct ivpu_cmdq *cmdq;
 +      int ret;
  
        cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
        if (!cmdq)
                return NULL;
  
 -      cmdq->mem = ivpu_bo_alloc_internal(vdev, 0, SZ_4K, DRM_IVPU_BO_WC);
 +      ret = xa_alloc(&vdev->db_xa, &cmdq->db_id, NULL, db_xa_limit, GFP_KERNEL);
 +      if (ret) {
 +              ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
 +              goto err_free_cmdq;
 +      }
 +
 +      cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
        if (!cmdq->mem)
 -              goto cmdq_free;
 +              goto err_erase_xa;
  
 -      cmdq->db_id = file_priv->ctx.id + engine * ivpu_get_context_count(vdev);
        cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
                                  sizeof(struct vpu_job_queue_entry));
  
@@@ -62,9 -55,7 +62,9 @@@
  
        return cmdq;
  
 -cmdq_free:
 +err_erase_xa:
 +      xa_erase(&vdev->db_xa, cmdq->db_id);
 +err_free_cmdq:
        kfree(cmdq);
        return NULL;
  }
@@@ -74,8 -65,7 +74,8 @@@ static void ivpu_cmdq_free(struct ivpu_
        if (!cmdq)
                return;
  
 -      ivpu_bo_free_internal(cmdq->mem);
 +      ivpu_bo_free(cmdq->mem);
 +      xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
        kfree(cmdq);
  }
  
@@@ -304,7 -294,7 +304,7 @@@ static int ivpu_job_signal_and_destroy(
                return -ENOENT;
  
        if (job->file_priv->has_mmu_faults)
-               job_status = VPU_JSM_STATUS_ABORTED;
+               job_status = DRM_IVPU_JOB_STATUS_ABORTED;
  
        job->bos[CMD_BUF_IDX]->job_status = job_status;
        dma_fence_signal(job->done_fence);
@@@ -325,7 -315,7 +325,7 @@@ void ivpu_jobs_abort_all(struct ivpu_de
        unsigned long id;
  
        xa_for_each(&vdev->submitted_jobs_xa, id, job)
-               ivpu_job_signal_and_destroy(vdev, id, VPU_JSM_STATUS_ABORTED);
+               ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
  }
  
  static int ivpu_job_submit(struct ivpu_job *job)
index 280fd3936f602da29f68d259504973557841f278,5f73854234ba93da22b00113376c296df1ebd35a..9973945dff5dac22d4a0eb8f0107340d4464a98a
@@@ -22,7 -22,7 +22,7 @@@
  
  static bool ivpu_disable_recovery;
  module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
 -MODULE_PARM_DESC(disable_recovery, "Disables recovery when VPU hang is detected");
 +MODULE_PARM_DESC(disable_recovery, "Disables recovery when NPU hang is detected");
  
  static unsigned long ivpu_tdr_timeout_ms;
  module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
@@@ -58,11 -58,14 +58,14 @@@ static int ivpu_suspend(struct ivpu_dev
  {
        int ret;
  
+       /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
+       pci_save_state(to_pci_dev(vdev->drm.dev));
        ret = ivpu_shutdown(vdev);
-       if (ret) {
+       if (ret)
                ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
-               return ret;
-       }
+       pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
  
        return ret;
  }
@@@ -71,6 -74,9 +74,9 @@@ static int ivpu_resume(struct ivpu_devi
  {
        int ret;
  
+       pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+       pci_restore_state(to_pci_dev(vdev->drm.dev));
  retry:
        ret = ivpu_hw_power_up(vdev);
        if (ret) {
@@@ -112,23 -118,28 +118,28 @@@ static void ivpu_pm_recovery_work(struc
        char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
        int ret;
  
 -      ivpu_err(vdev, "Recovering the VPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
 +      ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
  
        ret = pm_runtime_resume_and_get(vdev->drm.dev);
        if (ret)
 -              ivpu_err(vdev, "Failed to resume VPU: %d\n", ret);
 +              ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
  
        ivpu_fw_log_dump(vdev);
  
- retry:
-       ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
-       if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
-               cond_resched();
-               goto retry;
-       }
+       atomic_inc(&vdev->pm->reset_counter);
+       atomic_set(&vdev->pm->reset_pending, 1);
+       down_write(&vdev->pm->reset_lock);
  
-       if (ret && ret != -EAGAIN)
-               ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
+       ivpu_suspend(vdev);
+       ivpu_pm_prepare_cold_boot(vdev);
+       ivpu_jobs_abort_all(vdev);
+       ret = ivpu_resume(vdev);
+       if (ret)
+               ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+       up_write(&vdev->pm->reset_lock);
+       atomic_set(&vdev->pm->reset_pending, 0);
  
        kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
        pm_runtime_mark_last_busy(vdev->drm.dev);
@@@ -200,9 -211,6 +211,6 @@@ int ivpu_pm_suspend_cb(struct device *d
        ivpu_suspend(vdev);
        ivpu_pm_prepare_warm_boot(vdev);
  
-       pci_save_state(to_pci_dev(dev));
-       pci_set_power_state(to_pci_dev(dev), PCI_D3hot);
        ivpu_dbg(vdev, PM, "Suspend done.\n");
  
        return 0;
@@@ -216,9 -224,6 +224,6 @@@ int ivpu_pm_resume_cb(struct device *de
  
        ivpu_dbg(vdev, PM, "Resume..\n");
  
-       pci_set_power_state(to_pci_dev(dev), PCI_D0);
-       pci_restore_state(to_pci_dev(dev));
        ret = ivpu_resume(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to resume: %d\n", ret);
@@@ -255,10 -260,10 +260,10 @@@ int ivpu_pm_runtime_suspend_cb(struct d
  
        ret = ivpu_suspend(vdev);
        if (ret)
 -              ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret);
 +              ivpu_err(vdev, "Failed to suspend NPU: %d\n", ret);
  
        if (!hw_is_idle) {
 -              ivpu_err(vdev, "VPU failed to enter idle, force suspended.\n");
 +              ivpu_err(vdev, "NPU failed to enter idle, force suspended.\n");
                ivpu_fw_log_dump(vdev);
                ivpu_pm_prepare_cold_boot(vdev);
        } else {
index 47338a8200edfde07be32b31fe4c263e247aed17,5853cf022917680cbc52796c533f34d687537130..bcdd4f28b64c60e88d104634962b00d92dec3917
@@@ -67,7 -67,6 +67,7 @@@
  #include "amdgpu_dm_debugfs.h"
  #endif
  #include "amdgpu_dm_psr.h"
 +#include "amdgpu_dm_replay.h"
  
  #include "ivsrcid/ivsrcid_vislands30.h"
  
@@@ -1844,21 -1843,12 +1844,12 @@@ static int amdgpu_dm_init(struct amdgpu
                        DRM_ERROR("amdgpu: fail to register dmub aux callback");
                        goto error;
                }
-               if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
-                       DRM_ERROR("amdgpu: fail to register dmub hpd callback");
-                       goto error;
-               }
-               if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
-                       DRM_ERROR("amdgpu: fail to register dmub hpd callback");
-                       goto error;
-               }
-       }
-       /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
-        * It is expected that DMUB will resend any pending notifications at this point, for
-        * example HPD from DPIA.
-        */
-       if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+               /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+                * It is expected that DMUB will resend any pending notifications at this point. Note
+                * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
+                * align legacy interface initialization sequence. Connection status will be proactivly
+                * detected once in the amdgpu_dm_initialize_drm_device.
+                */
                dc_enable_dmub_outbox(adev->dm.dc);
  
                /* DPIA trace goes to dmesg logs only if outbox is enabled */
@@@ -1939,15 -1929,17 +1930,15 @@@ static void amdgpu_dm_fini(struct amdgp
                adev->dm.hdcp_workqueue = NULL;
        }
  
 -      if (adev->dm.dc)
 +      if (adev->dm.dc) {
                dc_deinit_callbacks(adev->dm.dc);
 -
 -      if (adev->dm.dc)
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
 -
 -      if (dc_enable_dmub_notifications(adev->dm.dc)) {
 -              kfree(adev->dm.dmub_notify);
 -              adev->dm.dmub_notify = NULL;
 -              destroy_workqueue(adev->dm.delayed_hpd_wq);
 -              adev->dm.delayed_hpd_wq = NULL;
 +              if (dc_enable_dmub_notifications(adev->dm.dc)) {
 +                      kfree(adev->dm.dmub_notify);
 +                      adev->dm.dmub_notify = NULL;
 +                      destroy_workqueue(adev->dm.delayed_hpd_wq);
 +                      adev->dm.delayed_hpd_wq = NULL;
 +              }
        }
  
        if (adev->dm.dmub_bo)
@@@ -2120,17 -2112,6 +2111,17 @@@ static int dm_dmub_sw_init(struct amdgp
        const struct dmcub_firmware_header_v1_0 *hdr;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
 +      static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
 +              DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_0_INST_CONST
 +              DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_1_STACK
 +              DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_2_BSS_DATA
 +              DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_3_VBIOS
 +              DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_4_MAILBOX
 +              DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_5_TRACEBUFF
 +              DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_6_FW_STATE
 +              DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_7_SCRATCH_MEM
 +              DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_SHARED_STATE
 +      };
        int r;
  
        switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                PSP_HEADER_BYTES;
 -      region_params.is_mailbox_in_inbox = false;
 +      region_params.window_memory_type = window_memory_type;
  
        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);
        memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
        memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
        memory_params.region_info = &region_info;
 +      memory_params.window_memory_type = window_memory_type;
  
        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
@@@ -2298,6 -2278,7 +2289,7 @@@ static int dm_sw_fini(void *handle
  
        if (adev->dm.dmub_srv) {
                dmub_srv_destroy(adev->dm.dmub_srv);
+               kfree(adev->dm.dmub_srv);
                adev->dm.dmub_srv = NULL;
        }
  
@@@ -3547,6 -3528,14 +3539,14 @@@ static void register_hpd_handlers(struc
        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
  
+       if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+               if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
+                       DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+               if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
+                       DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+       }
        list_for_each_entry(connector,
                        &dev->mode_config.connector_list, head) {
  
                                        handle_hpd_rx_irq,
                                        (void *) aconnector);
                }
-               if (adev->dm.hpd_rx_offload_wq)
-                       adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
-                               aconnector;
        }
  }
  
@@@ -4410,7 -4395,6 +4406,7 @@@ static int amdgpu_dm_initialize_drm_dev
        enum dc_connection_type new_connection_type = dc_connection_none;
        const struct dc_plane_cap *plane;
        bool psr_feature_enabled = false;
 +      bool replay_feature_enabled = false;
        int max_overlay = dm->dc->caps.max_slave_planes;
  
        dm->display_indexes_num = dm->dc->caps.max_streams;
                }
        }
  
 +      /* Determine whether to enable Replay support by default. */
 +      if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
 +              switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 +              case IP_VERSION(3, 1, 4):
 +              case IP_VERSION(3, 1, 5):
 +              case IP_VERSION(3, 1, 6):
 +              case IP_VERSION(3, 2, 0):
 +              case IP_VERSION(3, 2, 1):
 +              case IP_VERSION(3, 5, 0):
 +                      replay_feature_enabled = true;
 +                      break;
 +              default:
 +                      replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
 +                      break;
 +              }
 +      }
 +
        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {
                struct dc_link *link = NULL;
                        goto fail;
                }
  
+               if (dm->hpd_rx_offload_wq)
+                       dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
+                               aconnector;
                if (!dc_link_detect_connection_type(link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");
  
                                amdgpu_dm_update_connector_after_detect(aconnector);
                                setup_backlight_device(dm, aconnector);
  
 +                              /* Disable PSR if Replay can be enabled */
 +                              if (replay_feature_enabled)
 +                                      if (amdgpu_dm_set_replay_caps(link, aconnector))
 +                                              psr_feature_enabled = false;
 +
                                if (psr_feature_enabled)
                                        amdgpu_dm_set_psr_caps(link);
  
@@@ -6443,82 -6409,10 +6443,82 @@@ int amdgpu_dm_connector_atomic_get_prop
        return ret;
  }
  
 +/**
 + * DOC: panel power savings
 + *
 + * The display manager allows you to set your desired **panel power savings**
 + * level (between 0-4, with 0 representing off), e.g. using the following::
 + *
 + *   # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
 + *
 + * Modifying this value can have implications on color accuracy, so tread
 + * carefully.
 + */
 +
 +static ssize_t panel_power_savings_show(struct device *device,
 +                                      struct device_attribute *attr,
 +                                      char *buf)
 +{
 +      struct drm_connector *connector = dev_get_drvdata(device);
 +      struct drm_device *dev = connector->dev;
 +      u8 val;
 +
 +      drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 +      val = to_dm_connector_state(connector->state)->abm_level ==
 +              ABM_LEVEL_IMMEDIATE_DISABLE ? 0 :
 +              to_dm_connector_state(connector->state)->abm_level;
 +      drm_modeset_unlock(&dev->mode_config.connection_mutex);
 +
 +      return sysfs_emit(buf, "%u\n", val);
 +}
 +
 +static ssize_t panel_power_savings_store(struct device *device,
 +                                       struct device_attribute *attr,
 +                                       const char *buf, size_t count)
 +{
 +      struct drm_connector *connector = dev_get_drvdata(device);
 +      struct drm_device *dev = connector->dev;
 +      long val;
 +      int ret;
 +
 +      ret = kstrtol(buf, 0, &val);
 +
 +      if (ret)
 +              return ret;
 +
 +      if (val < 0 || val > 4)
 +              return -EINVAL;
 +
 +      drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 +      to_dm_connector_state(connector->state)->abm_level = val ?:
 +              ABM_LEVEL_IMMEDIATE_DISABLE;
 +      drm_modeset_unlock(&dev->mode_config.connection_mutex);
 +
 +      drm_kms_helper_hotplug_event(dev);
 +
 +      return count;
 +}
 +
 +static DEVICE_ATTR_RW(panel_power_savings);
 +
 +static struct attribute *amdgpu_attrs[] = {
 +      &dev_attr_panel_power_savings.attr,
 +      NULL
 +};
 +
 +static const struct attribute_group amdgpu_group = {
 +      .name = "amdgpu",
 +      .attrs = amdgpu_attrs
 +};
 +
  static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
  {
        struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
  
 +      if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
 +          amdgpu_dm_abm_level < 0)
 +              sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
 +
        drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
  }
  
@@@ -6580,12 -6474,9 +6580,12 @@@ void amdgpu_dm_connector_funcs_reset(st
                state->vcpi_slots = 0;
                state->pbn = 0;
  
 -              if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
 -                      state->abm_level = amdgpu_dm_abm_level ?:
 -                              ABM_LEVEL_IMMEDIATE_DISABLE;
 +              if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
 +                      if (amdgpu_dm_abm_level <= 0)
 +                              state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
 +                      else
 +                              state->abm_level = amdgpu_dm_abm_level;
 +              }
  
                __drm_atomic_helper_connector_reset(connector, &state->base);
        }
@@@ -6623,14 -6514,6 +6623,14 @@@ amdgpu_dm_connector_late_register(struc
                to_amdgpu_dm_connector(connector);
        int r;
  
 +      if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
 +          amdgpu_dm_abm_level < 0) {
 +              r = sysfs_create_group(&connector->kdev->kobj,
 +                                     &amdgpu_group);
 +              if (r)
 +                      return r;
 +      }
 +
        amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
  
        if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
  static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
  {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-       struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
        struct dc_link *dc_link = aconnector->dc_link;
        struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
        struct edid *edid;
+       struct i2c_adapter *ddc;
+       if (dc_link->aux_mode)
+               ddc = &aconnector->dm_dp_aux.aux.ddc;
+       else
+               ddc = &aconnector->i2c->base;
  
        /*
         * Note: drm_get_edid gets edid in the following order:
         * 2) firmware EDID if set via edid_firmware module parameter
         * 3) regular DDC read.
         */
-       edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+       edid = drm_get_edid(connector, ddc);
        if (!edid) {
                DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
                return;
@@@ -6703,12 -6591,18 +6708,18 @@@ static int get_modes(struct drm_connect
  static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
  {
        struct drm_connector *connector = &aconnector->base;
-       struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base);
+       struct dc_link *dc_link = aconnector->dc_link;
        struct dc_sink_init_data init_params = {
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
        struct edid *edid;
+       struct i2c_adapter *ddc;
+       if (dc_link->aux_mode)
+               ddc = &aconnector->dm_dp_aux.aux.ddc;
+       else
+               ddc = &aconnector->i2c->base;
  
        /*
         * Note: drm_get_edid gets edid in the following order:
         * 2) firmware EDID if set via edid_firmware module parameter
         * 3) regular DDC read.
         */
-       edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+       edid = drm_get_edid(connector, ddc);
        if (!edid) {
                DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
                return;
@@@ -7654,8 -7548,7 +7665,8 @@@ void amdgpu_dm_connector_init_helper(st
        aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
  
        if (connector_type == DRM_MODE_CONNECTOR_eDP &&
 -          (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
 +          (dc_is_dmcu_initialized(adev->dm.dc) ||
 +           adev->dm.dc->ctx->dmub_srv) && amdgpu_dm_abm_level < 0) {
                drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.abm_level_property, 0);
        }
@@@ -8653,22 -8546,10 +8664,22 @@@ static void amdgpu_dm_commit_planes(str
                        dm_update_pflip_irq_state(drm_to_adev(dev),
                                                  acrtc_attach);
  
 -              if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
 -                              acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
 -                              !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
 -                      amdgpu_dm_link_setup_psr(acrtc_state->stream);
 +              if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
 +                      if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
 +                                      !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
 +                              struct amdgpu_dm_connector *aconn =
 +                                      (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
 +                              amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
 +                      } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
 +                                      !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
 +
 +                              struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
 +                                      acrtc_state->stream->dm_stream_context;
 +
 +                              if (!aconn->disallow_edp_enter_psr)
 +                                      amdgpu_dm_link_setup_psr(acrtc_state->stream);
 +                      }
 +              }
  
                /* Decrement skip count when PSR is enabled and we're doing fast updates. */
                if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
                            !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
  #endif
                            !acrtc_state->stream->link->psr_settings.psr_allow_active &&
 +                          !aconn->disallow_edp_enter_psr &&
                            (timestamp_ns -
                            acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
                            500000000)
@@@ -8958,12 -8838,11 +8969,12 @@@ static void amdgpu_dm_commit_streams(st
                }
        } /* for_each_crtc_in_state() */
  
 -      /* if there mode set or reset, disable eDP PSR */
 +      /* if there mode set or reset, disable eDP PSR, Replay */
        if (mode_set_reset_required) {
                if (dm->vblank_control_workqueue)
                        flush_workqueue(dm->vblank_control_workqueue);
  
 +              amdgpu_dm_replay_disable_all(dm);
                amdgpu_dm_psr_disable_all(dm);
        }
  
index 0bc32537e2ebdc9229425370429adc7b10799589,363d522603a21744c02e3e3497a2907862b02fd1..6083b1dcf050a60e35b491a27f018befeb3c5d43
@@@ -74,10 -74,7 +74,10 @@@ void dc_dmub_srv_wait_idle(struct dc_dm
        struct dc_context *dc_ctx = dc_dmub_srv->ctx;
        enum dmub_status status;
  
 -      status = dmub_srv_wait_for_idle(dmub, 100000);
 +      do {
 +              status = dmub_srv_wait_for_idle(dmub, 100000);
 +      } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
 +
        if (status != DMUB_STATUS_OK) {
                DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
                dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
@@@ -128,7 -125,7 +128,7 @@@ bool dc_dmub_srv_cmd_list_queue_execute
                unsigned int count,
                union dmub_rb_cmd *cmd_list)
  {
-       struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+       struct dc_context *dc_ctx;
        struct dmub_srv *dmub;
        enum dmub_status status;
        int i;
        if (!dc_dmub_srv || !dc_dmub_srv->dmub)
                return false;
  
+       dc_ctx = dc_dmub_srv->ctx;
        dmub = dc_dmub_srv->dmub;
  
        for (i = 0 ; i < count; i++) {
                        if (status == DMUB_STATUS_POWER_STATE_D3)
                                return false;
  
 -                      dmub_srv_wait_for_idle(dmub, 100000);
 +                      do {
 +                              status = dmub_srv_wait_for_idle(dmub, 100000);
 +                      } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
  
                        /* Requeue the command. */
                        status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
@@@ -191,9 -187,7 +192,9 @@@ bool dc_dmub_srv_wait_for_idle(struct d
  
        // Wait for DMUB to process command
        if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
 -              status = dmub_srv_wait_for_idle(dmub, 100000);
 +              do {
 +                      status = dmub_srv_wait_for_idle(dmub, 100000);
 +              } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
  
                if (status != DMUB_STATUS_OK) {
                        DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
@@@ -787,22 -781,21 +788,22 @@@ static void populate_subvp_cmd_pipe_inf
        } else if (subvp_pipe->next_odm_pipe) {
                pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
        } else {
 -              pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0;
 +              pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
        }
  
        // Find phantom pipe index based on phantom stream
        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
  
 -              if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
 +              if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
 +                              phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
                        pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
                        if (phantom_pipe->bottom_pipe) {
                                pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
                        } else if (phantom_pipe->next_odm_pipe) {
                                pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
                        } else {
 -                              pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0;
 +                              pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
                        }
                        break;
                }
@@@ -1169,7 -1162,7 +1170,7 @@@ void dc_dmub_srv_subvp_save_surf_addr(c
  
  bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
  {
-       struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+       struct dc_context *dc_ctx;
        enum dmub_status status;
  
        if (!dc_dmub_srv || !dc_dmub_srv->dmub)
        if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
                return true;
  
+       dc_ctx = dc_dmub_srv->ctx;
        if (wait) {
                if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
                        do {
  
  static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
  {
 +      struct dc_dmub_srv *dc_dmub_srv;
        union dmub_rb_cmd cmd = {0};
  
        if (dc->debug.dmcub_emulation)
                return;
  
 +      if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
 +              return;
 +
 +      dc_dmub_srv = dc->ctx->dmub_srv;
 +
        memset(&cmd, 0, sizeof(cmd));
        cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
        cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
        cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
  
        if (allow_idle) {
 -              if (dc->hwss.set_idle_state)
 -                      dc->hwss.set_idle_state(dc, true);
 +              volatile struct dmub_shared_state_ips_driver *ips_driver =
 +                      &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
 +              union dmub_shared_state_ips_driver_signals new_signals;
 +
 +              dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
 +
 +              memset(&new_signals, 0, sizeof(new_signals));
 +
 +              if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
 +                  dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
 +                      new_signals.bits.allow_pg = 1;
 +                      new_signals.bits.allow_ips1 = 1;
 +                      new_signals.bits.allow_ips2 = 1;
 +                      new_signals.bits.allow_z10 = 1;
 +              } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
 +                      new_signals.bits.allow_ips1 = 1;
 +              } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
 +                      new_signals.bits.allow_pg = 1;
 +                      new_signals.bits.allow_ips1 = 1;
 +              } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
 +                      new_signals.bits.allow_pg = 1;
 +                      new_signals.bits.allow_ips1 = 1;
 +                      new_signals.bits.allow_ips2 = 1;
 +              }
 +
 +              ips_driver->signals = new_signals;
        }
  
        /* NOTE: This does not use the "wake" interface since this is part of the wake path. */
        /* We also do not perform a wait since DMCUB could enter idle after the notification. */
 -      dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
 +      dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
  }
  
  static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
  {
 -      uint32_t allow_state = 0;
 -      uint32_t commit_state = 0;
 +      struct dc_dmub_srv *dc_dmub_srv;
  
        if (dc->debug.dmcub_emulation)
                return;
        if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
                return;
  
 -      if (dc->hwss.get_idle_state &&
 -              dc->hwss.set_idle_state &&
 -              dc->clk_mgr->funcs->exit_low_power_state) {
 +      dc_dmub_srv = dc->ctx->dmub_srv;
  
 -              allow_state = dc->hwss.get_idle_state(dc);
 -              dc->hwss.set_idle_state(dc, false);
 +      if (dc->clk_mgr->funcs->exit_low_power_state) {
 +              volatile const struct dmub_shared_state_ips_fw *ips_fw =
 +                      &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
 +              volatile struct dmub_shared_state_ips_driver *ips_driver =
 +                      &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
 +              union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
  
 -              if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
 -                      // Wait for evaluation time
 -                      for (;;) {
 -                              udelay(dc->debug.ips2_eval_delay_us);
 -                              commit_state = dc->hwss.get_idle_state(dc);
 -                              if (commit_state & DMUB_IPS2_ALLOW_MASK)
 -                                      break;
 +              ips_driver->signals.all = 0;
  
 -                              /* allow was still set, retry eval delay */
 -                              dc->hwss.set_idle_state(dc, false);
 -                      }
 +              if (prev_driver_signals.bits.allow_ips2) {
 +                      udelay(dc->debug.ips2_eval_delay_us);
  
 -                      if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
 +                      if (ips_fw->signals.bits.ips2_commit) {
                                // Tell PMFW to exit low power state
                                dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
  
                                // Wait for IPS2 entry upper bound
                                udelay(dc->debug.ips2_entry_delay_us);
 -                              dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
  
 -                              for (;;) {
 -                                      commit_state = dc->hwss.get_idle_state(dc);
 -                                      if (commit_state & DMUB_IPS2_COMMIT_MASK)
 -                                              break;
 +                              dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
  
 +                              while (ips_fw->signals.bits.ips2_commit)
                                        udelay(1);
 -                              }
  
                                if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
                                        ASSERT(0);
  
 -                              /* TODO: See if we can return early here - IPS2 should go
 -                               * back directly to IPS0 and clear the flags, but it will
 -                               * be safer to directly notify DMCUB of this.
 -                               */
 -                              allow_state = dc->hwss.get_idle_state(dc);
 +                              dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
                        }
                }
  
                dc_dmub_srv_notify_idle(dc, false);
 -              if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
 -                      for (;;) {
 -                              commit_state = dc->hwss.get_idle_state(dc);
 -                              if (commit_state & DMUB_IPS1_COMMIT_MASK)
 -                                      break;
 -
 +              if (prev_driver_signals.bits.allow_ips1) {
 +                      while (ips_fw->signals.bits.ips1_commit)
                                udelay(1);
 -                      }
 +
                }
        }
  
@@@ -1381,7 -1364,7 +1384,7 @@@ bool dc_wake_and_execute_dmub_cmd_list(
        else
                result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
  
 -      if (result && reallow_idle)
 +      if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
                dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
  
        return result;
@@@ -1430,7 -1413,7 +1433,7 @@@ bool dc_wake_and_execute_gpint(const st
  
        result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
  
 -      if (result && reallow_idle)
 +      if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
                dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
  
        return result;
index a4d25b0e5c9171b8bfaf3ce2547ec7fb5eb7f008,65f9f66933bba2785fc3b64f7040e676c2afd352..6795624f16e769aedcd73ace83a3f1d629e2533d
   * to one or more &drm_encoder, which are then each connected to one
   * &drm_connector.
   *
 - * To create a CRTC, a KMS drivers allocates and zeroes an instances of
 + * To create a CRTC, a KMS driver allocates and zeroes an instance of
   * &struct drm_crtc (possibly as part of a larger structure) and registers it
   * with a call to drm_crtc_init_with_planes().
   *
 - * The CRTC is also the entry point for legacy modeset operationssee
 - * &drm_crtc_funcs.set_config, legacy plane operations, see
 - * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy
 + * The CRTC is also the entry point for legacy modeset operations (see
 + * &drm_crtc_funcs.set_config), legacy plane operations (see
 + * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2), and other legacy
   * operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these
   * features are controlled through &drm_property and
   * &drm_mode_config_funcs.atomic_check.
@@@ -904,6 -904,7 +904,7 @@@ out
        connector_set = NULL;
        fb = NULL;
        mode = NULL;
+       num_connectors = 0;
  
        DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
  
index 4dd79036776569b9a34a32076f3f4f56f7190e44,a6c19de462928ed70da033a60b10f08061bd1dc8..a0e94217b511a1551a0077b98e2bebc99d98513e
@@@ -441,9 -441,6 +441,9 @@@ int drm_syncobj_find_fence(struct drm_f
        u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
        int ret;
  
 +      if (flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
 +              return -EINVAL;
 +
        if (!syncobj)
                return -ENOENT;
  
@@@ -1044,10 -1041,8 +1044,10 @@@ static signed long drm_syncobj_array_wa
        uint32_t signaled_count, i;
  
        if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
 -                   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
 +                   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
 +              might_sleep();
                lockdep_assert_none_held_once();
 +      }
  
        points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
        if (points == NULL)
@@@ -1423,10 -1418,21 +1423,21 @@@ syncobj_eventfd_entry_func(struct drm_s
  
        /* This happens inside the syncobj lock */
        fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+       if (!fence)
+               return;
        ret = dma_fence_chain_find_seqno(&fence, entry->point);
-       if (ret != 0 || !fence) {
+       if (ret != 0) {
+               /* The given seqno has not been submitted yet. */
                dma_fence_put(fence);
                return;
+       } else if (!fence) {
+               /* If dma_fence_chain_find_seqno returns 0 but sets the fence
+                * to NULL, it implies that the given seqno is signaled and a
+                * later seqno has already been submitted. Assign a stub fence
+                * so that the eventfd still gets signaled below.
+                */
+               fence = dma_fence_get_stub();
        }
  
        list_del_init(&entry->node);
index ab415f41924d7d1a615ae19d19c11c750f94e27b,ae647d03af25cd48a3151b362c5ed1e752bdba60..5045c34a16be1c6f2879112b5fc91878491a8f6c
@@@ -56,7 -56,6 +56,7 @@@
  #include "intel_cx0_phy.h"
  #include "intel_ddi.h"
  #include "intel_de.h"
 +#include "intel_display_driver.h"
  #include "intel_display_types.h"
  #include "intel_dp.h"
  #include "intel_dp_aux.h"
@@@ -2356,6 -2355,9 +2356,9 @@@ intel_dp_compute_config_limits(struct i
        limits->min_rate = intel_dp_common_rate(intel_dp, 0);
        limits->max_rate = intel_dp_max_link_rate(intel_dp);
  
+       /* FIXME 128b/132b SST support missing */
+       limits->max_rate = min(limits->max_rate, 810000);
        limits->min_lane_count = 1;
        limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
  
@@@ -2617,38 -2619,58 +2620,38 @@@ static void intel_dp_compute_vsc_sdp(st
                                     struct intel_crtc_state *crtc_state,
                                     const struct drm_connector_state *conn_state)
  {
 -      struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
 +      struct drm_dp_vsc_sdp *vsc;
  
 -      /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
 -      if (crtc_state->has_psr)
 +      if ((!intel_dp->colorimetry_support ||
 +           !intel_dp_needs_vsc_sdp(crtc_state, conn_state)) &&
 +          !crtc_state->has_psr)
                return;
  
 -      if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
 -              return;
 +      vsc = &crtc_state->infoframes.vsc;
  
        crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
        vsc->sdp_type = DP_SDP_VSC;
 -      intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
 -                                       &crtc_state->infoframes.vsc);
 -}
  
 -void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
 -                                const struct intel_crtc_state *crtc_state,
 -                                const struct drm_connector_state *conn_state,
 -                                struct drm_dp_vsc_sdp *vsc)
 -{
 -      vsc->sdp_type = DP_SDP_VSC;
 -
 -      if (crtc_state->has_psr2) {
 -              if (intel_dp->psr.colorimetry_support &&
 -                  intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
 -                      /* [PSR2, +Colorimetry] */
 -                      intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
 -                                                       vsc);
 -              } else {
 -                      /*
 -                       * [PSR2, -Colorimetry]
 -                       * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
 -                       * 3D stereo + PSR/PSR2 + Y-coordinate.
 -                       */
 -                      vsc->revision = 0x4;
 -                      vsc->length = 0xe;
 -              }
 +      /* Needs colorimetry */
 +      if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
 +              intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
 +                                               vsc);
 +      } else if (crtc_state->has_psr2) {
 +              /*
 +               * [PSR2 without colorimetry]
 +               * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
 +               * 3D stereo + PSR/PSR2 + Y-coordinate.
 +               */
 +              vsc->revision = 0x4;
 +              vsc->length = 0xe;
        } else if (crtc_state->has_panel_replay) {
 -              if (intel_dp->psr.colorimetry_support &&
 -                  intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
 -                      /* [Panel Replay with colorimetry info] */
 -                      intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
 -                                                       vsc);
 -              } else {
 -                      /*
 -                       * [Panel Replay without colorimetry info]
 -                       * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
 -                       * VSC SDP supporting 3D stereo + Panel Replay.
 -                       */
 -                      vsc->revision = 0x6;
 -                      vsc->length = 0x10;
 -              }
 +              /*
 +               * [Panel Replay without colorimetry info]
 +               * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
 +               * VSC SDP supporting 3D stereo + Panel Replay.
 +               */
 +              vsc->revision = 0x6;
 +              vsc->length = 0x10;
        } else {
                /*
                 * [PSR1]
@@@ -3326,6 -3348,13 +3329,6 @@@ bool intel_dp_initial_fastset_check(str
                fastset = false;
        }
  
 -      if (CAN_PSR(intel_dp)) {
 -              drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute PSR state\n",
 -                          encoder->base.base.id, encoder->base.name);
 -              crtc_state->uapi.mode_changed = true;
 -              fastset = false;
 -      }
 -
        return fastset;
  }
  
@@@ -4262,6 -4291,24 +4265,6 @@@ static void intel_write_dp_sdp(struct i
        dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
  }
  
 -void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
 -                          const struct intel_crtc_state *crtc_state,
 -                          const struct drm_dp_vsc_sdp *vsc)
 -{
 -      struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 -      struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 -      struct dp_sdp sdp = {};
 -      ssize_t len;
 -
 -      len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
 -
 -      if (drm_WARN_ON(&dev_priv->drm, len < 0))
 -              return;
 -
 -      dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
 -                                      &sdp, len);
 -}
 -
  void intel_dp_set_infoframes(struct intel_encoder *encoder,
                             bool enable,
                             const struct intel_crtc_state *crtc_state,
        if (!enable)
                return;
  
 -      /* When PSR is enabled, VSC SDP is handled by PSR routine */
 -      if (!crtc_state->has_psr)
 -              intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
 +      intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
  
        intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
  }
@@@ -4419,6 -4468,10 +4422,6 @@@ static void intel_read_dp_vsc_sdp(struc
        struct dp_sdp sdp = {};
        int ret;
  
 -      /* When PSR is enabled, VSC SDP is handled by PSR routine */
 -      if (crtc_state->has_psr)
 -              return;
 -
        if ((crtc_state->infoframes.enable &
             intel_hdmi_infoframe_enable(type)) == 0)
                return;
@@@ -4629,36 -4682,31 +4632,36 @@@ static void intel_dp_phy_pattern_update
        struct drm_dp_phy_test_params *data =
                        &intel_dp->compliance.test_data.phytest;
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 +      struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        enum pipe pipe = crtc->pipe;
        u32 pattern_val;
  
        switch (data->phy_pattern) {
 -      case DP_PHY_TEST_PATTERN_NONE:
 +      case DP_LINK_QUAL_PATTERN_DISABLE:
                drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
                intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
 +              if (DISPLAY_VER(dev_priv) >= 10)
 +                      intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
 +                                   DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
 +                                   DP_TP_CTL_LINK_TRAIN_NORMAL);
                break;
 -      case DP_PHY_TEST_PATTERN_D10_2:
 +      case DP_LINK_QUAL_PATTERN_D10_2:
                drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
                intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
                               DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
                break;
 -      case DP_PHY_TEST_PATTERN_ERROR_COUNT:
 +      case DP_LINK_QUAL_PATTERN_ERROR_RATE:
                drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
                intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
                               DDI_DP_COMP_CTL_ENABLE |
                               DDI_DP_COMP_CTL_SCRAMBLED_0);
                break;
 -      case DP_PHY_TEST_PATTERN_PRBS7:
 +      case DP_LINK_QUAL_PATTERN_PRBS7:
                drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
                intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
                               DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
                break;
 -      case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
 +      case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM:
                /*
                 * FIXME: Ideally pattern should come from DPCD 0x250. As
                 * current firmware of DPR-100 could not set it, so hardcoding
                               DDI_DP_COMP_CTL_ENABLE |
                               DDI_DP_COMP_CTL_CUSTOM80);
                break;
 -      case DP_PHY_TEST_PATTERN_CP2520:
 +      case DP_LINK_QUAL_PATTERN_CP2520_PAT_1:
                /*
                 * FIXME: Ideally pattern should come from DPCD 0x24A. As
                 * current firmware of DPR-100 could not set it, so hardcoding
                               DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
                               pattern_val);
                break;
 +      case DP_LINK_QUAL_PATTERN_CP2520_PAT_3:
 +              if (DISPLAY_VER(dev_priv) < 10)  {
 +                      drm_warn(&dev_priv->drm, "Platform does not support TPS4\n");
 +                      break;
 +              }
 +              drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n");
 +              intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
 +              intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
 +                           DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
 +                           DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4);
 +              break;
        default:
 -              WARN(1, "Invalid Phy Test Pattern\n");
 +              drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n");
        }
  }
  
@@@ -5419,24 -5456,8 +5422,24 @@@ edp_detect(struct intel_dp *intel_dp
        return connector_status_connected;
  }
  
 +void intel_digital_port_lock(struct intel_encoder *encoder)
 +{
 +      struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 +
 +      if (dig_port->lock)
 +              dig_port->lock(dig_port);
 +}
 +
 +void intel_digital_port_unlock(struct intel_encoder *encoder)
 +{
 +      struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 +
 +      if (dig_port->unlock)
 +              dig_port->unlock(dig_port);
 +}
 +
  /*
 - * intel_digital_port_connected - is the specified port connected?
 + * intel_digital_port_connected_locked - is the specified port connected?
   * @encoder: intel_encoder
   *
   * In cases where there's a connector physically connected but it can't be used
   * pretty much treat the port as disconnected. This is relevant for type-C
   * (starting on ICL) where there's ownership involved.
   *
 + * The caller must hold the lock acquired by calling intel_digital_port_lock()
 + * when calling this function.
 + *
   * Return %true if port is connected, %false otherwise.
   */
 -bool intel_digital_port_connected(struct intel_encoder *encoder)
 +bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 +      bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port);
        bool is_connected = false;
        intel_wakeref_t wakeref;
  
 -      with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
 -              is_connected = dig_port->connected(encoder);
 +      with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
 +              unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4);
 +
 +              do {
 +                      is_connected = dig_port->connected(encoder);
 +                      if (is_connected || is_glitch_free)
 +                              break;
 +                      usleep_range(10, 30);
 +              } while (time_before(jiffies, wait_expires));
 +      }
  
        return is_connected;
  }
  
 +bool intel_digital_port_connected(struct intel_encoder *encoder)
 +{
 +      bool ret;
 +
 +      intel_digital_port_lock(encoder);
 +      ret = intel_digital_port_connected_locked(encoder);
 +      intel_digital_port_unlock(encoder);
 +
 +      return ret;
 +}
 +
  static const struct drm_edid *
  intel_dp_get_edid(struct intel_dp *intel_dp)
  {
@@@ -5675,9 -5673,6 +5678,9 @@@ intel_dp_detect(struct drm_connector *c
        if (!intel_display_device_enabled(dev_priv))
                return connector_status_disconnected;
  
 +      if (!intel_display_driver_check_access(dev_priv))
 +              return connector->status;
 +
        /* Can't disconnect eDP */
        if (intel_dp_is_edp(intel_dp))
                status = edp_detect(intel_dp);
@@@ -5778,10 -5773,6 +5781,10 @@@ intel_dp_force(struct drm_connector *co
  
        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
                    connector->base.id, connector->name);
 +
 +      if (!intel_display_driver_check_access(dev_priv))
 +              return;
 +
        intel_dp_unset_edid(intel_dp);
  
        if (connector->status != connector_status_connected)
@@@ -6066,7 -6057,7 +6069,7 @@@ static void intel_dp_oob_hotplug_event(
        spin_unlock_irq(&i915->irq_lock);
  
        if (need_work)
 -              queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0);
 +              intel_hpd_schedule_detection(i915);
  }
  
  static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@@ -6509,7 -6500,6 +6512,7 @@@ intel_dp_init_connector(struct intel_di
                connector->interlace_allowed = true;
  
        intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
 +      intel_connector->base.polled = intel_connector->polled;
  
        intel_connector_attach_encoder(intel_connector, intel_encoder);
  
                                    "HDCP init failed, skipping.\n");
        }
  
 +      intel_dp->colorimetry_support =
 +              intel_dp_get_colorimetry_status(intel_dp);
 +
        intel_dp->frl.is_trained = false;
        intel_dp->frl.trained_rate_gbps = 0;
  
index 2571ef5a1b211221f7a21afc442a0c9b96dbdc7f,2915d7afe5ccc2facdaeaee164e7b9c60796f361..093106c1e10148f56a6955669d3324aebc178ac7
@@@ -44,7 -44,6 +44,7 @@@
  #include "intel_connector.h"
  #include "intel_crtc.h"
  #include "intel_de.h"
 +#include "intel_display_driver.h"
  #include "intel_display_types.h"
  #include "intel_fdi.h"
  #include "intel_fifo_underrun.h"
@@@ -1210,7 -1209,7 +1210,7 @@@ static bool intel_sdvo_set_tv_format(st
        struct intel_sdvo_tv_format format;
        u32 format_map;
  
-       format_map = 1 << conn_state->tv.mode;
+       format_map = 1 << conn_state->tv.legacy_mode;
        memset(&format, 0, sizeof(format));
        memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
  
@@@ -2141,9 -2140,6 +2141,9 @@@ intel_sdvo_detect(struct drm_connector 
        if (!intel_display_device_enabled(i915))
                return connector_status_disconnected;
  
 +      if (!intel_display_driver_check_access(i915))
 +              return connector->status;
 +
        if (!intel_sdvo_set_target_output(intel_sdvo,
                                          intel_sdvo_connector->output_flag))
                return connector_status_unknown;
@@@ -2302,7 -2298,7 +2302,7 @@@ static int intel_sdvo_get_tv_modes(stru
         * Read the list of supported input resolutions for the selected TV
         * format.
         */
-       format_map = 1 << conn_state->tv.mode;
+       format_map = 1 << conn_state->tv.legacy_mode;
        memcpy(&tv_res, &format_map,
               min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
  
@@@ -2367,7 -2363,7 +2367,7 @@@ intel_sdvo_connector_atomic_get_propert
                int i;
  
                for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
-                       if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
+                       if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) {
                                *val = i;
  
                                return 0;
@@@ -2423,7 -2419,7 +2423,7 @@@ intel_sdvo_connector_atomic_set_propert
        struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
  
        if (property == intel_sdvo_connector->tv_format) {
-               state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
+               state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val];
  
                if (state->crtc) {
                        struct drm_crtc_state *crtc_state =
@@@ -2809,7 -2805,6 +2809,7 @@@ intel_sdvo_dvi_init(struct intel_sdvo *
        } else {
                intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
        }
 +      intel_connector->base.polled = intel_connector->polled;
        encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
        connector->connector_type = DRM_MODE_CONNECTOR_DVID;
  
@@@ -2885,7 -2880,6 +2885,7 @@@ intel_sdvo_analog_init(struct intel_sdv
        intel_connector = &intel_sdvo_connector->base;
        connector = &intel_connector->base;
        intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 +      intel_connector->base.polled = intel_connector->polled;
        encoder->encoder_type = DRM_MODE_ENCODER_DAC;
        connector->connector_type = DRM_MODE_CONNECTOR_VGA;
  
@@@ -3082,7 -3076,7 +3082,7 @@@ static bool intel_sdvo_tv_create_proper
                drm_property_add_enum(intel_sdvo_connector->tv_format, i,
                                      tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
  
-       intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
+       intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0];
        drm_object_attach_property(&intel_sdvo_connector->base.base.base,
                                   intel_sdvo_connector->tv_format, 0);
        return true;
index a96bcfcf90a3d3191297740e9c98981eb180574a,992a725de751a2d1925c23da8763e5ea7dce4714..2b77d399f1a11a3f91181d995e718c8873c4ed6b
@@@ -40,7 -40,6 +40,7 @@@
  #include "intel_crtc.h"
  #include "intel_de.h"
  #include "intel_display_irq.h"
 +#include "intel_display_driver.h"
  #include "intel_display_types.h"
  #include "intel_dpll.h"
  #include "intel_hotplug.h"
@@@ -950,7 -949,7 +950,7 @@@ intel_disable_tv(struct intel_atomic_st
  
  static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
  {
-       int format = conn_state->tv.mode;
+       int format = conn_state->tv.legacy_mode;
  
        return &tv_modes[format];
  }
@@@ -1328,7 -1327,7 +1328,7 @@@ intel_tv_compute_config(struct intel_en
         * the active portion. Hence following this formula seems
         * more trouble that it's worth.
         *
 -       * if (GRAPHICS_VER(dev_priv) == 4) {
 +       * if (DISPLAY_VER(dev_priv) == 4) {
         *      num = cdclk * (tv_mode->oversample >> !tv_mode->progressive);
         *      den = tv_mode->clock;
         * } else {
@@@ -1705,7 -1704,7 +1705,7 @@@ static void intel_tv_find_better_format
                        break;
        }
  
-       connector->state->tv.mode = i;
+       connector->state->tv.legacy_mode = i;
  }
  
  static int
@@@ -1724,9 -1723,6 +1724,9 @@@ intel_tv_detect(struct drm_connector *c
        if (!intel_display_device_enabled(i915))
                return connector_status_disconnected;
  
 +      if (!intel_display_driver_check_access(i915))
 +              return connector->status;
 +
        if (force) {
                struct drm_atomic_state *state;
  
@@@ -1863,7 -1859,7 +1863,7 @@@ static int intel_tv_atomic_check(struc
        old_state = drm_atomic_get_old_connector_state(state, connector);
        new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
  
-       if (old_state->tv.mode != new_state->tv.mode ||
+       if (old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
            old_state->tv.margins.left != new_state->tv.margins.left ||
            old_state->tv.margins.right != new_state->tv.margins.right ||
            old_state->tv.margins.top != new_state->tv.margins.top ||
@@@ -1900,7 -1896,7 +1900,7 @@@ static void intel_tv_add_properties(str
        conn_state->tv.margins.right = 46;
        conn_state->tv.margins.bottom = 37;
  
-       conn_state->tv.mode = 0;
+       conn_state->tv.legacy_mode = 0;
  
        /* Create TV properties then attach current values */
        for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
  
        drm_object_attach_property(&connector->base,
                                   i915->drm.mode_config.legacy_tv_mode_property,
-                                  conn_state->tv.mode);
+                                  conn_state->tv.legacy_mode);
        drm_object_attach_property(&connector->base,
                                   i915->drm.mode_config.tv_left_margin_property,
                                   conn_state->tv.margins.left);
@@@ -1994,7 -1990,6 +1994,7 @@@ intel_tv_init(struct drm_i915_private *
         * More recent chipsets favour HDMI rather than integrated S-Video.
         */
        intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 +      intel_connector->base.polled = intel_connector->polled;
  
        drm_connector_init(&dev_priv->drm, connector, &intel_tv_connector_funcs,
                           DRM_MODE_CONNECTOR_SVIDEO);
index 17c7b00a18c959a6e3f150ff08446645bd4b97ac,3407450435e2057dd3973441ba6e31485e69ee6d..d1191de855d910f9845bf2d5aef336e391982ba2
@@@ -219,7 -219,7 +219,7 @@@ static const struct drm_bridge_funcs me
        .atomic_reset = drm_atomic_helper_bridge_reset,
  };
  
 -int meson_encoder_cvbs_init(struct meson_drm *priv)
 +int meson_encoder_cvbs_probe(struct meson_drm *priv)
  {
        struct drm_device *drm = priv->drm;
        struct meson_encoder_cvbs *meson_encoder_cvbs;
  
        meson_encoder_cvbs->next_bridge = of_drm_find_bridge(remote);
        of_node_put(remote);
 -      if (!meson_encoder_cvbs->next_bridge) {
 -              dev_err(priv->dev, "Failed to find CVBS Connector bridge\n");
 -              return -EPROBE_DEFER;
 -      }
 +      if (!meson_encoder_cvbs->next_bridge)
 +              return dev_err_probe(priv->dev, -EPROBE_DEFER,
 +                                   "Failed to find CVBS Connector bridge\n");
  
        /* CVBS Encoder Bridge */
        meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs;
        /* Encoder */
        ret = drm_simple_encoder_init(priv->drm, &meson_encoder_cvbs->encoder,
                                      DRM_MODE_ENCODER_TVDAC);
 -      if (ret) {
 -              dev_err(priv->dev, "Failed to init CVBS encoder: %d\n", ret);
 -              return ret;
 -      }
 +      if (ret)
 +              return dev_err_probe(priv->dev, ret,
 +                                   "Failed to init CVBS encoder\n");
  
        meson_encoder_cvbs->encoder.possible_crtcs = BIT(0);
  
  
        /* Initialize & attach Bridge Connector */
        connector = drm_bridge_connector_init(priv->drm, &meson_encoder_cvbs->encoder);
 -      if (IS_ERR(connector)) {
 -              dev_err(priv->dev, "Unable to create CVBS bridge connector\n");
 -              return PTR_ERR(connector);
 -      }
 +      if (IS_ERR(connector))
 +              return dev_err_probe(priv->dev, PTR_ERR(connector),
 +                                   "Unable to create CVBS bridge connector\n");
 +
        drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder);
  
        priv->encoders[MESON_ENC_CVBS] = meson_encoder_cvbs;
@@@ -292,6 -294,5 +292,5 @@@ void meson_encoder_cvbs_remove(struct m
        if (priv->encoders[MESON_ENC_CVBS]) {
                meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
                drm_bridge_remove(&meson_encoder_cvbs->bridge);
-               drm_bridge_remove(meson_encoder_cvbs->next_bridge);
        }
  }
index 79c7ee31677f1710c28f7afa6251b47290d8423c,311b91630fbe536cf724223a1fa71e565ba2c778..7816902f590753d2f5e15aea45df0ce86e7595fb
@@@ -100,7 -100,7 +100,7 @@@ static const struct drm_bridge_funcs me
        .atomic_reset = drm_atomic_helper_bridge_reset,
  };
  
 -int meson_encoder_dsi_init(struct meson_drm *priv)
 +int meson_encoder_dsi_probe(struct meson_drm *priv)
  {
        struct meson_encoder_dsi *meson_encoder_dsi;
        struct device_node *remote;
        }
  
        meson_encoder_dsi->next_bridge = of_drm_find_bridge(remote);
 -      if (!meson_encoder_dsi->next_bridge) {
 -              dev_dbg(priv->dev, "Failed to find DSI transceiver bridge\n");
 -              return -EPROBE_DEFER;
 -      }
 +      if (!meson_encoder_dsi->next_bridge)
 +              return dev_err_probe(priv->dev, -EPROBE_DEFER,
 +                                   "Failed to find DSI transceiver bridge\n");
  
        /* DSI Encoder Bridge */
        meson_encoder_dsi->bridge.funcs = &meson_encoder_dsi_bridge_funcs;
        /* Encoder */
        ret = drm_simple_encoder_init(priv->drm, &meson_encoder_dsi->encoder,
                                      DRM_MODE_ENCODER_DSI);
 -      if (ret) {
 -              dev_err(priv->dev, "Failed to init DSI encoder: %d\n", ret);
 -              return ret;
 -      }
 +      if (ret)
 +              return dev_err_probe(priv->dev, ret,
 +                                   "Failed to init DSI encoder\n");
  
        meson_encoder_dsi->encoder.possible_crtcs = BIT(0);
  
        /* Attach DSI Encoder Bridge to Encoder */
        ret = drm_bridge_attach(&meson_encoder_dsi->encoder, &meson_encoder_dsi->bridge, NULL, 0);
 -      if (ret) {
 -              dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
 -              return ret;
 -      }
 +      if (ret)
 +              return dev_err_probe(priv->dev, ret,
 +                                   "Failed to attach bridge\n");
  
        /*
         * We should have now in place:
@@@ -165,6 -168,5 +165,5 @@@ void meson_encoder_dsi_remove(struct me
        if (priv->encoders[MESON_ENC_DSI]) {
                meson_encoder_dsi = priv->encoders[MESON_ENC_DSI];
                drm_bridge_remove(&meson_encoder_dsi->bridge);
-               drm_bridge_remove(meson_encoder_dsi->next_bridge);
        }
  }
index c2b4a1e336d15e8acedddbd3590961cceb3cb173,c4686568c9ca5d81b4066315681263e0fbd848a2..0593a1cde906ffab10c010c40942fb910059b2ab
@@@ -323,31 -323,19 +323,31 @@@ static void meson_encoder_hdmi_hpd_noti
                                          enum drm_connector_status status)
  {
        struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
 -      struct edid *edid;
  
        if (!encoder_hdmi->cec_notifier)
                return;
  
        if (status == connector_status_connected) {
 -              edid = drm_bridge_get_edid(encoder_hdmi->next_bridge, encoder_hdmi->connector);
 -              if (!edid)
 +              const struct drm_edid *drm_edid;
 +              const struct edid *edid;
 +
 +              drm_edid = drm_bridge_edid_read(encoder_hdmi->next_bridge,
 +                                              encoder_hdmi->connector);
 +              if (!drm_edid)
                        return;
  
 +              /*
 +               * FIXME: The CEC physical address should be set using
 +               * cec_notifier_set_phys_addr(encoder_hdmi->cec_notifier,
 +               * connector->display_info.source_physical_address) from a path
 +               * that has read the EDID and called
 +               * drm_edid_connector_update().
 +               */
 +              edid = drm_edid_raw(drm_edid);
 +
                cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid);
  
 -              kfree(edid);
 +              drm_edid_free(drm_edid);
        } else
                cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier);
  }
@@@ -366,7 -354,7 +366,7 @@@ static const struct drm_bridge_funcs me
        .atomic_reset = drm_atomic_helper_bridge_reset,
  };
  
 -int meson_encoder_hdmi_init(struct meson_drm *priv)
 +int meson_encoder_hdmi_probe(struct meson_drm *priv)
  {
        struct meson_encoder_hdmi *meson_encoder_hdmi;
        struct platform_device *pdev;
  
        meson_encoder_hdmi->next_bridge = of_drm_find_bridge(remote);
        if (!meson_encoder_hdmi->next_bridge) {
 -              dev_err(priv->dev, "Failed to find HDMI transceiver bridge\n");
 -              ret = -EPROBE_DEFER;
 +              ret = dev_err_probe(priv->dev, -EPROBE_DEFER,
 +                                  "Failed to find HDMI transceiver bridge\n");
                goto err_put_node;
        }
  
        ret = drm_simple_encoder_init(priv->drm, &meson_encoder_hdmi->encoder,
                                      DRM_MODE_ENCODER_TMDS);
        if (ret) {
 -              dev_err(priv->dev, "Failed to init HDMI encoder: %d\n", ret);
 +              dev_err_probe(priv->dev, ret, "Failed to init HDMI encoder\n");
                goto err_put_node;
        }
  
        ret = drm_bridge_attach(&meson_encoder_hdmi->encoder, &meson_encoder_hdmi->bridge, NULL,
                                DRM_BRIDGE_ATTACH_NO_CONNECTOR);
        if (ret) {
 -              dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
 +              dev_err_probe(priv->dev, ret, "Failed to attach bridge\n");
                goto err_put_node;
        }
  
        meson_encoder_hdmi->connector = drm_bridge_connector_init(priv->drm,
                                                        &meson_encoder_hdmi->encoder);
        if (IS_ERR(meson_encoder_hdmi->connector)) {
 -              dev_err(priv->dev, "Unable to create HDMI bridge connector\n");
 -              ret = PTR_ERR(meson_encoder_hdmi->connector);
 +              ret = dev_err_probe(priv->dev,
 +                                  PTR_ERR(meson_encoder_hdmi->connector),
 +                                  "Unable to create HDMI bridge connector\n");
                goto err_put_node;
        }
        drm_connector_attach_encoder(meson_encoder_hdmi->connector,
@@@ -487,6 -474,5 +487,5 @@@ void meson_encoder_hdmi_remove(struct m
        if (priv->encoders[MESON_ENC_HDMI]) {
                meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
                drm_bridge_remove(&meson_encoder_hdmi->bridge);
-               drm_bridge_remove(meson_encoder_hdmi->next_bridge);
        }
  }
index 4d10089154991bf7eab945caddb11d6b16dc8059,5e4565c5011a976d1c8057e9366d9e1da03de97a..b4da82ddbb6b2f180b80bbe258e933b76965a3ec
@@@ -112,7 -112,7 +112,7 @@@ nouveau_svmm_bind(struct drm_device *de
  {
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_nouveau_svm_bind *args = data;
 -      unsigned target, cmd, priority;
 +      unsigned target, cmd;
        unsigned long addr, end;
        struct mm_struct *mm;
  
                return -EINVAL;
        }
  
 -      priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
 -      priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;
 -
        /* FIXME support CPU target ie all target value < GPU_VRAM */
        target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
        target &= NOUVEAU_SVM_BIND_TARGET_MASK;
@@@ -923,14 -926,15 +923,14 @@@ nouveau_pfns_map(struct nouveau_svmm *s
                 unsigned long addr, u64 *pfns, unsigned long npages)
  {
        struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
 -      int ret;
  
        args->p.addr = addr;
        args->p.size = npages << PAGE_SHIFT;
  
        mutex_lock(&svmm->mutex);
  
 -      ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args,
 -                              struct_size(args, p.phys, npages), NULL);
 +      nvif_object_ioctl(&svmm->vmm->vmm.object, args,
 +                        struct_size(args, p.phys, npages), NULL);
  
        mutex_unlock(&svmm->mutex);
  }
@@@ -1007,7 -1011,7 +1007,7 @@@ nouveau_svm_fault_buffer_ctor(struct no
        if (ret)
                return ret;
  
-       buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
+       buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
        if (!buffer->fault)
                return -ENOMEM;
  
index 8acbef7ae53d116a995e46c2e5de930cc9166758,d442b893275b971a53adc42b3a06973eebd8bdbb..7e90c9f95611a00acb4a2f4fc551fdac6b02c520
@@@ -1178,21 -1178,24 +1178,24 @@@ static void drm_sched_run_job_work(stru
        struct drm_sched_entity *entity;
        struct dma_fence *fence;
        struct drm_sched_fence *s_fence;
-       struct drm_sched_job *sched_job = NULL;
+       struct drm_sched_job *sched_job;
        int r;
  
        if (READ_ONCE(sched->pause_submit))
                return;
  
        /* Find entity with a ready job */
-       while (!sched_job && (entity = drm_sched_select_entity(sched))) {
-               sched_job = drm_sched_entity_pop_job(entity);
-               if (!sched_job)
-                       complete_all(&entity->entity_idle);
-       }
+       entity = drm_sched_select_entity(sched);
        if (!entity)
                return; /* No more work */
  
+       sched_job = drm_sched_entity_pop_job(entity);
+       if (!sched_job) {
+               complete_all(&entity->entity_idle);
+               drm_sched_run_job_queue(sched);
+               return;
+       }
        s_fence = sched_job->s_fence;
  
        atomic_add(sched_job->credits, &sched->credit_count);
@@@ -1248,7 -1251,7 +1251,7 @@@ int drm_sched_init(struct drm_gpu_sched
                   long timeout, struct workqueue_struct *timeout_wq,
                   atomic_t *score, const char *name, struct device *dev)
  {
 -      int i, ret;
 +      int i;
  
        sched->ops = ops;
        sched->credit_limit = credit_limit;
  
                sched->own_submit_wq = true;
        }
 -      ret = -ENOMEM;
 +
        sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
                                        GFP_KERNEL | __GFP_ZERO);
        if (!sched->sched_rq)
 -              goto Out_free;
 +              goto Out_check_own;
        sched->num_rqs = num_rqs;
        for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
                sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
  Out_unroll:
        for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
                kfree(sched->sched_rq[i]);
 -Out_free:
 +
        kfree(sched->sched_rq);
        sched->sched_rq = NULL;
 +Out_check_own:
        if (sched->own_submit_wq)
                destroy_workqueue(sched->submit_wq);
        drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
 -      return ret;
 +      return -ENOMEM;
  }
  EXPORT_SYMBOL(drm_sched_init);
  
index c95ef8a4d94a48ddcae055e536c9a4431f83dcae,77d7ff0d5b110da4a05a4a7730d01bbd2d7c581e..cd84227f1b42ff53e21427de90d0dee1b4ffebbd
@@@ -54,6 -54,20 +54,20 @@@ extern "C" 
   */
  #define NOUVEAU_GETPARAM_EXEC_PUSH_MAX   17
  
+ /*
+  * NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
+  *
+  * Query the VRAM BAR size.
+  */
+ #define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
+ /*
+  * NOUVEAU_GETPARAM_VRAM_USED
+  *
+  * Get remaining VRAM size.
+  */
+ #define NOUVEAU_GETPARAM_VRAM_USED 19
  struct drm_nouveau_getparam {
        __u64 param;
        __u64 value;
@@@ -238,32 -252,34 +252,32 @@@ struct drm_nouveau_vm_init 
  struct drm_nouveau_vm_bind_op {
        /**
         * @op: the operation type
 +       *
 +       * Supported values:
 +       *
 +       * %DRM_NOUVEAU_VM_BIND_OP_MAP - Map a GEM object to the GPU's VA
 +       * space. Optionally, the &DRM_NOUVEAU_VM_BIND_SPARSE flag can be
 +       * passed to instruct the kernel to create sparse mappings for the
 +       * given range.
 +       *
 +       * %DRM_NOUVEAU_VM_BIND_OP_UNMAP - Unmap an existing mapping in the
 +       * GPU's VA space. If the region the mapping is located in is a
 +       * sparse region, new sparse mappings are created where the unmapped
 +       * (memory backed) mapping was mapped previously. To remove a sparse
 +       * region the &DRM_NOUVEAU_VM_BIND_SPARSE must be set.
         */
        __u32 op;
 -/**
 - * @DRM_NOUVEAU_VM_BIND_OP_MAP:
 - *
 - * Map a GEM object to the GPU's VA space. Optionally, the
 - * &DRM_NOUVEAU_VM_BIND_SPARSE flag can be passed to instruct the kernel to
 - * create sparse mappings for the given range.
 - */
  #define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
 -/**
 - * @DRM_NOUVEAU_VM_BIND_OP_UNMAP:
 - *
 - * Unmap an existing mapping in the GPU's VA space. If the region the mapping
 - * is located in is a sparse region, new sparse mappings are created where the
 - * unmapped (memory backed) mapping was mapped previously. To remove a sparse
 - * region the &DRM_NOUVEAU_VM_BIND_SPARSE must be set.
 - */
  #define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
        /**
         * @flags: the flags for a &drm_nouveau_vm_bind_op
 +       *
 +       * Supported values:
 +       *
 +       * %DRM_NOUVEAU_VM_BIND_SPARSE - Indicates that an allocated VA
 +       * space region should be sparse.
         */
        __u32 flags;
 -/**
 - * @DRM_NOUVEAU_VM_BIND_SPARSE:
 - *
 - * Indicates that an allocated VA space region should be sparse.
 - */
  #define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
        /**
         * @handle: the handle of the DRM GEM object to map
@@@ -299,17 -315,17 +313,17 @@@ struct drm_nouveau_vm_bind 
        __u32 op_count;
        /**
         * @flags: the flags for a &drm_nouveau_vm_bind ioctl
 +       *
 +       * Supported values:
 +       *
 +       * %DRM_NOUVEAU_VM_BIND_RUN_ASYNC - Indicates that the given VM_BIND
 +       * operation should be executed asynchronously by the kernel.
 +       *
 +       * If this flag is not supplied the kernel executes the associated
 +       * operations synchronously and doesn't accept any &drm_nouveau_sync
 +       * objects.
         */
        __u32 flags;
 -/**
 - * @DRM_NOUVEAU_VM_BIND_RUN_ASYNC:
 - *
 - * Indicates that the given VM_BIND operation should be executed asynchronously
 - * by the kernel.
 - *
 - * If this flag is not supplied the kernel executes the associated operations
 - * synchronously and doesn't accept any &drm_nouveau_sync objects.
 - */
  #define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
        /**
         * @wait_count: the number of wait &drm_nouveau_syncs
This page took 0.243927 seconds and 4 git commands to generate.