Git Repo - linux.git/commitdiff
Merge tag 'amd-drm-next-6.5-2023-06-09' of https://gitlab.freedesktop.org/agd5f/linux...
authorDave Airlie <[email protected]>
Thu, 15 Jun 2023 04:11:22 +0000 (14:11 +1000)
committerDave Airlie <[email protected]>
Thu, 15 Jun 2023 04:11:22 +0000 (14:11 +1000)
amd-drm-next-6.5-2023-06-02:

amdgpu:
- SR-IOV fixes
- Warning fixes
- Misc code cleanups and spelling fixes
- DCN 3.2 updates
- Improved DC FAMS support for better power management
- Improved DC SubVP support for better power management
- DCN 3.1.x fixes
- Max IB size query
- DC GPU reset fixes
- RAS updates
- DCN 3.0.x fixes
- S/G display fixes
- CP shadow buffer support
- Implement connector force callback
- Z8 power improvements
- PSP 13.0.10 vbflash support
- Mode2 reset fixes
- Store MQDs in VRAM to improve queue switch latency
- VCN 3.x fixes
- JPEG 3.x fixes
- Enable DC_FP on LoongArch
- GFXOFF fixes
- GC 9.4.3 partition support
- SDMA 4.4.2 partition support
- VCN/JPEG 4.0.3 partition support
- VCN 4.0.3 updates
- NBIO 7.9 updates
- GC 9.4.3 updates
- Take NUMA into account when allocating memory
- Handle NUMA for partitions
- SMU 13.0.6 updates
- GC 9.4.3 RAS updates
- Stop including unused swiotlb.h
- SMU 13.0.7 fixes
- Fix clock output ordering on some APUs
- Clean up DC FPGA code
- GFX9 preemption fixes
- Misc irq fixes
- S0ix fixes
- Add new DRM_AMDGPU_WERROR config parameter to help with CI
- PCIe fix for RDNA2
- kdoc fixes
- Documentation updates

amdkfd:
- Query TTM mem limit rather than hardcoding it
- GC 9.4.3 partition support
- Handle NUMA for partitions

radeon:
- Fix possible double free
- Stop including unused swiotlb.h
- Fix possible division by zero

ttm:
- Add query for TTM mem limit
- Add NUMA awareness to pools
- Export ttm_pool_fini()

UAPI:
- Add new ctx query flag to better handle GPU resets
  Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22290
- Add new interface to query and set shadow buffer for RDNA3
  Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21986
- Add new INFO query for max IB size
  Proposed userspace: https://gitlab.freedesktop.org/bnieuwenhuizen/mesa/-/commits/ib-rejection-v3

amd-drm-next-6.5-2023-06-09:

amdgpu:
- S0ix fixes
- Initial SMU13 Overdrive support
- kdoc fixes
- Misc code cleanups
- Flexible array fixes
- Display OTG fixes
- SMU 13.0.6 updates
- Revert some broken clock counter updates
- Misc display fixes
- GFX9 preemption fixes
- Add support for newer EEPROM bad page table format
- Add missing radeon secondary id
- Add support for new colorspace KMS API
- CSA fix
- Stable pstate fixes for APUs
- make vbl interface admin only
- Handle PCI accelerator class

amdkfd:
- Add debugger support for gdb

radeon:
- Fix possible UAF

drm:
- Add Colorspace functionality

UAPI:
- Add debugger interface for enabling gdb
  Proposed userspace: https://github.com/ROCm-Developer-Tools/ROCdbgapi/tree/wip-dbgapi
- Add KMS colorspace API
  Discussion: https://lists.freedesktop.org/archives/dri-devel/2023-June/408128.html

From: Alex Deucher <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
17 files changed:
1  2 
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
drivers/gpu/drm/i915/display/intel_connector.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/ttm/ttm_pool.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vc4/vc4_hdmi.c
include/drm/display/drm_dp.h
include/drm/drm_connector.h
include/drm/ttm/ttm_pool.h
include/linux/pci_ids.h

diff --combined drivers/gpu/drm/Makefile
index 982d9e06168a43c566c7c0ce649251f61526cc19,b119089c312f923d2e22e965d46a14cbedeca384..7a09a89b493befd9d93d65cb4b303f1b57261c1a
@@@ -140,6 -140,7 +140,7 @@@ obj-$(CONFIG_DRM_TTM)      += ttm
  obj-$(CONFIG_DRM_SCHED)       += scheduler/
  obj-$(CONFIG_DRM_RADEON)+= radeon/
  obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
+ obj-$(CONFIG_DRM_AMDGPU)+= amd/amdxcp/
  obj-$(CONFIG_DRM_I915)        += i915/
  obj-$(CONFIG_DRM_KMB_DISPLAY)  += kmb/
  obj-$(CONFIG_DRM_MGAG200) += mgag200/
@@@ -156,7 -157,8 +157,7 @@@ obj-$(CONFIG_DRM_UDL) += udl
  obj-$(CONFIG_DRM_AST) += ast/
  obj-$(CONFIG_DRM_ARMADA) += armada/
  obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc/
 -obj-y                 += rcar-du/
 -obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 +obj-y                 += renesas/
  obj-y                 += omapdrm/
  obj-$(CONFIG_DRM_SUN4I) += sun4i/
  obj-y                 += tilcdc/
index c9a41c997c6c777282596d5b0a6e0113c394448b,999d008b6b484dbb208a9fe25dd99beacc27d60f..3b711babd4e2e5e49e650952675e56e42c81e016
@@@ -50,6 -50,7 +50,7 @@@
  #include "amdgpu_ras.h"
  #include "amdgpu_xgmi.h"
  #include "amdgpu_reset.h"
+ #include "../amdxcp/amdgpu_xcp_drv.h"
  
  /*
   * KMS wrapper.
   *   3.52.0 - Add AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD, add device_info fields:
   *            tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size,
   *            gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi
+  *   3.53.0 - Support for GFX11 CP GFX shadowing
+  *   3.54.0 - Add AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS support
   */
  #define KMS_DRIVER_MAJOR      3
- #define KMS_DRIVER_MINOR      52
+ #define KMS_DRIVER_MINOR      54
  #define KMS_DRIVER_PATCHLEVEL 0
  
  unsigned int amdgpu_vram_limit = UINT_MAX;
@@@ -150,7 -153,7 +153,7 @@@ uint amdgpu_pg_mask = 0xffffffff
  uint amdgpu_sdma_phase_quantum = 32;
  char *amdgpu_disable_cu;
  char *amdgpu_virtual_display;
+ bool enforce_isolation;
  /*
   * OverDrive(bit 14) disabled by default
   * GFX DCS(bit 19) disabled by default
@@@ -191,6 -194,7 +194,7 @@@ int amdgpu_smartshift_bias
  int amdgpu_use_xgmi_p2p = 1;
  int amdgpu_vcnfw_log;
  int amdgpu_sg_display = -1; /* auto */
+ int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE;
  
  static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
  
@@@ -819,6 -823,13 +823,13 @@@ MODULE_PARM_DESC(no_queue_eviction_on_v
  module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
  #endif
  
+ /**
+  * DOC: mtype_local (int)
+  */
+ int amdgpu_mtype_local;
+ MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC)");
+ module_param_named(mtype_local, amdgpu_mtype_local, int, 0444);
  /**
   * DOC: pcie_p2p (bool)
   * Enable PCIe P2P (requires large-BAR). Default value: true (on)
@@@ -948,6 -959,28 +959,28 @@@ MODULE_PARM_DESC(smu_pptable_id
        "specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)");
  module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444);
  
+ /**
+  * DOC: partition_mode (int)
+  * Used to override the default SPX mode.
+  */
+ MODULE_PARM_DESC(
+       user_partt_mode,
+       "specify partition mode to be used (-2 = AMDGPU_AUTO_COMPUTE_PARTITION_MODE(default value) \
+                                               0 = AMDGPU_SPX_PARTITION_MODE, \
+                                               1 = AMDGPU_DPX_PARTITION_MODE, \
+                                               2 = AMDGPU_TPX_PARTITION_MODE, \
+                                               3 = AMDGPU_QPX_PARTITION_MODE, \
+                                               4 = AMDGPU_CPX_PARTITION_MODE)");
+ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
+ /**
+  * DOC: enforce_isolation (bool)
+  * enforce process isolation between graphics and compute via using the same reserved vmid.
+  */
+ module_param(enforce_isolation, bool, 0444);
+ MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");
  /* These devices are not supported by amdgpu.
   * They are supported by the mach64, r128, radeon drivers
   */
@@@ -1615,6 -1648,7 +1648,7 @@@ static const u16 amdgpu_unsupported_pci
        0x5874,
        0x5940,
        0x5941,
+       0x5b70,
        0x5b72,
        0x5b73,
        0x5b74,
  };
  
  static const struct pci_device_id pciidlist[] = {
- #ifdef  CONFIG_DRM_AMDGPU_SI
+ #ifdef CONFIG_DRM_AMDGPU_SI
        {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
          .class_mask = 0xffffff,
          .driver_data = CHIP_IP_DISCOVERY },
  
+       { PCI_DEVICE(0x1002, PCI_ANY_ID),
+         .class = PCI_CLASS_ACCELERATOR_PROCESSING << 8,
+         .class_mask = 0xffffff,
+         .driver_data = CHIP_IP_DISCOVERY },
        {0, 0, 0}
  };
  
@@@ -2161,6 -2200,10 +2200,10 @@@ retry_init
                goto err_pci;
        }
  
+       ret = amdgpu_xcp_dev_register(adev, ent);
+       if (ret)
+               goto err_pci;
        /*
         * 1. don't init fbdev on hw without DCE
         * 2. don't init fbdev if there are no connectors
@@@ -2233,6 -2276,7 +2276,7 @@@ amdgpu_pci_remove(struct pci_dev *pdev
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
  
+       amdgpu_xcp_dev_unplug(adev);
        drm_dev_unplug(dev);
  
        if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
@@@ -2747,7 -2791,7 +2791,7 @@@ static const struct file_operations amd
        .compat_ioctl = amdgpu_kms_compat_ioctl,
  #endif
  #ifdef CONFIG_PROC_FS
 -      .show_fdinfo = amdgpu_show_fdinfo
 +      .show_fdinfo = drm_show_fdinfo,
  #endif
  };
  
@@@ -2802,9 -2846,6 +2846,9 @@@ static const struct drm_driver amdgpu_k
        .dumb_map_offset = amdgpu_mode_dumb_mmap,
        .fops = &amdgpu_driver_kms_fops,
        .release = &amdgpu_driver_release_kms,
 +#ifdef CONFIG_PROC_FS
 +      .show_fdinfo = amdgpu_show_fdinfo,
 +#endif
  
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .patchlevel = KMS_DRIVER_PATCHLEVEL,
  };
  
+ const struct drm_driver amdgpu_partition_driver = {
+       .driver_features =
+           DRIVER_GEM | DRIVER_RENDER | DRIVER_SYNCOBJ |
+           DRIVER_SYNCOBJ_TIMELINE,
+       .open = amdgpu_driver_open_kms,
+       .postclose = amdgpu_driver_postclose_kms,
+       .lastclose = amdgpu_driver_lastclose_kms,
+       .ioctls = amdgpu_ioctls_kms,
+       .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
+       .dumb_create = amdgpu_mode_dumb_create,
+       .dumb_map_offset = amdgpu_mode_dumb_mmap,
+       .fops = &amdgpu_driver_kms_fops,
+       .release = &amdgpu_driver_release_kms,
+       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+       .gem_prime_import = amdgpu_gem_prime_import,
+       .gem_prime_mmap = drm_gem_prime_mmap,
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = KMS_DRIVER_MAJOR,
+       .minor = KMS_DRIVER_MINOR,
+       .patchlevel = KMS_DRIVER_PATCHLEVEL,
+ };
  static struct pci_error_handlers amdgpu_pci_err_handler = {
        .error_detected = amdgpu_pci_error_detected,
        .mmio_enabled   = amdgpu_pci_mmio_enabled,
@@@ -2886,9 -2954,11 +2957,11 @@@ static void __exit amdgpu_exit(void
        amdgpu_amdkfd_fini();
        pci_unregister_driver(&amdgpu_kms_pci_driver);
        amdgpu_unregister_atpx_handler();
+       amdgpu_acpi_release();
        amdgpu_sync_fini();
        amdgpu_fence_slab_fini();
        mmu_notifier_synchronize();
+       amdgpu_xcp_drv_release();
  }
  
  module_init(amdgpu_init);
index 1b54a9aaae70cca9111cfe8d37cd2355c62f3dc7,d655c5bc951fd68aba6a0761088fb8bee714d509..6a27b000a246e7d04dca0a8d42b92a5a0ff96d8e
@@@ -44,6 -44,7 +44,7 @@@
  #include "amdgpu_amdkfd.h"
  #include "kfd_smi_events.h"
  #include "amdgpu_dma_buf.h"
+ #include "kfd_debug.h"
  
  static long kfd_ioctl(struct file *, unsigned int, unsigned long);
  static int kfd_open(struct inode *, struct file *);
@@@ -93,7 -94,7 +94,7 @@@ int kfd_chardev_init(void
        if (err < 0)
                goto err_register_chrdev;
  
 -      kfd_class = class_create(THIS_MODULE, kfd_dev_name);
 +      kfd_class = class_create(kfd_dev_name);
        err = PTR_ERR(kfd_class);
        if (IS_ERR(kfd_class))
                goto err_class_create;
@@@ -142,15 -143,13 +143,13 @@@ static int kfd_open(struct inode *inode
                return -EPERM;
        }
  
-       process = kfd_create_process(filep);
+       process = kfd_create_process(current);
        if (IS_ERR(process))
                return PTR_ERR(process);
  
-       if (kfd_is_locked()) {
-               dev_dbg(kfd_device, "kfd is locked!\n"
-                               "process %d unreferenced", process->pasid);
+       if (kfd_process_init_cwsr_apu(process, filep)) {
                kfd_unref_process(process);
-               return -EAGAIN;
+               return -EFAULT;
        }
  
        /* filep now owns the reference returned by kfd_create_process */
@@@ -186,7 -185,12 +185,12 @@@ static int kfd_ioctl_get_version(struc
  static int set_queue_properties_from_user(struct queue_properties *q_properties,
                                struct kfd_ioctl_create_queue_args *args)
  {
-       if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+       /*
+        * Repurpose queue percentage to accommodate new features:
+        * bit 0-7: queue percentage
+        * bit 8-15: pm4_target_xcc
+        */
+       if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
                pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
                return -EINVAL;
        }
  
        q_properties->is_interop = false;
        q_properties->is_gws = false;
-       q_properties->queue_percent = args->queue_percentage;
+       q_properties->queue_percent = args->queue_percentage & 0xFF;
+       /* bit 8-15 are repurposed to be PM4 target XCC */
+       q_properties->pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
        q_properties->priority = args->queue_priority;
        q_properties->queue_address = args->ring_base_address;
        q_properties->queue_size = args->ring_size;
@@@ -293,7 -299,7 +299,7 @@@ static int kfd_ioctl_create_queue(struc
                                        void *data)
  {
        struct kfd_ioctl_create_queue_args *args = data;
-       struct kfd_dev *dev;
+       struct kfd_node *dev;
        int err = 0;
        unsigned int queue_id;
        struct kfd_process_device *pdd;
        }
  
        if (!pdd->doorbell_index &&
-           kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
+           kfd_alloc_process_doorbells(dev->kfd, &pdd->doorbell_index) < 0) {
                err = -ENOMEM;
                goto err_alloc_doorbells;
        }
        /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
         * on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
         */
-       if (dev->shared_resources.enable_mes &&
+       if (dev->kfd->shared_resources.enable_mes &&
                        ((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
                        >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
                struct amdgpu_bo_va_mapping *wptr_mapping;
        pr_debug("Write ptr address   == 0x%016llX\n",
                        args->write_pointer_address);
  
+       kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0);
        return 0;
  
  err_create_queue:
@@@ -442,7 -449,12 +449,12 @@@ static int kfd_ioctl_update_queue(struc
        struct kfd_ioctl_update_queue_args *args = data;
        struct queue_properties properties;
  
-       if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+       /*
+        * Repurpose queue percentage to accommodate new features:
+        * bit 0-7: queue percentage
+        * bit 8-15: pm4_target_xcc
+        */
+       if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
                pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
                return -EINVAL;
        }
  
        properties.queue_address = args->ring_base_address;
        properties.queue_size = args->ring_size;
-       properties.queue_percent = args->queue_percentage;
+       properties.queue_percent = args->queue_percentage & 0xFF;
+       /* bit 8-15 are repurposed to be PM4 target XCC */
+       properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
        properties.priority = args->queue_priority;
  
        pr_debug("Updating queue id %d for pasid 0x%x\n",
@@@ -524,8 -538,6 +538,6 @@@ static int kfd_ioctl_set_cu_mask(struc
                goto out;
        }
  
-       minfo.update_flag = UPDATE_FLAG_CU_MASK;
        mutex_lock(&p->mutex);
  
        retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
@@@ -887,7 -899,7 +899,7 @@@ static int kfd_ioctl_set_scratch_backin
  {
        struct kfd_ioctl_set_scratch_backing_va_args *args = data;
        struct kfd_process_device *pdd;
-       struct kfd_dev *dev;
+       struct kfd_node *dev;
        long err;
  
        mutex_lock(&p->mutex);
@@@ -1006,19 -1018,26 +1018,26 @@@ err_drm_file
        return ret;
  }
  
- bool kfd_dev_is_large_bar(struct kfd_dev *dev)
+ bool kfd_dev_is_large_bar(struct kfd_node *dev)
  {
        if (debug_largebar) {
                pr_debug("Simulate large-bar allocation on non large-bar machine\n");
                return true;
        }
  
-       if (dev->use_iommu_v2)
+       if (dev->kfd->use_iommu_v2)
                return false;
  
        if (dev->local_mem_info.local_mem_size_private == 0 &&
-                       dev->local_mem_info.local_mem_size_public > 0)
+           dev->local_mem_info.local_mem_size_public > 0)
+               return true;
+       if (dev->local_mem_info.local_mem_size_public == 0 &&
+           dev->kfd->adev->gmc.is_app_apu) {
+               pr_debug("APP APU, Consider like a large bar system\n");
                return true;
+       }
        return false;
  }
  
@@@ -1030,7 -1049,8 +1049,8 @@@ static int kfd_ioctl_get_available_memo
  
        if (!pdd)
                return -EINVAL;
-       args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev);
+       args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
+                                                       pdd->dev->node_id);
        kfd_unlock_pdd(pdd);
        return 0;
  }
@@@ -1041,7 -1061,7 +1061,7 @@@ static int kfd_ioctl_alloc_memory_of_gp
        struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
        struct kfd_process_device *pdd;
        void *mem;
-       struct kfd_dev *dev;
+       struct kfd_node *dev;
        int idr_handle;
        long err;
        uint64_t offset = args->mmap_offset;
        }
  
        if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
-               if (args->size != kfd_doorbell_process_slice(dev)) {
+               if (args->size != kfd_doorbell_process_slice(dev->kfd)) {
                        err = -EINVAL;
                        goto err_unlock;
                }
@@@ -1231,7 -1251,7 +1251,7 @@@ static int kfd_ioctl_map_memory_to_gpu(
        struct kfd_ioctl_map_memory_to_gpu_args *args = data;
        struct kfd_process_device *pdd, *peer_pdd;
        void *mem;
-       struct kfd_dev *dev;
+       struct kfd_node *dev;
        long err = 0;
        int i;
        uint32_t *devices_arr = NULL;
@@@ -1405,7 -1425,7 +1425,7 @@@ static int kfd_ioctl_unmap_memory_from_
                args->n_success = i+1;
        }
  
-       flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
+       flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd);
        if (flush_tlb) {
                err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
                                (struct kgd_mem *) mem, true);
@@@ -1445,7 -1465,7 +1465,7 @@@ static int kfd_ioctl_alloc_queue_gws(st
        int retval;
        struct kfd_ioctl_alloc_queue_gws_args *args = data;
        struct queue *q;
-       struct kfd_dev *dev;
+       struct kfd_node *dev;
  
        mutex_lock(&p->mutex);
        q = pqm_get_user_queue(&p->pqm, args->queue_id);
                goto out_unlock;
        }
  
+       if (!kfd_dbg_has_gws_support(dev) && p->debug_trap_enabled) {
+               retval = -EBUSY;
+               goto out_unlock;
+       }
        retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
        mutex_unlock(&p->mutex);
  
@@@ -1482,10 -1507,11 +1507,11 @@@ static int kfd_ioctl_get_dmabuf_info(st
                struct kfd_process *p, void *data)
  {
        struct kfd_ioctl_get_dmabuf_info_args *args = data;
-       struct kfd_dev *dev = NULL;
+       struct kfd_node *dev = NULL;
        struct amdgpu_device *dmabuf_adev;
        void *metadata_buffer = NULL;
        uint32_t flags;
+       int8_t xcp_id;
        unsigned int i;
        int r;
  
        r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
                                          &dmabuf_adev, &args->size,
                                          metadata_buffer, args->metadata_size,
-                                         &args->metadata_size, &flags);
+                                         &args->metadata_size, &flags, &xcp_id);
        if (r)
                goto exit;
  
-       /* Reverse-lookup gpu_id from kgd pointer */
-       dev = kfd_device_by_adev(dmabuf_adev);
-       if (!dev) {
-               r = -EINVAL;
-               goto exit;
-       }
-       args->gpu_id = dev->id;
+       if (xcp_id >= 0)
+               args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
+       else
+               args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id;
        args->flags = flags;
  
        /* Copy metadata buffer to user mode */
@@@ -1596,7 -1619,7 +1619,7 @@@ static int kfd_ioctl_export_dmabuf(stru
        struct kfd_ioctl_export_dmabuf_args *args = data;
        struct kfd_process_device *pdd;
        struct dma_buf *dmabuf;
-       struct kfd_dev *dev;
+       struct kfd_node *dev;
        void *mem;
        int ret = 0;
  
@@@ -2178,7 -2201,7 +2201,7 @@@ static int criu_restore_devices(struct 
        }
  
        for (i = 0; i < args->num_devices; i++) {
-               struct kfd_dev *dev;
+               struct kfd_node *dev;
                struct kfd_process_device *pdd;
                struct file *drm_file;
  
                }
  
                if (!pdd->doorbell_index &&
-                   kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
+                   kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) {
                        ret = -ENOMEM;
                        goto exit;
                }
@@@ -2268,7 -2291,8 +2291,8 @@@ static int criu_restore_memory_of_gpu(s
        u64 offset;
  
        if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
-               if (bo_bucket->size != kfd_doorbell_process_slice(pdd->dev))
+               if (bo_bucket->size !=
+                               kfd_doorbell_process_slice(pdd->dev->kfd))
                        return -EINVAL;
  
                offset = kfd_get_process_doorbells(pdd);
@@@ -2350,7 -2374,7 +2374,7 @@@ static int criu_restore_bo(struct kfd_p
  
        /* now map these BOs to GPU/s */
        for (j = 0; j < p->n_pdds; j++) {
-               struct kfd_dev *peer;
+               struct kfd_node *peer;
                struct kfd_process_device *peer_pdd;
  
                if (!bo_priv->mapped_gpuids[j])
@@@ -2715,6 -2739,356 +2739,356 @@@ static int kfd_ioctl_criu(struct file *
        return ret;
  }
  
+ static int runtime_enable(struct kfd_process *p, uint64_t r_debug,
+                       bool enable_ttmp_setup)
+ {
+       int i = 0, ret = 0;
+       if (p->is_runtime_retry)
+               goto retry;
+       if (p->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
+               return -EBUSY;
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+               if (pdd->qpd.queue_count)
+                       return -EEXIST;
+       }
+       p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
+       p->runtime_info.r_debug = r_debug;
+       p->runtime_info.ttmp_setup = enable_ttmp_setup;
+       if (p->runtime_info.ttmp_setup) {
+               for (i = 0; i < p->n_pdds; i++) {
+                       struct kfd_process_device *pdd = p->pdds[i];
+                       if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) {
+                               amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+                               pdd->dev->kfd2kgd->enable_debug_trap(
+                                               pdd->dev->adev,
+                                               true,
+                                               pdd->dev->vm_info.last_vmid_kfd);
+                       } else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
+                               pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
+                                               pdd->dev->adev,
+                                               false,
+                                               0);
+                       }
+               }
+       }
+ retry:
+       if (p->debug_trap_enabled) {
+               if (!p->is_runtime_retry) {
+                       kfd_dbg_trap_activate(p);
+                       kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
+                                       p, NULL, 0, false, NULL, 0);
+               }
+               mutex_unlock(&p->mutex);
+               ret = down_interruptible(&p->runtime_enable_sema);
+               mutex_lock(&p->mutex);
+               p->is_runtime_retry = !!ret;
+       }
+       return ret;
+ }
+ static int runtime_disable(struct kfd_process *p)
+ {
+       int i = 0, ret;
+       bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED;
+       p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED;
+       p->runtime_info.r_debug = 0;
+       if (p->debug_trap_enabled) {
+               if (was_enabled)
+                       kfd_dbg_trap_deactivate(p, false, 0);
+               if (!p->is_runtime_retry)
+                       kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
+                                       p, NULL, 0, false, NULL, 0);
+               mutex_unlock(&p->mutex);
+               ret = down_interruptible(&p->runtime_enable_sema);
+               mutex_lock(&p->mutex);
+               p->is_runtime_retry = !!ret;
+               if (ret)
+                       return ret;
+       }
+       if (was_enabled && p->runtime_info.ttmp_setup) {
+               for (i = 0; i < p->n_pdds; i++) {
+                       struct kfd_process_device *pdd = p->pdds[i];
+                       if (!kfd_dbg_is_rlc_restore_supported(pdd->dev))
+                               amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+               }
+       }
+       p->runtime_info.ttmp_setup = false;
+       /* disable ttmp setup */
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+               if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
+                       pdd->spi_dbg_override =
+                                       pdd->dev->kfd2kgd->disable_debug_trap(
+                                       pdd->dev->adev,
+                                       false,
+                                       pdd->dev->vm_info.last_vmid_kfd);
+                       if (!pdd->dev->kfd->shared_resources.enable_mes)
+                               debug_refresh_runlist(pdd->dev->dqm);
+                       else
+                               kfd_dbg_set_mes_debug_mode(pdd);
+               }
+       }
+       return 0;
+ }
+ static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data)
+ {
+       struct kfd_ioctl_runtime_enable_args *args = data;
+       int r;
+       mutex_lock(&p->mutex);
+       if (args->mode_mask & KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK)
+               r = runtime_enable(p, args->r_debug,
+                               !!(args->mode_mask & KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK));
+       else
+               r = runtime_disable(p);
+       mutex_unlock(&p->mutex);
+       return r;
+ }
+ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data)
+ {
+       struct kfd_ioctl_dbg_trap_args *args = data;
+       struct task_struct *thread = NULL;
+       struct mm_struct *mm = NULL;
+       struct pid *pid = NULL;
+       struct kfd_process *target = NULL;
+       struct kfd_process_device *pdd = NULL;
+       int r = 0;
+       if (sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+               pr_err("Debugging does not support sched_policy %i", sched_policy);
+               return -EINVAL;
+       }
+       pid = find_get_pid(args->pid);
+       if (!pid) {
+               pr_debug("Cannot find pid info for %i\n", args->pid);
+               r = -ESRCH;
+               goto out;
+       }
+       thread = get_pid_task(pid, PIDTYPE_PID);
+       if (!thread) {
+               r = -ESRCH;
+               goto out;
+       }
+       mm = get_task_mm(thread);
+       if (!mm) {
+               r = -ESRCH;
+               goto out;
+       }
+       if (args->op == KFD_IOC_DBG_TRAP_ENABLE) {
+               bool create_process;
+               rcu_read_lock();
+               create_process = thread && thread != current && ptrace_parent(thread) == current;
+               rcu_read_unlock();
+               target = create_process ? kfd_create_process(thread) :
+                                       kfd_lookup_process_by_pid(pid);
+       } else {
+               target = kfd_lookup_process_by_pid(pid);
+       }
+       if (IS_ERR_OR_NULL(target)) {
+               pr_debug("Cannot find process PID %i to debug\n", args->pid);
+               r = target ? PTR_ERR(target) : -ESRCH;
+               goto out;
+       }
+       /* Check if target is still PTRACED. */
+       rcu_read_lock();
+       if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE
+                               && ptrace_parent(target->lead_thread) != current) {
+               pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid);
+               r = -EPERM;
+       }
+       rcu_read_unlock();
+       if (r)
+               goto out;
+       mutex_lock(&target->mutex);
+       if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) {
+               pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op);
+               r = -EINVAL;
+               goto unlock_out;
+       }
+       if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_ENABLED &&
+                       (args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE ||
+                        args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE ||
+                        args->op == KFD_IOC_DBG_TRAP_SUSPEND_QUEUES ||
+                        args->op == KFD_IOC_DBG_TRAP_RESUME_QUEUES ||
+                        args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
+                        args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH ||
+                        args->op == KFD_IOC_DBG_TRAP_SET_FLAGS)) {
+               r = -EPERM;
+               goto unlock_out;
+       }
+       if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
+           args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) {
+               int user_gpu_id = kfd_process_get_user_gpu_id(target,
+                               args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ?
+                                       args->set_node_address_watch.gpu_id :
+                                       args->clear_node_address_watch.gpu_id);
+               pdd = kfd_process_device_data_by_id(target, user_gpu_id);
+               if (user_gpu_id == -EINVAL || !pdd) {
+                       r = -ENODEV;
+                       goto unlock_out;
+               }
+       }
+       switch (args->op) {
+       case KFD_IOC_DBG_TRAP_ENABLE:
+               if (target != p)
+                       target->debugger_process = p;
+               r = kfd_dbg_trap_enable(target,
+                                       args->enable.dbg_fd,
+                                       (void __user *)args->enable.rinfo_ptr,
+                                       &args->enable.rinfo_size);
+               if (!r)
+                       target->exception_enable_mask = args->enable.exception_mask;
+               break;
+       case KFD_IOC_DBG_TRAP_DISABLE:
+               r = kfd_dbg_trap_disable(target);
+               break;
+       case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT:
+               r = kfd_dbg_send_exception_to_runtime(target,
+                               args->send_runtime_event.gpu_id,
+                               args->send_runtime_event.queue_id,
+                               args->send_runtime_event.exception_mask);
+               break;
+       case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED:
+               kfd_dbg_set_enabled_debug_exception_mask(target,
+                               args->set_exceptions_enabled.exception_mask);
+               break;
+       case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE:
+               r = kfd_dbg_trap_set_wave_launch_override(target,
+                               args->launch_override.override_mode,
+                               args->launch_override.enable_mask,
+                               args->launch_override.support_request_mask,
+                               &args->launch_override.enable_mask,
+                               &args->launch_override.support_request_mask);
+               break;
+       case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE:
+               r = kfd_dbg_trap_set_wave_launch_mode(target,
+                               args->launch_mode.launch_mode);
+               break;
+       case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES:
+               r = suspend_queues(target,
+                               args->suspend_queues.num_queues,
+                               args->suspend_queues.grace_period,
+                               args->suspend_queues.exception_mask,
+                               (uint32_t *)args->suspend_queues.queue_array_ptr);
+               break;
+       case KFD_IOC_DBG_TRAP_RESUME_QUEUES:
+               r = resume_queues(target, args->resume_queues.num_queues,
+                               (uint32_t *)args->resume_queues.queue_array_ptr);
+               break;
+       case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH:
+               r = kfd_dbg_trap_set_dev_address_watch(pdd,
+                               args->set_node_address_watch.address,
+                               args->set_node_address_watch.mask,
+                               &args->set_node_address_watch.id,
+                               args->set_node_address_watch.mode);
+               break;
+       case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH:
+               r = kfd_dbg_trap_clear_dev_address_watch(pdd,
+                               args->clear_node_address_watch.id);
+               break;
+       case KFD_IOC_DBG_TRAP_SET_FLAGS:
+               r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags);
+               break;
+       case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT:
+               r = kfd_dbg_ev_query_debug_event(target,
+                               &args->query_debug_event.queue_id,
+                               &args->query_debug_event.gpu_id,
+                               args->query_debug_event.exception_mask,
+                               &args->query_debug_event.exception_mask);
+               break;
+       case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO:
+               r = kfd_dbg_trap_query_exception_info(target,
+                               args->query_exception_info.source_id,
+                               args->query_exception_info.exception_code,
+                               args->query_exception_info.clear_exception,
+                               (void __user *)args->query_exception_info.info_ptr,
+                               &args->query_exception_info.info_size);
+               break;
+       case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT:
+               r = pqm_get_queue_snapshot(&target->pqm,
+                               args->queue_snapshot.exception_mask,
+                               (void __user *)args->queue_snapshot.snapshot_buf_ptr,
+                               &args->queue_snapshot.num_queues,
+                               &args->queue_snapshot.entry_size);
+               break;
+       case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT:
+               r = kfd_dbg_trap_device_snapshot(target,
+                               args->device_snapshot.exception_mask,
+                               (void __user *)args->device_snapshot.snapshot_buf_ptr,
+                               &args->device_snapshot.num_devices,
+                               &args->device_snapshot.entry_size);
+               break;
+       default:
+               pr_err("Invalid option: %i\n", args->op);
+               r = -EINVAL;
+       }
+ unlock_out:
+       mutex_unlock(&target->mutex);
+ out:
+       if (thread)
+               put_task_struct(thread);
+       if (mm)
+               mmput(mm);
+       if (pid)
+               put_pid(pid);
+       if (target)
+               kfd_unref_process(target);
+       return r;
+ }
  #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
        [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
                            .cmd_drv = 0, .name = #ioctl}
@@@ -2827,6 -3201,12 +3201,12 @@@ static const struct amdkfd_ioctl_desc a
  
        AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF,
                                kfd_ioctl_export_dmabuf, 0),
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_RUNTIME_ENABLE,
+                       kfd_ioctl_runtime_enable, 0),
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP,
+                       kfd_ioctl_set_debug_trap, 0),
  };
  
  #define AMDKFD_CORE_IOCTL_COUNT       ARRAY_SIZE(amdkfd_ioctls)
@@@ -2947,7 -3327,7 +3327,7 @@@ err_i1
        return retcode;
  }
  
- static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
+ static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
                      struct vm_area_struct *vma)
  {
        phys_addr_t address;
  static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
  {
        struct kfd_process *process;
-       struct kfd_dev *dev = NULL;
+       struct kfd_node *dev = NULL;
        unsigned long mmap_offset;
        unsigned int gpu_id;
  
index 2d8e55e29637fcb45371367e6c672f0fa6c26bd9,4e99b8836827db2e44dc2d386ca5079058a0d4dd..bf0a655d009e60e78235ca542706ca3f23440df9
@@@ -8,7 -8,7 +8,7 @@@ config DRM_AMD_D
        depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64
        select SND_HDA_COMPONENT if SND_HDA_CORE
        # !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
-       select DRM_AMD_DC_FP if (X86 || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
 -      select DRM_AMD_DC_FP if (X86 || LOONGARCH || PPC64 || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
++      select DRM_AMD_DC_FP if (X86 || LOONGARCH || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
        help
          Choose this option if you want to use the new display engine
          support for AMDGPU. This adds required support for Vega and
@@@ -42,16 -42,13 +42,13 @@@ config DEBUG_KERNEL_D
          Choose this option if you want to hit kdgb_break in assert.
  
  config DRM_AMD_SECURE_DISPLAY
-         bool "Enable secure display support"
-         depends on DEBUG_FS
-         depends on DRM_AMD_DC_FP
-         help
-             Choose this option if you want to
-             support secure display
-             This option enables the calculation
-             of crc of specific region via debugfs.
-             Cooperate with specific DMCU FW.
+       bool "Enable secure display support"
+       depends on DEBUG_FS
+       depends on DRM_AMD_DC_FP
+       help
+         Choose this option if you want to support secure display
  
+         This option enables the calculation of crc of specific region via
+         debugfs. Cooperate with specific DMCU FW.
  
  endmenu
index 8b4b186c57f515fd541eaa15ce55e0b3ebf2dadb,e14704b764e0fe23cd5f3779ea27268f8dcc0f28..b6bef202b6bbbc841f8928153567e0181d29fb9c
@@@ -365,6 -365,14 +365,14 @@@ static inline void reverse_planes_order
   * adjustments and preparation before calling it. This function is a wrapper
   * for the dc_update_planes_and_stream that does any required configuration
   * before passing control to DC.
+  *
+  * @dc: Display Core control structure
+  * @update_type: specify whether it is FULL/MEDIUM/FAST update
+  * @planes_count: planes count to update
+  * @stream: stream state
+  * @stream_update: stream update
+  * @array_of_surface_update: dc surface update pointer
+  *
   */
  static inline bool update_planes_and_stream_adapter(struct dc *dc,
                                                    int update_type,
@@@ -1646,11 -1654,6 +1654,6 @@@ static int amdgpu_dm_init(struct amdgpu
        if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
                init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
  
-       /* Disable SubVP + DRR config by default */
-       init_data.flags.disable_subvp_drr = true;
-       if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR)
-               init_data.flags.disable_subvp_drr = false;
        init_data.flags.seamless_boot_edp_requested = false;
  
        if (check_seamless_boot_capability(adev)) {
        adev->dm.dc = dc_create(&init_data);
  
        if (adev->dm.dc) {
-               DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
+               DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+                        dce_version_to_string(adev->dm.dc->ctx->dce_version));
        } else {
-               DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+               DRM_INFO("Display Core v%s failed to initialize on %s\n", DC_VER,
+                        dce_version_to_string(adev->dm.dc->ctx->dce_version));
                goto error;
        }
  
  
                dc_init_callbacks(adev->dm.dc, &init_params);
        }
- #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-       adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
-       if (!adev->dm.secure_display_ctxs) {
-               DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n");
-       }
- #endif
        if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
                init_completion(&adev->dm.dmub_aux_transfer_done);
                adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
                goto error;
        }
  
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+       adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+       if (!adev->dm.secure_display_ctxs)
+               DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+ #endif
  
        DRM_DEBUG_DRIVER("KMS initialized.\n");
  
@@@ -2479,20 -2483,25 +2483,25 @@@ static void dm_gpureset_toggle_interrup
                if (acrtc && state->stream_status[i].plane_count != 0) {
                        irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
                        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
-                       DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
-                                     acrtc->crtc_id, enable ? "en" : "dis", rc);
                        if (rc)
                                DRM_WARN("Failed to %s pflip interrupts\n",
                                         enable ? "enable" : "disable");
  
                        if (enable) {
-                               rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base);
-                               if (rc)
-                                       DRM_WARN("Failed to enable vblank interrupts\n");
-                       } else {
-                               amdgpu_dm_crtc_disable_vblank(&acrtc->base);
-                       }
+                               if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
+                                       rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
+                       } else
+                               rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
+                       if (rc)
+                               DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
  
+                       irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+                       /* During gpu-reset we disable and then enable vblank irq, so
+                        * don't use amdgpu_irq_get/put() to avoid refcount change.
+                        */
+                       if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+                               DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
                }
        }
  
@@@ -2852,7 -2861,7 +2861,7 @@@ static int dm_resume(void *handle
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
-               if (aconnector->dc_link->type == dc_connection_mst_branch)
+               if (aconnector && aconnector->mst_root)
                        continue;
  
                mutex_lock(&aconnector->hpd_lock);
@@@ -4474,6 -4483,10 +4483,6 @@@ static int amdgpu_dm_initialize_drm_dev
                amdgpu_set_panel_orientation(&aconnector->base);
        }
  
 -      /* If we didn't find a panel, notify the acpi video detection */
 -      if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0)
 -              acpi_video_report_nolcd();
 -
        /* Software is initialized. Now we can register interrupt handlers. */
        switch (adev->asic_type) {
  #if defined(CONFIG_DRM_AMD_DC_SI)
@@@ -5326,21 -5339,44 +5335,44 @@@ get_aspect_ratio(const struct drm_displ
  }
  
  static enum dc_color_space
- get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
+ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
+                      const struct drm_connector_state *connector_state)
  {
        enum dc_color_space color_space = COLOR_SPACE_SRGB;
  
-       switch (dc_crtc_timing->pixel_encoding) {
-       case PIXEL_ENCODING_YCBCR422:
-       case PIXEL_ENCODING_YCBCR444:
-       case PIXEL_ENCODING_YCBCR420:
-       {
+       switch (connector_state->colorspace) {
+       case DRM_MODE_COLORIMETRY_BT601_YCC:
+               if (dc_crtc_timing->flags.Y_ONLY)
+                       color_space = COLOR_SPACE_YCBCR601_LIMITED;
+               else
+                       color_space = COLOR_SPACE_YCBCR601;
+               break;
+       case DRM_MODE_COLORIMETRY_BT709_YCC:
+               if (dc_crtc_timing->flags.Y_ONLY)
+                       color_space = COLOR_SPACE_YCBCR709_LIMITED;
+               else
+                       color_space = COLOR_SPACE_YCBCR709;
+               break;
+       case DRM_MODE_COLORIMETRY_OPRGB:
+               color_space = COLOR_SPACE_ADOBERGB;
+               break;
+       case DRM_MODE_COLORIMETRY_BT2020_RGB:
+       case DRM_MODE_COLORIMETRY_BT2020_YCC:
+               if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+                       color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
+               else
+                       color_space = COLOR_SPACE_2020_YCBCR;
+               break;
+       case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
+       default:
+               if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+                       color_space = COLOR_SPACE_SRGB;
                /*
                 * 27030khz is the separation point between HDTV and SDTV
                 * according to HDMI spec, we use YCbCr709 and YCbCr601
                 * respectively
                 */
-               if (dc_crtc_timing->pix_clk_100hz > 270300) {
+               } else if (dc_crtc_timing->pix_clk_100hz > 270300) {
                        if (dc_crtc_timing->flags.Y_ONLY)
                                color_space =
                                        COLOR_SPACE_YCBCR709_LIMITED;
                        else
                                color_space = COLOR_SPACE_YCBCR601;
                }
-       }
-       break;
-       case PIXEL_ENCODING_RGB:
-               color_space = COLOR_SPACE_SRGB;
-               break;
-       default:
-               WARN_ON(1);
                break;
        }
  
@@@ -5500,7 -5527,7 +5523,7 @@@ static void fill_stream_properties_from
                }
        }
  
-       stream->output_color_space = get_output_color_space(timing_out);
+       stream->output_color_space = get_output_color_space(timing_out, connector_state);
  }
  
  static void fill_audio_info(struct audio_info *audio_info,
@@@ -5942,15 -5969,14 +5965,14 @@@ create_stream_for_sink(struct amdgpu_dm
  {
        struct drm_display_mode *preferred_mode = NULL;
        struct drm_connector *drm_connector;
-       const struct drm_connector_state *con_state =
-               dm_state ? &dm_state->base : NULL;
+       const struct drm_connector_state *con_state = &dm_state->base;
        struct dc_stream_state *stream = NULL;
        struct drm_display_mode mode;
        struct drm_display_mode saved_mode;
        struct drm_display_mode *freesync_mode = NULL;
        bool native_mode_found = false;
        bool recalculate_timing = false;
-       bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+       bool scale = dm_state->scaling != RMX_OFF;
        int mode_refresh;
        int preferred_refresh = 0;
        enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
-               recalculate_timing = amdgpu_freesync_vid_mode &&
-                                is_freesync_video_mode(&mode, aconnector);
+               recalculate_timing = is_freesync_video_mode(&mode, aconnector);
                if (recalculate_timing) {
                        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
                        drm_mode_copy(&saved_mode, &mode);
  
        if (recalculate_timing)
                drm_mode_set_crtcinfo(&saved_mode, 0);
-       else if (!dm_state)
+       else
                drm_mode_set_crtcinfo(&mode, 0);
  
        /*
@@@ -6342,6 -6367,31 +6363,31 @@@ amdgpu_dm_connector_late_register(struc
        return 0;
  }
  
+ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
+ {
+       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+       struct dc_link *dc_link = aconnector->dc_link;
+       struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
+       struct edid *edid;
+       if (!connector->edid_override)
+               return;
+       drm_edid_override_connector_update(&aconnector->base);
+       edid = aconnector->base.edid_blob_ptr->data;
+       aconnector->edid = edid;
+       /* Update emulated (virtual) sink's EDID */
+       if (dc_em_sink && dc_link) {
+               memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
+               memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
+               dm_helpers_parse_edid_caps(
+                       dc_link,
+                       &dc_em_sink->dc_edid,
+                       &dc_em_sink->edid_caps);
+       }
+ }
  static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
        .reset = amdgpu_dm_connector_funcs_reset,
        .detect = amdgpu_dm_connector_detect,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
        .late_register = amdgpu_dm_connector_late_register,
-       .early_unregister = amdgpu_dm_connector_unregister
+       .early_unregister = amdgpu_dm_connector_unregister,
+       .force = amdgpu_dm_connector_funcs_force
  };
  
  static int get_modes(struct drm_connector *connector)
@@@ -6369,11 -6420,19 +6416,19 @@@ static void create_eml_sink(struct amdg
        struct edid *edid;
  
        if (!aconnector->base.edid_blob_ptr) {
-               DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
-                               aconnector->base.name);
+               /* if connector->edid_override valid, pass
+                * it to edid_override to edid_blob_ptr
+                */
  
-               aconnector->base.force = DRM_FORCE_OFF;
-               return;
+               drm_edid_override_connector_update(&aconnector->base);
+               if (!aconnector->base.edid_blob_ptr) {
+                       DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
+                                       aconnector->base.name);
+                       aconnector->base.force = DRM_FORCE_OFF;
+                       return;
+               }
        }
  
        edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
@@@ -6558,7 -6617,9 +6613,9 @@@ enum drm_mode_status amdgpu_dm_connecto
                goto fail;
        }
  
-       stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
+       stream = create_validate_stream_for_sink(aconnector, mode,
+                                                to_dm_connector_state(connector->state),
+                                                NULL);
        if (stream) {
                dc_stream_release(stream);
                result = MODE_OK;
@@@ -6652,6 -6713,14 +6709,14 @@@ amdgpu_dm_connector_atomic_check(struc
        if (!crtc)
                return 0;
  
+       if (new_con_state->colorspace != old_con_state->colorspace) {
+               new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+               if (IS_ERR(new_crtc_state))
+                       return PTR_ERR(new_crtc_state);
+               new_crtc_state->mode_changed = true;
+       }
        if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
                struct dc_info_packet hdr_infopacket;
  
                 * set is permissible, however. So only force a
                 * modeset if we're entering or exiting HDR.
                 */
-               new_crtc_state->mode_changed =
+               new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
                        !old_con_state->hdr_output_metadata ||
                        !new_con_state->hdr_output_metadata;
        }
@@@ -6737,7 -6806,7 +6802,7 @@@ static int dm_encoder_helper_atomic_che
        int clock, bpp = 0;
        bool is_y420 = false;
  
-       if (!aconnector->mst_output_port || !aconnector->dc_sink)
+       if (!aconnector->mst_output_port)
                return 0;
  
        mst_port = aconnector->mst_output_port;
@@@ -7163,7 -7232,7 +7228,7 @@@ static void amdgpu_dm_connector_add_fre
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
  
-       if (!(amdgpu_freesync_vid_mode && edid))
+       if (!edid)
                return;
  
        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@@ -7199,6 -7268,12 +7264,12 @@@ static int amdgpu_dm_connector_get_mode
        return amdgpu_dm_connector->num_modes;
  }
  
+ static const u32 supported_colorspaces =
+       BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
+       BIT(DRM_MODE_COLORIMETRY_OPRGB) |
+       BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
+       BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
  void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
                                     struct amdgpu_dm_connector *aconnector,
                                     int connector_type,
                                adev->mode_info.abm_level_property, 0);
        }
  
+       if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+               if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
+                       drm_connector_attach_colorspace_property(&aconnector->base);
+       } else if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+                  connector_type == DRM_MODE_CONNECTOR_eDP) {
+               if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
+                       drm_connector_attach_colorspace_property(&aconnector->base);
+       }
        if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
            connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
            connector_type == DRM_MODE_CONNECTOR_eDP) {
@@@ -9208,8 -9292,7 +9288,7 @@@ static int dm_update_crtc_state(struct 
                 * TODO: Refactor this function to allow this check to work
                 * in all conditions.
                 */
-               if (amdgpu_freesync_vid_mode &&
-                   dm_new_crtc_state->stream &&
+               if (dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
                        goto skip_modeset;
  
                }
  
                /* Now check if we should set freesync video mode */
-               if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+               if (dm_new_crtc_state->stream &&
+                   dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+                   dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
                    is_timing_unchanged_for_freesync(new_crtc_state,
                                                     old_crtc_state)) {
                        new_crtc_state->mode_changed = false;
                        set_freesync_fixed_config(dm_new_crtc_state);
  
                        goto skip_modeset;
-               } else if (amdgpu_freesync_vid_mode && aconnector &&
+               } else if (aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
                        struct drm_display_mode *high_mode;
@@@ -10323,7 -10408,7 +10404,7 @@@ static bool dm_edid_parser_send_cea(str
        input->cea_total_length = total_length;
        memcpy(input->payload, data, length);
  
-       res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
+       res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
        if (!res) {
                DRM_ERROR("EDID CEA parser failed\n");
                return false;
@@@ -10773,3 -10858,13 +10854,13 @@@ bool check_seamless_boot_capability(str
  
        return false;
  }
+ bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+ {
+       return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
+ }
+ bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+ {
+       return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
+ }
index 8d9444db092abecbdfa504ad7f904680294f7b8b,96fa68f166e0bb12040cea88534dc256cab69d2d..6a811755e2e6f4fa01763f6dd66d1ee02da69efb
@@@ -182,23 -182,32 +182,32 @@@ void dcn32_init_clocks(struct clk_mgr *
        dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
                        &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
                        &num_entries_per_clk->num_dcfclk_levels);
+       clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
  
        /* SOCCLK */
        dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
                                        &clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
                                        &num_entries_per_clk->num_socclk_levels);
+       clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
  
        /* DTBCLK */
-       if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch)
+       if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch) {
                dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
                                &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
                                &num_entries_per_clk->num_dtbclk_levels);
+               clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz =
+                               dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
+       }
  
        /* DISPCLK */
        dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
                        &clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
                        &num_entries_per_clk->num_dispclk_levels);
        num_levels = num_entries_per_clk->num_dispclk_levels;
+       clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
+       //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x
+       if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
+               clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;
  
        if (num_entries_per_clk->num_dcfclk_levels &&
                        num_entries_per_clk->num_dtbclk_levels &&
        DC_FP_END();
  }
  
+ static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
+                       struct dc_state *context,
+                       int ref_dtbclk_khz)
+ {
+       struct dccg *dccg = clk_mgr->dccg;
+       uint32_t tg_mask = 0;
+       int i;
+       for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+               struct dtbclk_dto_params dto_params = {0};
+               /* use mask to program DTO once per tg */
+               if (pipe_ctx->stream_res.tg &&
+                               !(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) {
+                       tg_mask |= (1 << pipe_ctx->stream_res.tg->inst);
+                       dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
+                       dto_params.ref_dtbclk_khz = ref_dtbclk_khz;
+                       dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params);
+                       //dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params);
+               }
+       }
+ }
  /* Since DPPCLK request to PMFW needs to be exact (due to DPP DTO programming),
   * update DPPCLK to be the exact frequency that will be set after the DPPCLK
   * divider is updated. This will prevent rounding issues that could cause DPP
@@@ -433,10 -468,6 +468,6 @@@ static void dcn32_update_clocks(struct 
        bool update_uclk = false, update_fclk = false;
        bool p_state_change_support;
        bool fclk_p_state_change_support;
-       int total_plane_count;
-       if (dc->work_arounds.skip_clock_update)
-               return;
  
        if (clk_mgr_base->clks.dispclk_khz == 0 ||
                        (dc->debug.force_clock_mode & 0x1)) {
  
                clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
  
-               total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
-               fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
+               fclk_p_state_change_support = new_clocks->fclk_p_state_change_support;
  
-               if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
+               if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support) &&
+                               !dc->work_arounds.clock_update_disable_mask.fclk) {
                        clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
  
                        /* To enable FCLK P-state switching, send FCLK_PSTATE_SUPPORTED message to PMFW */
                        new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
                                        new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
  
-               if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+               if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz) &&
+                               !dc->work_arounds.clock_update_disable_mask.dcfclk) {
                        clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
                        dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
                }
  
-               if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+               if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz) &&
+                               !dc->work_arounds.clock_update_disable_mask.dcfclk_ds) {
                        clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
                        dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
                }
                        dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
                }
  
-               p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
-               if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+               p_state_change_support = new_clocks->p_state_change_support;
+               if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support) &&
+                               !dc->work_arounds.clock_update_disable_mask.uclk) {
                        clk_mgr_base->clks.p_state_change_support = p_state_change_support;
  
                        /* to disable P-State switching, set UCLK min = max */
                        update_fclk = true;
                }
  
-               if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
+               if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk &&
+                               !dc->work_arounds.clock_update_disable_mask.fclk) {
                        /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
                        dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
                }
  
                /* Always update saved value, even if new value not set due to P-State switching unsupported */
-               if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
+               if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz) &&
+                               !dc->work_arounds.clock_update_disable_mask.uclk) {
                        clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
                        update_uclk = true;
                }
  
                /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
                if (clk_mgr_base->clks.p_state_change_support &&
-                               (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
+                               (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
+                               !dc->work_arounds.clock_update_disable_mask.uclk)
                        dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
  
                if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
                /* DCCG requires KHz precision for DTBCLK */
                clk_mgr_base->clks.ref_dtbclk_khz =
                                dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
+               dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
        }
  
        if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
@@@ -789,6 -826,7 +826,7 @@@ static void dcn32_get_memclk_states_fro
        dcn32_init_single_clock(clk_mgr, PPCLK_UCLK,
                        &clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
                        &num_entries_per_clk->num_memclk_levels);
+       clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
  
        /* memclk must have at least one level */
        num_entries_per_clk->num_memclk_levels = num_entries_per_clk->num_memclk_levels ? num_entries_per_clk->num_memclk_levels : 1;
        dcn32_init_single_clock(clk_mgr, PPCLK_FCLK,
                        &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
                        &num_entries_per_clk->num_fclk_levels);
+       clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
  
        if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
                num_levels = num_entries_per_clk->num_memclk_levels;
@@@ -913,7 -952,6 +952,7 @@@ void dcn32_clk_mgr_construct
                        clk_mgr->base.clks.ref_dtbclk_khz = 268750;
        }
  
 +
        /* integer part is now VCO frequency in kHz */
        clk_mgr->base.dentist_vco_freq_khz = dcn32_get_vco_frequency_from_reg(clk_mgr);
  
index 422fbf79da64fb95eda7627455870a0dbdb351cb,20f668d28364c02c8acbb26017641c6272636ed8..5934b1d70e48552e25d1059010f3ea9a195fef6b
@@@ -313,6 -313,10 +313,10 @@@ void dcn20_init_blank
        }
        opp = dc->res_pool->opps[opp_id_src0];
  
+       /* don't override the blank pattern if already enabled with the correct one. */
+       if (opp->funcs->dpg_is_blanked && opp->funcs->dpg_is_blanked(opp))
+               return;
        if (num_opps == 2) {
                otg_active_width = otg_active_width / 2;
  
@@@ -1357,6 -1361,7 +1361,7 @@@ static void dcn20_detect_pipe_changes(s
                new_pipe->update_flags.bits.dppclk = 1;
                new_pipe->update_flags.bits.hubp_interdependent = 1;
                new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+               new_pipe->update_flags.bits.unbounded_req = 1;
                new_pipe->update_flags.bits.gamut_remap = 1;
                new_pipe->update_flags.bits.scaler = 1;
                new_pipe->update_flags.bits.viewport = 1;
                                memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
                        new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
        }
+       if (old_pipe->unbounded_req != new_pipe->unbounded_req)
+               new_pipe->update_flags.bits.unbounded_req = 1;
  }
  
  static void dcn20_update_dchubp_dpp(
                        &pipe_ctx->ttu_regs,
                        &pipe_ctx->rq_regs,
                        &pipe_ctx->pipe_dlg_param);
-               if (hubp->funcs->set_unbounded_requesting)
-                       hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
        }
+       if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
+               hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
        if (pipe_ctx->update_flags.bits.hubp_interdependent)
                hubp->funcs->hubp_setup_interdependent(
                        hubp,
@@@ -1732,6 -1741,17 +1741,17 @@@ static void dcn20_program_pipe
  
                if (hws->funcs.setup_vupdate_interrupt)
                        hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+               if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
+                       unsigned int k1_div, k2_div;
+                       hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
+                       dc->res_pool->dccg->funcs->set_pixel_rate_div(
+                               dc->res_pool->dccg,
+                               pipe_ctx->stream_res.tg->inst,
+                               k1_div, k2_div);
+               }
        }
  
        if (pipe_ctx->update_flags.bits.odm)
@@@ -2113,20 -2133,11 +2133,20 @@@ void dcn20_optimize_bandwidth
        if (hubbub->funcs->program_compbuf_size)
                hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
  
 +      if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
 +              dc_dmub_srv_p_state_delegate(dc,
 +                      true, context);
 +              context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
 +              dc->clk_mgr->clks.fw_based_mclk_switching = true;
 +      } else {
 +              dc->clk_mgr->clks.fw_based_mclk_switching = false;
 +      }
 +
        dc->clk_mgr->funcs->update_clocks(
                        dc->clk_mgr,
                        context,
                        true);
-       if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+       if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
                for (i = 0; i < dc->res_pool->pipe_count; ++i) {
                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
  
                                && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
                                && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
                                        pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
-                                               pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
+                                               pipe_ctx->dlg_regs.min_dst_y_next_start);
                }
        }
  }
@@@ -2471,36 -2482,31 +2491,31 @@@ static void dcn20_reset_back_end_for_pi
                return;
        }
  
-       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-               /* DPMS may already disable or */
-               /* dpms_off status is incorrect due to fastboot
-                * feature. When system resume from S4 with second
-                * screen only, the dpms_off would be true but
-                * VBIOS lit up eDP, so check link status too.
-                */
-               if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
-                       dc->link_srv->set_dpms_off(pipe_ctx);
-               else if (pipe_ctx->stream_res.audio)
-                       dc->hwss.disable_audio_stream(pipe_ctx);
-               /* free acquired resources */
-               if (pipe_ctx->stream_res.audio) {
-                       /*disable az_endpoint*/
-                       pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
-                       /*free audio*/
-                       if (dc->caps.dynamic_audio == true) {
-                               /*we have to dynamic arbitrate the audio endpoints*/
-                               /*we free the resource, need reset is_audio_acquired*/
-                               update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
-                                               pipe_ctx->stream_res.audio, false);
-                               pipe_ctx->stream_res.audio = NULL;
-                       }
+       /* DPMS may already disable or */
+       /* dpms_off status is incorrect due to fastboot
+        * feature. When system resume from S4 with second
+        * screen only, the dpms_off would be true but
+        * VBIOS lit up eDP, so check link status too.
+        */
+       if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+               dc->link_srv->set_dpms_off(pipe_ctx);
+       else if (pipe_ctx->stream_res.audio)
+               dc->hwss.disable_audio_stream(pipe_ctx);
+       /* free acquired resources */
+       if (pipe_ctx->stream_res.audio) {
+               /*disable az_endpoint*/
+               pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+               /*free audio*/
+               if (dc->caps.dynamic_audio == true) {
+                       /*we have to dynamic arbitrate the audio endpoints*/
+                       /*we free the resource, need reset is_audio_acquired*/
+                       update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
+                                       pipe_ctx->stream_res.audio, false);
+                       pipe_ctx->stream_res.audio = NULL;
                }
        }
-       else if (pipe_ctx->stream_res.dsc) {
-               dc->link_srv->set_dsc_enable(pipe_ctx, false);
-       }
  
        /* by upper caller loop, parent pipe: pipe0, will be reset last.
         * back end share by all pipes and will be disable only when disable
@@@ -2576,28 -2582,6 +2591,6 @@@ void dcn20_reset_hw_ctx_wrap
        }
  }
  
- void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
- {
-       struct mpc *mpc = dc->res_pool->mpc;
-       // input to MPCC is always RGB, by default leave black_color at 0
-       if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
-               get_hdr_visual_confirm_color(pipe_ctx, color);
-       else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
-               get_surface_visual_confirm_color(pipe_ctx, color);
-       else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
-               get_mpctree_visual_confirm_color(pipe_ctx, color);
-       else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
-               get_surface_tile_visual_confirm_color(pipe_ctx, color);
-       else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
-               get_subvp_visual_confirm_color(dc, pipe_ctx, color);
-       if (mpc->funcs->set_bg_color) {
-               memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
-               mpc->funcs->set_bg_color(mpc, color, mpcc_id);
-       }
- }
  void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
  {
        struct hubp *hubp = pipe_ctx->plane_res.hubp;
        if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
                !pipe_ctx->update_flags.bits.mpcc) {
                mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
-               dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
+               dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
                return;
        }
  
                        NULL,
                        hubp->inst,
                        mpcc_id);
-       dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
+       dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
  
        ASSERT(new_mpcc != NULL);
        hubp->opp_id = pipe_ctx->stream_res.opp->inst;
index 8263a07f265f2157cb72afce81dcddb0e9af572f,b9753867d97b00313566d82081dbcaa582b30bac..b59e215027e27179fd9d7fed89603373fee1a0cb
@@@ -330,10 -330,6 +330,6 @@@ void dcn30_enable_writeback
        DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
                __func__, wb_info->dwb_pipe_inst,\
                wb_info->mpcc_inst);
-       if (IS_DIAG_DC(dc->ctx->dce_environment)) {
-               /*till diags switch to warmup interface*/
-               dcn30_mmhubbub_warmup(dc, 1, wb_info);
-       }
        /* Update writeback pipe */
        dcn30_set_writeback(dc, wb_info, context);
  
@@@ -447,28 -443,6 +443,6 @@@ void dcn30_init_hw(struct dc *dc
        if (res_pool->dccg->funcs->dccg_init)
                res_pool->dccg->funcs->dccg_init(res_pool->dccg);
  
-       if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-               REG_WRITE(REFCLK_CNTL, 0);
-               REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
-               REG_WRITE(DIO_MEM_PWR_CTRL, 0);
-               if (!dc->debug.disable_clock_gate) {
-                       /* enable all DCN clock gating */
-                       REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
-                       REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-                       REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
-               }
-               //Enable ability to power gate / don't force power on permanently
-               if (hws->funcs.enable_power_gating_plane)
-                       hws->funcs.enable_power_gating_plane(hws, true);
-               return;
-       }
        if (!dcb->funcs->is_accelerated_mode(dcb)) {
                hws->funcs.bios_golden_init(dc);
                hws->funcs.disable_vga(dc->hwseq);
                res_pool->ref_clocks.xtalin_clock_inKhz =
                                dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
  
-               if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-                       if (res_pool->dccg && res_pool->hubbub) {
+               if (res_pool->dccg && res_pool->hubbub) {
  
-                               (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
-                                               dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
-                                               &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+                       (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+                                       dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
+                                       &res_pool->ref_clocks.dccg_ref_clock_inKhz);
  
-                               (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
-                                               res_pool->ref_clocks.dccg_ref_clock_inKhz,
-                                               &res_pool->ref_clocks.dchub_ref_clock_inKhz);
-                       } else {
-                               // Not all ASICs have DCCG sw component
-                               res_pool->ref_clocks.dccg_ref_clock_inKhz =
-                                               res_pool->ref_clocks.xtalin_clock_inKhz;
-                               res_pool->ref_clocks.dchub_ref_clock_inKhz =
-                                               res_pool->ref_clocks.xtalin_clock_inKhz;
-                       }
+                       (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+                                       res_pool->ref_clocks.dccg_ref_clock_inKhz,
+                                       &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+               } else {
+                       // Not all ASICs have DCCG sw component
+                       res_pool->ref_clocks.dccg_ref_clock_inKhz =
+                                       res_pool->ref_clocks.xtalin_clock_inKhz;
+                       res_pool->ref_clocks.dchub_ref_clock_inKhz =
+                                       res_pool->ref_clocks.xtalin_clock_inKhz;
                }
        } else
                ASSERT_CRITICAL(false);
                dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
  
        // Get DMCUB capabilities
-       dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
+       dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
        dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
        dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
  }
@@@ -736,8 -708,7 +708,7 @@@ bool dcn30_apply_idle_power_optimizatio
                                cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ;
                                cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
  
-                               dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
-                               dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+                               dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
  
                                return true;
                        }
                                        cmd.mall.cursor_height = cursor_attr.height;
                                        cmd.mall.cursor_pitch = cursor_attr.pitch;
  
-                                       dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
-                                       dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
-                                       dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+                                       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
  
                                        /* Use copied cursor, and it's okay to not switch back */
                                        cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
                                cmd.mall.tmr_scale = tmr_scale;
                                cmd.mall.debug_bits = dc->debug.mall_error_as_fatal;
  
-                               dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
-                               dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+                               dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
  
                                return true;
                        }
        cmd.mall.header.payload_bytes =
                sizeof(cmd.mall) - sizeof(cmd.mall.header);
  
-       dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
-       dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
-       dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
  
        return true;
  }
@@@ -983,36 -949,13 +949,36 @@@ void dcn30_set_disp_pattern_generator(c
  }
  
  void dcn30_prepare_bandwidth(struct dc *dc,
-       struct dc_state *context)
+                            struct dc_state *context)
  {
 +      bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
 +      /* Any transition into an FPO config should disable MCLK switching first to avoid
 +       * driver and FW P-State synchronization issues.
 +       */
 +      if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
 +              dc->optimized_required = true;
 +              context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
 +      }
 +
        if (dc->clk_mgr->dc_mode_softmax_enabled)
                if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
                                context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
                        dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
  
        dcn20_prepare_bandwidth(dc, context);
 +      /*
 +       * enabled -> enabled: do not disable
 +       * enabled -> disabled: disable
 +       * disabled -> enabled: don't care
 +       * disabled -> disabled: don't care
 +       */
 +      if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
 +              dc_dmub_srv_p_state_delegate(dc, false, context);
 +
 +      if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
 +              /* After disabling P-State, restore the original value to ensure we get the correct P-State
 +               * on the next optimize. */
 +              context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
 +      }
  }
  
index 00ea71b03ec780d3a6f334913774ad16b810f8e3,3abfe29d0b44b12a00d61e68cc5a07dc8e5ac407..ff3bcadebe59b0b7a5af246495b86452581a1cda
@@@ -176,15 -176,15 +176,15 @@@ enum pipe intel_connector_get_pipe(stru
  /**
   * intel_connector_update_modes - update connector from edid
   * @connector: DRM connector device to use
 - * @edid: previously read EDID information
 + * @drm_edid: previously read EDID information
   */
  int intel_connector_update_modes(struct drm_connector *connector,
 -                              struct edid *edid)
 +                               const struct drm_edid *drm_edid)
  {
        int ret;
  
 -      drm_connector_update_edid_property(connector, edid);
 -      ret = drm_add_edid_modes(connector, edid);
 +      drm_edid_connector_update(connector, drm_edid);
 +      ret = drm_edid_connector_add_modes(connector);
  
        return ret;
  }
  int intel_ddc_get_modes(struct drm_connector *connector,
                        struct i2c_adapter *adapter)
  {
 -      struct edid *edid;
 +      const struct drm_edid *drm_edid;
        int ret;
  
 -      edid = drm_get_edid(connector, adapter);
 -      if (!edid)
 +      drm_edid = drm_edid_read_ddc(connector, adapter);
 +      if (!drm_edid)
                return 0;
  
 -      ret = intel_connector_update_modes(connector, edid);
 -      kfree(edid);
 +      ret = intel_connector_update_modes(connector, drm_edid);
 +      drm_edid_free(drm_edid);
  
        return ret;
  }
@@@ -280,14 -280,14 +280,14 @@@ intel_attach_aspect_ratio_property(stru
  void
  intel_attach_hdmi_colorspace_property(struct drm_connector *connector)
  {
-       if (!drm_mode_create_hdmi_colorspace_property(connector))
+       if (!drm_mode_create_hdmi_colorspace_property(connector, 0))
                drm_connector_attach_colorspace_property(connector);
  }
  
  void
  intel_attach_dp_colorspace_property(struct drm_connector *connector)
  {
-       if (!drm_mode_create_dp_colorspace_property(connector))
+       if (!drm_mode_create_dp_colorspace_property(connector, 0))
                drm_connector_attach_colorspace_property(connector);
  }
  
index df4cf5468e7fd648b23212422ddbb283fd5b4801,43e27ab77f957a18cfa8fd1688f0c0f82f5c7b24..7726a72befc5446bd73d9da51b7ccd8175c0ab5d
@@@ -189,7 -189,7 +189,7 @@@ EXPORT_SYMBOL(ttm_device_swapout)
   * Returns:
   * !0: Failure.
   */
 -int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
 +int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
                    struct device *dev, struct address_space *mapping,
                    struct drm_vma_offset_manager *vma_manager,
                    bool use_dma_alloc, bool use_dma32)
        bdev->funcs = funcs;
  
        ttm_sys_man_init(bdev);
-       ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
+       ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32);
  
        bdev->vma_manager = vma_manager;
        spin_lock_init(&bdev->lru_lock);
index 4db3982057be8d18fc20a423c91d4ab708bd7514,668abf63f2bd6563c571b56e4149cbe8738ef36b..cddb9151d20f4465bfc269e62c3072ad51a990c0
  
  #include "ttm_module.h"
  
 -#define TTM_MAX_ORDER (PMD_SHIFT - PAGE_SHIFT)
 -#define __TTM_DIM_ORDER (TTM_MAX_ORDER + 1)
 -/* Some architectures have a weird PMD_SHIFT */
 -#define TTM_DIM_ORDER (__TTM_DIM_ORDER <= MAX_ORDER ? __TTM_DIM_ORDER : MAX_ORDER)
 -
  /**
   * struct ttm_pool_dma - Helper object for coherent DMA mappings
   *
@@@ -65,11 -70,11 +65,11 @@@ module_param(page_pool_size, ulong, 064
  
  static atomic_long_t allocated_pages;
  
 -static struct ttm_pool_type global_write_combined[TTM_DIM_ORDER];
 -static struct ttm_pool_type global_uncached[TTM_DIM_ORDER];
 +static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
 +static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
  
 -static struct ttm_pool_type global_dma32_write_combined[TTM_DIM_ORDER];
 -static struct ttm_pool_type global_dma32_uncached[TTM_DIM_ORDER];
 +static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
 +static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
  
  static spinlock_t shrinker_lock;
  static struct list_head shrinker_list;
@@@ -93,7 -98,7 +93,7 @@@ static struct page *ttm_pool_alloc_page
                        __GFP_KSWAPD_RECLAIM;
  
        if (!pool->use_dma_alloc) {
-               p = alloc_pages(gfp_flags, order);
+               p = alloc_pages_node(pool->nid, gfp_flags, order);
                if (p)
                        p->private = order;
                return p;
@@@ -287,7 -292,7 +287,7 @@@ static struct ttm_pool_type *ttm_pool_s
                                                  enum ttm_caching caching,
                                                  unsigned int order)
  {
-       if (pool->use_dma_alloc)
+       if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
                return &pool->caching[caching].orders[order];
  
  #ifdef CONFIG_X86
@@@ -444,7 -449,7 +444,7 @@@ int ttm_pool_alloc(struct ttm_pool *poo
        else
                gfp_flags |= GFP_HIGHUSER;
  
 -      for (order = min_t(unsigned int, TTM_MAX_ORDER, __fls(num_pages));
 +      for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
             num_pages;
             order = min_t(unsigned int, order, __fls(num_pages))) {
                struct ttm_pool_type *pt;
@@@ -545,29 -550,32 +545,32 @@@ EXPORT_SYMBOL(ttm_pool_free)
   *
   * @pool: the pool to initialize
   * @dev: device for DMA allocations and mappings
+  * @nid: NUMA node to use for allocations
   * @use_dma_alloc: true if coherent DMA alloc should be used
   * @use_dma32: true if GFP_DMA32 should be used
   *
   * Initialize the pool and its pool types.
   */
  void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
-                  bool use_dma_alloc, bool use_dma32)
+                  int nid, bool use_dma_alloc, bool use_dma32)
  {
        unsigned int i, j;
  
        WARN_ON(!dev && use_dma_alloc);
  
        pool->dev = dev;
+       pool->nid = nid;
        pool->use_dma_alloc = use_dma_alloc;
        pool->use_dma32 = use_dma32;
  
-       if (use_dma_alloc) {
+       if (use_dma_alloc || nid != NUMA_NO_NODE) {
                for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
 -                      for (j = 0; j < TTM_DIM_ORDER; ++j)
 +                      for (j = 0; j <= MAX_ORDER; ++j)
                                ttm_pool_type_init(&pool->caching[i].orders[j],
                                                   pool, i, j);
        }
  }
+ EXPORT_SYMBOL(ttm_pool_init);
  
  /**
   * ttm_pool_fini - Cleanup a pool
@@@ -581,9 -589,9 +584,9 @@@ void ttm_pool_fini(struct ttm_pool *poo
  {
        unsigned int i, j;
  
-       if (pool->use_dma_alloc) {
+       if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
                for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
 -                      for (j = 0; j < TTM_DIM_ORDER; ++j)
 +                      for (j = 0; j <= MAX_ORDER; ++j)
                                ttm_pool_type_fini(&pool->caching[i].orders[j]);
        }
  
         */
        synchronize_shrinkers();
  }
+ EXPORT_SYMBOL(ttm_pool_fini);
  
  /* As long as pages are available make sure to release at least one */
  static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
@@@ -637,7 -646,7 +641,7 @@@ static void ttm_pool_debugfs_header(str
        unsigned int i;
  
        seq_puts(m, "\t ");
 -      for (i = 0; i < TTM_DIM_ORDER; ++i)
 +      for (i = 0; i <= MAX_ORDER; ++i)
                seq_printf(m, " ---%2u---", i);
        seq_puts(m, "\n");
  }
@@@ -648,7 -657,7 +652,7 @@@ static void ttm_pool_debugfs_orders(str
  {
        unsigned int i;
  
 -      for (i = 0; i < TTM_DIM_ORDER; ++i)
 +      for (i = 0; i <= MAX_ORDER; ++i)
                seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
        seq_puts(m, "\n");
  }
@@@ -751,13 -760,16 +755,13 @@@ int ttm_pool_mgr_init(unsigned long num
  {
        unsigned int i;
  
 -      BUILD_BUG_ON(TTM_DIM_ORDER > MAX_ORDER);
 -      BUILD_BUG_ON(TTM_DIM_ORDER < 1);
 -
        if (!page_pool_size)
                page_pool_size = num_pages;
  
        spin_lock_init(&shrinker_lock);
        INIT_LIST_HEAD(&shrinker_list);
  
 -      for (i = 0; i < TTM_DIM_ORDER; ++i) {
 +      for (i = 0; i <= MAX_ORDER; ++i) {
                ttm_pool_type_init(&global_write_combined[i], NULL,
                                   ttm_write_combined, i);
                ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@@ -790,7 -802,7 +794,7 @@@ void ttm_pool_mgr_fini(void
  {
        unsigned int i;
  
 -      for (i = 0; i < TTM_DIM_ORDER; ++i) {
 +      for (i = 0; i <= MAX_ORDER; ++i) {
                ttm_pool_type_fini(&global_write_combined[i]);
                ttm_pool_type_fini(&global_uncached[i]);
  
index 1ce4b36ab33be191f1d238e5799e2bda5c5969d4,feac2f2c736e686445b46ad0f66e9295e0b429d8..e0a77671edd6c155329119e498e271a520a4eaab
@@@ -137,6 -137,7 +137,6 @@@ static void ttm_tt_init_fields(struct t
                               unsigned long extra_pages)
  {
        ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
 -      ttm->caching = ttm_cached;
        ttm->page_flags = page_flags;
        ttm->dma_address = NULL;
        ttm->swap_storage = NULL;
@@@ -449,3 -450,9 +449,9 @@@ ttm_kmap_iter_tt_init(struct ttm_kmap_i
        return &iter_tt->base;
  }
  EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
+ unsigned long ttm_tt_pages_limit(void)
+ {
+       return ttm_pages_limit;
+ }
+ EXPORT_SYMBOL(ttm_tt_pages_limit);
index 6da41ea1250ab6ec6d37e8780976b057132976a5,482397d5cb48d578c00f616d97a4b21c8bc0072c..5261526d286f5100fb62971450e78e793e241a39
@@@ -153,17 -153,11 +153,17 @@@ static bool vc4_hdmi_mode_needs_scrambl
        return clock > HDMI_14_MAX_TMDS_CLK;
  }
  
 -static bool vc4_hdmi_is_full_range_rgb(struct vc4_hdmi *vc4_hdmi,
 -                                     const struct drm_display_mode *mode)
 +static bool vc4_hdmi_is_full_range(struct vc4_hdmi *vc4_hdmi,
 +                                 struct vc4_hdmi_connector_state *vc4_state)
  {
 +      const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
        struct drm_display_info *display = &vc4_hdmi->connector.display_info;
  
 +      if (vc4_state->broadcast_rgb == VC4_HDMI_BROADCAST_RGB_LIMITED)
 +              return false;
 +      else if (vc4_state->broadcast_rgb == VC4_HDMI_BROADCAST_RGB_FULL)
 +              return true;
 +
        return !display->is_hdmi ||
                drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_FULL;
  }
@@@ -534,45 -528,14 +534,45 @@@ static int vc4_hdmi_connector_atomic_ch
  {
        struct drm_connector_state *old_state =
                drm_atomic_get_old_connector_state(state, connector);
 +      struct vc4_hdmi_connector_state *old_vc4_state =
 +              conn_state_to_vc4_hdmi_conn_state(old_state);
        struct drm_connector_state *new_state =
                drm_atomic_get_new_connector_state(state, connector);
 +      struct vc4_hdmi_connector_state *new_vc4_state =
 +              conn_state_to_vc4_hdmi_conn_state(new_state);
        struct drm_crtc *crtc = new_state->crtc;
  
        if (!crtc)
                return 0;
  
 +      if (old_state->tv.margins.left != new_state->tv.margins.left ||
 +          old_state->tv.margins.right != new_state->tv.margins.right ||
 +          old_state->tv.margins.top != new_state->tv.margins.top ||
 +          old_state->tv.margins.bottom != new_state->tv.margins.bottom) {
 +              struct drm_crtc_state *crtc_state;
 +              int ret;
 +
 +              crtc_state = drm_atomic_get_crtc_state(state, crtc);
 +              if (IS_ERR(crtc_state))
 +                      return PTR_ERR(crtc_state);
 +
 +              /*
 +               * Strictly speaking, we should be calling
 +               * drm_atomic_helper_check_planes() after our call to
 +               * drm_atomic_add_affected_planes(). However, the
 +               * connector atomic_check is called as part of
 +               * drm_atomic_helper_check_modeset() that already
 +               * happens before a call to
 +               * drm_atomic_helper_check_planes() in
 +               * drm_atomic_helper_check().
 +               */
 +              ret = drm_atomic_add_affected_planes(state, crtc);
 +              if (ret)
 +                      return ret;
 +      }
 +
        if (old_state->colorspace != new_state->colorspace ||
 +          old_vc4_state->broadcast_rgb != new_vc4_state->broadcast_rgb ||
            !drm_connector_atomic_hdr_metadata_equal(old_state, new_state)) {
                struct drm_crtc_state *crtc_state;
  
        return 0;
  }
  
 +static int vc4_hdmi_connector_get_property(struct drm_connector *connector,
 +                                         const struct drm_connector_state *state,
 +                                         struct drm_property *property,
 +                                         uint64_t *val)
 +{
 +      struct drm_device *drm = connector->dev;
 +      struct vc4_hdmi *vc4_hdmi =
 +              connector_to_vc4_hdmi(connector);
 +      const struct vc4_hdmi_connector_state *vc4_conn_state =
 +              conn_state_to_vc4_hdmi_conn_state(state);
 +
 +      if (property == vc4_hdmi->broadcast_rgb_property) {
 +              *val = vc4_conn_state->broadcast_rgb;
 +      } else {
 +              drm_dbg(drm, "Unknown property [PROP:%d:%s]\n",
 +                      property->base.id, property->name);
 +              return -EINVAL;
 +      }
 +
 +      return 0;
 +}
 +
 +static int vc4_hdmi_connector_set_property(struct drm_connector *connector,
 +                                         struct drm_connector_state *state,
 +                                         struct drm_property *property,
 +                                         uint64_t val)
 +{
 +      struct drm_device *drm = connector->dev;
 +      struct vc4_hdmi *vc4_hdmi =
 +              connector_to_vc4_hdmi(connector);
 +      struct vc4_hdmi_connector_state *vc4_conn_state =
 +              conn_state_to_vc4_hdmi_conn_state(state);
 +
 +      if (property == vc4_hdmi->broadcast_rgb_property) {
 +              vc4_conn_state->broadcast_rgb = val;
 +              return 0;
 +      }
 +
 +      drm_dbg(drm, "Unknown property [PROP:%d:%s]\n",
 +              property->base.id, property->name);
 +      return -EINVAL;
 +}
 +
  static void vc4_hdmi_connector_reset(struct drm_connector *connector)
  {
        struct vc4_hdmi_connector_state *old_state =
        new_state->base.max_bpc = 8;
        new_state->base.max_requested_bpc = 8;
        new_state->output_format = VC4_HDMI_OUTPUT_RGB;
 +      new_state->broadcast_rgb = VC4_HDMI_BROADCAST_RGB_AUTO;
        drm_atomic_helper_connector_tv_margins_reset(connector);
  }
  
@@@ -666,7 -585,6 +666,7 @@@ vc4_hdmi_connector_duplicate_state(stru
        new_state->tmds_char_rate = vc4_state->tmds_char_rate;
        new_state->output_bpc = vc4_state->output_bpc;
        new_state->output_format = vc4_state->output_format;
 +      new_state->broadcast_rgb = vc4_state->broadcast_rgb;
        __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
  
        return &new_state->base;
@@@ -677,8 -595,6 +677,8 @@@ static const struct drm_connector_func
        .reset = vc4_hdmi_connector_reset,
        .atomic_duplicate_state = vc4_hdmi_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 +      .atomic_get_property = vc4_hdmi_connector_get_property,
 +      .atomic_set_property = vc4_hdmi_connector_set_property,
  };
  
  static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
        .atomic_check = vc4_hdmi_connector_atomic_check,
  };
  
 +static const struct drm_prop_enum_list broadcast_rgb_names[] = {
 +      { VC4_HDMI_BROADCAST_RGB_AUTO, "Automatic" },
 +      { VC4_HDMI_BROADCAST_RGB_FULL, "Full" },
 +      { VC4_HDMI_BROADCAST_RGB_LIMITED, "Limited 16:235" },
 +};
 +
 +static void
 +vc4_hdmi_attach_broadcast_rgb_property(struct drm_device *dev,
 +                                     struct vc4_hdmi *vc4_hdmi)
 +{
 +      struct drm_property *prop = vc4_hdmi->broadcast_rgb_property;
 +
 +      if (!prop) {
 +              prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
 +                                              "Broadcast RGB",
 +                                              broadcast_rgb_names,
 +                                              ARRAY_SIZE(broadcast_rgb_names));
 +              if (!prop)
 +                      return;
 +
 +              vc4_hdmi->broadcast_rgb_property = prop;
 +      }
 +
 +      drm_object_attach_property(&vc4_hdmi->connector.base, prop,
 +                                 VC4_HDMI_BROADCAST_RGB_AUTO);
 +}
 +
  static int vc4_hdmi_connector_init(struct drm_device *dev,
                                   struct vc4_hdmi *vc4_hdmi)
  {
        if (ret)
                return ret;
  
-       ret = drm_mode_create_hdmi_colorspace_property(connector);
+       ret = drm_mode_create_hdmi_colorspace_property(connector, 0);
        if (ret)
                return ret;
  
        if (vc4_hdmi->variant->supports_hdr)
                drm_connector_attach_hdr_output_metadata_property(connector);
  
 +      vc4_hdmi_attach_broadcast_rgb_property(dev, vc4_hdmi);
 +
        drm_connector_attach_encoder(connector, encoder);
  
        return 0;
@@@ -916,7 -803,7 +916,7 @@@ static void vc4_hdmi_set_avi_infoframe(
  
        drm_hdmi_avi_infoframe_quant_range(&frame.avi,
                                           connector, mode,
 -                                         vc4_hdmi_is_full_range_rgb(vc4_hdmi, mode) ?
 +                                         vc4_hdmi_is_full_range(vc4_hdmi, vc4_state) ?
                                           HDMI_QUANTIZATION_RANGE_FULL :
                                           HDMI_QUANTIZATION_RANGE_LIMITED);
        drm_hdmi_avi_infoframe_colorimetry(&frame.avi, cstate);
@@@ -1159,8 -1046,6 +1159,8 @@@ static void vc4_hdmi_csc_setup(struct v
                               struct drm_connector_state *state,
                               const struct drm_display_mode *mode)
  {
 +      struct vc4_hdmi_connector_state *vc4_state =
 +              conn_state_to_vc4_hdmi_conn_state(state);
        struct drm_device *drm = vc4_hdmi->connector.dev;
        unsigned long flags;
        u32 csc_ctl;
        csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
                                VC4_HD_CSC_CTL_ORDER);
  
 -      if (!vc4_hdmi_is_full_range_rgb(vc4_hdmi, mode)) {
 +      if (!vc4_hdmi_is_full_range(vc4_hdmi, vc4_state)) {
                /* CEA VICs other than #1 require limited range RGB
                 * output unless overridden by an AVI infoframe.
                 * Apply a colorspace conversion to squash 0-255 down
  }
  
  /*
 - * If we need to output Full Range RGB, then use the unity matrix
 - *
 - * [ 1      0      0      0]
 - * [ 0      1      0      0]
 - * [ 0      0      1      0]
 + * Matrices for (internal) RGB to RGB output.
   *
 - * Matrix is signed 2p13 fixed point, with signed 9p6 offsets
 + * Matrices are signed 2p13 fixed point, with signed 9p6 offsets
   */
 -static const u16 vc5_hdmi_csc_full_rgb_unity[3][4] = {
 -      { 0x2000, 0x0000, 0x0000, 0x0000 },
 -      { 0x0000, 0x2000, 0x0000, 0x0000 },
 -      { 0x0000, 0x0000, 0x2000, 0x0000 },
 +static const u16 vc5_hdmi_csc_full_rgb_to_rgb[2][3][4] = {
 +      {
 +              /*
 +               * Full range - unity
 +               *
 +               * [ 1      0      0      0]
 +               * [ 0      1      0      0]
 +               * [ 0      0      1      0]
 +               */
 +              { 0x2000, 0x0000, 0x0000, 0x0000 },
 +              { 0x0000, 0x2000, 0x0000, 0x0000 },
 +              { 0x0000, 0x0000, 0x2000, 0x0000 },
 +      },
 +      {
 +              /*
 +               * Limited range
 +               *
 +               * CEA VICs other than #1 require limited range RGB
 +               * output unless overridden by an AVI infoframe. Apply a
 +               * colorspace conversion to squash 0-255 down to 16-235.
 +               * The matrix here is:
 +               *
 +               * [ 0.8594 0      0      16]
 +               * [ 0      0.8594 0      16]
 +               * [ 0      0      0.8594 16]
 +               */
 +              { 0x1b80, 0x0000, 0x0000, 0x0400 },
 +              { 0x0000, 0x1b80, 0x0000, 0x0400 },
 +              { 0x0000, 0x0000, 0x1b80, 0x0400 },
 +      },
  };
  
  /*
 - * CEA VICs other than #1 require limited range RGB output unless
 - * overridden by an AVI infoframe. Apply a colorspace conversion to
 - * squash 0-255 down to 16-235. The matrix here is:
 - *
 - * [ 0.8594 0      0      16]
 - * [ 0      0.8594 0      16]
 - * [ 0      0      0.8594 16]
 + * Conversion between Full Range RGB and YUV using the BT.601 Colorspace
   *
 - * Matrix is signed 2p13 fixed point, with signed 9p6 offsets
 + * Matrices are signed 2p13 fixed point, with signed 9p6 offsets
   */
 -static const u16 vc5_hdmi_csc_full_rgb_to_limited_rgb[3][4] = {
 -      { 0x1b80, 0x0000, 0x0000, 0x0400 },
 -      { 0x0000, 0x1b80, 0x0000, 0x0400 },
 -      { 0x0000, 0x0000, 0x1b80, 0x0400 },
 +static const u16 vc5_hdmi_csc_full_rgb_to_yuv_bt601[2][3][4] = {
 +      {
 +              /*
 +               * Full Range
 +               *
 +               * [  0.299000  0.587000  0.114000  0   ]
 +               * [ -0.168736 -0.331264  0.500000  128 ]
 +               * [  0.500000 -0.418688 -0.081312  128 ]
 +               */
 +              { 0x0991, 0x12c9, 0x03a6, 0x0000 },
 +              { 0xfa9b, 0xf567, 0x1000, 0x2000 },
 +              { 0x1000, 0xf29b, 0xfd67, 0x2000 },
 +      },
 +      {
 +              /* Limited Range
 +               *
 +               * [  0.255785  0.502160  0.097523  16  ]
 +               * [ -0.147644 -0.289856  0.437500  128 ]
 +               * [  0.437500 -0.366352 -0.071148  128 ]
 +               */
 +              { 0x082f, 0x1012, 0x031f, 0x0400 },
 +              { 0xfb48, 0xf6ba, 0x0e00, 0x2000 },
 +              { 0x0e00, 0xf448, 0xfdba, 0x2000 },
 +      },
  };
  
  /*
 - * Conversion between Full Range RGB and Full Range YUV422 using the
 - * BT.709 Colorspace
 - *
 - *
 - * [  0.181906  0.611804  0.061758  16  ]
 - * [ -0.100268 -0.337232  0.437500  128 ]
 - * [  0.437500 -0.397386 -0.040114  128 ]
 + * Conversion between Full Range RGB and YUV using the BT.709 Colorspace
   *
 - * Matrix is signed 2p13 fixed point, with signed 9p6 offsets
 + * Matrices are signed 2p13 fixed point, with signed 9p6 offsets
   */
 -static const u16 vc5_hdmi_csc_full_rgb_to_limited_yuv422_bt709[3][4] = {
 -      { 0x05d2, 0x1394, 0x01fa, 0x0400 },
 -      { 0xfccc, 0xf536, 0x0e00, 0x2000 },
 -      { 0x0e00, 0xf34a, 0xfeb8, 0x2000 },
 +static const u16 vc5_hdmi_csc_full_rgb_to_yuv_bt709[2][3][4] = {
 +      {
 +              /*
 +               * Full Range
 +               *
 +               * [  0.212600  0.715200  0.072200  0   ]
 +               * [ -0.114572 -0.385428  0.500000  128 ]
 +               * [  0.500000 -0.454153 -0.045847  128 ]
 +               */
 +              { 0x06ce, 0x16e3, 0x024f, 0x0000 },
 +              { 0xfc56, 0xf3ac, 0x1000, 0x2000 },
 +              { 0x1000, 0xf179, 0xfe89, 0x2000 },
 +      },
 +      {
 +              /*
 +               * Limited Range
 +               *
 +               * [  0.181906  0.611804  0.061758  16  ]
 +               * [ -0.100268 -0.337232  0.437500  128 ]
 +               * [  0.437500 -0.397386 -0.040114  128 ]
 +               */
 +              { 0x05d2, 0x1394, 0x01fa, 0x0400 },
 +              { 0xfccc, 0xf536, 0x0e00, 0x2000 },
 +              { 0x0e00, 0xf34a, 0xfeb8, 0x2000 },
 +      },
  };
  
  /*
 - * Conversion between Full Range RGB and Full Range YUV444 using the
 - * BT.709 Colorspace
 - *
 - * [ -0.100268 -0.337232  0.437500  128 ]
 - * [  0.437500 -0.397386 -0.040114  128 ]
 - * [  0.181906  0.611804  0.061758  16  ]
 + * Conversion between Full Range RGB and YUV using the BT.2020 Colorspace
   *
 - * Matrix is signed 2p13 fixed point, with signed 9p6 offsets
 + * Matrices are signed 2p13 fixed point, with signed 9p6 offsets
   */
 -static const u16 vc5_hdmi_csc_full_rgb_to_limited_yuv444_bt709[3][4] = {
 -      { 0xfccc, 0xf536, 0x0e00, 0x2000 },
 -      { 0x0e00, 0xf34a, 0xfeb8, 0x2000 },
 -      { 0x05d2, 0x1394, 0x01fa, 0x0400 },
 +static const u16 vc5_hdmi_csc_full_rgb_to_yuv_bt2020[2][3][4] = {
 +      {
 +              /*
 +               * Full Range
 +               *
 +               * [  0.262700  0.678000  0.059300  0   ]
 +               * [ -0.139630 -0.360370  0.500000  128 ]
 +               * [  0.500000 -0.459786 -0.040214  128 ]
 +               */
 +              { 0x0868, 0x15b2, 0x01e6, 0x0000 },
 +              { 0xfb89, 0xf479, 0x1000, 0x2000 },
 +              { 0x1000, 0xf14a, 0xfeb8, 0x2000 },
 +      },
 +      {
 +              /* Limited Range
 +               *
 +               * [  0.224732  0.580008  0.050729  16  ]
 +               * [ -0.122176 -0.315324  0.437500  128 ]
 +               * [  0.437500 -0.402312 -0.035188  128 ]
 +               */
 +              { 0x082f, 0x1012, 0x031f, 0x0400 },
 +              { 0xfb48, 0xf6ba, 0x0e00, 0x2000 },
 +              { 0x0e00, 0xf448, 0xfdba, 0x2000 },
 +      },
  };
  
  static void vc5_hdmi_set_csc_coeffs(struct vc4_hdmi *vc4_hdmi,
        HDMI_WRITE(HDMI_CSC_34_33, (coeffs[2][3] << 16) | coeffs[2][2]);
  }
  
 +static void vc5_hdmi_set_csc_coeffs_swap(struct vc4_hdmi *vc4_hdmi,
 +                                       const u16 coeffs[3][4])
 +{
 +      lockdep_assert_held(&vc4_hdmi->hw_lock);
 +
 +      /* YUV444 needs the CSC matrices using the channels in a different order */
 +      HDMI_WRITE(HDMI_CSC_12_11, (coeffs[1][1] << 16) | coeffs[1][0]);
 +      HDMI_WRITE(HDMI_CSC_14_13, (coeffs[1][3] << 16) | coeffs[1][2]);
 +      HDMI_WRITE(HDMI_CSC_22_21, (coeffs[2][1] << 16) | coeffs[2][0]);
 +      HDMI_WRITE(HDMI_CSC_24_23, (coeffs[2][3] << 16) | coeffs[2][2]);
 +      HDMI_WRITE(HDMI_CSC_32_31, (coeffs[0][1] << 16) | coeffs[0][0]);
 +      HDMI_WRITE(HDMI_CSC_34_33, (coeffs[0][3] << 16) | coeffs[0][2]);
 +}
 +
 +static const u16
 +(*vc5_hdmi_find_yuv_csc_coeffs(struct vc4_hdmi *vc4_hdmi, u32 colorspace, bool limited))[4]
 +{
 +      switch (colorspace) {
 +      case DRM_MODE_COLORIMETRY_SMPTE_170M_YCC:
 +      case DRM_MODE_COLORIMETRY_XVYCC_601:
 +      case DRM_MODE_COLORIMETRY_SYCC_601:
 +      case DRM_MODE_COLORIMETRY_OPYCC_601:
 +      case DRM_MODE_COLORIMETRY_BT601_YCC:
 +              return vc5_hdmi_csc_full_rgb_to_yuv_bt601[limited];
 +
 +      default:
 +      case DRM_MODE_COLORIMETRY_NO_DATA:
 +      case DRM_MODE_COLORIMETRY_BT709_YCC:
 +      case DRM_MODE_COLORIMETRY_XVYCC_709:
 +      case DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED:
 +      case DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT:
 +              return vc5_hdmi_csc_full_rgb_to_yuv_bt709[limited];
 +
 +      case DRM_MODE_COLORIMETRY_BT2020_CYCC:
 +      case DRM_MODE_COLORIMETRY_BT2020_YCC:
 +      case DRM_MODE_COLORIMETRY_BT2020_RGB:
 +      case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
 +      case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
 +              return vc5_hdmi_csc_full_rgb_to_yuv_bt2020[limited];
 +      }
 +}
 +
  static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
                               struct drm_connector_state *state,
                               const struct drm_display_mode *mode)
        struct drm_device *drm = vc4_hdmi->connector.dev;
        struct vc4_hdmi_connector_state *vc4_state =
                conn_state_to_vc4_hdmi_conn_state(state);
 +      unsigned int lim_range = vc4_hdmi_is_full_range(vc4_hdmi, vc4_state) ? 0 : 1;
        unsigned long flags;
 +      const u16 (*csc)[4];
        u32 if_cfg = 0;
        u32 if_xbar = 0x543210;
        u32 csc_chan_ctl = 0;
  
        switch (vc4_state->output_format) {
        case VC4_HDMI_OUTPUT_YUV444:
 -              vc5_hdmi_set_csc_coeffs(vc4_hdmi, vc5_hdmi_csc_full_rgb_to_limited_yuv444_bt709);
 +              csc = vc5_hdmi_find_yuv_csc_coeffs(vc4_hdmi, state->colorspace, !!lim_range);
 +
 +              vc5_hdmi_set_csc_coeffs_swap(vc4_hdmi, csc);
                break;
  
        case VC4_HDMI_OUTPUT_YUV422:
 +              csc = vc5_hdmi_find_yuv_csc_coeffs(vc4_hdmi, state->colorspace, !!lim_range);
 +
                csc_ctl |= VC4_SET_FIELD(VC5_MT_CP_CSC_CTL_FILTER_MODE_444_TO_422_STANDARD,
                                         VC5_MT_CP_CSC_CTL_FILTER_MODE_444_TO_422) |
                        VC5_MT_CP_CSC_CTL_USE_444_TO_422 |
                if_cfg |= VC4_SET_FIELD(VC5_DVP_HT_VEC_INTERFACE_CFG_SEL_422_FORMAT_422_LEGACY,
                                        VC5_DVP_HT_VEC_INTERFACE_CFG_SEL_422);
  
 -              vc5_hdmi_set_csc_coeffs(vc4_hdmi, vc5_hdmi_csc_full_rgb_to_limited_yuv422_bt709);
 +              vc5_hdmi_set_csc_coeffs(vc4_hdmi, csc);
                break;
  
        case VC4_HDMI_OUTPUT_RGB:
                if_xbar = 0x354021;
  
 -              if (!vc4_hdmi_is_full_range_rgb(vc4_hdmi, mode))
 -                      vc5_hdmi_set_csc_coeffs(vc4_hdmi, vc5_hdmi_csc_full_rgb_to_limited_rgb);
 -              else
 -                      vc5_hdmi_set_csc_coeffs(vc4_hdmi, vc5_hdmi_csc_full_rgb_unity);
 +              vc5_hdmi_set_csc_coeffs(vc4_hdmi, vc5_hdmi_csc_full_rgb_to_rgb[lim_range]);
                break;
  
        default:
index b046f79f47441c4ea4c04971f9959f767b06587f,ccbf0c0934c36b9b03dfd0008b985aa325d4da5f..02f2ac4dd2df6b51950238dd14b353f62850d59b
  
  #define DP_DSC_MAX_BITS_PER_PIXEL_HI        0x068   /* eDP 1.4 */
  # define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK  (0x3 << 0)
 -# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
 -# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK  0x06
 -# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY  0x08
 +# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK  (0x3 << 5)        /* eDP 1.5 & DP 2.0 */
 +# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY  (1 << 7)  /* eDP 1.5 & DP 2.0 */
  
  #define DP_DSC_DEC_COLOR_FORMAT_CAP         0x069
  # define DP_DSC_RGB                         (1 << 0)
  
  #define DP_EDP_GENERAL_CAP_2              0x703
  # define DP_EDP_OVERDRIVE_ENGINE_ENABLED              (1 << 0)
 -# define DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE (1 << 4)
 +# define DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE               (1 << 4)
  
  #define DP_EDP_GENERAL_CAP_3              0x704    /* eDP 1.4 */
  # define DP_EDP_X_REGION_CAP_MASK                     (0xf << 0)
  # define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE              (1 << 4)
  # define DP_EDP_REGIONAL_BACKLIGHT_ENABLE             (1 << 5)
  # define DP_EDP_UPDATE_REGION_BRIGHTNESS              (1 << 6) /* eDP 1.4 */
 -# define DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE  (1 << 7)
 +# define DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE                (1 << 7)
  
  #define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB     0x722
  #define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB     0x723
@@@ -1635,7 -1636,7 +1635,7 @@@ enum dp_pixelformat 
   *
   * This enum is used to indicate DP VSC SDP Colorimetry formats.
   * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
-  * DB18] and a name of enum member follows DRM_MODE_COLORIMETRY definition.
+  * DB18] and a name of enum member follows enum drm_colorimetry definition.
   *
   * @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or
   *                          ITU-R BT.601 colorimetry format
index e143fef07de9f7cfabda413f28af5cd92a48e43a,3cea003462059d8ae753280c0cce604a208571e4..d300fde6c1a47a0330dc17d17725c26fa206235d
@@@ -30,6 -30,7 +30,7 @@@
  #include <linux/notifier.h>
  #include <drm/drm_mode_object.h>
  #include <drm/drm_util.h>
+ #include <drm/drm_property.h>
  
  #include <uapi/drm/drm_mode.h>
  
@@@ -199,11 -200,6 +200,11 @@@ enum drm_connector_tv_mode 
         */
        DRM_MODE_TV_MODE_SECAM,
  
 +      /**
 +       * @DRM_MODE_TV_MODE_MAX: Number of analog TV output modes.
 +       *
 +       * Internal implementation detail; this is not uABI.
 +       */
        DRM_MODE_TV_MODE_MAX,
  };
  
@@@ -424,37 -420,106 +425,106 @@@ enum drm_privacy_screen_status 
        PRIVACY_SCREEN_ENABLED_LOCKED,
  };
  
- /*
-  * This is a consolidated colorimetry list supported by HDMI and
+ /**
+  * enum drm_colorspace - color space
+  *
+  * This enum is a consolidated colorimetry list supported by HDMI and
   * DP protocol standard. The respective connectors will register
   * a property with the subset of this list (supported by that
   * respective protocol). Userspace will set the colorspace through
   * a colorspace property which will be created and exposed to
   * userspace.
+  *
+  * DP definitions come from the DP v2.0 spec
+  * HDMI definitions come from the CTA-861-H spec
+  *
+  * A note on YCC and RGB variants:
+  *
+  * Since userspace is not aware of the encoding on the wire
+  * (RGB or YCbCr), drivers are free to pick the appropriate
+  * variant, regardless of what userspace selects. E.g., if
+  * BT2020_RGB is selected by userspace a driver will pick
+  * BT2020_YCC if the encoding on the wire is YUV444 or YUV420.
+  *
+  * @DRM_MODE_COLORIMETRY_DEFAULT:
+  *   Driver specific behavior.
+  * @DRM_MODE_COLORIMETRY_NO_DATA:
+  *   Driver specific behavior.
+  * @DRM_MODE_COLORIMETRY_SMPTE_170M_YCC:
+  *   (HDMI)
+  *   SMPTE ST 170M colorimetry format
+  * @DRM_MODE_COLORIMETRY_BT709_YCC:
+  *   (HDMI, DP)
+  *   ITU-R BT.709 colorimetry format
+  * @DRM_MODE_COLORIMETRY_XVYCC_601:
+  *   (HDMI, DP)
+  *   xvYCC601 colorimetry format
+  * @DRM_MODE_COLORIMETRY_XVYCC_709:
+  *   (HDMI, DP)
+  *   xvYCC709 colorimetry format
+  * @DRM_MODE_COLORIMETRY_SYCC_601:
+  *   (HDMI, DP)
+  *   sYCC601 colorimetry format
+  * @DRM_MODE_COLORIMETRY_OPYCC_601:
+  *   (HDMI, DP)
+  *   opYCC601 colorimetry format
+  * @DRM_MODE_COLORIMETRY_OPRGB:
+  *   (HDMI, DP)
+  *   opRGB colorimetry format
+  * @DRM_MODE_COLORIMETRY_BT2020_CYCC:
+  *   (HDMI, DP)
+  *   ITU-R BT.2020 Y'c C'bc C'rc (constant luminance) colorimetry format
+  * @DRM_MODE_COLORIMETRY_BT2020_RGB:
+  *   (HDMI, DP)
+  *   ITU-R BT.2020 R' G' B' colorimetry format
+  * @DRM_MODE_COLORIMETRY_BT2020_YCC:
+  *   (HDMI, DP)
+  *   ITU-R BT.2020 Y' C'b C'r colorimetry format
+  * @DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+  *   (HDMI)
+  *   SMPTE ST 2113 P3D65 colorimetry format
+  * @DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+  *   (HDMI)
+  *   SMPTE ST 2113 P3DCI colorimetry format
+  * @DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED:
+  *   (DP)
+  *   RGB wide gamut fixed point colorimetry format
+  * @DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT:
+  *   (DP)
+  *   RGB wide gamut floating point
+  *   (scRGB (IEC 61966-2-2)) colorimetry format
+  * @DRM_MODE_COLORIMETRY_BT601_YCC:
+  *   (DP)
+  *   ITU-R BT.601 colorimetry format
+  *   The DP spec does not say whether this is the 525 or the 625
+  *   line version.
   */
- /* For Default case, driver will set the colorspace */
- #define DRM_MODE_COLORIMETRY_DEFAULT                  0
- /* CEA 861 Normal Colorimetry options */
- #define DRM_MODE_COLORIMETRY_NO_DATA                  0
- #define DRM_MODE_COLORIMETRY_SMPTE_170M_YCC           1
- #define DRM_MODE_COLORIMETRY_BT709_YCC                        2
- /* CEA 861 Extended Colorimetry Options */
- #define DRM_MODE_COLORIMETRY_XVYCC_601                        3
- #define DRM_MODE_COLORIMETRY_XVYCC_709                        4
- #define DRM_MODE_COLORIMETRY_SYCC_601                 5
- #define DRM_MODE_COLORIMETRY_OPYCC_601                        6
- #define DRM_MODE_COLORIMETRY_OPRGB                    7
- #define DRM_MODE_COLORIMETRY_BT2020_CYCC              8
- #define DRM_MODE_COLORIMETRY_BT2020_RGB                       9
- #define DRM_MODE_COLORIMETRY_BT2020_YCC                       10
- /* Additional Colorimetry extension added as part of CTA 861.G */
- #define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65           11
- #define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER               12
- /* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */
- #define DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED           13
- #define DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT           14
- #define DRM_MODE_COLORIMETRY_BT601_YCC                        15
+ enum drm_colorspace {
+       /* For Default case, driver will set the colorspace */
+       DRM_MODE_COLORIMETRY_DEFAULT            = 0,
+       /* CEA 861 Normal Colorimetry options */
+       DRM_MODE_COLORIMETRY_NO_DATA            = 0,
+       DRM_MODE_COLORIMETRY_SMPTE_170M_YCC     = 1,
+       DRM_MODE_COLORIMETRY_BT709_YCC          = 2,
+       /* CEA 861 Extended Colorimetry Options */
+       DRM_MODE_COLORIMETRY_XVYCC_601          = 3,
+       DRM_MODE_COLORIMETRY_XVYCC_709          = 4,
+       DRM_MODE_COLORIMETRY_SYCC_601           = 5,
+       DRM_MODE_COLORIMETRY_OPYCC_601          = 6,
+       DRM_MODE_COLORIMETRY_OPRGB              = 7,
+       DRM_MODE_COLORIMETRY_BT2020_CYCC        = 8,
+       DRM_MODE_COLORIMETRY_BT2020_RGB         = 9,
+       DRM_MODE_COLORIMETRY_BT2020_YCC         = 10,
+       /* Additional Colorimetry extension added as part of CTA 861.G */
+       DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65     = 11,
+       DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER = 12,
+       /* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */
+       DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED     = 13,
+       DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT     = 14,
+       DRM_MODE_COLORIMETRY_BT601_YCC          = 15,
+       /* not a valid value; merely used for counting */
+       DRM_MODE_COLORIMETRY_COUNT
+ };
  
  /**
   * enum drm_bus_flags - bus_flags info for &drm_display_info
@@@ -658,14 -723,6 +728,14 @@@ struct drm_display_info 
         */
        bool is_hdmi;
  
 +      /**
 +       * @has_audio: True if the sink supports audio.
 +       *
 +       * This field shall be used instead of calling
 +       * drm_detect_monitor_audio() when possible.
 +       */
 +      bool has_audio;
 +
        /**
         * @has_hdmi_infoframe: Does the sink support the HDMI infoframe?
         */
@@@ -914,7 -971,7 +984,7 @@@ struct drm_connector_state 
         * colorspace change on Sink. This is most commonly used to switch
         * to wider color gamuts like BT2020.
         */
-       u32 colorspace;
+       enum drm_colorspace colorspace;
  
        /**
         * @writeback_job: Writeback job for writeback connectors
@@@ -1938,8 -1995,10 +2008,10 @@@ int drm_connector_attach_hdr_output_met
  bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state,
                                             struct drm_connector_state *new_state);
  int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
- int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector);
- int drm_mode_create_dp_colorspace_property(struct drm_connector *connector);
+ int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector,
+                                            u32 supported_colorspaces);
+ int drm_mode_create_dp_colorspace_property(struct drm_connector *connector,
+                                          u32 supported_colorspaces);
  int drm_mode_create_content_type_property(struct drm_device *dev);
  int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
  
@@@ -2022,6 -2081,7 +2094,7 @@@ void drm_connector_list_iter_end(struc
  
  bool drm_connector_has_possible_encoder(struct drm_connector *connector,
                                        struct drm_encoder *encoder);
+ const char *drm_get_colorspace_name(enum drm_colorspace colorspace);
  
  /**
   * drm_for_each_connector_iter - connector_list iterator macro
index 8ce14f9d202a2dd8123c0f85631e8d3eba6973d8,23bd8be6d4f852f92057b88d617bf65863945b25..30a347e5aa114921cdb548d3f69df73bcdf71f7e
@@@ -61,18 -61,20 +61,20 @@@ struct ttm_pool_type 
   * struct ttm_pool - Pool for all caching and orders
   *
   * @dev: the device we allocate pages for
+  * @nid: which numa node to use
   * @use_dma_alloc: if coherent DMA allocations should be used
   * @use_dma32: if GFP_DMA32 should be used
   * @caching: pools for each caching/order
   */
  struct ttm_pool {
        struct device *dev;
+       int nid;
  
        bool use_dma_alloc;
        bool use_dma32;
  
        struct {
 -              struct ttm_pool_type orders[MAX_ORDER];
 +              struct ttm_pool_type orders[MAX_ORDER + 1];
        } caching[TTM_NUM_CACHING_TYPES];
  };
  
@@@ -81,7 -83,7 +83,7 @@@ int ttm_pool_alloc(struct ttm_pool *poo
  void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
  
  void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
-                  bool use_dma_alloc, bool use_dma32);
+                  int nid, bool use_dma_alloc, bool use_dma32);
  void ttm_pool_fini(struct ttm_pool *pool);
  
  int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
diff --combined include/linux/pci_ids.h
index 95f33dadb2be2549ef2404649587a1ed22da57d0,0fbfbda3dc269e8efd712ea84f2ed54a4f2c8b08..f368352b3e0fdb8b2a3ec2d36d757569ffff682c
  #define PCI_CLASS_SP_DPIO             0x1100
  #define PCI_CLASS_SP_OTHER            0x1180
  
+ #define PCI_BASE_CLASS_ACCELERATOR    0x12
+ #define PCI_CLASS_ACCELERATOR_PROCESSING      0x1200
  #define PCI_CLASS_OTHERS              0xff
  
  /* Vendors and devices.  Sort key: vendor first, device next. */
  #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
  #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
  #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
 +#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
  #define PCI_DEVICE_ID_AMD_CNB17H_F3   0x1703
  #define PCI_DEVICE_ID_AMD_LANCE               0x2000
  #define PCI_DEVICE_ID_AMD_LANCE_HOME  0x2001
This page took 0.262889 seconds and 4 git commands to generate.