Merge tag 'amd-drm-next-5.13-2021-03-23' of https://gitlab.freedesktop.org/agd5f...
author Daniel Vetter <daniel.vetter@ffwll.ch>
Fri, 26 Mar 2021 14:52:01 +0000 (15:52 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Fri, 26 Mar 2021 14:53:21 +0000 (15:53 +0100)
amd-drm-next-5.13-2021-03-23:

amdgpu:
- Debugfs cleanup
- Various cleanups and spelling fixes
- Flexible array cleanups
- Initial AMD Freesync HDMI
- Display fixes
- 10bpc dithering improvements
- Display ASSR support
- Clean up and unify powerplay and swsmu interfaces
- Vangogh fixes
- Add SMU gfx busy queues for RV/PCO
- PCIE DPM fixes
- S0ix fixes
- GPU metrics data fixes
- DCN secure display support
- Backlight type override
- Add initial support for Aldebaran
- RAS fixes
- Prime fixes for A+A systems
- Reset fixes
- Initial resource cursor support
- Drop legacy IO BAR requirements
- Various power fixes

amdkfd:
- MMU notifier fixes
- APU fixes

radeon:
- Debugfs cleanups
- Flexible array cleanups

UAPI:
- amdgpu: Add a new INFO ioctl interface to query video capabilities
  rather than hardcoding them in userspace.  This allows us to provide
  fine-grained ASIC capabilities (e.g., if a particular part is
  bandwidth limited, we can limit the capabilities); see the usage
  sketch after this list.  Proposed userspace:
  https://gitlab.freedesktop.org/leoliu/drm/-/commits/info_video_caps
  https://gitlab.freedesktop.org/leoliu/mesa/-/commits/info_video_caps
- amdkfd: bump the driver version.  There was a problem with reporting
  some RAS features on older versions of the driver. Proposed userspace:
  https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/commit/7cdd63475c36bb9f49bb960f90f9a8cdb7e80a21
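
As a rough illustration of the new query, here is a minimal userspace
sketch, assuming the interface lands as proposed above (an
AMDGPU_INFO_VIDEO_CAPS query with DECODE/ENCODE subquery ids and a
struct drm_amdgpu_info_video_caps return buffer in <drm/amdgpu_drm.h>);
names and layout follow the proposed userspace branches and may differ
in the final UAPI:

/*
 * Hypothetical example: query HEVC decode limits via the proposed
 * AMDGPU_INFO_VIDEO_CAPS interface.  Error handling kept minimal.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

int main(void)
{
	struct drm_amdgpu_info_video_caps caps;
	struct drm_amdgpu_info req;
	int fd = open("/dev/dri/renderD128", O_RDWR); /* render node may vary */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.return_pointer = (uint64_t)(uintptr_t)&caps; /* kernel fills this */
	req.return_size = sizeof(caps);
	req.query = AMDGPU_INFO_VIDEO_CAPS;
	req.video_cap.type = AMDGPU_INFO_VIDEO_CAPS_DECODE;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req) == 0) {
		struct drm_amdgpu_info_video_codec_info *hevc =
			&caps.codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC];

		printf("HEVC decode: valid=%u, max %ux%u, level %u\n",
		       hevc->valid, hevc->max_width, hevc->max_height,
		       hevc->max_level);
	}

	close(fd);
	return 0;
}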

Danvet: A bunch of conflicts all over, but it seems to compile ... I
did put the call to dc_allow_idle_optimizations() on a single line
since it looked a bit too jarring to be left alone.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210324040147.1990338-1-alexander.deucher@amd.com
14 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_ttm.c

index 1af2fa1591fd252d2dd9b0b4fd20ddaf3f2e8795,efe6b5ca5185027c031ccc940c52b5d11b0b708f..a037c223c251b7ca57b026534a30a1d4c89c2af3
@@@ -124,6 -124,16 +124,16 @@@ struct amdgpu_mgpu_inf
        uint32_t                        num_gpu;
        uint32_t                        num_dgpu;
        uint32_t                        num_apu;
+       /* delayed reset_func for XGMI configuration if necessary */
+       struct delayed_work             delayed_reset_work;
+       bool                            pending_reset;
+ };
+
+ struct amdgpu_watchdog_timer
+ {
+       bool timeout_fatal_disable;
+       uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
  };
  
  #define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH       256
@@@ -177,7 -187,9 +187,9 @@@ extern int amdgpu_compute_multipipe
  extern int amdgpu_gpu_recovery;
  extern int amdgpu_emu_mode;
  extern uint amdgpu_smu_memory_pool_size;
+ extern int amdgpu_smu_pptable_id;
  extern uint amdgpu_dc_feature_mask;
+ extern uint amdgpu_freesync_vid_mode;
  extern uint amdgpu_dc_debug_mask;
  extern uint amdgpu_dm_abm_level;
  extern int amdgpu_backlight;
@@@ -185,6 -197,7 +197,7 @@@ extern struct amdgpu_mgpu_info mgpu_inf
  extern int amdgpu_ras_enable;
  extern uint amdgpu_ras_mask;
  extern int amdgpu_bad_page_threshold;
+ extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
  extern int amdgpu_async_gfx_ring;
  extern int amdgpu_mcbp;
  extern int amdgpu_discovery;
@@@ -584,6 -597,19 +597,19 @@@ enum amd_reset_method 
        AMD_RESET_METHOD_PCI,
  };
  
+ struct amdgpu_video_codec_info {
+       u32 codec_type;
+       u32 max_width;
+       u32 max_height;
+       u32 max_pixels_per_frame;
+       u32 max_level;
+ };
+
+ struct amdgpu_video_codecs {
+       const u32 codec_count;
+       const struct amdgpu_video_codec_info *codec_array;
+ };
+
  /*
   * ASIC specific functions.
   */
@@@ -628,6 -654,9 +654,9 @@@ struct amdgpu_asic_funcs 
        void (*pre_asic_init)(struct amdgpu_device *adev);
        /* enter/exit umd stable pstate */
        int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter);
+       /* query video codecs */
+       int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
+                                 const struct amdgpu_video_codecs **codecs);
  };
  
  /*
@@@ -792,12 -821,7 +821,7 @@@ struct amdgpu_device 
        bool                            accel_working;
        struct notifier_block           acpi_nb;
        struct amdgpu_i2c_chan          *i2c_bus[AMDGPU_MAX_I2C_BUS];
-       struct amdgpu_debugfs           debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
-       unsigned                        debugfs_count;
- #if defined(CONFIG_DEBUG_FS)
-       struct dentry                   *debugfs_preempt;
-       struct dentry                   *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
- #endif
+       struct debugfs_blob_wrapper     debugfs_vbios_blob;
        struct amdgpu_atif              *atif;
        struct amdgpu_atcs              atcs;
        struct mutex                    srbm_mutex;
        spinlock_t audio_endpt_idx_lock;
        amdgpu_block_rreg_t             audio_endpt_rreg;
        amdgpu_block_wreg_t             audio_endpt_wreg;
-       void __iomem                    *rio_mem;
-       resource_size_t                 rio_mem_size;
        struct amdgpu_doorbell          doorbell;
  
        /* clock/pll info */
        struct amdgpu_irq_src           vupdate_irq;
        struct amdgpu_irq_src           pageflip_irq;
        struct amdgpu_irq_src           hpd_irq;
+       struct amdgpu_irq_src           dmub_trace_irq;
  
        /* rings */
        u64                             fence_context;
  
        int asic_reset_res;
        struct work_struct              xgmi_reset_work;
+       struct list_head                reset_list;
  
        long                            gfx_timeout;
        long                            sdma_timeout;
@@@ -1066,7 -1090,7 +1090,7 @@@ static inline struct drm_device *adev_t
        return &adev->ddev;
  }
  
 -static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
 +static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
  {
        return container_of(bdev, struct amdgpu_device, mman.bdev);
  }
@@@ -1088,9 -1112,6 +1112,6 @@@ void amdgpu_mm_wreg_mmio_rlc(struct amd
  void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
  uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
  
- u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
- void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
  u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
                                u32 pcie_index, u32 pcie_data,
                                u32 reg_addr);
@@@ -1107,6 -1128,15 +1128,15 @@@ void amdgpu_device_indirect_wreg64(stru
  bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
  bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
  
+ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+                                 struct amdgpu_job *job,
+                                 bool *need_full_reset_arg);
+ int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
+                         struct list_head *device_list_handle,
+                         bool *need_full_reset_arg,
+                         bool skip_hw_reset);
  int emu_soc_asic_init(struct amdgpu_device *adev);
  
  /*
        } while (0)
  
  #define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
- #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
- #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
  
  #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
  #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
  #define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
  #define amdgpu_asic_update_umd_stable_pstate(adev, enter) \
        ((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
+ #define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))
  
  #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
  
@@@ -1247,6 -1276,7 +1276,7 @@@ void amdgpu_device_program_register_seq
                                             const u32 array_size);
  
  bool amdgpu_device_supports_atpx(struct drm_device *dev);
+ int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
  bool amdgpu_device_supports_boco(struct drm_device *dev);
  bool amdgpu_device_supports_baco(struct drm_device *dev);
  bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
index 9f6b299cbf74449715c4e3563c9e7fb32c47ee84,ab516a1a54cdd14cea68f9ade3d537100de4b788..e93850f2f3b11c400eea93b0b5da27259681a15e
@@@ -31,6 -31,7 +31,7 @@@
  #include "amdgpu_amdkfd.h"
  #include "amdgpu_dma_buf.h"
  #include <uapi/linux/kfd_ioctl.h>
+ #include "amdgpu_xgmi.h"
  
  /* BO flag to indicate a KFD userptr BO */
  #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@@ -96,7 -97,7 +97,7 @@@ void amdgpu_amdkfd_gpuvm_init_mem_limit
        uint64_t mem;
  
        si_meminfo(&si);
-       mem = si.totalram - si.totalhigh;
+       mem = si.freeram - si.freehigh;
        mem *= si.mem_unit;
  
        spin_lock_init(&kfd_mem_limit.mem_limit_lock);
   */
  #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
  
 +static size_t amdgpu_amdkfd_acc_size(uint64_t size)
 +{
 +      size >>= PAGE_SHIFT;
 +      size *= sizeof(dma_addr_t) + sizeof(void *);
 +
 +      return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
 +              __roundup_pow_of_two(sizeof(struct ttm_tt)) +
 +              PAGE_ALIGN(size);
 +}
 +
  static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
                uint64_t size, u32 domain, bool sg)
  {
        size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
        int ret = 0;
  
 -      acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
 -                                     sizeof(struct amdgpu_bo));
 +      acc_size = amdgpu_amdkfd_acc_size(size);
  
        vram_needed = 0;
        if (domain == AMDGPU_GEM_DOMAIN_GTT) {
@@@ -184,7 -176,8 +185,7 @@@ static void unreserve_mem_limit(struct 
  {
        size_t acc_size;
  
 -      acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
 -                                     sizeof(struct amdgpu_bo));
 +      acc_size = amdgpu_amdkfd_acc_size(size);
  
        spin_lock(&kfd_mem_limit.mem_limit_lock);
        if (domain == AMDGPU_GEM_DOMAIN_GTT) {
@@@ -412,7 -405,10 +413,10 @@@ static uint64_t get_pte_flags(struct am
  {
        struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
        bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
+       bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
        uint32_t mapping_flags;
+       uint64_t pte_flags;
+       bool snoop = false;
  
        mapping_flags = AMDGPU_VM_PAGE_READABLE;
        if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
                                AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
                }
                break;
+       case CHIP_ALDEBARAN:
+               if (coherent && uncached) {
+                       if (adev->gmc.xgmi.connected_to_cpu ||
+                               !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
+                               snoop = true;
+                       mapping_flags |= AMDGPU_VM_MTYPE_UC;
+               } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+                       if (bo_adev == adev) {
+                               mapping_flags |= AMDGPU_VM_MTYPE_RW;
+                               if (adev->gmc.xgmi.connected_to_cpu)
+                                       snoop = true;
+                       } else {
+                               mapping_flags |= AMDGPU_VM_MTYPE_NC;
+                               if (amdgpu_xgmi_same_hive(adev, bo_adev))
+                                       snoop = true;
+                       }
+               } else {
+                       snoop = true;
+                       if (adev->gmc.xgmi.connected_to_cpu)
+                               /* system memory uses NC on A+A */
+                               mapping_flags |= AMDGPU_VM_MTYPE_NC;
+                       else
+                               mapping_flags |= coherent ?
+                                       AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+               }
+               break;
        default:
                mapping_flags |= coherent ?
                        AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
        }
  
-       return amdgpu_gem_va_map_flags(adev, mapping_flags);
+       pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
+       pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
+       return pte_flags;
  }
  
  /* add_bo_to_vm - Add a BO to a VM
index cfb1a9a044772c828e80b8b80916330033ed61a2,a5e98d0142d70e276ae4a49d3f62e49b5323fd3c..27b19503773b93a4cc7a2019d9eff80150212ea5
@@@ -97,6 -97,10 +97,10 @@@ static bool igp_read_bios_from_vram(str
                if (amdgpu_device_need_post(adev))
                        return false;
  
+       /* FB BAR not enabled */
+       if (pci_resource_len(adev->pdev, 0) == 0)
+               return false;
        adev->bios = NULL;
        vram_base = pci_resource_start(adev->pdev, 0);
        bios = ioremap_wc(vram_base, size);
@@@ -291,7 -295,7 +295,7 @@@ static bool amdgpu_atrm_get_bios(struc
                        continue;
  
                status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
 -              if (!ACPI_FAILURE(status)) {
 +              if (ACPI_SUCCESS(status)) {
                        found = true;
                        break;
                }
                                continue;
  
                        status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
 -                      if (!ACPI_FAILURE(status)) {
 +                      if (ACPI_SUCCESS(status)) {
                                found = true;
                                break;
                        }
  
        adev->bios = kmalloc(size, GFP_KERNEL);
        if (!adev->bios) {
-               DRM_ERROR("Unable to allocate bios\n");
+               dev_err(adev->dev, "Unable to allocate bios\n");
                return false;
        }
  
@@@ -364,7 -368,7 +368,7 @@@ static bool amdgpu_acpi_vfct_bios(struc
                return false;
        tbl_size = hdr->length;
        if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
-               DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+               dev_info(adev->dev, "ACPI VFCT table present but broken (too short #1),skipping\n");
                return false;
        }
  
  
                offset += sizeof(VFCT_IMAGE_HEADER);
                if (offset > tbl_size) {
-                       DRM_ERROR("ACPI VFCT image header truncated\n");
+                       dev_info(adev->dev, "ACPI VFCT image header truncated,skipping\n");
                        return false;
                }
  
                offset += vhdr->ImageLength;
                if (offset > tbl_size) {
-                       DRM_ERROR("ACPI VFCT image truncated\n");
+                       dev_info(adev->dev, "ACPI VFCT image truncated,skipping\n");
                        return false;
                }
  
                }
        }
  
-       DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+       dev_info(adev->dev, "ACPI VFCT table present but broken (too short #2),skipping\n");
        return false;
  }
  #else
@@@ -453,7 -457,7 +457,7 @@@ bool amdgpu_get_bios(struct amdgpu_devi
                goto success;
        }
  
-       DRM_ERROR("Unable to locate a BIOS ROM\n");
+       dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
        return false;
  
  success:
index 8e0a5650d3838502c2701cf924f001f897cea992,6d16f58ac91ea3c3e365978e70fe0f4c1ac29a47..1a4809d9e85022681771065da141736069a12c7b
@@@ -36,8 -36,6 +36,6 @@@
  #include <linux/firmware.h>
  #include <linux/pm_runtime.h>
  
- #include <drm/drm_debugfs.h>
  #include "amdgpu.h"
  #include "amdgpu_trace.h"
  
@@@ -487,7 -485,7 +485,7 @@@ int amdgpu_fence_driver_init_ring(struc
  
                r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
                                   num_hw_submission, amdgpu_job_hang_limit,
 -                                 timeout, ring->name);
 +                                 timeout, NULL, ring->name);
                if (r) {
                        DRM_ERROR("Failed to create scheduler on ring %s.\n",
                                  ring->name);
@@@ -533,6 -531,8 +531,8 @@@ void amdgpu_fence_driver_fini(struct am
  
                if (!ring || !ring->fence_drv.initialized)
                        continue;
+               if (!ring->no_scheduler)
+                       drm_sched_fini(&ring->sched);
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                if (ring->fence_drv.irq_src)
                        amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);
-               if (!ring->no_scheduler)
-                       drm_sched_fini(&ring->sched);
                del_timer_sync(&ring->fence_drv.fallback_timer);
                for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
                        dma_fence_put(ring->fence_drv.fences[j]);
@@@ -697,11 -696,9 +696,9 @@@ static const struct dma_fence_ops amdgp
   * Fence debugfs
   */
  #if defined(CONFIG_DEBUG_FS)
- static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
+ static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
  {
-       struct drm_info_node *node = (struct drm_info_node *)m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct amdgpu_device *adev = drm_to_adev(dev);
+       struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
        int i;
  
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
   *
   * Manually trigger a gpu reset at the next fence wait.
   */
- static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
+ static int gpu_recover_get(void *data, u64 *val)
  {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct amdgpu_device *adev = drm_to_adev(dev);
+       struct amdgpu_device *adev = (struct amdgpu_device *)data;
+       struct drm_device *dev = adev_to_drm(adev);
        int r;
  
        r = pm_runtime_get_sync(dev->dev);
                return 0;
        }
  
-       seq_printf(m, "gpu recover\n");
-       amdgpu_device_gpu_recover(adev, NULL);
+       *val = amdgpu_device_gpu_recover(adev, NULL);
  
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
        return 0;
  }
  
- static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
-       {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
-       {"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
- };
+ DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
+ DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
+                        "%lld\n");
  
- static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
-       {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
- };
  #endif
  
- int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
+ void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
  {
  #if defined(CONFIG_DEBUG_FS)
-       if (amdgpu_sriov_vf(adev))
-               return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov,
-                                               ARRAY_SIZE(amdgpu_debugfs_fence_list_sriov));
-       return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
-                                       ARRAY_SIZE(amdgpu_debugfs_fence_list));
- #else
-       return 0;
+       struct drm_minor *minor = adev_to_drm(adev)->primary;
+       struct dentry *root = minor->debugfs_root;
+       debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
+                           &amdgpu_debugfs_fence_info_fops);
+       if (!amdgpu_sriov_vf(adev))
+               debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
+                                   &amdgpu_debugfs_gpu_recover_fops);
  #endif
  }
  
index fde2d899b2c491e381de34ac0af78d7460819c6b,f1ede4b43d077979b079a518d655c72ec3b8a189..5807cad833d370fb8838c6d07e34cf400e4ef34b
@@@ -71,7 -71,7 +71,7 @@@
   */
  static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
  {
 -      struct page *dummy_page = ttm_bo_glob.dummy_read_page;
 +      struct page *dummy_page = ttm_glob.dummy_read_page;
  
        if (adev->dummy_page_addr)
                return 0;
@@@ -202,6 -202,7 +202,7 @@@ void amdgpu_gart_table_vram_free(struc
                return;
        }
        amdgpu_bo_unref(&adev->gart.bo);
+       adev->gart.ptr = NULL;
  }
  
  /*
@@@ -236,9 -237,6 +237,6 @@@ int amdgpu_gart_unbind(struct amdgpu_de
        t = offset / AMDGPU_GPU_PAGE_SIZE;
        p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
        for (i = 0; i < pages; i++, p++) {
- #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-               adev->gart.pages[p] = NULL;
- #endif
                page_base = adev->dummy_page_addr;
                if (!adev->gart.ptr)
                        continue;
@@@ -312,9 -310,6 +310,6 @@@ int amdgpu_gart_bind(struct amdgpu_devi
                     int pages, struct page **pagelist, dma_addr_t *dma_addr,
                     uint64_t flags)
  {
- #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-       unsigned t,p;
- #endif
        int r, i;
  
        if (!adev->gart.ready) {
                return -EINVAL;
        }
  
- #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-       t = offset / AMDGPU_GPU_PAGE_SIZE;
-       p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
-       for (i = 0; i < pages; i++, p++)
-               adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
- #endif
        if (!adev->gart.ptr)
                return 0;
  
@@@ -373,14 -361,6 +361,6 @@@ int amdgpu_gart_init(struct amdgpu_devi
        DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
                 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
  
- #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-       /* Allocate pages table */
-       adev->gart.pages = vzalloc(array_size(sizeof(void *),
-                                             adev->gart.num_cpu_pages));
-       if (adev->gart.pages == NULL)
-               return -ENOMEM;
- #endif
        return 0;
  }
  
   */
  void amdgpu_gart_fini(struct amdgpu_device *adev)
  {
- #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-       vfree(adev->gart.pages);
-       adev->gart.pages = NULL;
- #endif
        amdgpu_gart_dummy_page_fini(adev);
  }
index 984dcf5a475ea5400fa86afaa6e1ece3b213c4d5,de52a99916f8e1ee391f7b9eed50f84b8061226c..ac1bb5089260483c37b4488f6799a84c8d61c99e
@@@ -523,6 -523,7 +523,6 @@@ static int amdgpu_bo_do_create(struct a
        };
        struct amdgpu_bo *bo;
        unsigned long page_align, size = bp->size;
 -      size_t acc_size;
        int r;
  
        /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
  
        *bo_ptr = NULL;
  
 -      acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
 -                                     sizeof(struct amdgpu_bo));
 -
        bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
                bo->tbo.priority = 1;
  
        r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
 -                               &bo->placement, page_align, &ctx, acc_size,
 -                               NULL, bp->resv, &amdgpu_bo_destroy);
 +                               &bo->placement, page_align, &ctx,  NULL,
 +                               bp->resv, &amdgpu_bo_destroy);
        if (unlikely(r != 0))
                return r;
  
@@@ -1061,13 -1065,17 +1061,17 @@@ static const char *amdgpu_vram_names[] 
   */
  int amdgpu_bo_init(struct amdgpu_device *adev)
  {
-       /* reserve PAT memory space to WC for VRAM */
-       arch_io_reserve_memtype_wc(adev->gmc.aper_base,
-                                  adev->gmc.aper_size);
+       /* On A+A platform, VRAM can be mapped as WB */
+       if (!adev->gmc.xgmi.connected_to_cpu) {
+               /* reserve PAT memory space to WC for VRAM */
+               arch_io_reserve_memtype_wc(adev->gmc.aper_base,
+                               adev->gmc.aper_size);
+               /* Add an MTRR for the VRAM */
+               adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
+                               adev->gmc.aper_size);
+       }
  
-       /* Add an MTRR for the VRAM */
-       adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
-                                             adev->gmc.aper_size);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                 adev->gmc.mc_vram_size >> 20,
                 (unsigned long long)adev->gmc.aper_size >> 20);
  void amdgpu_bo_fini(struct amdgpu_device *adev)
  {
        amdgpu_ttm_fini(adev);
-       arch_phys_wc_del(adev->gmc.vram_mtrr);
-       arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+       if (!adev->gmc.xgmi.connected_to_cpu) {
+               arch_phys_wc_del(adev->gmc.vram_mtrr);
+               arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+       }
  }
  
  /**
index a785acc09f206b68118b5064ecf00f65a2fc7052,b51d0100532f229b3d8ace0c50667cc9a6b8b889..1c6131489a851ae8d4447d4ffa70b5093de73bc4
@@@ -47,7 -47,6 +47,6 @@@
  #include <drm/ttm/ttm_bo_driver.h>
  #include <drm/ttm/ttm_placement.h>
  
- #include <drm/drm_debugfs.h>
  #include <drm/amdgpu_drm.h>
  
  #include "amdgpu.h"
  #include "amdgpu_sdma.h"
  #include "amdgpu_ras.h"
  #include "amdgpu_atomfirmware.h"
+ #include "amdgpu_res_cursor.h"
  #include "bif/bif_4_1_d.h"
  
  #define AMDGPU_TTM_VRAM_MAX_DW_READ   (size_t)128
  
 -static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
 +static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
                                   struct ttm_tt *ttm,
                                   struct ttm_resource *bo_mem);
 -static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
 +static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
                                      struct ttm_tt *ttm);
  
  static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
@@@ -178,55 -178,12 +178,12 @@@ static int amdgpu_verify_access(struct 
                                          filp->private_data);
  }
  
- /**
-  * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
-  *
-  * @bo: The bo to assign the memory to.
-  * @mm_node: Memory manager node for drm allocator.
-  * @mem: The region where the bo resides.
-  *
-  */
- static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
-                                   struct drm_mm_node *mm_node,
-                                   struct ttm_resource *mem)
- {
-       uint64_t addr = 0;
-       if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
-               addr = mm_node->start << PAGE_SHIFT;
-               addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
-                                               mem->mem_type);
-       }
-       return addr;
- }
- /**
-  * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
-  * @offset. It also modifies the offset to be within the drm_mm_node returned
-  *
-  * @mem: The region where the bo resides.
-  * @offset: The offset that drm_mm_node is used for finding.
-  *
-  */
- static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
-                                              uint64_t *offset)
- {
-       struct drm_mm_node *mm_node = mem->mm_node;
-       while (*offset >= (mm_node->size << PAGE_SHIFT)) {
-               *offset -= (mm_node->size << PAGE_SHIFT);
-               ++mm_node;
-       }
-       return mm_node;
- }
  /**
   * amdgpu_ttm_map_buffer - Map memory into the GART windows
   * @bo: buffer object to map
   * @mem: memory object to map
-  * @mm_node: drm_mm node object to map
+  * @mm_cur: range to map
   * @num_pages: number of pages to map
-  * @offset: offset into @mm_node where to start
   * @window: which GART window to use
   * @ring: DMA ring to use for the copy
   * @tmz: if we should setup a TMZ enabled mapping
   */
  static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
                                 struct ttm_resource *mem,
-                                struct drm_mm_node *mm_node,
-                                unsigned num_pages, uint64_t offset,
-                                unsigned window, struct amdgpu_ring *ring,
-                                bool tmz, uint64_t *addr)
+                                struct amdgpu_res_cursor *mm_cur,
+                                unsigned num_pages, unsigned window,
+                                struct amdgpu_ring *ring, bool tmz,
+                                uint64_t *addr)
  {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
  
        /* Map only what can't be accessed directly */
        if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
-               *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
+               *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
+                       mm_cur->start;
                return 0;
        }
  
        *addr = adev->gmc.gart_start;
        *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
                AMDGPU_GPU_PAGE_SIZE;
-       *addr += offset & ~PAGE_MASK;
+       *addr += mm_cur->start & ~PAGE_MASK;
  
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
        num_bytes = num_pages * 8;
        cpu_addr = &job->ibs[0].ptr[num_dw];
  
        if (mem->mem_type == TTM_PL_TT) {
-               dma_addr_t *dma_address;
+               dma_addr_t *dma_addr;
  
-               dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
-               r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+               dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
+               r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
                                    cpu_addr);
                if (r)
                        goto error_free;
        } else {
                dma_addr_t dma_address;
  
-               dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+               dma_address = mm_cur->start;
                dma_address += adev->vm_manager.vram_base_offset;
  
                for (i = 0; i < num_pages; ++i) {
@@@ -354,9 -312,8 +312,8 @@@ int amdgpu_ttm_copy_mem_to_mem(struct a
        const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
                                        AMDGPU_GPU_PAGE_SIZE);
  
-       uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-       struct drm_mm_node *src_mm, *dst_mm;
+       struct amdgpu_res_cursor src_mm, dst_mm;
        struct dma_fence *fence = NULL;
        int r = 0;
  
                return -EINVAL;
        }
  
-       src_offset = src->offset;
-       if (src->mem->mm_node) {
-               src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
-               src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
-       } else {
-               src_mm = NULL;
-               src_node_size = ULLONG_MAX;
-       }
-       dst_offset = dst->offset;
-       if (dst->mem->mm_node) {
-               dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
-               dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
-       } else {
-               dst_mm = NULL;
-               dst_node_size = ULLONG_MAX;
-       }
+       amdgpu_res_first(src->mem, src->offset, size, &src_mm);
+       amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
  
        mutex_lock(&adev->mman.gtt_window_lock);
-       while (size) {
-               uint32_t src_page_offset = src_offset & ~PAGE_MASK;
-               uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
+       while (src_mm.remaining) {
+               uint32_t src_page_offset = src_mm.start & ~PAGE_MASK;
+               uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK;
                struct dma_fence *next;
                uint32_t cur_size;
                uint64_t from, to;
                 * begins at an offset, then adjust the size accordingly
                 */
                cur_size = max(src_page_offset, dst_page_offset);
-               cur_size = min(min3(src_node_size, dst_node_size, size),
+               cur_size = min(min3(src_mm.size, dst_mm.size, size),
                               (uint64_t)(GTT_MAX_BYTES - cur_size));
  
                /* Map src to window 0 and dst to window 1. */
-               r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
+               r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
                                          PFN_UP(cur_size + src_page_offset),
-                                         src_offset, 0, ring, tmz, &from);
+                                         0, ring, tmz, &from);
                if (r)
                        goto error;
  
-               r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
+               r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
                                          PFN_UP(cur_size + dst_page_offset),
-                                         dst_offset, 1, ring, tmz, &to);
+                                         1, ring, tmz, &to);
                if (r)
                        goto error;
  
                dma_fence_put(fence);
                fence = next;
  
-               size -= cur_size;
-               if (!size)
-                       break;
-               src_node_size -= cur_size;
-               if (!src_node_size) {
-                       ++src_mm;
-                       src_node_size = src_mm->size << PAGE_SHIFT;
-                       src_offset = 0;
-               } else {
-                       src_offset += cur_size;
-               }
-               dst_node_size -= cur_size;
-               if (!dst_node_size) {
-                       ++dst_mm;
-                       dst_node_size = dst_mm->size << PAGE_SHIFT;
-                       dst_offset = 0;
-               } else {
-                       dst_offset += cur_size;
-               }
+               amdgpu_res_next(&src_mm, cur_size);
+               amdgpu_res_next(&dst_mm, cur_size);
        }
  error:
        mutex_unlock(&adev->mman.gtt_window_lock);
@@@ -519,7 -441,8 +441,8 @@@ error
  static bool amdgpu_mem_visible(struct amdgpu_device *adev,
                               struct ttm_resource *mem)
  {
-       struct drm_mm_node *nodes = mem->mm_node;
+       uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+       struct amdgpu_res_cursor cursor;
  
        if (mem->mem_type == TTM_PL_SYSTEM ||
            mem->mem_type == TTM_PL_TT)
        if (mem->mem_type != TTM_PL_VRAM)
                return false;
  
+       amdgpu_res_first(mem, 0, mem_size, &cursor);
        /* ttm_resource_ioremap only supports contiguous memory */
-       if (nodes->size != mem->num_pages)
+       if (cursor.size != mem_size)
                return false;
  
-       return ((nodes->start + nodes->size) << PAGE_SHIFT)
-               <= adev->gmc.visible_vram_size;
+       return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
  }
  
  /*
@@@ -646,7 -570,7 +570,7 @@@ out
   *
   * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
   */
 -static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
 +static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
  {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct drm_mm_node *mm_node = mem->mm_node;
  
                mem->bus.offset += adev->gmc.aper_base;
                mem->bus.is_iomem = true;
-               mem->bus.caching = ttm_write_combined;
+               if (adev->gmc.xgmi.connected_to_cpu)
+                       mem->bus.caching = ttm_cached;
+               else
+                       mem->bus.caching = ttm_write_combined;
                break;
        default:
                return -EINVAL;
@@@ -686,12 -613,10 +613,10 @@@ static unsigned long amdgpu_ttm_io_mem_
                                           unsigned long page_offset)
  {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-       uint64_t offset = (page_offset << PAGE_SHIFT);
-       struct drm_mm_node *mm;
+       struct amdgpu_res_cursor cursor;
  
-       mm = amdgpu_find_mm_node(&bo->mem, &offset);
-       offset += adev->gmc.aper_base;
-       return mm->start + (offset >> PAGE_SHIFT);
+       amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+       return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
  }
  
  /**
@@@ -893,7 -818,7 +818,7 @@@ void amdgpu_ttm_tt_set_user_pages(struc
   *
   * Called by amdgpu_ttm_backend_bind()
   **/
 -static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
 +static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
                                     struct ttm_tt *ttm)
  {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@@ -931,7 -856,7 +856,7 @@@ release_sg
  /*
   * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
   */
 -static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
 +static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
                                        struct ttm_tt *ttm)
  {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@@ -1015,7 -940,7 +940,7 @@@ gart_bind_fail
   * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
   * This handles binding GTT memory to the device address space.
   */
 -static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
 +static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
                                   struct ttm_tt *ttm,
                                   struct ttm_resource *bo_mem)
  {
@@@ -1155,7 -1080,7 +1080,7 @@@ int amdgpu_ttm_recover_gart(struct ttm_
   * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
   * ttm_tt_destroy().
   */
 -static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
 +static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
                                      struct ttm_tt *ttm)
  {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        gtt->bound = false;
  }
  
 -static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
 +static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
                                       struct ttm_tt *ttm)
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@@ -1234,7 -1159,7 +1159,7 @@@ static struct ttm_tt *amdgpu_ttm_tt_cre
   * Map the pages of a ttm_tt object to an address space visible
   * to the underlying device.
   */
 -static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
 +static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
                                  struct ttm_tt *ttm,
                                  struct ttm_operation_ctx *ctx)
  {
   * Unmaps pages of a ttm_tt object from the device address space and
   * unpopulates the page array backing it.
   */
 -static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
 +static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
                                     struct ttm_tt *ttm)
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@@ -1430,6 -1355,10 +1355,10 @@@ uint64_t amdgpu_ttm_tt_pde_flags(struc
                        flags |= AMDGPU_PTE_SNOOPED;
        }
  
+       if (mem && mem->mem_type == TTM_PL_VRAM &&
+                       mem->bus.caching == ttm_cached)
+               flags |= AMDGPU_PTE_SNOOPED;
        return flags;
  }
  
@@@ -1469,7 -1398,7 +1398,7 @@@ static bool amdgpu_ttm_bo_eviction_valu
                                            const struct ttm_place *place)
  {
        unsigned long num_pages = bo->mem.num_pages;
-       struct drm_mm_node *node = bo->mem.mm_node;
+       struct amdgpu_res_cursor cursor;
        struct dma_resv_list *flist;
        struct dma_fence *f;
        int i;
  
        case TTM_PL_VRAM:
                /* Check each drm MM node individually */
-               while (num_pages) {
-                       if (place->fpfn < (node->start + node->size) &&
-                           !(place->lpfn && place->lpfn <= node->start))
+               amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
+                                &cursor);
+               while (cursor.remaining) {
+                       if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
+                           && !(place->lpfn &&
+                                place->lpfn <= PFN_DOWN(cursor.start)))
                                return true;
  
-                       num_pages -= node->size;
-                       ++node;
+                       amdgpu_res_next(&cursor, cursor.size);
                }
                return false;
  
   * access for debugging purposes.
   */
  static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
-                                   unsigned long offset,
-                                   void *buf, int len, int write)
+                                   unsigned long offset, void *buf, int len,
+                                   int write)
  {
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
-       struct drm_mm_node *nodes;
+       struct amdgpu_res_cursor cursor;
+       unsigned long flags;
        uint32_t value = 0;
        int ret = 0;
-       uint64_t pos;
-       unsigned long flags;
  
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return -EIO;
  
-       pos = offset;
-       nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
-       pos += (nodes->start << PAGE_SHIFT);
-       while (len && pos < adev->gmc.mc_vram_size) {
-               uint64_t aligned_pos = pos & ~(uint64_t)3;
-               uint64_t bytes = 4 - (pos & 3);
-               uint32_t shift = (pos & 3) * 8;
+       amdgpu_res_first(&bo->mem, offset, len, &cursor);
+       while (cursor.remaining) {
+               uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
+               uint64_t bytes = 4 - (cursor.start & 3);
+               uint32_t shift = (cursor.start & 3) * 8;
                uint32_t mask = 0xffffffff << shift;
  
-               if (len < bytes) {
-                       mask &= 0xffffffff >> (bytes - len) * 8;
-                       bytes = len;
+               if (cursor.size < bytes) {
+                       mask &= 0xffffffff >> (bytes - cursor.size) * 8;
+                       bytes = cursor.size;
                }
  
                if (mask != 0xffffffff) {
                        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                        WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
                        WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
-                       if (!write || mask != 0xffffffff)
-                               value = RREG32_NO_KIQ(mmMM_DATA);
+                       value = RREG32_NO_KIQ(mmMM_DATA);
                        if (write) {
                                value &= ~mask;
                                value |= (*(uint32_t *)buf << shift) & mask;
                                memcpy(buf, &value, bytes);
                        }
                } else {
-                       bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
-                       bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
-                       amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
-                                                 bytes, write);
+                       bytes = cursor.size & ~0x3ull;
+                       amdgpu_device_vram_access(adev, cursor.start,
+                                                 (uint32_t *)buf, bytes,
+                                                 write);
                }
  
                ret += bytes;
                buf = (uint8_t *)buf + bytes;
-               pos += bytes;
-               len -= bytes;
-               if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
-                       ++nodes;
-                       pos = (nodes->start << PAGE_SHIFT);
-               }
+               amdgpu_res_next(&cursor, bytes);
        }
  
        return ret;
@@@ -1603,7 -1523,7 +1523,7 @@@ amdgpu_bo_delete_mem_notify(struct ttm_
        amdgpu_bo_move_notify(bo, false, NULL);
  }
  
 -static struct ttm_bo_driver amdgpu_bo_driver = {
 +static struct ttm_device_funcs amdgpu_bo_driver = {
        .ttm_tt_create = &amdgpu_ttm_tt_create,
        .ttm_tt_populate = &amdgpu_ttm_tt_populate,
        .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
@@@ -1696,7 -1616,7 +1616,7 @@@ static void amdgpu_ttm_training_data_bl
                (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
        ctx->train_data_size =
                GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
-       
        DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
                        ctx->train_data_size,
                        ctx->p2c_train_data_offset,
@@@ -1785,7 -1705,7 +1705,7 @@@ int amdgpu_ttm_init(struct amdgpu_devic
        mutex_init(&adev->mman.gtt_window_lock);
  
        /* No others user of address space so set it to 0 */
 -      r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
 +      r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
                               adev_to_drm(adev)->anon_inode->i_mapping,
                               adev_to_drm(adev)->vma_offset_manager,
                               adev->need_swiotlb,
        /* Change the size here instead of the init above so only lpfn is affected */
        amdgpu_ttm_set_buffer_funcs_status(adev, false);
  #ifdef CONFIG_64BIT
-       adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
-                                               adev->gmc.visible_vram_size);
+ #ifdef CONFIG_X86
+       if (adev->gmc.xgmi.connected_to_cpu)
+               adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
+                               adev->gmc.visible_vram_size);
+       else
+ #endif
+               adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
+                               adev->gmc.visible_vram_size);
  #endif
  
        /*
@@@ -1926,7 -1853,7 +1853,7 @@@ void amdgpu_ttm_fini(struct amdgpu_devi
        ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
        ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
        ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
 -      ttm_bo_device_release(&adev->mman.bdev);
 +      ttm_device_fini(&adev->mman.bdev);
        adev->mman.initialized = false;
        DRM_INFO("amdgpu: ttm finalized\n");
  }
@@@ -2002,7 -1929,7 +1929,7 @@@ unlock
        return ret;
  }
  
 -static struct vm_operations_struct amdgpu_ttm_vm_ops = {
 +static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
        .fault = amdgpu_ttm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
@@@ -2053,7 -1980,8 +1980,8 @@@ int amdgpu_copy_buffer(struct amdgpu_ri
                return r;
  
        if (vm_needs_flush) {
-               job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
+               job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
+                                       adev->gmc.pdb0_bo : adev->gart.bo);
                job->vm_needs_flush = true;
        }
        if (resv) {
@@@ -2104,9 -2032,9 +2032,9 @@@ int amdgpu_fill_buffer(struct amdgpu_b
        uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
  
-       struct drm_mm_node *mm_node;
-       unsigned long num_pages;
+       struct amdgpu_res_cursor cursor;
        unsigned int num_loops, num_dw;
+       uint64_t num_bytes;
  
        struct amdgpu_job *job;
        int r;
                        return r;
        }
  
-       num_pages = bo->tbo.mem.num_pages;
-       mm_node = bo->tbo.mem.mm_node;
+       num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
        num_loops = 0;
-       while (num_pages) {
-               uint64_t byte_count = mm_node->size << PAGE_SHIFT;
  
-               num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
-               num_pages -= mm_node->size;
-               ++mm_node;
+       amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+       while (cursor.remaining) {
+               num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
+               amdgpu_res_next(&cursor, cursor.size);
        }
        num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
  
                }
        }
  
-       num_pages = bo->tbo.mem.num_pages;
-       mm_node = bo->tbo.mem.mm_node;
-       while (num_pages) {
-               uint64_t byte_count = mm_node->size << PAGE_SHIFT;
-               uint64_t dst_addr;
-               dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
-               while (byte_count) {
-                       uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
-                                                          max_bytes);
+       amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+       while (cursor.remaining) {
+               uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
+               uint64_t dst_addr = cursor.start;
  
-                       amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
-                                               dst_addr, cur_size_in_bytes);
+               dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+               amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
+                                       cur_size);
  
-                       dst_addr += cur_size_in_bytes;
-                       byte_count -= cur_size_in_bytes;
-               }
-               num_pages -= mm_node->size;
-               ++mm_node;
+               amdgpu_res_next(&cursor, cur_size);
        }
  
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@@@ -2191,36 -2106,74 +2106,74 @@@ error_free
  
  #if defined(CONFIG_DEBUG_FS)
  
- static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
+ static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
  {
-       struct drm_info_node *node = (struct drm_info_node *)m->private;
-       unsigned ttm_pl = (uintptr_t)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
-       struct amdgpu_device *adev = drm_to_adev(dev);
-       struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
+       struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+       struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+                                                           TTM_PL_VRAM);
        struct drm_printer p = drm_seq_file_printer(m);
  
        man->func->debug(man, &p);
        return 0;
  }
  
- static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
+ static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
  {
-       struct drm_info_node *node = (struct drm_info_node *)m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct amdgpu_device *adev = drm_to_adev(dev);
+       struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
  
        return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
  }
  
- static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
-       {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
-       {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
-       {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
-       {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
-       {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
-       {"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
- };
+ static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
+ {
+       struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+       struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+                                                           TTM_PL_TT);
+       struct drm_printer p = drm_seq_file_printer(m);
+       man->func->debug(man, &p);
+       return 0;
+ }
+
+ static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
+ {
+       struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+       struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+                                                           AMDGPU_PL_GDS);
+       struct drm_printer p = drm_seq_file_printer(m);
+       man->func->debug(man, &p);
+       return 0;
+ }
+
+ static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
+ {
+       struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+       struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+                                                           AMDGPU_PL_GWS);
+       struct drm_printer p = drm_seq_file_printer(m);
+       man->func->debug(man, &p);
+       return 0;
+ }
+
+ static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
+ {
+       struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+       struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+                                                           AMDGPU_PL_OA);
+       struct drm_printer p = drm_seq_file_printer(m);
+       man->func->debug(man, &p);
+       return 0;
+ }
+
+ DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table);
+ DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table);
+ DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table);
+ DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table);
+ DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table);
+ DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
  
  /*
   * amdgpu_ttm_vram_read - Linear read access to VRAM
@@@ -2308,58 -2261,6 +2261,6 @@@ static const struct file_operations amd
        .llseek = default_llseek,
  };
  
- #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- /*
-  * amdgpu_ttm_gtt_read - Linear read access to GTT memory
-  */
- static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
-                                  size_t size, loff_t *pos)
- {
-       struct amdgpu_device *adev = file_inode(f)->i_private;
-       ssize_t result = 0;
-       int r;
-       while (size) {
-               loff_t p = *pos / PAGE_SIZE;
-               unsigned off = *pos & ~PAGE_MASK;
-               size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
-               struct page *page;
-               void *ptr;
-               if (p >= adev->gart.num_cpu_pages)
-                       return result;
-               page = adev->gart.pages[p];
-               if (page) {
-                       ptr = kmap(page);
-                       ptr += off;
-                       r = copy_to_user(buf, ptr, cur_size);
-                       kunmap(adev->gart.pages[p]);
-               } else
-                       r = clear_user(buf, cur_size);
-               if (r)
-                       return -EFAULT;
-               result += cur_size;
-               buf += cur_size;
-               *pos += cur_size;
-               size -= cur_size;
-       }
-       return result;
- }
- static const struct file_operations amdgpu_ttm_gtt_fops = {
-       .owner = THIS_MODULE,
-       .read = amdgpu_ttm_gtt_read,
-       .llseek = default_llseek
- };
- #endif
  /*
   * amdgpu_iomem_read - Virtual read access to GPU mapped memory
   *
@@@ -2474,46 -2375,29 +2375,29 @@@ static const struct file_operations amd
        .llseek = default_llseek
  };
  
- static const struct {
-       char *name;
-       const struct file_operations *fops;
-       int domain;
- } ttm_debugfs_entries[] = {
-       { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
- #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-       { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
- #endif
-       { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
- };
  #endif
  
- int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+ void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
  {
  #if defined(CONFIG_DEBUG_FS)
-       unsigned count;
        struct drm_minor *minor = adev_to_drm(adev)->primary;
-       struct dentry *ent, *root = minor->debugfs_root;
-       for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
-               ent = debugfs_create_file(
-                               ttm_debugfs_entries[count].name,
-                               S_IFREG | S_IRUGO, root,
-                               adev,
-                               ttm_debugfs_entries[count].fops);
-               if (IS_ERR(ent))
-                       return PTR_ERR(ent);
-               if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
-                       i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
-               else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
-                       i_size_write(ent->d_inode, adev->gmc.gart_size);
-               adev->mman.debugfs_entries[count] = ent;
-       }
-       count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
-       return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
- #else
-       return 0;
+       struct dentry *root = minor->debugfs_root;
+       debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
+                                &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
+       debugfs_create_file("amdgpu_iomem", 0444, root, adev,
+                           &amdgpu_ttm_iomem_fops);
+       debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
+                           &amdgpu_mm_vram_table_fops);
+       debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
+                           &amdgpu_mm_tt_table_fops);
+       debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
+                           &amdgpu_mm_gds_table_fops);
+       debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
+                           &amdgpu_mm_gws_table_fops);
+       debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
+                           &amdgpu_mm_oa_table_fops);
+       debugfs_create_file("ttm_page_pool", 0444, root, adev,
+                           &amdgpu_ttm_page_pool_fops);
  #endif
  }
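
A minimal userspace sketch of how the new nodes are consumed; the debugfs
mount point and the DRI minor index are assumptions about the target system:

#include <stdio.h>

int main(void)
{
	/* hypothetical path; pick the right minor under /sys/kernel/debug/dri */
	FILE *f = fopen("/sys/kernel/debug/dri/0/amdgpu_vram_mm", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* dumps the VRAM manager state */
	fclose(f);
	return 0;
}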
index 7189f837010839e1ca9bbc0418866201377e42a1,e62992797557a00169ac20cec85d213ce58f78a3..dec0db8b0b13c14e6bfaf7eed23afdc5b4093923
@@@ -60,14 -60,10 +60,10 @@@ struct amdgpu_gtt_mgr 
  };
  
  struct amdgpu_mman {
 -      struct ttm_bo_device            bdev;
 +      struct ttm_device               bdev;
        bool                            initialized;
        void __iomem                    *aper_base_kaddr;
  
- #if defined(CONFIG_DEBUG_FS)
-       struct dentry                   *debugfs_entries[8];
- #endif
        /* buffer handling */
        const struct amdgpu_buffer_funcs        *buffer_funcs;
        struct amdgpu_ring                      *buffer_funcs_ring;
@@@ -119,8 -115,7 +115,7 @@@ int amdgpu_vram_mgr_alloc_sgt(struct am
                              struct device *dev,
                              enum dma_data_direction dir,
                              struct sg_table **sgt);
- void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
-                             struct device *dev,
+ void amdgpu_vram_mgr_free_sgt(struct device *dev,
                              enum dma_data_direction dir,
                              struct sg_table *sgt);
  uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man);
@@@ -186,6 -181,6 +181,6 @@@ uint64_t amdgpu_ttm_tt_pde_flags(struc
  uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
                                 struct ttm_resource *mem);
  
- int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
+ void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
  
  #endif
index 9d19078246c86f3dacb1e9537601a950af24cd8b,2b0f1445be656e484fbeae3015afc2cabdfef607..e8cafc97eada2aa0650e646b35e2d236b5f91cb3
@@@ -92,13 -92,13 +92,13 @@@ struct amdgpu_prt_cb 
  static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
  {
        mutex_lock(&vm->eviction_lock);
-       vm->saved_flags = memalloc_nofs_save();
+       vm->saved_flags = memalloc_noreclaim_save();
  }
  
  static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
  {
        if (mutex_trylock(&vm->eviction_lock)) {
-               vm->saved_flags = memalloc_nofs_save();
+               vm->saved_flags = memalloc_noreclaim_save();
                return 1;
        }
        return 0;
  
  static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
  {
-       memalloc_nofs_restore(vm->saved_flags);
+       memalloc_noreclaim_restore(vm->saved_flags);
        mutex_unlock(&vm->eviction_lock);
  }
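
The switch from the nofs to the noreclaim scope widens the guarantee: with
PF_MEMALLOC set, allocations made while the eviction lock is held cannot enter
direct reclaim at all (which could recurse into shrinkers and back into
eviction), whereas nofs only excluded filesystem reclaim. A sketch of the
scoped-flag pattern in isolation, not amdgpu code (`lock` is a stand-in):

	unsigned int noreclaim_flags;

	mutex_lock(&lock);
	noreclaim_flags = memalloc_noreclaim_save();	/* sets PF_MEMALLOC */
	/* ... allocations here skip direct reclaim ... */
	memalloc_noreclaim_restore(noreclaim_flags);	/* restores old flags */
	mutex_unlock(&lock);

Saving and restoring the previous flag value, rather than clearing it, lets
nested scoped sections compose correctly.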
  
@@@ -638,15 -638,15 +638,15 @@@ void amdgpu_vm_move_to_lru_tail(struct 
        struct amdgpu_vm_bo_base *bo_base;
  
        if (vm->bulk_moveable) {
 -              spin_lock(&ttm_bo_glob.lru_lock);
 +              spin_lock(&ttm_glob.lru_lock);
                ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
 -              spin_unlock(&ttm_bo_glob.lru_lock);
 +              spin_unlock(&ttm_glob.lru_lock);
                return;
        }
  
        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
  
 -      spin_lock(&ttm_bo_glob.lru_lock);
 +      spin_lock(&ttm_glob.lru_lock);
        list_for_each_entry(bo_base, &vm->idle, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;
  
                                                &bo->shadow->tbo.mem,
                                                &vm->lru_bulk_move);
        }
 -      spin_unlock(&ttm_bo_glob.lru_lock);
 +      spin_unlock(&ttm_glob.lru_lock);
  
        vm->bulk_moveable = true;
  }
index 55e39b462a5e041182700be5608a098025388121,ddfdfaf444397ae777e6f141541c3089a2beb770..00edf78975b1b775954377c0b6d2282235e04b6b
@@@ -34,6 -34,7 +34,7 @@@
  #include "dc/inc/hw/dmcu.h"
  #include "dc/inc/hw/abm.h"
  #include "dc/dc_dmub_srv.h"
+ #include "dc/dc_edid_parser.h"
  #include "amdgpu_dm_trace.h"
  
  #include "vid.h"
@@@ -75,7 -76,6 +76,6 @@@
  #include <drm/drm_edid.h>
  #include <drm/drm_vblank.h>
  #include <drm/drm_audio_component.h>
- #include <drm/drm_hdcp.h>
  
  #if defined(CONFIG_DRM_AMD_DC_DCN)
  #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@@ -212,6 -212,9 +212,9 @@@ static bool amdgpu_dm_psr_disable_all(s
  static const struct drm_format_info *
  amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
  
+ static bool
+ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+                                struct drm_crtc_state *new_crtc_state);
  /*
   * dm_vblank_get_counter
   *
@@@ -335,6 -338,17 +338,17 @@@ static inline bool amdgpu_dm_vrr_active
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
  }
  
+ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
+                                             struct dm_crtc_state *new_state)
+ {
+       if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
+               return true;
+       if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
+               return true;
+       return false;
+ }
  /**
   * dm_pflip_high_irq() - Handle pageflip interrupt
   * @interrupt_params: ignored
@@@ -566,6 -580,31 +580,31 @@@ static void dm_crtc_high_irq(void *inte
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
  }
  
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ /**
+  * dm_dcn_vertical_interrupt0_high_irq() - Handle OTG vertical interrupt0 for
+  * DCN generation ASICs
+  * @interrupt_params: interrupt parameters
+  *
+  * Used to set the CRC window and read out the CRC value at the vertical
+  * line 0 position.
+  */
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
+ {
+       struct common_irq_params *irq_params = interrupt_params;
+       struct amdgpu_device *adev = irq_params->adev;
+       struct amdgpu_crtc *acrtc;
+       acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
+       if (!acrtc)
+               return;
+       amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
+ }
+ #endif
+ #endif
  static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
  {
@@@ -951,9 -990,8 +990,7 @@@ static void event_mall_stutter(struct w
        else
                dm->active_vblank_irq_count--;
  
--      dc_allow_idle_optimizations(
-               dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
 -              dm->dc, dm->active_vblank_irq_count == 0);
++      dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
  
        DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
  
@@@ -1060,6 -1098,7 +1097,7 @@@ static int amdgpu_dm_init(struct amdgpu
  
        init_data.flags.power_down_display_on_boot = true;
  
+       INIT_LIST_HEAD(&adev->dm.da_list);
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);
  
  
                dc_init_callbacks(adev->dm.dc, &init_params);
        }
+ #endif
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+       adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
  #endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
@@@ -1182,6 -1224,13 +1223,13 @@@ static void amdgpu_dm_fini(struct amdgp
  
        amdgpu_dm_destroy_drm_device(&adev->dm);
  
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+       if (adev->dm.crc_rd_wrk) {
+               flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
+               kfree(adev->dm.crc_rd_wrk);
+               adev->dm.crc_rd_wrk = NULL;
+       }
+ #endif
  #ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
  #endif
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+       if (adev->dm.vblank_workqueue) {
+               adev->dm.vblank_workqueue->dm = NULL;
+               kfree(adev->dm.vblank_workqueue);
+               adev->dm.vblank_workqueue = NULL;
+       }
+ #endif
        if (adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
@@@ -1847,6 -1905,9 +1904,9 @@@ static int dm_suspend(void *handle
                return ret;
        }
  
+ #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+       amdgpu_dm_crtc_secure_display_suspend(adev);
+ #endif
        WARN_ON(adev->dm.cached_state);
        adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
  
@@@ -2171,6 -2232,10 +2231,10 @@@ static int dm_resume(void *handle
  
        dm->cached_state = NULL;
  
+ #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+       amdgpu_dm_crtc_secure_display_resume(adev);
+ #endif
        amdgpu_dm_irq_resume_late(adev);
  
        amdgpu_dm_smu_write_watermarks_table(adev);
@@@ -2907,6 -2972,16 +2971,16 @@@ static int dcn10_register_irq_handlers(
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+       static const unsigned int vrtl_int_srcid[] = {
+               DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
+               DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
+               DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
+               DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
+               DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
+               DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
+       };
+ #endif
  
        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
                        adev, &int_params, dm_crtc_high_irq, c_irq_params);
        }
  
+       /* Use otg vertical line interrupt */
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
+                               vrtl_int_srcid[i], &adev->vline0_irq);
+               if (r) {
+                       DRM_ERROR("Failed to add vline0 irq id!\n");
+                       return r;
+               }
+               int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+               int_params.irq_source =
+                       dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
+               if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
+                       DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
+                       break;
+               }
+               c_irq_params = &adev->dm.vline0_params[int_params.irq_source
+                                       - DC_IRQ_SOURCE_DC1_VLINE0];
+               c_irq_params->adev = adev;
+               c_irq_params->irq_src = int_params.irq_source;
+               amdgpu_dm_irq_register_interrupt(adev, &int_params,
+                               dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
+       }
+ #endif
        /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
         * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
         * to trigger at end of each vblank, regardless of state of the lock,
@@@ -4583,6 -4689,7 +4688,6 @@@ fill_dc_plane_info_and_addr(struct amdg
        const struct drm_framebuffer *fb = plane_state->fb;
        const struct amdgpu_framebuffer *afb =
                to_amdgpu_framebuffer(plane_state->fb);
 -      struct drm_format_name_buf format_name;
        int ret;
  
        memset(plane_info, 0, sizeof(*plane_info));
                break;
        default:
                DRM_ERROR(
 -                      "Unsupported screen format %s\n",
 -                      drm_get_format_name(fb->format->format, &format_name));
 +                      "Unsupported screen format %p4cc\n",
 +                      &fb->format->format);
                return -EINVAL;
        }
  
@@@ -5001,19 -5108,16 +5106,16 @@@ static void fill_stream_properties_from
                timing_out->hdmi_vic = hv_frame.vic;
        }
  
-       timing_out->h_addressable = mode_in->crtc_hdisplay;
-       timing_out->h_total = mode_in->crtc_htotal;
-       timing_out->h_sync_width =
-               mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
-       timing_out->h_front_porch =
-               mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
-       timing_out->v_total = mode_in->crtc_vtotal;
-       timing_out->v_addressable = mode_in->crtc_vdisplay;
-       timing_out->v_front_porch =
-               mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
-       timing_out->v_sync_width =
-               mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
-       timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+       timing_out->h_addressable = mode_in->hdisplay;
+       timing_out->h_total = mode_in->htotal;
+       timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
+       timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
+       timing_out->v_total = mode_in->vtotal;
+       timing_out->v_addressable = mode_in->vdisplay;
+       timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
+       timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
+       timing_out->pix_clk_100hz = mode_in->clock * 10;
        timing_out->aspect_ratio = get_aspect_ratio(mode_in);
  
        stream->output_color_space = get_output_color_space(timing_out);
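
The hunk above deliberately reads the raw hdisplay/hsync/htotal fields rather
than the crtc_* copies. A standalone check of the porch/sync arithmetic, using
the well-known 1920x1080@60 (CEA-861 VIC 16) timings as input:

#include <assert.h>

struct mode { int clock, hdisplay, hsync_start, hsync_end, htotal,
	      vdisplay, vsync_start, vsync_end, vtotal; };

int main(void)
{
	struct mode m = { 148500, 1920, 2008, 2052, 2200,
			  1080, 1084, 1089, 1125 };
	int h_front_porch = m.hsync_start - m.hdisplay;
	int h_sync_width = m.hsync_end - m.hsync_start;
	int v_front_porch = m.vsync_start - m.vdisplay;
	int pix_clk_100hz = m.clock * 10;	/* kHz -> units of 100 Hz */

	assert(h_front_porch == 88 && h_sync_width == 44);
	assert(v_front_porch == 4 && pix_clk_100hz == 1485000);
	return 0;
}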
@@@ -5180,6 -5284,86 +5282,86 @@@ static void dm_enable_per_frame_crtc_ma
        set_master_stream(context->streams, context->stream_count);
  }
  
+ static struct drm_display_mode *
+ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
+                         bool use_probed_modes)
+ {
+       struct drm_display_mode *m, *m_pref = NULL;
+       u16 current_refresh, highest_refresh;
+       struct list_head *list_head = use_probed_modes ?
+                                                   &aconnector->base.probed_modes :
+                                                   &aconnector->base.modes;
+       if (aconnector->freesync_vid_base.clock != 0)
+               return &aconnector->freesync_vid_base;
+       /* Find the preferred mode */
+       list_for_each_entry(m, list_head, head) {
+               if (m->type & DRM_MODE_TYPE_PREFERRED) {
+                       m_pref = m;
+                       break;
+               }
+       }
+       if (!m_pref) {
+               /* Probably an EDID with no preferred mode. Fall back to the first entry. */
+               m_pref = list_first_entry_or_null(
+                       &aconnector->base.modes, struct drm_display_mode, head);
+               if (!m_pref) {
+                       DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
+                       return NULL;
+               }
+       }
+       highest_refresh = drm_mode_vrefresh(m_pref);
+       /*
+        * Find the mode with highest refresh rate with same resolution.
+        * For some monitors, preferred mode is not the mode with highest
+        * supported refresh rate.
+        */
+       list_for_each_entry(m, list_head, head) {
+               current_refresh  = drm_mode_vrefresh(m);
+               if (m->hdisplay == m_pref->hdisplay &&
+                   m->vdisplay == m_pref->vdisplay &&
+                   highest_refresh < current_refresh) {
+                       highest_refresh = current_refresh;
+                       m_pref = m;
+               }
+       }
+       aconnector->freesync_vid_base = *m_pref;
+       return m_pref;
+ }
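
The selection rule in get_highest_refresh_rate_mode() in isolation: among the
modes matching the preferred mode's resolution, keep the highest refresh rate.
A runnable sketch with made-up mode data:

#include <stdio.h>

struct m { int hdisplay, vdisplay, vrefresh; };

int main(void)
{
	struct m modes[] = { {1920, 1080, 60}, {1920, 1080, 144},
			     {2560, 1440, 60}, {1920, 1080, 120} };
	struct m pref = modes[0];	/* stand-in for the EDID-preferred mode */
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		if (modes[i].hdisplay == pref.hdisplay &&
		    modes[i].vdisplay == pref.vdisplay &&
		    modes[i].vrefresh > pref.vrefresh)
			pref = modes[i];
	printf("%dx%d@%d\n", pref.hdisplay, pref.vdisplay, pref.vrefresh);
	return 0;	/* prints 1920x1080@144 */
}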
+ static bool is_freesync_video_mode(struct drm_display_mode *mode,
+                                  struct amdgpu_dm_connector *aconnector)
+ {
+       struct drm_display_mode *high_mode;
+       int timing_diff;
+       high_mode = get_highest_refresh_rate_mode(aconnector, false);
+       if (!high_mode || !mode)
+               return false;
+       timing_diff = high_mode->vtotal - mode->vtotal;
+       if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
+           high_mode->hdisplay != mode->hdisplay ||
+           high_mode->vdisplay != mode->vdisplay ||
+           high_mode->hsync_start != mode->hsync_start ||
+           high_mode->hsync_end != mode->hsync_end ||
+           high_mode->htotal != mode->htotal ||
+           high_mode->hskew != mode->hskew ||
+           high_mode->vscan != mode->vscan ||
+           high_mode->vsync_start - mode->vsync_start != timing_diff ||
+           high_mode->vsync_end - mode->vsync_end != timing_diff)
+               return false;
+
+       return true;
+ }
  static struct dc_stream_state *
  create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                       const struct drm_display_mode *drm_mode,
                dm_state ? &dm_state->base : NULL;
        struct dc_stream_state *stream = NULL;
        struct drm_display_mode mode = *drm_mode;
+       struct drm_display_mode saved_mode;
+       struct drm_display_mode *freesync_mode = NULL;
        bool native_mode_found = false;
-       bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+       bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
        int mode_refresh;
        int preferred_refresh = 0;
  #if defined(CONFIG_DRM_AMD_DC_DCN)
        uint32_t link_bandwidth_kbps;
  #endif
        struct dc_sink *sink = NULL;
+       memset(&saved_mode, 0, sizeof(saved_mode));
        if (aconnector == NULL) {
                DRM_ERROR("aconnector is NULL!\n");
                return stream;
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
-               decide_crtc_timing_for_drm_display_mode(
+               recalculate_timing |= amdgpu_freesync_vid_mode &&
+                                is_freesync_video_mode(&mode, aconnector);
+               if (recalculate_timing) {
+                       freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
+                       saved_mode = mode;
+                       mode = *freesync_mode;
+               } else {
+                       decide_crtc_timing_for_drm_display_mode(
                                &mode, preferred_mode,
                                dm_state ? (dm_state->scaling != RMX_OFF) : false);
+               }
                preferred_refresh = drm_mode_vrefresh(preferred_mode);
        }
  
-       if (!dm_state)
+       if (recalculate_timing)
+               drm_mode_set_crtcinfo(&saved_mode, 0);
+       else
                drm_mode_set_crtcinfo(&mode, 0);
  
-       /*
+       /*
        * If scaling is enabled and refresh rate didn't change
        * we copy the vic and polarities of the old timings
        */
-       if (!scale || mode_refresh != preferred_refresh)
-               fill_stream_properties_from_drm_display_mode(stream,
-                       &mode, &aconnector->base, con_state, NULL, requested_bpc);
+       if (!recalculate_timing || mode_refresh != preferred_refresh)
+               fill_stream_properties_from_drm_display_mode(
+                       stream, &mode, &aconnector->base, con_state, NULL,
+                       requested_bpc);
        else
-               fill_stream_properties_from_drm_display_mode(stream,
-                       &mode, &aconnector->base, con_state, old_stream, requested_bpc);
+               fill_stream_properties_from_drm_display_mode(
+                       stream, &mode, &aconnector->base, con_state, old_stream,
+                       requested_bpc);
  
        stream->timing.flags.DSC = 0;
  
@@@ -5409,15 -5611,22 +5609,22 @@@ dm_crtc_duplicate_state(struct drm_crt
        state->abm_level = cur->abm_level;
        state->vrr_supported = cur->vrr_supported;
        state->freesync_config = cur->freesync_config;
-       state->crc_src = cur->crc_src;
        state->cm_has_degamma = cur->cm_has_degamma;
        state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
        /* TODO: duplicate dc_stream once the stream object is flattened */
  
        return &state->base;
  }
  
+ #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
+ {
+       crtc_debugfs_init(crtc);
+       return 0;
+ }
+ #endif
  static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
  {
        enum dc_irq_source irq_source;
@@@ -5503,6 -5712,9 +5710,9 @@@ static const struct drm_crtc_funcs amdg
        .enable_vblank = dm_enable_vblank,
        .disable_vblank = dm_disable_vblank,
        .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+       .late_register = amdgpu_dm_crtc_late_register,
+ #endif
  };
  
  static enum drm_connector_status
@@@ -6488,13 -6700,17 +6698,17 @@@ static int dm_plane_helper_check_state(
                        else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
                                viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
  
-                       /* If completely outside of screen, viewport_width and/or viewport_height will be negative,
-                        * which is still OK to satisfy the condition below, thereby also covering these cases
-                        * (when plane is completely outside of screen).
-                        * x2 for width is because of pipe-split.
-                        */
-                       if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
+                       if (viewport_width < 0 || viewport_height < 0) {
+                               DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
+                               return -EINVAL;
+                       } else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* x2 for width because of pipe-split */
+                               DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
                                return -EINVAL;
+                       } else if (viewport_height < MIN_VIEWPORT_SIZE) {
+                               DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
+                               return -EINVAL;
+                       }
                }
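
The replacement turns one opaque rejection into three distinct, logged ones. A
standalone sketch of the same decision tree (MIN_VIEWPORT_SIZE is assumed to
be 12 here, purely for illustration):

#include <stdio.h>

#define MIN_VIEWPORT_SIZE 12	/* assumed value */

static int check_viewport(int viewport_width, int viewport_height)
{
	if (viewport_width < 0 || viewport_height < 0)
		return -1;	/* plane completely outside of screen */
	if (viewport_width < MIN_VIEWPORT_SIZE * 2)
		return -1;	/* too narrow once pipe-split halves it */
	if (viewport_height < MIN_VIEWPORT_SIZE)
		return -1;	/* too short */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", check_viewport(-5, 100),	/* -1 */
	       check_viewport(20, 100),			/* -1: 20 < 24 */
	       check_viewport(64, 64));			/* 0 */
	return 0;
}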
  
                /* Get min/max allowed scaling factors from plane caps. */
  }
  
  static int dm_plane_atomic_check(struct drm_plane *plane,
 -                               struct drm_plane_state *state)
 +                               struct drm_atomic_state *state)
  {
 +      struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
 +                                                                               plane);
        struct amdgpu_device *adev = drm_to_adev(plane->dev);
        struct dc *dc = adev->dm.dc;
        struct dm_plane_state *dm_plane_state;
        struct drm_crtc_state *new_crtc_state;
        int ret;
  
 -      trace_amdgpu_dm_plane_atomic_check(state);
 +      trace_amdgpu_dm_plane_atomic_check(new_plane_state);
  
 -      dm_plane_state = to_dm_plane_state(state);
 +      dm_plane_state = to_dm_plane_state(new_plane_state);
  
        if (!dm_plane_state->dc_state)
                return 0;
  
        new_crtc_state =
 -              drm_atomic_get_new_crtc_state(state->state, state->crtc);
 +              drm_atomic_get_new_crtc_state(state,
 +                                            new_plane_state->crtc);
        if (!new_crtc_state)
                return -EINVAL;
  
 -      ret = dm_plane_helper_check_state(state, new_crtc_state);
 +      ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
        if (ret)
                return ret;
  
 -      ret = fill_dc_scaling_info(state, &scaling_info);
 +      ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
        if (ret)
                return ret;
  
  }
  
  static int dm_plane_atomic_async_check(struct drm_plane *plane,
 -                                     struct drm_plane_state *new_plane_state)
 +                                     struct drm_atomic_state *state)
  {
        /* Only support async updates on cursor planes. */
        if (plane->type != DRM_PLANE_TYPE_CURSOR)
  }
  
  static void dm_plane_atomic_async_update(struct drm_plane *plane,
 -                                       struct drm_plane_state *new_state)
 +                                       struct drm_atomic_state *state)
  {
 +      struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 +                                                                         plane);
        struct drm_plane_state *old_state =
 -              drm_atomic_get_old_plane_state(new_state->state, plane);
 +              drm_atomic_get_old_plane_state(state, plane);
  
        trace_amdgpu_dm_atomic_update_cursor(new_state);
  
@@@ -6975,11 -7186,118 +7189,118 @@@ static void amdgpu_dm_connector_ddc_get
                 */
                drm_mode_sort(&connector->probed_modes);
                amdgpu_dm_get_native_mode(connector);
+               /* Freesync capabilities are reset by calling
+                * drm_add_edid_modes() and need to be
+                * restored here.
+                */
+               amdgpu_dm_update_freesync_caps(connector, edid);
        } else {
                amdgpu_dm_connector->num_modes = 0;
        }
  }
  
+ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
+                             struct drm_display_mode *mode)
+ {
+       struct drm_display_mode *m;
+       list_for_each_entry(m, &aconnector->base.probed_modes, head) {
+               if (drm_mode_equal(m, mode))
+                       return true;
+       }
+       return false;
+ }
+ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
+ {
+       const struct drm_display_mode *m;
+       struct drm_display_mode *new_mode;
+       uint i;
+       uint32_t new_modes_count = 0;
+       /* Standard FPS values
+        *
+        * 23.976   - TV/NTSC
+        * 24       - Cinema
+        * 25       - TV/PAL
+        * 29.97    - TV/NTSC
+        * 30       - TV/NTSC
+        * 48       - Cinema HFR
+        * 50       - TV/PAL
+        * 60       - Commonly used
+        * 48,72,96 - Multiples of 24
+        */
+       const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
+                                        48000, 50000, 60000, 72000, 96000 };
+       /*
+        * Find mode with highest refresh rate with the same resolution
+        * as the preferred mode. Some monitors report a preferred mode
+        * with lower resolution than the highest refresh rate supported.
+        */
+       m = get_highest_refresh_rate_mode(aconnector, true);
+       if (!m)
+               return 0;
+       for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
+               uint64_t target_vtotal, target_vtotal_diff;
+               uint64_t num, den;
+               if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
+                       continue;
+               if (common_rates[i] < aconnector->min_vfreq * 1000 ||
+                   common_rates[i] > aconnector->max_vfreq * 1000)
+                       continue;
+               num = (unsigned long long)m->clock * 1000 * 1000;
+               den = common_rates[i] * (unsigned long long)m->htotal;
+               target_vtotal = div_u64(num, den);
+               target_vtotal_diff = target_vtotal - m->vtotal;
+               /* Check for illegal modes */
+               if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
+                   m->vsync_end + target_vtotal_diff < m->vsync_start ||
+                   m->vtotal + target_vtotal_diff < m->vsync_end)
+                       continue;
+               new_mode = drm_mode_duplicate(aconnector->base.dev, m);
+               if (!new_mode)
+                       goto out;
+               new_mode->vtotal += (u16)target_vtotal_diff;
+               new_mode->vsync_start += (u16)target_vtotal_diff;
+               new_mode->vsync_end += (u16)target_vtotal_diff;
+               new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+               new_mode->type |= DRM_MODE_TYPE_DRIVER;
+               if (!is_duplicate_mode(aconnector, new_mode)) {
+                       drm_mode_probed_add(&aconnector->base, new_mode);
+                       new_modes_count += 1;
+               } else {
+                       drm_mode_destroy(aconnector->base.dev, new_mode);
+               }
+       }
+  out:
+       return new_modes_count;
+ }
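
The vtotal stretch in add_fs_modes() follows from refresh = clock * 1000 /
(htotal * vtotal), with clock in kHz and the target rate in millihertz. A
standalone check for a 1080p60 base mode retargeted to 24.000 Hz:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t clock_khz = 148500, htotal = 2200, vtotal = 1125;
	uint64_t rate_mhz = 24000;	/* 24.000 Hz, in millihertz */
	uint64_t target_vtotal = clock_khz * 1000 * 1000 / (rate_mhz * htotal);

	/* prints "target vtotal 2812 (+1687 lines of front porch)" */
	printf("target vtotal %llu (+%llu lines of front porch)\n",
	       (unsigned long long)target_vtotal,
	       (unsigned long long)(target_vtotal - vtotal));
	return 0;
}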
+ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
+                                                  struct edid *edid)
+ {
+       struct amdgpu_dm_connector *amdgpu_dm_connector =
+               to_amdgpu_dm_connector(connector);
+       if (!(amdgpu_freesync_vid_mode && edid))
+               return;
+
+       if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+               amdgpu_dm_connector->num_modes +=
+                       add_fs_modes(amdgpu_dm_connector);
+ }
  static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
  {
        struct amdgpu_dm_connector *amdgpu_dm_connector =
        } else {
                amdgpu_dm_connector_ddc_get_modes(connector, edid);
                amdgpu_dm_connector_add_common_modes(encoder, connector);
+               amdgpu_dm_connector_add_freesync_modes(connector, edid);
        }
        amdgpu_dm_fbc_init(connector);
  
@@@ -7299,8 -7618,19 +7621,19 @@@ static void manage_dm_interrupts(struc
                        adev,
                        &adev->pageflip_irq,
                        irq_type);
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+               amdgpu_irq_get(
+                       adev,
+                       &adev->vline0_irq,
+                       irq_type);
+ #endif
        } else {
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+               amdgpu_irq_put(
+                       adev,
+                       &adev->vline0_irq,
+                       irq_type);
+ #endif
                amdgpu_irq_put(
                        adev,
                        &adev->pageflip_irq,
@@@ -7424,10 -7754,6 +7757,6 @@@ static int get_cursor_position(struct d
        int x, y;
        int xorigin = 0, yorigin = 0;
  
-       position->enable = false;
-       position->x = 0;
-       position->y = 0;
        if (!crtc || !plane->state->fb)
                return 0;
  
@@@ -7474,7 -7800,7 +7803,7 @@@ static void handle_cursor_update(struc
        struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        uint64_t address = afb ? afb->address : 0;
-       struct dc_cursor_position position;
+       struct dc_cursor_position position = {0};
        struct dc_cursor_attributes attributes;
        int ret;
  
@@@ -7559,6 -7885,7 +7888,7 @@@ static void update_freesync_state_on_st
        struct amdgpu_device *adev = dm->adev;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
        unsigned long flags;
+       bool pack_sdp_v1_3 = false;
  
        if (!new_stream)
                return;
                &vrr_params,
                PACKET_TYPE_VRR,
                TRANSFER_FUNC_UNKNOWN,
-               &vrr_infopacket);
+               &vrr_infopacket,
+               pack_sdp_v1_3);
  
        new_crtc_state->freesync_timing_changed |=
                (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
@@@ -7654,9 -7982,22 +7985,22 @@@ static void update_stream_irq_parameter
        if (new_crtc_state->vrr_supported &&
            config.min_refresh_in_uhz &&
            config.max_refresh_in_uhz) {
-               config.state = new_crtc_state->base.vrr_enabled ?
-                       VRR_STATE_ACTIVE_VARIABLE :
-                       VRR_STATE_INACTIVE;
+               /*
+                * If a freesync-compatible mode was set, config.state has
+                * already been set during atomic check.
+                */
+               if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
+                   (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
+                    new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
+                       vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
+                       vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
+                       vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
+                       vrr_params.state = VRR_STATE_ACTIVE_FIXED;
+               } else {
+                       config.state = new_crtc_state->base.vrr_enabled ?
+                                                    VRR_STATE_ACTIVE_VARIABLE :
+                                                    VRR_STATE_INACTIVE;
+               }
        } else {
                config.state = VRR_STATE_UNSUPPORTED;
        }
@@@ -7977,8 -8318,7 +8321,7 @@@ static void amdgpu_dm_commit_planes(str
                 * re-adjust the min/max bounds now that DC doesn't handle this
                 * as part of commit.
                 */
-               if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
-                   amdgpu_dm_vrr_active(acrtc_state)) {
+               if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
                        dc_stream_adjust_vmin_vmax(
                                dm->dc, acrtc_state->stream,
@@@ -8263,6 -8603,7 +8606,7 @@@ static void amdgpu_dm_atomic_commit_tai
                        /* i.e. reset mode */
                        if (dm_old_crtc_state->stream)
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
                        mode_set_reset_required = true;
                }
        } /* for_each_crtc_in_state() */
                        hdcp_update_display(
                                adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
                                new_con_state->hdcp_content_type,
-                               new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
-                                                                                                        : false);
+                               new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
        }
  #endif
  
         */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ #ifdef CONFIG_DEBUG_FS
+               bool configure_crc = false;
+               enum amdgpu_dm_pipe_crc_source cur_crc_src;
+ #endif
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
  
                if (new_crtc_state->active &&
                         * settings for the stream.
                         */
                        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+                       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+                       cur_crc_src = acrtc->dm_irq_params.crc_src;
+                       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+                       if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
+                               configure_crc = true;
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+                               if (amdgpu_dm_crc_window_is_activated(crtc))
+                                       configure_crc = false;
+ #endif
+                       }
  
-                       if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
+                       if (configure_crc)
                                amdgpu_dm_crtc_configure_crc_source(
-                                       crtc, dm_new_crtc_state,
-                                       dm_new_crtc_state->crc_src);
-                       }
+                                       crtc, dm_new_crtc_state, cur_crc_src);
  #endif
                }
        }
@@@ -8662,6 -9014,7 +9017,7 @@@ static void get_freesync_config_for_crt
                        to_amdgpu_dm_connector(new_con_state->base.connector);
        struct drm_display_mode *mode = &new_crtc_state->base.mode;
        int vrefresh = drm_mode_vrefresh(mode);
+       bool fs_vid_mode = false;
  
        new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
                                        vrefresh >= aconnector->min_vfreq &&
  
        if (new_crtc_state->vrr_supported) {
                new_crtc_state->stream->ignore_msa_timing_param = true;
-               config.state = new_crtc_state->base.vrr_enabled ?
-                               VRR_STATE_ACTIVE_VARIABLE :
-                               VRR_STATE_INACTIVE;
-               config.min_refresh_in_uhz =
-                               aconnector->min_vfreq * 1000000;
-               config.max_refresh_in_uhz =
-                               aconnector->max_vfreq * 1000000;
+               fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+               config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
+               config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
                config.vsif_supported = true;
                config.btr = true;
-       }
  
+               if (fs_vid_mode) {
+                       config.state = VRR_STATE_ACTIVE_FIXED;
+                       config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
+                       goto out;
+               } else if (new_crtc_state->base.vrr_enabled) {
+                       config.state = VRR_STATE_ACTIVE_VARIABLE;
+               } else {
+                       config.state = VRR_STATE_INACTIVE;
+               }
+       }
+ out:
        new_crtc_state->freesync_config = config;
  }
  
@@@ -8692,6 -9052,50 +9055,50 @@@ static void reset_freesync_config_for_c
               sizeof(new_crtc_state->vrr_infopacket));
  }
  
+ /*
+  * Returns true when only the vertical front porch changed: the pixel clock,
+  * all horizontal fields, hskew, vscan and the vsync width are identical,
+  * while vtotal and the vsync position moved together.
+  */
+ static bool
+ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+                                struct drm_crtc_state *new_crtc_state)
+ {
+       struct drm_display_mode old_mode, new_mode;
+       if (!old_crtc_state || !new_crtc_state)
+               return false;
+       old_mode = old_crtc_state->mode;
+       new_mode = new_crtc_state->mode;
+       if (old_mode.clock       == new_mode.clock &&
+           old_mode.hdisplay    == new_mode.hdisplay &&
+           old_mode.vdisplay    == new_mode.vdisplay &&
+           old_mode.htotal      == new_mode.htotal &&
+           old_mode.vtotal      != new_mode.vtotal &&
+           old_mode.hsync_start == new_mode.hsync_start &&
+           old_mode.vsync_start != new_mode.vsync_start &&
+           old_mode.hsync_end   == new_mode.hsync_end &&
+           old_mode.vsync_end   != new_mode.vsync_end &&
+           old_mode.hskew       == new_mode.hskew &&
+           old_mode.vscan       == new_mode.vscan &&
+           (old_mode.vsync_end - old_mode.vsync_start) ==
+           (new_mode.vsync_end - new_mode.vsync_start))
+               return true;
+       return false;
+ }
+ static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+ {
+       uint64_t num, den, res;
+       struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
+       dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
+       num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
+       den = (unsigned long long)new_crtc_state->mode.htotal *
+             (unsigned long long)new_crtc_state->mode.vtotal;
+       res = div_u64(num, den);
+       dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
+ }
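
A quick sanity check of the micro-hertz computation above for a 1080p60 mode
(clock 148500 kHz, htotal 2200, vtotal 1125):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t num = 148500ULL * 1000 * 1000000;	/* kHz -> Hz -> uHz */
	uint64_t den = 2200ULL * 1125;			/* htotal * vtotal */

	printf("%llu uHz\n", (unsigned long long)(num / den)); /* 60000000 */
	return 0;
}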
  static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                                struct drm_atomic_state *state,
                                struct drm_crtc *crtc,
                 * TODO: Refactor this function to allow this check to work
                 * in all conditions.
                 */
+               if (amdgpu_freesync_vid_mode &&
+                   dm_new_crtc_state->stream &&
+                   is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
+                       goto skip_modeset;
                if (dm_new_crtc_state->stream &&
                    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;
  
+               if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+                   is_timing_unchanged_for_freesync(new_crtc_state,
+                                                    old_crtc_state)) {
+                       new_crtc_state->mode_changed = false;
+                       DRM_DEBUG_DRIVER("Mode change not required for front porch change, setting mode_changed to %d",
+                                        new_crtc_state->mode_changed);
+                       set_freesync_fixed_config(dm_new_crtc_state);
+                       goto skip_modeset;
+               } else if (amdgpu_freesync_vid_mode && aconnector &&
+                          is_freesync_video_mode(&new_crtc_state->mode,
+                                                 aconnector)) {
+                       set_freesync_fixed_config(dm_new_crtc_state);
+               }
                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        goto fail;
@@@ -9390,7 -9817,7 +9820,7 @@@ static int amdgpu_dm_atomic_check(struc
        }
  
  #if defined(CONFIG_DRM_AMD_DC_DCN)
-       if (adev->asic_type >= CHIP_NAVI10) {
+       if (dc_resource_is_dsc_encoding_supported(dc)) {
                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                        if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
                                ret = add_affected_mst_dsc_crtcs(state, crtc);
@@@ -9696,11 -10123,85 +10126,85 @@@ static bool is_dp_capable_without_timin
  
        return capable;
  }
+ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
+               uint8_t *edid_ext, int len,
+               struct amdgpu_hdmi_vsdb_info *vsdb_info)
+ {
+       int i;
+       struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
+       struct dc *dc = adev->dm.dc;
+       /* send extension block to DMCU for parsing */
+       for (i = 0; i < len; i += 8) {
+               bool res;
+               int offset;
+               /* send 8 bytes a time */
+               if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
+                       return false;
+               if (i + 8 == len) {
+                       /* EDID block sent completed, expect result */
+                       int version, min_rate, max_rate;
+                       res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
+                       if (res) {
+                               /* amd vsdb found */
+                               vsdb_info->freesync_supported = 1;
+                               vsdb_info->amd_vsdb_version = version;
+                               vsdb_info->min_refresh_rate_hz = min_rate;
+                               vsdb_info->max_refresh_rate_hz = max_rate;
+                               return true;
+                       }
+                       /* not amd vsdb */
+                       return false;
+               }
+               /* check for ack */
+               res = dc_edid_parser_recv_cea_ack(dc, &offset);
+               if (!res)
+                       return false;
+       }
+       return false;
+ }
+ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+               struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+ {
+       uint8_t *edid_ext = NULL;
+       int i;
+       bool valid_vsdb_found = false;
+       /*----- drm_find_cea_extension() -----*/
+       /* No EDID or EDID extensions */
+       if (edid == NULL || edid->extensions == 0)
+               return -ENODEV;
+       /* Find CEA extension */
+       for (i = 0; i < edid->extensions; i++) {
+               edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
+               if (edid_ext[0] == CEA_EXT)
+                       break;
+       }
+       if (i == edid->extensions)
+               return -ENODEV;
+       /*----- cea_db_offsets() -----*/
+       if (edid_ext[0] != CEA_EXT)
+               return -ENODEV;
+       valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
+       return valid_vsdb_found ? i : -ENODEV;
+ }
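
parse_hdmi_amd_vsdb() open-codes the first half of drm_find_cea_extension().
The layout it relies on is standard EDID: byte 126 of the base block holds the
extension count, extension i starts at byte 128 * (i + 1), and a CEA-861 block
is tagged 0x02. A standalone sketch of that scan:

#include <stdio.h>
#include <stdint.h>

#define EDID_LENGTH 128
#define CEA_EXT 0x02

static int find_cea_ext(const uint8_t *edid)
{
	int i, extensions = edid[126];	/* extension block count */

	for (i = 0; i < extensions; i++)
		if (edid[EDID_LENGTH * (i + 1)] == CEA_EXT)
			return i;
	return -1;
}

int main(void)
{
	uint8_t edid[2 * EDID_LENGTH] = {0};

	edid[126] = 1;			/* one extension block */
	edid[EDID_LENGTH] = CEA_EXT;	/* tag it as CEA-861 */
	printf("CEA extension index: %d\n", find_cea_ext(edid)); /* 0 */
	return 0;
}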
  void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
                                        struct edid *edid)
  {
-       int i;
-       bool edid_check_required;
+       int i = 0;
        struct detailed_timing *timing;
        struct detailed_non_pixel *data;
        struct detailed_data_monitor_range *range;
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        bool freesync_capable = false;
+       struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
  
        if (!connector->state) {
                DRM_ERROR("%s - Connector has no state", __func__);
  
        dm_con_state = to_dm_connector_state(connector->state);
  
-       edid_check_required = false;
        if (!amdgpu_dm_connector->dc_sink) {
                DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
                goto update;
        }
        if (!adev->dm.freesync_module)
                goto update;
-       /*
-        * if edid non zero restrict freesync only for dp and edp
-        */
-       if (edid) {
-               if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
-                       || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+       if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+               || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+               bool edid_check_required = false;
+               if (edid) {
                        edid_check_required = is_dp_capable_without_timing_msa(
                                                adev->dm.dc,
                                                amdgpu_dm_connector);
                }
-       }
-       if (edid_check_required == true && (edid->version > 1 ||
-          (edid->version == 1 && edid->revision > 1))) {
-               for (i = 0; i < 4; i++) {
  
-                       timing  = &edid->detailed_timings[i];
-                       data    = &timing->data.other_data;
-                       range   = &data->data.range;
-                       /*
-                        * Check if monitor has continuous frequency mode
-                        */
-                       if (data->type != EDID_DETAIL_MONITOR_RANGE)
-                               continue;
-                       /*
-                        * Check for flag range limits only. If flag == 1 then
-                        * no additional timing information provided.
-                        * Default GTF, GTF Secondary curve and CVT are not
-                        * supported
-                        */
-                       if (range->flags != 1)
-                               continue;
+               if (edid_check_required && (edid->version > 1 ||
+                  (edid->version == 1 && edid->revision > 1))) {
+                       for (i = 0; i < 4; i++) {
  
-                       amdgpu_dm_connector->min_vfreq = range->min_vfreq;
-                       amdgpu_dm_connector->max_vfreq = range->max_vfreq;
-                       amdgpu_dm_connector->pixel_clock_mhz =
-                               range->pixel_clock_mhz * 10;
+                               timing  = &edid->detailed_timings[i];
+                               data    = &timing->data.other_data;
+                               range   = &data->data.range;
+                               /*
+                                * Check if monitor has continuous frequency mode
+                                */
+                               if (data->type != EDID_DETAIL_MONITOR_RANGE)
+                                       continue;
+                               /*
+                                * Check for flag range limits only. If flag == 1 then
+                                * no additional timing information provided.
+                                * Default GTF, GTF Secondary curve and CVT are not
+                                * supported
+                                */
+                               if (range->flags != 1)
+                                       continue;
  
-                       connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
-                       connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+                               amdgpu_dm_connector->min_vfreq = range->min_vfreq;
+                               amdgpu_dm_connector->max_vfreq = range->max_vfreq;
+                               amdgpu_dm_connector->pixel_clock_mhz =
+                                       range->pixel_clock_mhz * 10;
  
-                       break;
-               }
+                               connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
+                               connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
  
-               if (amdgpu_dm_connector->max_vfreq -
-                   amdgpu_dm_connector->min_vfreq > 10) {
+                               break;
+                       }
  
-                       freesync_capable = true;
+                       if (amdgpu_dm_connector->max_vfreq -
+                           amdgpu_dm_connector->min_vfreq > 10) {
+                               freesync_capable = true;
+                       }
+               }
+       } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+               i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+               if (i >= 0 && vsdb_info.freesync_supported) {
+                       timing  = &edid->detailed_timings[i];
+                       data    = &timing->data.other_data;
+                       amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+                       amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+                       if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+                               freesync_capable = true;
+                       connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+                       connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
                }
        }
  
index c494235016e09aac6814ab6d2cbcc22659fe9529,0fbdfff87835b529a13e0793c637bf7c6b8e3329..622a5bf9737f713240f14641cce2c090d372c47d
@@@ -116,7 -116,7 +116,7 @@@ struct _vcs_dpi_ip_params_st dcn3_01_i
        .dcc_supported = true,
        .writeback_interface_buffer_size_kbytes = 90,
        .writeback_line_buffer_buffer_size = 656640,
-       .max_line_buffer_lines = 12,
+       .max_line_buffer_lines = 32,
        .writeback_luma_buffer_size_kbytes = 12,  // writeback_line_buffer_buffer_size = 656640
        .writeback_chroma_buffer_size_kbytes = 8,
        .writeback_chroma_line_buffer_width_pixels = 4,
@@@ -873,6 -873,7 +873,7 @@@ static const struct dc_debug_options de
        .underflow_assert_delay_us = 0xFFFFFFFF,
        .dwb_fi_phase = -1, // -1 = disable
        .dmub_command_table = true,
+       .use_max_lb = false,
  };
  
  static const struct dc_debug_options debug_defaults_diags = {
        .scl_reset_length10 = true,
        .dwb_fi_phase = -1, // -1 = disable
        .dmub_command_table = true,
+       .use_max_lb = false,
  };
  
  void dcn301_dpp_destroy(struct dpp **dpp)
@@@ -1619,106 -1621,13 +1621,107 @@@ static void dcn301_update_bw_bounding_b
        dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
  }
  
 +static void calculate_wm_set_for_vlevel(
 +              int vlevel,
 +              struct wm_range_table_entry *table_entry,
 +              struct dcn_watermarks *wm_set,
 +              struct display_mode_lib *dml,
 +              display_e2e_pipe_params_st *pipes,
 +              int pipe_cnt)
 +{
 +      double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
 +
 +      ASSERT(vlevel < dml->soc.num_states);
 +      /* only pipe 0 is read for voltage and dcf/soc clocks */
 +      pipes[0].clks_cfg.voltage = vlevel;
 +      pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
 +      pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
 +
 +      dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
 +      dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
 +      dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
 +
 +      wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
 +      wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
 +      wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
 +      wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
 +      wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
 +      wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
 +      wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
 +      wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
 +      dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
 +
 +}
 +
 +static void dcn301_calculate_wm_and_dlg(
 +              struct dc *dc, struct dc_state *context,
 +              display_e2e_pipe_params_st *pipes,
 +              int pipe_cnt,
 +              int vlevel_req)
 +{
 +      int i, pipe_idx;
 +      int vlevel, vlevel_max;
 +      struct wm_range_table_entry *table_entry;
 +      struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
 +
 +      ASSERT(bw_params);
 +
 +      vlevel_max = bw_params->clk_table.num_entries - 1;
 +
 +      /* WM Set D */
 +      table_entry = &bw_params->wm_table.entries[WM_D];
 +      if (table_entry->wm_type == WM_TYPE_RETRAINING)
 +              vlevel = 0;
 +      else
 +              vlevel = vlevel_max;
 +      calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
 +                                              &context->bw_ctx.dml, pipes, pipe_cnt);
 +      /* WM Set C */
 +      table_entry = &bw_params->wm_table.entries[WM_C];
 +      vlevel = min(max(vlevel_req, 2), vlevel_max);
 +      calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
 +                                              &context->bw_ctx.dml, pipes, pipe_cnt);
 +      /* WM Set B */
 +      table_entry = &bw_params->wm_table.entries[WM_B];
 +      vlevel = min(max(vlevel_req, 1), vlevel_max);
 +      calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
 +                                              &context->bw_ctx.dml, pipes, pipe_cnt);
 +
 +      /* WM Set A */
 +      table_entry = &bw_params->wm_table.entries[WM_A];
 +      vlevel = min(vlevel_req, vlevel_max);
 +      calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
 +                                              &context->bw_ctx.dml, pipes, pipe_cnt);
 +
 +      for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 +              if (!context->res_ctx.pipe_ctx[i].stream)
 +                      continue;
 +
 +              pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
 +              pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
 +
 +              if (dc->config.forced_clocks) {
 +                      pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
 +                      pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
 +              }
 +              if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
 +                      pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
 +              if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
 +                      pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
 +
 +              pipe_idx++;
 +      }
 +
 +      dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
 +}
 +
  static struct resource_funcs dcn301_res_pool_funcs = {
        .destroy = dcn301_destroy_resource_pool,
        .link_enc_create = dcn301_link_encoder_create,
        .panel_cntl_create = dcn301_panel_cntl_create,
        .validate_bandwidth = dcn30_validate_bandwidth,
 -      .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
 +      .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
+       .update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
        .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
        .add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@@ -1764,6 -1673,8 +1767,8 @@@ static bool dcn301_resource_construct
        dc->caps.min_horizontal_blanking_period = 80;
        dc->caps.dmdata_alloc_size = 2048;
        dc->caps.max_slave_planes = 1;
+       dc->caps.max_slave_yuv_planes = 1;
+       dc->caps.max_slave_rgb_planes = 1;
        dc->caps.is_apu = true;
        dc->caps.post_blend_color_processing = true;
        dc->caps.force_dp_tps4_for_cp2520 = true;
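The new dcn301_calculate_wm_and_dlg() picks a voltage level per watermark set before reusing a common helper: set D pins retraining-type entries to level 0, sets C and B clamp the requested level to floors of 2 and 1 respectively, and set A only caps at the table maximum. A hedged sketch of just that clamping policy, with an illustrative enum and helper that are not driver API:

#define WM_MIN(a, b) ((a) < (b) ? (a) : (b))
#define WM_MAX(a, b) ((a) > (b) ? (a) : (b))

enum wm_set { WM_A, WM_B, WM_C, WM_D };

/* Returns the vlevel each watermark set is computed at, following
 * the clamps used in dcn301_calculate_wm_and_dlg() above. */
static int wm_vlevel_for_set(enum wm_set set, int vlevel_req,
			     int vlevel_max, int wm_d_is_retraining)
{
	switch (set) {
	case WM_D:	/* retraining entries use the lowest state */
		return wm_d_is_retraining ? 0 : vlevel_max;
	case WM_C:	/* never below voltage state 2 */
		return WM_MIN(WM_MAX(vlevel_req, 2), vlevel_max);
	case WM_B:	/* never below voltage state 1 */
		return WM_MIN(WM_MAX(vlevel_req, 1), vlevel_max);
	case WM_A:
	default:	/* request honored, capped at the table size */
		return WM_MIN(vlevel_req, vlevel_max);
	}
}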
index 0e0f494fbb5e138b1f739cd664252d2899c2fc12,3908ad9291769cf04bf79f4d156105dbd38dfaa2..6ee9dd833b85c2a5a58593bd55e7043f00f31aa7
@@@ -58,6 -58,20 +58,20 @@@ enum dc_irq_source to_dal_irq_source_dc
                return DC_IRQ_SOURCE_VBLANK5;
        case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
                return DC_IRQ_SOURCE_VBLANK6;
+       case DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT:
+               return DC_IRQ_SOURCE_DMCUB_OUTBOX0;
+       case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
+               return DC_IRQ_SOURCE_DC1_VLINE0;
+       case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
+               return DC_IRQ_SOURCE_DC2_VLINE0;
+       case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
+               return DC_IRQ_SOURCE_DC3_VLINE0;
+       case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
+               return DC_IRQ_SOURCE_DC4_VLINE0;
+       case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
+               return DC_IRQ_SOURCE_DC5_VLINE0;
+       case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
+               return DC_IRQ_SOURCE_DC6_VLINE0;
        case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
                return DC_IRQ_SOURCE_PFLIP1;
        case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
@@@ -173,6 -187,16 +187,12 @@@ static const struct irq_source_info_fun
        .ack = NULL
  };
  
 -static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = {
 -      .set = NULL,
 -      .ack = NULL
 -};
+ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
+       .set = NULL,
+       .ack = NULL
+ };
  #undef BASE_INNER
  #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
  
                .funcs = &vblank_irq_info_funcs\
        }
  
+ #define vline0_int_entry(reg_num)\
+       [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
+               IRQ_REG_ENTRY(OTG, reg_num,\
+                       OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
+                       OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
+               .funcs = &vline0_irq_info_funcs\
+       }
  #define dummy_irq_entry() \
        {\
                .funcs = &dummy_irq_info_funcs\
@@@ -366,6 -398,12 +394,12 @@@ irq_source_info_dcn21[DAL_IRQ_SOURCES_N
        vblank_int_entry(3),
        vblank_int_entry(4),
        vblank_int_entry(5),
+       vline0_int_entry(0),
+       vline0_int_entry(1),
+       vline0_int_entry(2),
+       vline0_int_entry(3),
+       vline0_int_entry(4),
+       vline0_int_entry(5),
  };
  
  static const struct irq_service_funcs irq_service_funcs_dcn21 = {
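The vline0 additions lean on two conventions: to_dal_irq_source_dcn21() maps each OTG instance's SRCID to a DC_IRQ_SOURCE_DCn_VLINE0 value, and vline0_int_entry(reg_num) fills table slot DC_IRQ_SOURCE_DC1_VLINE0 + reg_num, which only works if those enum values are contiguous. A small illustration of the contiguous-enum indexing idiom, with invented values:

/* Illustrative enum: six contiguous VLINE0 sources, as the driver's
 * DC_IRQ_SOURCE_DC1_VLINE0..DC6_VLINE0 are assumed to be. */
enum demo_irq_source {
	DEMO_VLINE0_1 = 40,
	DEMO_VLINE0_2,
	DEMO_VLINE0_3,
	DEMO_VLINE0_4,
	DEMO_VLINE0_5,
	DEMO_VLINE0_6,
};

/* Mirrors vline0_int_entry(): instance n lands in slot base + n, so
 * designated initializers can populate the table per OTG instance. */
#define demo_vline0_slot(reg_num) (DEMO_VLINE0_1 + (reg_num))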
index 1aab2ccfed13b46b0f86f6477e3e31a0821d53b3,6fd178eafb636217a25831c5fb220da1f09caa74..42281fce552e6ee66221b0646cd80eedddb9ca1a
@@@ -451,13 -451,8 +451,8 @@@ struct radeon_surface_reg 
   * TTM.
   */
  struct radeon_mman {
 -      struct ttm_bo_device            bdev;
 +      struct ttm_device               bdev;
        bool                            initialized;
- #if defined(CONFIG_DEBUG_FS)
-       struct dentry                   *vram;
-       struct dentry                   *gtt;
- #endif
  };
  
  struct radeon_bo_list {
@@@ -516,8 -511,6 +511,6 @@@ struct radeon_bo 
  };
  #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
  
- int radeon_gem_debugfs_init(struct radeon_device *rdev);
  /* sub-allocation manager, it has to be protected by another lock.
   * By conception this is a helper for other parts of the driver
   * like the indirect buffer or semaphore, which both have their
@@@ -835,6 -828,7 +828,7 @@@ struct radeon_ib 
  };
  
  struct radeon_ring {
+       struct radeon_device    *rdev;
        struct radeon_bo        *ring_obj;
        volatile uint32_t       *ring;
        unsigned                rptr_offs;
@@@ -1112,9 -1106,6 +1106,6 @@@ struct radeon_cs_packet 
  typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
                                      struct radeon_cs_packet *pkt,
                                      unsigned idx, unsigned reg);
- typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
-                                     struct radeon_cs_packet *pkt);
  
  /*
   * AGP
@@@ -1798,15 -1789,8 +1789,8 @@@ static inline void radeon_mn_unregister
  /*
   * Debugfs
   */
- struct radeon_debugfs {
-       struct drm_info_list    *files;
-       unsigned                num_files;
- };
- int radeon_debugfs_add_files(struct radeon_device *rdev,
-                            struct drm_info_list *files,
-                            unsigned nfiles);
- int radeon_debugfs_fence_init(struct radeon_device *rdev);
+ void radeon_debugfs_fence_init(struct radeon_device *rdev);
+ void radeon_gem_debugfs_init(struct radeon_device *rdev);
  
  /*
   * ASIC ring specific functions.
@@@ -2431,9 -2415,6 +2415,6 @@@ struct radeon_device 
        struct drm_file *cmask_filp;
        /* i2c buses */
        struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
-       /* debugfs */
-       struct radeon_debugfs   debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
-       unsigned                debugfs_count;
        /* virtual memory */
        struct radeon_vm_manager        vm_manager;
        struct mutex                    gpu_clock_mutex;
@@@ -2824,7 -2805,7 +2805,7 @@@ extern int radeon_ttm_tt_set_userptr(st
                                     uint32_t flags);
  extern bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, struct ttm_tt *ttm);
  extern bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, struct ttm_tt *ttm);
 -bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 +bool radeon_ttm_tt_is_bound(struct ttm_device *bdev, struct ttm_tt *ttm);
  extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
  extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
  extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
@@@ -2834,7 -2815,7 +2815,7 @@@ extern void radeon_ttm_set_active_vram_
  extern void radeon_program_register_sequence(struct radeon_device *rdev,
                                             const u32 *registers,
                                             const u32 array_size);
 -struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev);
 +struct radeon_device *radeon_get_rdev(struct ttm_device *bdev);
  
  /* KMS */
  
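Most of the radeon.h churn is the mechanical ttm_bo_device to ttm_device rename in prototypes, plus dropping the per-driver debugfs bookkeeping in favor of void init hooks. The one structural detail worth a sketch is how radeon_get_rdev() can recover the device from an embedded ttm_device; a hedged illustration of that container_of() pattern with simplified stand-in types:

#include <stddef.h>

/* Simplified stand-ins for the real structs. */
struct demo_ttm_device { int dummy; };
struct demo_mman { struct demo_ttm_device bdev; int initialized; };
struct demo_device { struct demo_mman mman; };

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Two hops, as radeon_get_rdev() is assumed to do: from the embedded
 * bdev back to the mman, then from the mman back to the device. */
static struct demo_device *demo_get_rdev(struct demo_ttm_device *bdev)
{
	struct demo_mman *mman =
		demo_container_of(bdev, struct demo_mman, bdev);
	return demo_container_of(mman, struct demo_device, mman);
}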
index 5fc8bae401af2cbe632dfa12c813ba0b86747475,eb7a1c68459f83ceb860ed8bc5c43b872f80d6e2..476ce9c24b9f50e0ea09d58dae009a5ea94e7bc8
@@@ -39,7 -39,6 +39,6 @@@
  #include <linux/swiotlb.h>
  
  #include <drm/drm_agpsupport.h>
- #include <drm/drm_debugfs.h>
  #include <drm/drm_device.h>
  #include <drm/drm_file.h>
  #include <drm/drm_prime.h>
  #include "radeon.h"
  #include "radeon_ttm.h"
  
- static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
- static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
+ static void radeon_ttm_debugfs_init(struct radeon_device *rdev);
  
 -static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
 -                            struct ttm_tt *ttm,
 +static int radeon_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
                              struct ttm_resource *bo_mem);
 -static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
 -                               struct ttm_tt *ttm);
 +static void radeon_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
  
 -struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
 +struct radeon_device *radeon_get_rdev(struct ttm_device *bdev)
  {
        struct radeon_mman *mman;
        struct radeon_device *rdev;
@@@ -278,7 -278,7 +276,7 @@@ out
        return 0;
  }
  
 -static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
 +static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
  {
        struct radeon_device *rdev = radeon_get_rdev(bdev);
        size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
@@@ -345,7 -345,7 +343,7 @@@ struct radeon_ttm_tt 
  };
  
  /* prepare the sg table with the user pages */
 -static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 +static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
  {
        struct radeon_device *rdev = radeon_get_rdev(bdev);
        struct radeon_ttm_tt *gtt = (void *)ttm;
@@@ -406,7 -406,7 +404,7 @@@ release_pages
        return r;
  }
  
 -static void radeon_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 +static void radeon_ttm_tt_unpin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
  {
        struct radeon_device *rdev = radeon_get_rdev(bdev);
        struct radeon_ttm_tt *gtt = (void *)ttm;
@@@ -442,7 -442,7 +440,7 @@@ static bool radeon_ttm_backend_is_bound
        return (gtt->bound);
  }
  
 -static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
 +static int radeon_ttm_backend_bind(struct ttm_device *bdev,
                                   struct ttm_tt *ttm,
                                   struct ttm_resource *bo_mem)
  {
        return 0;
  }
  
 -static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 +static void radeon_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
  {
        struct radeon_ttm_tt *gtt = (void *)ttm;
        struct radeon_device *rdev = radeon_get_rdev(bdev);
        gtt->bound = false;
  }
  
 -static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 +static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
  {
        struct radeon_ttm_tt *gtt = (void *)ttm;
  
@@@ -552,7 -552,7 +550,7 @@@ static struct radeon_ttm_tt *radeon_ttm
        return container_of(ttm, struct radeon_ttm_tt, ttm);
  }
  
 -static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
 +static int radeon_ttm_tt_populate(struct ttm_device *bdev,
                                  struct ttm_tt *ttm,
                                  struct ttm_operation_ctx *ctx)
  {
        return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
  }
  
 -static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 +static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
  {
        struct radeon_device *rdev = radeon_get_rdev(bdev);
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
@@@ -611,7 -611,7 +609,7 @@@ int radeon_ttm_tt_set_userptr(struct ra
        return 0;
  }
  
 -bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev,
 +bool radeon_ttm_tt_is_bound(struct ttm_device *bdev,
                            struct ttm_tt *ttm)
  {
  #if IS_ENABLED(CONFIG_AGP)
        return radeon_ttm_backend_is_bound(ttm);
  }
  
 -static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
 +static int radeon_ttm_tt_bind(struct ttm_device *bdev,
                              struct ttm_tt *ttm,
                              struct ttm_resource *bo_mem)
  {
        return radeon_ttm_backend_bind(bdev, ttm, bo_mem);
  }
  
 -static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
 +static void radeon_ttm_tt_unbind(struct ttm_device *bdev,
                                 struct ttm_tt *ttm)
  {
  #if IS_ENABLED(CONFIG_AGP)
        radeon_ttm_backend_unbind(bdev, ttm);
  }
  
 -static void radeon_ttm_tt_destroy(struct ttm_bo_device *bdev,
 +static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
                                  struct ttm_tt *ttm)
  {
  #if IS_ENABLED(CONFIG_AGP)
@@@ -698,7 -698,7 +696,7 @@@ radeon_bo_delete_mem_notify(struct ttm_
        radeon_bo_move_notify(bo, false, NULL);
  }
  
 -static struct ttm_bo_driver radeon_bo_driver = {
 +static struct ttm_device_funcs radeon_bo_driver = {
        .ttm_tt_create = &radeon_ttm_tt_create,
        .ttm_tt_populate = &radeon_ttm_tt_populate,
        .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
@@@ -716,7 -716,7 +714,7 @@@ int radeon_ttm_init(struct radeon_devic
        int r;
  
        /* No other users of the address space, so set it to 0 */
 -      r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
 +      r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
                               rdev->ddev->anon_inode->i_mapping,
                               rdev->ddev->vma_offset_manager,
                               rdev->need_swiotlb,
        DRM_INFO("radeon: %uM of GTT memory ready.\n",
                 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
  
-       r = radeon_ttm_debugfs_init(rdev);
-       if (r) {
-               DRM_ERROR("Failed to init debugfs\n");
-               return r;
-       }
+       radeon_ttm_debugfs_init(rdev);
        return 0;
  }
  
@@@ -775,7 -772,7 +770,7 @@@ void radeon_ttm_fini(struct radeon_devi
  
        if (!rdev->mman.initialized)
                return;
-       radeon_ttm_debugfs_fini(rdev);
        if (rdev->stolen_vga_memory) {
                r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
                if (r == 0) {
        }
        ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM);
        ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT);
 -      ttm_bo_device_release(&rdev->mman.bdev);
 +      ttm_device_fini(&rdev->mman.bdev);
        radeon_gart_fini(rdev);
        rdev->mman.initialized = false;
        DRM_INFO("radeon: ttm finalized\n");
@@@ -835,7 -832,7 +830,7 @@@ unlock_mclk
        return ret;
  }
  
 -static struct vm_operations_struct radeon_ttm_vm_ops = {
 +static const struct vm_operations_struct radeon_ttm_vm_ops = {
        .fault = radeon_ttm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
@@@ -861,36 -858,38 +856,38 @@@ int radeon_mmap(struct file *filp, stru
  
  #if defined(CONFIG_DEBUG_FS)
  
- static int radeon_mm_dump_table(struct seq_file *m, void *data)
+ static int radeon_mm_vram_dump_table_show(struct seq_file *m, void *unused)
  {
-       struct drm_info_node *node = (struct drm_info_node *)m->private;
-       unsigned ttm_pl = *(int*)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
-       struct radeon_device *rdev = dev->dev_private;
-       struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev, ttm_pl);
+       struct radeon_device *rdev = (struct radeon_device *)m->private;
+       struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev,
+                                                           TTM_PL_VRAM);
        struct drm_printer p = drm_seq_file_printer(m);
  
        man->func->debug(man, &p);
        return 0;
  }
  
- static int radeon_ttm_pool_debugfs(struct seq_file *m, void *data)
+ static int radeon_ttm_page_pool_show(struct seq_file *m, void *data)
  {
-       struct drm_info_node *node = (struct drm_info_node *)m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_device *rdev = (struct radeon_device *)m->private;
  
        return ttm_pool_debugfs(&rdev->mman.bdev.pool, m);
  }
  
- static int ttm_pl_vram = TTM_PL_VRAM;
- static int ttm_pl_tt = TTM_PL_TT;
+ static int radeon_mm_gtt_dump_table_show(struct seq_file *m, void *unused)
+ {
+       struct radeon_device *rdev = (struct radeon_device *)m->private;
+       struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev,
+                                                           TTM_PL_TT);
+       struct drm_printer p = drm_seq_file_printer(m);
+       man->func->debug(man, &p);
+       return 0;
+ }
  
- static struct drm_info_list radeon_ttm_debugfs_list[] = {
-       {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
-       {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
-       {"ttm_page_pool", radeon_ttm_pool_debugfs, 0, NULL}
- };
+ DEFINE_SHOW_ATTRIBUTE(radeon_mm_vram_dump_table);
+ DEFINE_SHOW_ATTRIBUTE(radeon_mm_gtt_dump_table);
+ DEFINE_SHOW_ATTRIBUTE(radeon_ttm_page_pool);
  
  static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
  {
@@@ -924,7 -923,7 +921,7 @@@ static ssize_t radeon_ttm_vram_read(str
                value = RREG32(RADEON_MM_DATA);
                spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
  
-               r = put_user(value, (uint32_t *)buf);
+               r = put_user(value, (uint32_t __user *)buf);
                if (r)
                        return r;
  
@@@ -1000,38 -999,23 +997,23 @@@ static const struct file_operations rad
  
  #endif
  
- static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+ static void radeon_ttm_debugfs_init(struct radeon_device *rdev)
  {
  #if defined(CONFIG_DEBUG_FS)
-       unsigned count;
        struct drm_minor *minor = rdev->ddev->primary;
        struct dentry *root = minor->debugfs_root;
  
-       rdev->mman.vram = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO,
-                                             root, rdev,
-                                             &radeon_ttm_vram_fops);
-       rdev->mman.gtt = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO,
-                                            root, rdev, &radeon_ttm_gtt_fops);
-       count = ARRAY_SIZE(radeon_ttm_debugfs_list);
-       return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
- #else
-       return 0;
- #endif
- }
- static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
- {
- #if defined(CONFIG_DEBUG_FS)
+       debugfs_create_file("radeon_vram", 0444, root, rdev,
+                           &radeon_ttm_vram_fops);
  
-       debugfs_remove(rdev->mman.vram);
-       rdev->mman.vram = NULL;
+       debugfs_create_file("radeon_gtt", 0444, root, rdev,
+                           &radeon_ttm_gtt_fops);
  
-       debugfs_remove(rdev->mman.gtt);
-       rdev->mman.gtt = NULL;
+       debugfs_create_file("radeon_vram_mm", 0444, root, rdev,
+                           &radeon_mm_vram_dump_table_fops);
+       debugfs_create_file("radeon_gtt_mm", 0444, root, rdev,
+                           &radeon_mm_gtt_dump_table_fops);
+       debugfs_create_file("ttm_page_pool", 0444, root, rdev,
+                           &radeon_ttm_page_pool_fops);
  #endif
  }
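The debugfs rework above replaces the drm_info_list plumbing with per-file seq_file callbacks: each show() function receives the radeon_device through m->private, DEFINE_SHOW_ATTRIBUTE() generates the matching _fops, and registration becomes a fire-and-forget debugfs_create_file() with no error to propagate. A hedged sketch of the same pattern for a hypothetical extra file; "radeon_demo" is invented and not part of the driver:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int radeon_demo_show(struct seq_file *m, void *unused)
{
	/* debugfs_create_file() below passes rdev as the private data. */
	struct radeon_device *rdev = m->private;

	seq_printf(m, "mman initialized: %d\n", rdev->mman.initialized);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(radeon_demo);	/* emits radeon_demo_fops */

/* Would be called from radeon_ttm_debugfs_init(); 0444 = read-only,
 * and any debugfs failure is intentionally ignored. */
static void radeon_demo_debugfs_init(struct radeon_device *rdev,
				     struct dentry *root)
{
	debugfs_create_file("radeon_demo", 0444, root, rdev,
			    &radeon_demo_fops);
}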