Git Repo - linux.git/commitdiff
Merge tag 'drm-misc-next-2020-09-21' of git://anongit.freedesktop.org/drm/drm-misc...
author Dave Airlie <[email protected]>
Tue, 22 Sep 2020 23:49:48 +0000 (09:49 +1000)
committer Dave Airlie <[email protected]>
Tue, 22 Sep 2020 23:52:24 +0000 (09:52 +1000)
drm-misc-next for 5.10:

UAPI Changes:

Cross-subsystem Changes:
  - virtio: Merged a PR for patches that will affect drm/virtio

Core Changes:
  - dev: More devm_drm conversions and removal of drm_dev_init
  - atomic: Split drm_atomic_helper_calc_timestamping_constants out of
    drm_atomic_helper_update_legacy_modeset_state
  - ttm: More rework

Driver Changes:
  - i915: selftests improvements
  - panfrost: support for Amlogic SoC
  - vc4: one fix
  - tree-wide: conversions to devm_drm_dev_alloc
  - ast: simplifications of the atomic modesetting code
  - panfrost: multiple fixes
  - vc4: multiple fixes
Signed-off-by: Dave Airlie <[email protected]>
From: Maxime Ripard <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
18 files changed:
drivers/dma-buf/udmabuf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/armada/armada_gem.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
drivers/gpu/drm/xen/xen_drm_front_gem.c
include/drm/drm_prime.h

index 89e293bd9252325ba70b6c00617e82220a11fe73,5ee1e939971056655d8294cab59c46811022a839..db732f71e59aded339b3b0199579864436aa5510
@@@ -63,9 -63,10 +63,9 @@@ static struct sg_table *get_sg_table(st
                                        GFP_KERNEL);
        if (ret < 0)
                goto err;
 -      if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
 -              ret = -EINVAL;
 +      ret = dma_map_sgtable(dev, sg, direction, 0);
 +      if (ret < 0)
                goto err;
 -      }
        return sg;
  
  err:
@@@ -77,7 -78,7 +77,7 @@@
  static void put_sg_table(struct device *dev, struct sg_table *sg,
                         enum dma_data_direction direction)
  {
 -      dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
 +      dma_unmap_sgtable(dev, sg, direction, 0);
        sg_free_table(sg);
        kfree(sg);
  }
@@@ -307,6 -308,9 +307,9 @@@ static long udmabuf_ioctl(struct file *
  static const struct file_operations udmabuf_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = udmabuf_ioctl,
+ #ifdef CONFIG_COMPAT
+       .compat_ioctl   = udmabuf_ioctl,
+ #endif
  };
  
  static struct miscdevice udmabuf_misc = {
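The udmabuf hunks above are part of the tree-wide move from dma_map_sg()/dma_unmap_sg() to the sg_table-based helpers. A minimal sketch of the new calling convention follows; example_dma_roundtrip() is illustrative and not part of the patch:

#include <linux/dma-mapping.h>

/*
 * dma_map_sgtable() returns 0 or a negative errno and records the number
 * of mapped entries inside the sg_table itself, so callers check for an
 * error code instead of comparing dma_map_sg()'s return value against 0.
 */
static int example_dma_roundtrip(struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, dir, 0);
	if (ret)
		return ret;	/* e.g. -EINVAL or -ENOMEM */

	/* ... the device performs DMA against the mapped table ... */

	dma_unmap_sgtable(dev, sgt, dir, 0);
	return 0;
}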
index a4b518211b1f83f7a7b5f5f1347c49f999f51b14,3ded6f43f9826b4b364d3fdee29549f1d2e10cee..81e4cf869f50abbe0f297b849323079e0f245205
@@@ -32,6 -32,7 +32,6 @@@
  #include <drm/drm_pciids.h>
  #include <linux/console.h>
  #include <linux/module.h>
 -#include <linux/pci.h>
  #include <linux/pm_runtime.h>
  #include <linux/vga_switcheroo.h>
  #include <drm/drm_probe_helper.h>
@@@ -1072,16 -1073,8 +1072,16 @@@ static const struct pci_device_id pciid
        {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
  
        /* Navi12 */
 -      {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
 -      {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
 +      {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
 +      {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
 +
 +      /* Sienna_Cichlid */
 +      {0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 +      {0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 +      {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 +      {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 +      {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 +      {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
  
        {0, 0, 0}
  };
@@@ -1109,16 -1102,6 +1109,16 @@@ static int amdgpu_pci_probe(struct pci_
                return -ENODEV;
        }
  
 +      /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
 +       * however, SME requires an indirect IOMMU mapping because the encryption
 +       * bit is beyond the DMA mask of the chip.
 +       */
 +      if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
 +              dev_info(&pdev->dev,
 +                       "SME is not compatible with RAVEN\n");
 +              return -ENOTSUPP;
 +      }
 +
  #ifdef CONFIG_DRM_AMDGPU_SI
        if (!amdgpu_si_support) {
                switch (flags & AMD_ASIC_MASK) {
        if (ret)
                return ret;
  
-       adev = kzalloc(sizeof(*adev), GFP_KERNEL);
-       if (!adev)
-               return -ENOMEM;
+       adev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*adev), ddev);
+       if (IS_ERR(adev))
+               return PTR_ERR(adev);
  
        adev->dev  = &pdev->dev;
        adev->pdev = pdev;
        ddev = adev_to_drm(adev);
-       ret = drm_dev_init(ddev, &kms_driver, &pdev->dev);
-       if (ret)
-               goto err_free;
-       drmm_add_final_kfree(ddev, adev);
  
        if (!supports_atomic)
                ddev->driver_features &= ~DRIVER_ATOMIC;
  
        ret = pci_enable_device(pdev);
        if (ret)
-               goto err_free;
+               return ret;
  
        ddev->pdev = pdev;
        pci_set_drvdata(pdev, ddev);
@@@ -1205,8 -1183,6 +1200,6 @@@ retry_init
  
  err_pci:
        pci_disable_device(pdev);
- err_free:
-       drm_dev_put(ddev);
        return ret;
  }
  
@@@ -1223,7 -1199,6 +1216,6 @@@ amdgpu_pci_remove(struct pci_dev *pdev
        amdgpu_driver_unload_kms(dev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
-       drm_dev_put(dev);
  }
  
  static void
@@@ -1333,7 -1308,7 +1325,7 @@@ static int amdgpu_pmops_runtime_suspend
                if (amdgpu_is_atpx_hybrid()) {
                        pci_ignore_hotplug(pdev);
                } else {
 -                      pci_save_state(pdev);
 +                      amdgpu_device_cache_pci_state(pdev);
                        pci_disable_device(pdev);
                        pci_ignore_hotplug(pdev);
                        pci_set_power_state(pdev, PCI_D3cold);
@@@ -1366,7 -1341,7 +1358,7 @@@ static int amdgpu_pmops_runtime_resume(
                        pci_set_master(pdev);
                } else {
                        pci_set_power_state(pdev, PCI_D0);
 -                      pci_restore_state(pdev);
 +                      amdgpu_device_load_pci_state(pdev);
                        ret = pci_enable_device(pdev);
                        if (ret)
                                return ret;
@@@ -1545,13 -1520,6 +1537,13 @@@ static struct drm_driver kms_driver = 
        .patchlevel = KMS_DRIVER_PATCHLEVEL,
  };
  
 +static struct pci_error_handlers amdgpu_pci_err_handler = {
 +      .error_detected = amdgpu_pci_error_detected,
 +      .mmio_enabled   = amdgpu_pci_mmio_enabled,
 +      .slot_reset     = amdgpu_pci_slot_reset,
 +      .resume         = amdgpu_pci_resume,
 +};
 +
  static struct pci_driver amdgpu_kms_pci_driver = {
        .name = DRIVER_NAME,
        .id_table = pciidlist,
        .remove = amdgpu_pci_remove,
        .shutdown = amdgpu_pci_shutdown,
        .driver.pm = &amdgpu_pm_ops,
 +      .err_handler = &amdgpu_pci_err_handler,
  };
  
  static int __init amdgpu_init(void)
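The amdgpu_drv.c hunks above replace the kzalloc() + drm_dev_init() + drmm_add_final_kfree() sequence with devm_drm_dev_alloc(), the pattern the "devm_drm conversions" item in the tag message refers to. A minimal sketch of that pattern, with struct my_gpu, my_drm_driver and my_pci_probe() as illustrative stand-ins:

#include <linux/err.h>
#include <linux/pci.h>
#include <drm/drm_drv.h>

struct my_gpu {
	struct drm_device drm;		/* drm_device must be embedded */
	/* driver-private state ... */
};

static struct drm_driver my_drm_driver;	/* assumed to be filled in elsewhere */

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct my_gpu *gpu;

	/* Allocates my_gpu, initializes the embedded drm_device and ties its
	 * lifetime to the PCI device, so error paths need no drm_dev_put(). */
	gpu = devm_drm_dev_alloc(&pdev->dev, &my_drm_driver,
				 struct my_gpu, drm);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	/* ... hardware setup, then drm_dev_register(&gpu->drm, 0) ... */
	return 0;
}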
index 4a85f8cedd77c3d9dc85c6969dd1351605ff66f4,e7b67dc330a4a160aaf233796ece6074ce0e2d4c..8039d239958466989b6626187584867f14c9f58a
  
  #define AMDGPU_TTM_VRAM_MAX_DW_READ   (size_t)128
  
+ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+                                  struct ttm_tt *ttm,
+                                  struct ttm_resource *bo_mem);
  static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
                                    unsigned int type,
                                    uint64_t size)
  {
        return ttm_range_man_init(&adev->mman.bdev, type,
-                                 TTM_PL_FLAG_UNCACHED, TTM_PL_FLAG_UNCACHED,
                                  false, size >> PAGE_SHIFT);
  }
  
@@@ -88,7 -91,8 +91,8 @@@ static void amdgpu_evict_flags(struct t
        static const struct ttm_place placements = {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+               .mem_type = TTM_PL_SYSTEM,
+               .flags = TTM_PL_MASK_CACHING
        };
  
        /* Don't handle scatter gather BOs */
@@@ -174,24 -178,6 +178,6 @@@ static int amdgpu_verify_access(struct 
                                          filp->private_data);
  }
  
- /**
-  * amdgpu_move_null - Register memory for a buffer object
-  *
-  * @bo: The bo to assign the memory to
-  * @new_mem: The memory to be assigned.
-  *
-  * Assign the memory from new_mem to the memory of the buffer object bo.
-  */
- static void amdgpu_move_null(struct ttm_buffer_object *bo,
-                            struct ttm_resource *new_mem)
- {
-       struct ttm_resource *old_mem = &bo->mem;
-       BUG_ON(old_mem->mm_node != NULL);
-       *old_mem = *new_mem;
-       new_mem->mm_node = NULL;
- }
  /**
   * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
   *
@@@ -514,9 -500,9 +500,9 @@@ static int amdgpu_move_blit(struct ttm_
  
        /* Always block for VM page tables before committing the new location */
        if (bo->type == ttm_bo_type_kernel)
-               r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
+               r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
        else
-               r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
+               r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
        dma_fence_put(fence);
        return r;
  
@@@ -551,7 -537,8 +537,8 @@@ static int amdgpu_move_vram_ram(struct 
        placement.busy_placement = &placements;
        placements.fpfn = 0;
        placements.lpfn = 0;
-       placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+       placements.mem_type = TTM_PL_TT;
+       placements.flags = TTM_PL_MASK_CACHING;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
        if (unlikely(r)) {
                pr_err("Failed to find GTT space for blit from VRAM\n");
                goto out_cleanup;
        }
  
+       r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+       if (unlikely(r))
+               goto out_cleanup;
        /* Bind the memory to the GTT space */
-       r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
+       r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@@ -607,7 -598,8 +598,8 @@@ static int amdgpu_move_ram_vram(struct 
        placement.busy_placement = &placements;
        placements.fpfn = 0;
        placements.lpfn = 0;
-       placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+       placements.mem_type = TTM_PL_TT;
+       placements.flags = TTM_PL_MASK_CACHING;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
        if (unlikely(r)) {
                pr_err("Failed to find GTT space for blit to VRAM\n");
@@@ -676,7 -668,7 +668,7 @@@ static int amdgpu_bo_move(struct ttm_bu
        adev = amdgpu_ttm_adev(bo->bdev);
  
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
-               amdgpu_move_null(bo, new_mem);
+               ttm_bo_move_null(bo, new_mem);
                return 0;
        }
        if ((old_mem->mem_type == TTM_PL_TT &&
            (old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT)) {
                /* bind is enough */
-               amdgpu_move_null(bo, new_mem);
+               ttm_bo_move_null(bo, new_mem);
                return 0;
        }
        if (old_mem->mem_type == AMDGPU_PL_GDS ||
            new_mem->mem_type == AMDGPU_PL_GWS ||
            new_mem->mem_type == AMDGPU_PL_OA) {
                /* Nothing to save here */
-               amdgpu_move_null(bo, new_mem);
+               ttm_bo_move_null(bo, new_mem);
                return 0;
        }
  
@@@ -773,7 -765,7 +765,7 @@@ static int amdgpu_ttm_io_mem_reserve(st
                        mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
                                        mem->bus.offset;
  
-               mem->bus.base = adev->gmc.aper_base;
+               mem->bus.offset += adev->gmc.aper_base;
                mem->bus.is_iomem = true;
                break;
        default:
  static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                           unsigned long page_offset)
  {
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        uint64_t offset = (page_offset << PAGE_SHIFT);
        struct drm_mm_node *mm;
  
        mm = amdgpu_find_mm_node(&bo->mem, &offset);
-       return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
-               (offset >> PAGE_SHIFT);
+       offset += adev->gmc.aper_base;
+       return mm->start + (offset >> PAGE_SHIFT);
  }
  
  /**
@@@ -824,6 -817,7 +817,7 @@@ struct amdgpu_ttm_tt 
        uint64_t                userptr;
        struct task_struct      *usertask;
        uint32_t                userflags;
+       bool                    bound;
  #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
        struct hmm_range        *range;
  #endif
@@@ -991,9 -985,10 +985,10 @@@ void amdgpu_ttm_tt_set_user_pages(struc
   *
   * Called by amdgpu_ttm_backend_bind()
   **/
- static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+                                    struct ttm_tt *ttm)
  {
-       struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        int r;
  
  
  release_sg:
        kfree(ttm->sg);
 +      ttm->sg = NULL;
        return r;
  }
  
  /**
   * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
   */
- static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+                                       struct ttm_tt *ttm)
  {
-       struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
  
        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
@@@ -1111,16 -1106,23 +1107,23 @@@ gart_bind_fail
   * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
   * This handles binding GTT memory to the device address space.
   */
- static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+                                  struct ttm_tt *ttm,
                                   struct ttm_resource *bo_mem)
  {
-       struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void*)ttm;
        uint64_t flags;
        int r = 0;
  
+       if (!bo_mem)
+               return -EINVAL;
+       if (gtt->bound)
+               return 0;
        if (gtt->userptr) {
-               r = amdgpu_ttm_tt_pin_userptr(ttm);
+               r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
                if (r) {
                        DRM_ERROR("failed to pin userptr\n");
                        return r;
        if (r)
                DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
                          ttm->num_pages, gtt->offset);
+       gtt->bound = true;
        return r;
  }
  
  /**
 - * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
 + * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
 + * through AGP or GART aperture.
 + *
 + * If bo is accessible through AGP aperture, then use AGP aperture
 + * to access bo; otherwise allocate logical space in GART aperture
 + * and map bo to GART aperture.
   */
  int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
  {
                placement.busy_placement = &placements;
                placements.fpfn = 0;
                placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
-               placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
-                       TTM_PL_FLAG_TT;
+               placements.mem_type = TTM_PL_TT;
+               placements.flags = bo->mem.placement;
  
                r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
                if (unlikely(r))
@@@ -1243,15 -1241,19 +1247,19 @@@ int amdgpu_ttm_recover_gart(struct ttm_
   * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
   * ttm_tt_destroy().
   */
- static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
+ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+                                     struct ttm_tt *ttm)
  {
-       struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        int r;
  
+       if (!gtt->bound)
+               return;
        /* if the pages have userptr pinning then clear that first */
        if (gtt->userptr)
-               amdgpu_ttm_tt_unpin_userptr(ttm);
+               amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
  
        if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
                return;
        if (r)
                DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
                          gtt->ttm.ttm.num_pages, gtt->offset);
+       gtt->bound = false;
  }
  
- static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
+ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+                                      struct ttm_tt *ttm)
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
  
+       amdgpu_ttm_backend_unbind(bdev, ttm);
+       ttm_tt_destroy_common(bdev, ttm);
        if (gtt->usertask)
                put_task_struct(gtt->usertask);
  
        kfree(gtt);
  }
  
- static struct ttm_backend_func amdgpu_backend_func = {
-       .bind = &amdgpu_ttm_backend_bind,
-       .unbind = &amdgpu_ttm_backend_unbind,
-       .destroy = &amdgpu_ttm_backend_destroy,
- };
  /**
   * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
   *
@@@ -1296,7 -1296,6 +1302,6 @@@ static struct ttm_tt *amdgpu_ttm_tt_cre
        if (gtt == NULL) {
                return NULL;
        }
-       gtt->ttm.ttm.func = &amdgpu_backend_func;
        gtt->gobj = &bo->base;
  
        /* allocate space for the uninitialized page entries */
   * Map the pages of a ttm_tt object to an address space visible
   * to the underlying device.
   */
- static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
-                       struct ttm_operation_ctx *ctx)
+ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+                                 struct ttm_tt *ttm,
+                                 struct ttm_operation_ctx *ctx)
  {
-       struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
  
        /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
                        return -ENOMEM;
  
                ttm->page_flags |= TTM_PAGE_FLAG_SG;
-               ttm->state = tt_unbound;
+               ttm_tt_set_populated(ttm);
                return 0;
        }
  
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 gtt->ttm.dma_address,
                                                 ttm->num_pages);
-               ttm->state = tt_unbound;
+               ttm_tt_set_populated(ttm);
                return 0;
        }
  
   * Unmaps pages of a ttm_tt object from the device address space and
   * unpopulates the page array backing it.
   */
- static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+ static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        struct amdgpu_device *adev;
        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;
  
-       adev = amdgpu_ttm_adev(ttm->bdev);
+       adev = amdgpu_ttm_adev(bdev);
  
  #ifdef CONFIG_SWIOTLB
        if (adev->need_swiotlb && swiotlb_nr_tbl()) {
@@@ -1697,6 -1697,9 +1703,9 @@@ static struct ttm_bo_driver amdgpu_bo_d
        .ttm_tt_create = &amdgpu_ttm_tt_create,
        .ttm_tt_populate = &amdgpu_ttm_tt_populate,
        .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
+       .ttm_tt_bind = &amdgpu_ttm_backend_bind,
+       .ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
+       .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
        .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
        .evict_flags = &amdgpu_evict_flags,
        .move = &amdgpu_bo_move,
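The amdgpu_ttm.c hunks above reflect the TTM rework: the per-tt ttm_backend_func table is removed, the bind/unbind/destroy callbacks move into ttm_bo_driver and now take the ttm_bo_device explicitly, and drivers track their own bind state. A minimal sketch of such a bind callback, where struct my_tt and my_gart_bind() are illustrative:

#include <linux/errno.h>
#include <drm/ttm/ttm_bo_driver.h>

struct my_tt {
	struct ttm_dma_tt ttm;
	bool bound;		/* driver-side bind tracking, as amdgpu adds above */
};

static int my_gart_bind(struct ttm_bo_device *bdev, struct my_tt *gtt,
			struct ttm_resource *bo_mem);	/* illustrative page-table setup */

static int my_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			  struct ttm_resource *bo_mem)
{
	struct my_tt *gtt = container_of(ttm, struct my_tt, ttm.ttm);
	int r;

	if (!bo_mem)
		return -EINVAL;
	if (gtt->bound)		/* bind may now be called more than once */
		return 0;

	r = my_gart_bind(bdev, gtt, bo_mem);
	if (r)
		return r;

	gtt->bound = true;
	return 0;
}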
index 57738164625b40df75096a27f1947ae7243a7b67,b24c14bfab31d8db7b166c3c18b0da9c7bc2395b..bb1bc7f5d149322bb080c93d2c4ef6e0823f384d
@@@ -228,14 -228,17 +228,14 @@@ static u32 dm_vblank_get_counter(struc
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
 -              struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
 -                              acrtc->base.state);
  
 -
 -              if (acrtc_state->stream == NULL) {
 +              if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }
  
 -              return dc_stream_get_vblank_counter(acrtc_state->stream);
 +              return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
  }
  
@@@ -248,8 -251,10 +248,8 @@@ static int dm_crtc_get_scanoutpos(struc
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
 -              struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
 -                                              acrtc->base.state);
  
 -              if (acrtc_state->stream ==  NULL) {
 +              if (acrtc->dm_irq_params.stream ==  NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
 -              dc_stream_get_scanoutpos(acrtc_state->stream,
 +              dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
@@@ -318,14 -323,6 +318,14 @@@ get_crtc_by_otg_inst(struct amdgpu_devi
        return NULL;
  }
  
 +static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
 +{
 +      return acrtc->dm_irq_params.freesync_config.state ==
 +                     VRR_STATE_ACTIVE_VARIABLE ||
 +             acrtc->dm_irq_params.freesync_config.state ==
 +                     VRR_STATE_ACTIVE_FIXED;
 +}
 +
  static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
  {
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
@@@ -346,6 -343,7 +346,6 @@@ static void dm_pflip_high_irq(void *int
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
 -      struct dm_crtc_state *acrtc_state;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;
  
        if (!e)
                WARN_ON(1);
  
 -      acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
 -      vrr_active = amdgpu_dm_vrr_active(acrtc_state);
 +      vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
  
        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
 -          !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
 +          !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
 -      amdgpu_crtc->last_flip_vblank =
 +      amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
  
        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
@@@ -442,17 -441,17 +442,17 @@@ static void dm_vupdate_high_irq(void *i
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
 -      struct dm_crtc_state *acrtc_state;
        unsigned long flags;
 +      int vrr_active;
  
        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
  
        if (acrtc) {
 -              acrtc_state = to_dm_crtc_state(acrtc->base.state);
 +              vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
  
                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
 -                            amdgpu_dm_vrr_active(acrtc_state));
 +                            vrr_active);
  
                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
 -              if (amdgpu_dm_vrr_active(acrtc_state)) {
 +              if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);
  
                        /* BTR processing for pre-DCE12 ASICs */
 -                      if (acrtc_state->stream &&
 +                      if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
 -                                  acrtc_state->stream,
 -                                  &acrtc_state->vrr_params);
 +                                  acrtc->dm_irq_params.stream,
 +                                  &acrtc->dm_irq_params.vrr_params);
  
                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
 -                                  acrtc_state->stream,
 -                                  &acrtc_state->vrr_params.adjust);
 +                                  acrtc->dm_irq_params.stream,
 +                                  &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
@@@ -494,17 -493,18 +494,17 @@@ static void dm_crtc_high_irq(void *inte
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
 -      struct dm_crtc_state *acrtc_state;
        unsigned long flags;
 +      int vrr_active;
  
        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;
  
 -      acrtc_state = to_dm_crtc_state(acrtc->base.state);
 +      vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
  
        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
 -                       amdgpu_dm_vrr_active(acrtc_state),
 -                       acrtc_state->active_planes);
 +                    vrr_active, acrtc->dm_irq_params.active_planes);
  
        /**
         * Core vblank handling at start of front-porch is only possible
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
 -      if (!amdgpu_dm_vrr_active(acrtc_state))
 +      if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);
  
        /**
  
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
  
 -      if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
 -          acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
 +      if (acrtc->dm_irq_params.stream &&
 +          acrtc->dm_irq_params.vrr_params.supported &&
 +          acrtc->dm_irq_params.freesync_config.state ==
 +                  VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
 -                                           acrtc_state->stream,
 -                                           &acrtc_state->vrr_params);
 +                                           acrtc->dm_irq_params.stream,
 +                                           &acrtc->dm_irq_params.vrr_params);
  
 -              dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
 -                                         &acrtc_state->vrr_params.adjust);
 +              dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
 +                                         &acrtc->dm_irq_params.vrr_params.adjust);
        }
  
        /*
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
 -          acrtc_state->active_planes == 0) {
 +          acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
@@@ -880,45 -878,6 +880,45 @@@ static int dm_dmub_hw_init(struct amdgp
        return 0;
  }
  
 +static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
 +                                                         struct drm_atomic_state *state)
 +{
 +      struct drm_connector *connector;
 +      struct drm_crtc *crtc;
 +      struct amdgpu_dm_connector *amdgpu_dm_connector;
 +      struct drm_connector_state *conn_state;
 +      struct dm_crtc_state *acrtc_state;
 +      struct drm_crtc_state *crtc_state;
 +      struct dc_stream_state *stream;
 +      struct drm_device *dev = adev_to_drm(adev);
 +
 +      list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 +
 +              amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
 +              conn_state = connector->state;
 +
 +              if (!(conn_state && conn_state->crtc))
 +                      continue;
 +
 +              crtc = conn_state->crtc;
 +              acrtc_state = to_dm_crtc_state(crtc->state);
 +
 +              if (!(acrtc_state && acrtc_state->stream))
 +                      continue;
 +
 +              stream = acrtc_state->stream;
 +
 +              if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
 +                  amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
 +                  amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
 +                  amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
 +                      conn_state = drm_atomic_get_connector_state(state, connector);
 +                      crtc_state = drm_atomic_get_crtc_state(state, crtc);
 +                      crtc_state->mode_changed = true;
 +              }
 +      }
 +}
 +
  static int amdgpu_dm_init(struct amdgpu_device *adev)
  {
        struct dc_init_data init_data;
@@@ -1466,6 -1425,9 +1466,6 @@@ static int dm_late_init(void *handle
        struct dmcu *dmcu = NULL;
        bool ret = true;
  
 -      if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
 -              return detect_mst_link_for_all_connectors(adev_to_drm(adev));
 -
        dmcu = adev->dm.dc->res_pool->dmcu;
  
        for (i = 0; i < 16; i++)
@@@ -3411,6 -3373,9 +3411,6 @@@ static int amdgpu_dm_initialize_drm_dev
                goto fail;
        }
  
 -      /* No userspace support. */
 -      dm->dc->debug.disable_tri_buf = true;
 -
        return 0;
  fail:
        kfree(aencoder);
@@@ -4724,10 -4689,9 +4724,10 @@@ create_stream_for_sink(struct amdgpu_dm
                                                             dc_link_get_link_cap(aconnector->dc_link));
  
  #if defined(CONFIG_DRM_AMD_DC_DCN)
 -              if (dsc_caps.is_dsc_supported) {
 +              if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
                        /* Set DSC policy according to dsc_clock_en */
 -                      dc_dsc_policy_set_enable_dsc_when_not_needed(aconnector->dsc_settings.dsc_clock_en);
 +                      dc_dsc_policy_set_enable_dsc_when_not_needed(
 +                              aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
  
                        if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
                                                  &dsc_caps,
                                                  &stream->timing.dsc_cfg))
                                stream->timing.flags.DSC = 1;
                        /* Overwrite the stream flag if DSC is enabled through debugfs */
 -                      if (aconnector->dsc_settings.dsc_clock_en)
 +                      if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
                                stream->timing.flags.DSC = 1;
  
 -                      if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_width)
 -                              stream->timing.dsc_cfg.num_slices_h = DIV_ROUND_UP(stream->timing.h_addressable,
 -                                                                      aconnector->dsc_settings.dsc_slice_width);
 +                      if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
 +                              stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
  
 -                      if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_height)
 -                              stream->timing.dsc_cfg.num_slices_v = DIV_ROUND_UP(stream->timing.v_addressable,
 -                                                                      aconnector->dsc_settings.dsc_slice_height);
 +                      if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
 +                              stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
  
                        if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
                                stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
@@@ -4843,6 -4809,7 +4843,6 @@@ dm_crtc_duplicate_state(struct drm_crt
        }
  
        state->active_planes = cur->active_planes;
 -      state->vrr_params = cur->vrr_params;
        state->vrr_infopacket = cur->vrr_infopacket;
        state->abm_level = cur->abm_level;
        state->vrr_supported = cur->vrr_supported;
@@@ -5460,6 -5427,19 +5460,6 @@@ static void dm_crtc_helper_disable(stru
  {
  }
  
 -static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
 -{
 -      struct drm_device *dev = new_crtc_state->crtc->dev;
 -      struct drm_plane *plane;
 -
 -      drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
 -              if (plane->type == DRM_PLANE_TYPE_CURSOR)
 -                      return true;
 -      }
 -
 -      return false;
 -}
 -
  static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
  {
        struct drm_atomic_state *state = new_crtc_state->state;
@@@ -5523,20 -5503,19 +5523,20 @@@ static int dm_crtc_helper_atomic_check(
                return ret;
        }
  
 -      /* In some use cases, like reset, no stream is attached */
 -      if (!dm_crtc_state->stream)
 -              return 0;
 -
        /*
 -       * We want at least one hardware plane enabled to use
 -       * the stream with a cursor enabled.
 +       * We require the primary plane to be enabled whenever the CRTC is, otherwise
 +       * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
 +       * planes are disabled, which is not supported by the hardware. And there is legacy
 +       * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
         */
 -      if (state->enable && state->active &&
 -          does_crtc_have_active_cursor(state) &&
 -          dm_crtc_state->active_planes == 0)
 +      if (state->enable &&
 +          !(state->plane_mask & drm_plane_mask(crtc->primary)))
                return -EINVAL;
  
 +      /* In some use cases, like reset, no stream is attached */
 +      if (!dm_crtc_state->stream)
 +              return 0;
 +
        if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
                return 0;
  
@@@ -6883,7 -6862,6 +6883,7 @@@ static void update_freesync_state_on_st
        struct mod_vrr_params vrr_params;
        struct dc_info_packet vrr_infopacket = {0};
        struct amdgpu_device *adev = dm->adev;
 +      struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
        unsigned long flags;
  
        if (!new_stream)
                return;
  
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 -      vrr_params = new_crtc_state->vrr_params;
 +        vrr_params = acrtc->dm_irq_params.vrr_params;
  
        if (surface) {
                mod_freesync_handle_preflip(
                &vrr_infopacket);
  
        new_crtc_state->freesync_timing_changed |=
 -              (memcmp(&new_crtc_state->vrr_params.adjust,
 +              (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
                        &vrr_params.adjust,
                        sizeof(vrr_params.adjust)) != 0);
  
                        &vrr_infopacket,
                        sizeof(vrr_infopacket)) != 0);
  
 -      new_crtc_state->vrr_params = vrr_params;
 +      acrtc->dm_irq_params.vrr_params = vrr_params;
        new_crtc_state->vrr_infopacket = vrr_infopacket;
  
 -      new_stream->adjust = new_crtc_state->vrr_params.adjust;
 +      new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
        new_stream->vrr_infopacket = vrr_infopacket;
  
        if (new_crtc_state->freesync_vrr_info_changed)
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
  }
  
 -static void pre_update_freesync_state_on_stream(
 +static void update_stream_irq_parameters(
        struct amdgpu_display_manager *dm,
        struct dm_crtc_state *new_crtc_state)
  {
        struct mod_vrr_params vrr_params;
        struct mod_freesync_config config = new_crtc_state->freesync_config;
        struct amdgpu_device *adev = dm->adev;
 +      struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
        unsigned long flags;
  
        if (!new_stream)
                return;
  
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 -      vrr_params = new_crtc_state->vrr_params;
 +      vrr_params = acrtc->dm_irq_params.vrr_params;
  
        if (new_crtc_state->vrr_supported &&
            config.min_refresh_in_uhz &&
                                      &config, &vrr_params);
  
        new_crtc_state->freesync_timing_changed |=
 -              (memcmp(&new_crtc_state->vrr_params.adjust,
 -                      &vrr_params.adjust,
 -                      sizeof(vrr_params.adjust)) != 0);
 +              (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
 +                      &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
  
 -      new_crtc_state->vrr_params = vrr_params;
 +      new_crtc_state->freesync_config = config;
 +      /* Copy state for access from DM IRQ handler */
 +      acrtc->dm_irq_params.freesync_config = config;
 +      acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
 +      acrtc->dm_irq_params.vrr_params = vrr_params;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
  }
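The rename from pre_update_freesync_state_on_stream() to update_stream_irq_parameters() above captures the design change in amdgpu_dm: everything the vblank/vupdate/pflip handlers need is copied into the per-CRTC dm_irq_params block under the event lock, so the IRQ paths no longer dereference atomic CRTC state that commit may swap out. A condensed sketch of that copy step, using the amdgpu_dm types shown in the hunks (my_update_irq_params() itself is illustrative):

static void my_update_irq_params(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 const struct dm_crtc_state *new_state)
{
	unsigned long flags;

	/* Same event_lock the IRQ handlers take before reading these fields. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	acrtc->dm_irq_params.freesync_config = new_state->freesync_config;
	acrtc->dm_irq_params.active_planes = new_state->active_planes;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}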
  
@@@ -7223,7 -7197,7 +7223,7 @@@ static void amdgpu_dm_commit_planes(str
                         * on late submission of flips.
                         */
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
 -                      last_flip_vblank = acrtc_attach->last_flip_vblank;
 +                      last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }
  
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
                        dc_stream_adjust_vmin_vmax(
                                dm->dc, acrtc_state->stream,
 -                              &acrtc_state->vrr_params.adjust);
 +                              &acrtc_attach->dm_irq_params.vrr_params.adjust);
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }
                mutex_lock(&dm->dc_lock);
@@@ -7457,6 -7431,34 +7457,6 @@@ static int amdgpu_dm_atomic_commit(stru
                                   struct drm_atomic_state *state,
                                   bool nonblock)
  {
 -      struct drm_crtc *crtc;
 -      struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 -      struct amdgpu_device *adev = drm_to_adev(dev);
 -      int i;
 -
 -      /*
 -       * We evade vblank and pflip interrupts on CRTCs that are undergoing
 -       * a modeset, being disabled, or have no active planes.
 -       *
 -       * It's done in atomic commit rather than commit tail for now since
 -       * some of these interrupt handlers access the current CRTC state and
 -       * potentially the stream pointer itself.
 -       *
 -       * Since the atomic state is swapped within atomic commit and not within
 -       * commit tail this would leave to new state (that hasn't been committed yet)
 -       * being accesssed from within the handlers.
 -       *
 -       * TODO: Fix this so we can do this in commit tail and not have to block
 -       * in atomic check.
 -       */
 -      for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 -              struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 -
 -              if (old_crtc_state->active &&
 -                  (!new_crtc_state->active ||
 -                   drm_atomic_crtc_needs_modeset(new_crtc_state)))
 -                      manage_dm_interrupts(adev, acrtc, false);
 -      }
        /*
         * Add check here for SoC's that support hardware cursor plane, to
         * unset legacy_cursor_update
@@@ -7494,6 -7496,7 +7494,7 @@@ static void amdgpu_dm_atomic_commit_tai
        bool mode_set_reset_required = false;
  
        drm_atomic_helper_update_legacy_modeset_state(dev, state);
+       drm_atomic_helper_calc_timestamping_constants(state);
  
        dm_state = dm_atomic_get_new_state(state);
        if (dm_state && dm_state->context) {
                dc_resource_state_copy_construct_current(dm->dc, dc_state);
        }
  
 +      for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
 +                                     new_crtc_state, i) {
 +              struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 +
 +              dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 +
 +              if (old_crtc_state->active &&
 +                  (!new_crtc_state->active ||
 +                   drm_atomic_crtc_needs_modeset(new_crtc_state))) {
 +                      manage_dm_interrupts(adev, acrtc, false);
 +                      dc_stream_release(dm_old_crtc_state->stream);
 +              }
 +      }
 +
        /* update changed items */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
                        if (!status)
                                status = dc_stream_get_status_from_state(dc_state,
                                                                         dm_new_crtc_state->stream);
 -
                        if (!status)
                                DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
                        else
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
  
 -              /* Update freesync active state. */
 -              pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
 +              /* For freesync config update on crtc state and params for irq */
 +              update_stream_irq_parameters(dm, dm_new_crtc_state);
  
                /* Handle vrr on->off / off->on transitions */
                amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
  
 +              dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 +
                if (new_crtc_state->active &&
                    (!old_crtc_state->active ||
                     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
 +                      dc_stream_retain(dm_new_crtc_state->stream);
 +                      acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
                        manage_dm_interrupts(adev, acrtc, true);
 +
  #ifdef CONFIG_DEBUG_FS
                        /**
                         * Frontend may have changed so reapply the CRC capture
@@@ -8009,6 -7994,8 +8010,6 @@@ static void reset_freesync_config_for_c
  {
        new_crtc_state->vrr_supported = false;
  
 -      memset(&new_crtc_state->vrr_params, 0,
 -             sizeof(new_crtc_state->vrr_params));
        memset(&new_crtc_state->vrr_infopacket, 0,
               sizeof(new_crtc_state->vrr_infopacket));
  }
@@@ -8579,8 -8566,6 +8580,8 @@@ static int amdgpu_dm_atomic_check(struc
        int ret, i;
        bool lock_and_validation_needed = false;
  
 +      amdgpu_check_debugfs_connector_property_change(adev, state);
 +
        ret = drm_atomic_helper_check_modeset(dev, state);
        if (ret)
                goto fail;
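The one-line addition of drm_atomic_helper_calc_timestamping_constants() in amdgpu_dm_atomic_commit_tail() above corresponds to the core change in the tag message: the calculation was split out of drm_atomic_helper_update_legacy_modeset_state(), so drivers that use the legacy-state helper now call both. A minimal sketch of a commit tail doing so (my_commit_tail() is illustrative):

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static void my_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	/* The legacy modeset-state update no longer refreshes vblank
	 * timestamping constants, so that step is now explicit. */
	drm_atomic_helper_update_legacy_modeset_state(dev, state);
	drm_atomic_helper_calc_timestamping_constants(state);

	/* ... program hardware, commit planes/CRTCs, signal completion ... */
}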
index a63008ce284de3cbc915eaeb4818baaa0310f3a0,ecf8a55e93d9338fd6b4eb11d1b774ab8512bb11..6654bccd9466589b86b84808c1fd681a185b803c
@@@ -39,7 -39,7 +39,7 @@@ static size_t roundup_gem_size(size_t s
  void armada_gem_free_object(struct drm_gem_object *obj)
  {
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);
-       struct armada_private *priv = obj->dev->dev_private;
+       struct armada_private *priv = drm_to_armada_dev(obj->dev);
  
        DRM_DEBUG_DRIVER("release obj %p\n", dobj);
  
@@@ -77,7 -77,7 +77,7 @@@
  int
  armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
  {
-       struct armada_private *priv = dev->dev_private;
+       struct armada_private *priv = drm_to_armada_dev(dev);
        size_t size = obj->obj.size;
  
        if (obj->page || obj->linear)
@@@ -379,7 -379,7 +379,7 @@@ armada_gem_prime_map_dma_buf(struct dma
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);
        struct scatterlist *sg;
        struct sg_table *sgt;
 -      int i, num;
 +      int i;
  
        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
  
                mapping = dobj->obj.filp->f_mapping;
  
 -              for_each_sg(sgt->sgl, sg, count, i) {
 +              for_each_sgtable_sg(sgt, sg, i) {
                        struct page *page;
  
                        page = shmem_read_mapping_page(mapping, i);
 -                      if (IS_ERR(page)) {
 -                              num = i;
 +                      if (IS_ERR(page))
                                goto release;
 -                      }
  
                        sg_set_page(sg, page, PAGE_SIZE, 0);
                }
  
 -              if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
 -                      num = sgt->nents;
 +              if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                        goto release;
 -              }
        } else if (dobj->page) {
                /* Single contiguous page */
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
  
                sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
  
 -              if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
 +              if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                        goto free_table;
        } else if (dobj->linear) {
                /* Single contiguous physical region - no struct page */
        return sgt;
  
   release:
 -      for_each_sg(sgt->sgl, sg, num, i)
 -              put_page(sg_page(sg));
 +      for_each_sgtable_sg(sgt, sg, i)
 +              if (sg_page(sg))
 +                      put_page(sg_page(sg));
   free_table:
        sg_free_table(sgt);
   free_sgt:
@@@ -446,12 -449,11 +446,12 @@@ static void armada_gem_prime_unmap_dma_
        int i;
  
        if (!dobj->linear)
 -              dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
 +              dma_unmap_sgtable(attach->dev, sgt, dir, 0);
  
        if (dobj->obj.filp) {
                struct scatterlist *sg;
 -              for_each_sg(sgt->sgl, sg, sgt->nents, i)
 +
 +              for_each_sgtable_sg(sgt, sg, i)
                        put_page(sg_page(sg));
        }
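The armada hunks above switch from open-coded for_each_sg() loops over sgt->nents to the for_each_sgtable_sg() helper, which walks every allocated entry of the table and therefore also covers partially-filled tables on error paths. A minimal sketch of the release pattern (put_all_pages() is illustrative):

#include <linux/mm.h>
#include <linux/scatterlist.h>

static void put_all_pages(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	/* Iterates all allocated entries; skip slots never populated. */
	for_each_sgtable_sg(sgt, sg, i)
		if (sg_page(sg))
			put_page(sg_page(sg));
}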
  
index 54864400015dda0628b4ea5a299d8236eacd4e71,b9c5a98c5c9ce38e43d098067df50c379e705ab2..e875425336406fdd8d483257fde972ab03d5dc25
   * OF THIS SOFTWARE.
   */
  
 +#include <linux/bitfield.h>
  #include <linux/delay.h>
  #include <linux/errno.h>
  #include <linux/i2c.h>
  #include <linux/init.h>
  #include <linux/kernel.h>
 +#include <linux/random.h>
  #include <linux/sched.h>
  #include <linux/seq_file.h>
  #include <linux/iopoll.h>
@@@ -425,22 -423,6 +425,22 @@@ drm_dp_encode_sideband_req(const struc
                memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
                idx += req->u.i2c_write.num_bytes;
                break;
 +      case DP_QUERY_STREAM_ENC_STATUS: {
 +              const struct drm_dp_query_stream_enc_status *msg;
 +
 +              msg = &req->u.enc_status;
 +              buf[idx] = msg->stream_id;
 +              idx++;
 +              memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
 +              idx += sizeof(msg->client_id);
 +              buf[idx] = 0;
 +              buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
 +              buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
 +              buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
 +              buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
 +              idx++;
 +              }
 +              break;
        }
        raw->cur_len = idx;
  }
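The new DP_QUERY_STREAM_ENC_STATUS encoding above packs the request's flag byte with FIELD_PREP()/GENMASK() from <linux/bitfield.h>. A minimal sketch of that packing in isolation (pack_enc_status_flags() is illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

static u8 pack_enc_status_flags(u8 stream_event, bool valid_event,
				u8 stream_behavior, bool valid_behavior)
{
	u8 v = 0;

	v |= FIELD_PREP(GENMASK(1, 0), stream_event);		/* bits 1:0 */
	v |= valid_event ? BIT(2) : 0;
	v |= FIELD_PREP(GENMASK(4, 3), stream_behavior);	/* bits 4:3 */
	v |= valid_behavior ? BIT(5) : 0;

	return v;
}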
@@@ -569,20 -551,6 +569,20 @@@ drm_dp_decode_sideband_req(const struc
                                return -ENOMEM;
                }
                break;
 +      case DP_QUERY_STREAM_ENC_STATUS:
 +              req->u.enc_status.stream_id = buf[idx++];
 +              for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
 +                      req->u.enc_status.client_id[i] = buf[idx++];
 +
 +              req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
 +                                                         buf[idx]);
 +              req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
 +                                                               buf[idx]);
 +              req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
 +                                                            buf[idx]);
 +              req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
 +                                                                  buf[idx]);
 +              break;
        }
  
        return 0;
@@@ -661,16 -629,6 +661,16 @@@ drm_dp_dump_sideband_msg_req_body(cons
                  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
                  req->u.i2c_write.bytes);
                break;
 +      case DP_QUERY_STREAM_ENC_STATUS:
 +              P("stream_id=%u client_id=%*ph stream_event=%x "
 +                "valid_event=%d stream_behavior=%x valid_behavior=%d",
 +                req->u.enc_status.stream_id,
 +                (int)ARRAY_SIZE(req->u.enc_status.client_id),
 +                req->u.enc_status.client_id, req->u.enc_status.stream_event,
 +                req->u.enc_status.valid_stream_event,
 +                req->u.enc_status.stream_behavior,
 +                req->u.enc_status.valid_stream_behavior);
 +              break;
        default:
                P("???\n");
                break;
@@@ -978,42 -936,6 +978,42 @@@ static bool drm_dp_sideband_parse_power
        return true;
  }
  
 +static bool
 +drm_dp_sideband_parse_query_stream_enc_status(
 +                              struct drm_dp_sideband_msg_rx *raw,
 +                              struct drm_dp_sideband_msg_reply_body *repmsg)
 +{
 +      struct drm_dp_query_stream_enc_status_ack_reply *reply;
 +
 +      reply = &repmsg->u.enc_status;
 +
 +      reply->stream_id = raw->msg[3];
 +
 +      reply->reply_signed = raw->msg[2] & BIT(0);
 +
 +      /*
 +       * NOTE: It's my impression from reading the spec that the below parsing
 +       * is correct. However I noticed while testing with an HDCP 1.4 display
 +       * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
 +       * would expect both bits to be set. So keep the parsing following the
 +       * spec, but beware reality might not match the spec (at least for some
 +       * configurations).
 +       */
 +      reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
 +      reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);
 +
 +      reply->query_capable_device_present = raw->msg[2] & BIT(5);
 +      reply->legacy_device_present = raw->msg[2] & BIT(6);
 +      reply->unauthorizable_device_present = raw->msg[2] & BIT(7);
 +
 +      reply->auth_completed = !!(raw->msg[1] & BIT(3));
 +      reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
 +      reply->repeater_present = !!(raw->msg[1] & BIT(5));
 +      reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;
 +
 +      return true;
 +}
 +
  static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
                                        struct drm_dp_sideband_msg_reply_body *msg)
  {
                return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
        case DP_REMOTE_I2C_READ:
                return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
+       case DP_REMOTE_I2C_WRITE:
+               return true; /* since there's nothing to parse */
        case DP_ENUM_PATH_RESOURCES:
                return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
        case DP_ALLOCATE_PAYLOAD:
                return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
        case DP_CLEAR_PAYLOAD_ID_TABLE:
                return true; /* since there's nothing to parse */
 +      case DP_QUERY_STREAM_ENC_STATUS:
 +              return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
        default:
                DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
                          drm_dp_mst_req_type_str(msg->req_type));
@@@ -1201,25 -1123,6 +1203,25 @@@ static void build_power_updown_phy(stru
        msg->path_msg = true;
  }
  
 +static int
 +build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
 +                            u8 *q_id)
 +{
 +      struct drm_dp_sideband_msg_req_body req;
 +
 +      req.req_type = DP_QUERY_STREAM_ENC_STATUS;
 +      req.u.enc_status.stream_id = stream_id;
 +      memcpy(req.u.enc_status.client_id, q_id,
 +             sizeof(req.u.enc_status.client_id));
 +      req.u.enc_status.stream_event = 0;
 +      req.u.enc_status.valid_stream_event = false;
 +      req.u.enc_status.stream_behavior = 0;
 +      req.u.enc_status.valid_stream_behavior = false;
 +
 +      drm_dp_encode_sideband_req(&req, msg);
 +      return 0;
 +}
 +
  static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
                                        struct drm_dp_vcpi *vcpi)
  {
@@@ -3252,57 -3155,6 +3254,57 @@@ int drm_dp_send_power_updown_phy(struc
  }
  EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
  
 +int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
 +              struct drm_dp_mst_port *port,
 +              struct drm_dp_query_stream_enc_status_ack_reply *status)
 +{
 +      struct drm_dp_sideband_msg_tx *txmsg;
 +      u8 nonce[7];
 +      int len, ret;
 +
 +      txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
 +      if (!txmsg)
 +              return -ENOMEM;
 +
 +      port = drm_dp_mst_topology_get_port_validated(mgr, port);
 +      if (!port) {
 +              ret = -EINVAL;
 +              goto out_get_port;
 +      }
 +
 +      get_random_bytes(nonce, sizeof(nonce));
 +
 +      /*
 +       * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
 +       *  transaction at the MST Branch device directly connected to the
 +       *  Source"
 +       */
 +      txmsg->dst = mgr->mst_primary;
 +
 +      len = build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
 +
 +      drm_dp_queue_down_tx(mgr, txmsg);
 +
 +      ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
 +      if (ret < 0) {
 +              goto out;
 +      } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
 +              drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
 +              ret = -ENXIO;
 +              goto out;
 +      }
 +
 +      ret = 0;
 +      memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
 +
 +out:
 +      drm_dp_mst_topology_put_port(port);
 +out_get_port:
 +      kfree(txmsg);
 +      return ret;
 +}
 +EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
 +
  static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
                                       int id,
                                       struct drm_dp_payload *payload)
@@@ -5499,29 -5351,29 +5501,29 @@@ static bool remote_i2c_read_ok(const st
                msgs[num - 1].len <= 0xff;
  }
  
- /* I2C device */
- static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
-                              int num)
+ static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
+ {
+       int i;
+       for (i = 0; i < num - 1; i++) {
+               if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
+                   msgs[i].len > 0xff)
+                       return false;
+       }
+       return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
+ }
+ 
+ static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
+                              struct drm_dp_mst_port *port,
+                              struct i2c_msg *msgs, int num)
  {
-       struct drm_dp_aux *aux = adapter->algo_data;
-       struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
-       struct drm_dp_mst_branch *mstb;
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
        unsigned int i;
        struct drm_dp_sideband_msg_req_body msg;
        struct drm_dp_sideband_msg_tx *txmsg = NULL;
        int ret;
  
-       mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
-       if (!mstb)
-               return -EREMOTEIO;
-       if (!remote_i2c_read_ok(msgs, num)) {
-               DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
-               ret = -EIO;
-               goto out;
-       }
        memset(&msg, 0, sizeof(msg));
        msg.req_type = DP_REMOTE_I2C_READ;
        msg.u.i2c_read.num_transactions = num - 1;
        }
  out:
        kfree(txmsg);
+       return ret;
+ }
+ 
+ static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
+                               struct drm_dp_mst_port *port,
+                               struct i2c_msg *msgs, int num)
+ {
+       struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+       unsigned int i;
+       struct drm_dp_sideband_msg_req_body msg;
+       struct drm_dp_sideband_msg_tx *txmsg = NULL;
+       int ret;
+       txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+       if (!txmsg) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       for (i = 0; i < num; i++) {
+               memset(&msg, 0, sizeof(msg));
+               msg.req_type = DP_REMOTE_I2C_WRITE;
+               msg.u.i2c_write.port_number = port->port_num;
+               msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
+               msg.u.i2c_write.num_bytes = msgs[i].len;
+               msg.u.i2c_write.bytes = msgs[i].buf;
+               memset(txmsg, 0, sizeof(*txmsg));
+               txmsg->dst = mstb;
+               drm_dp_encode_sideband_req(&msg, txmsg);
+               drm_dp_queue_down_tx(mgr, txmsg);
+               ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+               if (ret > 0) {
+                       if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
+                               ret = -EREMOTEIO;
+                               goto out;
+                       }
+               } else {
+                       goto out;
+               }
+       }
+       ret = num;
+ out:
+       kfree(txmsg);
+       return ret;
+ }
+ 
+ /* I2C device */
+ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
+                              struct i2c_msg *msgs, int num)
+ {
+       struct drm_dp_aux *aux = adapter->algo_data;
+       struct drm_dp_mst_port *port =
+               container_of(aux, struct drm_dp_mst_port, aux);
+       struct drm_dp_mst_branch *mstb;
+       struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+       int ret;
+       mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
+       if (!mstb)
+               return -EREMOTEIO;
+       if (remote_i2c_read_ok(msgs, num)) {
+               ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
+       } else if (remote_i2c_write_ok(msgs, num)) {
+               ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
+       } else {
+               DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
+               ret = -EIO;
+       }
        drm_dp_mst_topology_put_mstb(mstb);
        return ret;
  }
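
The hunks above export a new helper, drm_dp_send_query_stream_enc_status(), for the DP MST QUERY_STREAM_ENC_STATUS sideband request. A minimal sketch of how a caller might consume the reply follows; the wrapper function, its error policy and the <drm/drm_dp_mst_helper.h> include are illustrative assumptions, only the helper's signature and the reply fields come from the patch.

#include <linux/errno.h>
#include <drm/drm_dp_mst_helper.h>

/* Hypothetical caller: check that one MST stream is authenticated and encrypted. */
static int example_check_stream_encryption(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_port *port)
{
	struct drm_dp_query_stream_enc_status_ack_reply reply = {};
	int ret;

	ret = drm_dp_send_query_stream_enc_status(mgr, port, &reply);
	if (ret)
		return ret;

	if (!reply.auth_completed || !reply.encryption_enabled)
		return -EACCES;

	/* Per the NOTE in the parser above, some hubs set only the HDCP 2.x bit. */
	if (!reply.hdcp_1x_device_present && !reply.hdcp_2x_device_present)
		return -ENODEV;

	return 0;
}
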
index 47d8211221f2a3d70bdc82d7faf374328826646e,0a952f27c184670c2736b18d75c50d78b5b354d7..d77c9f8ff26ced1e994792f36e4cc49321bb31f3
@@@ -126,8 -126,8 +126,8 @@@ void drm_gem_shmem_free_object(struct d
                drm_prime_gem_destroy(obj, shmem->sgt);
        } else {
                if (shmem->sgt) {
 -                      dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
 -                                   shmem->sgt->nents, DMA_BIDIRECTIONAL);
 +                      dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
 +                                        DMA_BIDIRECTIONAL, 0);
                        sg_free_table(shmem->sgt);
                        kfree(shmem->sgt);
                }
@@@ -424,7 -424,8 +424,7 @@@ void drm_gem_shmem_purge_locked(struct 
  
        WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
  
 -      dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
 -                   shmem->sgt->nents, DMA_BIDIRECTIONAL);
 +      dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
        sg_free_table(shmem->sgt);
        kfree(shmem->sgt);
        shmem->sgt = NULL;
@@@ -655,7 -656,7 +655,7 @@@ struct sg_table *drm_gem_shmem_get_sg_t
  
        WARN_ON(shmem->base.import_attach);
  
-       return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
+       return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
  }
  EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
  
@@@ -696,17 -697,12 +696,17 @@@ struct sg_table *drm_gem_shmem_get_page
                goto err_put_pages;
        }
        /* Map the pages for use by the h/w. */
 -      dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
 +      ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
 +      if (ret)
 +              goto err_free_sgt;
  
        shmem->sgt = sgt;
  
        return sgt;
  
 +err_free_sgt:
 +      sg_free_table(sgt);
 +      kfree(sgt);
  err_put_pages:
        drm_gem_shmem_put_pages(shmem);
        return ERR_PTR(ret);
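
The shmem helper conversion above is representative of the tree-wide move from dma_map_sg()/dma_unmap_sg() to the sg_table wrappers. A sketch of the resulting pattern; the helper name and the BIDIRECTIONAL direction are chosen only for illustration.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: map a whole sg_table for DMA and report errors by errno. */
static int example_map_buffer(struct device *dev, struct sg_table *sgt)
{
	int ret;

	/* dma_map_sgtable() returns 0 or a negative errno, unlike dma_map_sg(). */
	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;

	/* ... device accesses the buffer via sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	return 0;
}
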
index b8c7f068a5a417c4169bd9874495cd998d41814f,8a6a3c99b7d8a759c3168ae329ac1b37ac6640e5..11fe9ff76fd57227798e00e7e2954e80a9c6b57c
@@@ -617,7 -617,6 +617,7 @@@ struct sg_table *drm_gem_map_dma_buf(st
  {
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;
 +      int ret;
  
        if (WARN_ON(dir == DMA_NONE))
                return ERR_PTR(-EINVAL);
        else
                sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
  
 -      if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
 -                            DMA_ATTR_SKIP_CPU_SYNC)) {
 +      ret = dma_map_sgtable(attach->dev, sgt, dir,
 +                            DMA_ATTR_SKIP_CPU_SYNC);
 +      if (ret) {
                sg_free_table(sgt);
                kfree(sgt);
 -              sgt = ERR_PTR(-ENOMEM);
 +              sgt = ERR_PTR(ret);
        }
  
        return sgt;
@@@ -654,7 -652,8 +654,7 @@@ void drm_gem_unmap_dma_buf(struct dma_b
        if (!sgt)
                return;
  
 -      dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
 -                         DMA_ATTR_SKIP_CPU_SYNC);
 +      dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(sgt);
  }
@@@ -803,9 -802,11 +803,11 @@@ static const struct dma_buf_ops drm_gem
   *
   * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
   */
- struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
+ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+                                      struct page **pages, unsigned int nr_pages)
  {
        struct sg_table *sg = NULL;
+       size_t max_segment = 0;
        int ret;
  
        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
                goto out;
        }
  
-       ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-                               nr_pages << PAGE_SHIFT, GFP_KERNEL);
+       if (dev)
+               max_segment = dma_max_mapping_size(dev->dev);
+       if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
+               max_segment = SCATTERLIST_MAX_SEGMENT;
+       ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+                                         nr_pages << PAGE_SHIFT,
+                                         max_segment, GFP_KERNEL);
        if (ret)
                goto out;
  
@@@ -826,37 -832,6 +833,37 @@@ out
  }
  EXPORT_SYMBOL(drm_prime_pages_to_sg);
  
 +/**
 + * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
 + * @sgt: sg_table describing the buffer to check
 + *
 + * This helper calculates the contiguous size in the DMA address space
 +       * of the buffer described by the provided sg_table.
 + *
 + * This is useful for implementing
 + * &drm_gem_object_funcs.gem_prime_import_sg_table.
 + */
 +unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
 +{
 +      dma_addr_t expected = sg_dma_address(sgt->sgl);
 +      struct scatterlist *sg;
 +      unsigned long size = 0;
 +      int i;
 +
 +      for_each_sgtable_dma_sg(sgt, sg, i) {
 +              unsigned int len = sg_dma_len(sg);
 +
 +              if (!len)
 +                      break;
 +              if (sg_dma_address(sg) != expected)
 +                      break;
 +              expected += len;
 +              size += len;
 +      }
 +      return size;
 +}
 +EXPORT_SYMBOL(drm_prime_get_contiguous_size);
 +
  /**
   * drm_gem_prime_export - helper library implementation of the export callback
   * @obj: GEM object to export
@@@ -991,26 -966,45 +998,26 @@@ EXPORT_SYMBOL(drm_gem_prime_import)
  int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                     dma_addr_t *addrs, int max_entries)
  {
 -      unsigned count;
 -      struct scatterlist *sg;
 -      struct page *page;
 -      u32 page_len, page_index;
 -      dma_addr_t addr;
 -      u32 dma_len, dma_index;
 -
 -      /*
 -       * Scatterlist elements contain both pages and DMA addresses, but
 -       * one should not assume 1:1 relation between them. The sg->length is
 -       * the size of the physical memory chunk described by the sg->page,
 -       * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
 -       * described by the sg_dma_address(sg).
 -       */
 -      page_index = 0;
 -      dma_index = 0;
 -      for_each_sg(sgt->sgl, sg, sgt->nents, count) {
 -              page_len = sg->length;
 -              page = sg_page(sg);
 -              dma_len = sg_dma_len(sg);
 -              addr = sg_dma_address(sg);
 -
 -              while (pages && page_len > 0) {
 -                      if (WARN_ON(page_index >= max_entries))
 +      struct sg_dma_page_iter dma_iter;
 +      struct sg_page_iter page_iter;
 +      struct page **p = pages;
 +      dma_addr_t *a = addrs;
 +
 +      if (pages) {
 +              for_each_sgtable_page(sgt, &page_iter, 0) {
 +                      if (WARN_ON(p - pages >= max_entries))
                                return -1;
 -                      pages[page_index] = page;
 -                      page++;
 -                      page_len -= PAGE_SIZE;
 -                      page_index++;
 +                      *p++ = sg_page_iter_page(&page_iter);
                }
 -              while (addrs && dma_len > 0) {
 -                      if (WARN_ON(dma_index >= max_entries))
 +      }
 +      if (addrs) {
 +              for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
 +                      if (WARN_ON(a - addrs >= max_entries))
                                return -1;
 -                      addrs[dma_index] = addr;
 -                      addr += PAGE_SIZE;
 -                      dma_len -= PAGE_SIZE;
 -                      dma_index++;
 +                      *a++ = sg_page_iter_dma_address(&dma_iter);
                }
        }
 +
        return 0;
  }
  EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
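
drm_prime_get_contiguous_size(), added above, lets importers verify that a mapped sg_table is linear in the device's DMA address space. A hedged sketch of such a check; the helper name is hypothetical, and the size comparison mirrors the rockchip conversion further down this page.

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <drm/drm_prime.h>

/* Hypothetical import check: reject buffers that are not IOVA-contiguous. */
static int example_require_contiguous(struct dma_buf_attachment *attach,
				      struct sg_table *sgt)
{
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return -EINVAL;

	return 0;
}
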
index eaf1949bc2e408a48d759f7aeaef689ab417fb96,ea19f1d27275683ab0fe507309e3a69135acb6c1..d1533bdc1335ed6b29d4affe5a807e2bc8a495ef
@@@ -27,7 -27,7 +27,7 @@@ static void etnaviv_gem_scatter_map(str
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
 -              dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
 +              dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
@@@ -51,7 -51,7 +51,7 @@@
         * discard those writes.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
 -              dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
 +              dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  /* called with etnaviv_obj->lock held */
@@@ -103,7 -103,8 +103,8 @@@ struct page **etnaviv_gem_get_pages(str
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
                struct sg_table *sgt;
  
-               sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+               sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
+                                           etnaviv_obj->pages, npages);
                if (IS_ERR(sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt: %ld\n",
                                PTR_ERR(sgt));
@@@ -404,8 -405,9 +405,8 @@@ int etnaviv_gem_cpu_prep(struct drm_gem
        }
  
        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
 -              dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
 -                                  etnaviv_obj->sgt->nents,
 -                                  etnaviv_op_to_dma_dir(op));
 +              dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
 +                                       etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }
  
@@@ -420,7 -422,8 +421,7 @@@ int etnaviv_gem_cpu_fini(struct drm_gem
        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
 -              dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
 -                      etnaviv_obj->sgt->nents,
 +              dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }
index efc1d0f33fd7ef8242657d0e31781e90a3b4d2f3,345eceec24fe7bac2ce6e3f539c949ff213d3f8d..5a9d933e425a41eefb138f892a26bf93fdb30b15
@@@ -67,7 -67,6 +67,7 @@@
  #include "intel_bw.h"
  #include "intel_cdclk.h"
  #include "intel_color.h"
 +#include "intel_csr.h"
  #include "intel_display_types.h"
  #include "intel_dp_link_training.h"
  #include "intel_fbc.h"
@@@ -7332,10 -7331,6 +7332,10 @@@ enum intel_display_power_domain intel_p
                return POWER_DOMAIN_PORT_DDI_F_LANES;
        case PORT_G:
                return POWER_DOMAIN_PORT_DDI_G_LANES;
 +      case PORT_H:
 +              return POWER_DOMAIN_PORT_DDI_H_LANES;
 +      case PORT_I:
 +              return POWER_DOMAIN_PORT_DDI_I_LANES;
        default:
                MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
@@@ -7361,10 -7356,6 +7361,10 @@@ intel_aux_power_domain(struct intel_dig
                        return POWER_DOMAIN_AUX_F_TBT;
                case AUX_CH_G:
                        return POWER_DOMAIN_AUX_G_TBT;
 +              case AUX_CH_H:
 +                      return POWER_DOMAIN_AUX_H_TBT;
 +              case AUX_CH_I:
 +                      return POWER_DOMAIN_AUX_I_TBT;
                default:
                        MISSING_CASE(dig_port->aux_ch);
                        return POWER_DOMAIN_AUX_C_TBT;
@@@ -7396,10 -7387,6 +7396,10 @@@ intel_legacy_aux_to_power_domain(enum a
                return POWER_DOMAIN_AUX_F;
        case AUX_CH_G:
                return POWER_DOMAIN_AUX_G;
 +      case AUX_CH_H:
 +              return POWER_DOMAIN_AUX_H;
 +      case AUX_CH_I:
 +              return POWER_DOMAIN_AUX_I;
        default:
                MISSING_CASE(aux_ch);
                return POWER_DOMAIN_AUX_A;
@@@ -13484,12 -13471,6 +13484,6 @@@ encoder_retry
                    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
                    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
  
-       /*
-        * Make drm_calc_timestamping_constants in
-        * drm_atomic_helper_update_legacy_modeset_state() happy
-        */
-       pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;
        return 0;
  }
  
@@@ -14649,8 -14630,16 +14643,8 @@@ u8 intel_calc_active_pipes(struct intel
  static int intel_modeset_checks(struct intel_atomic_state *state)
  {
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 -      int ret;
  
        state->modeset = true;
 -      state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
 -
 -      if (state->active_pipes != dev_priv->active_pipes) {
 -              ret = _intel_atomic_lock_global_state(state);
 -              if (ret)
 -                      return ret;
 -      }
  
        if (IS_HASWELL(dev_priv))
                return hsw_mode_set_planes_workaround(state);
@@@ -14794,8 -14783,7 +14788,8 @@@ static int intel_atomic_check_cdclk(str
                                    bool *need_cdclk_calc)
  {
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 -      struct intel_cdclk_state *new_cdclk_state;
 +      const struct intel_cdclk_state *old_cdclk_state;
 +      const struct intel_cdclk_state *new_cdclk_state;
        struct intel_plane_state *plane_state;
        struct intel_bw_state *new_bw_state;
        struct intel_plane *plane;
                        return ret;
        }
  
 +      old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
        new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
  
 -      if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
 +      if (new_cdclk_state &&
 +          old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
                *need_cdclk_calc = true;
  
        ret = dev_priv->display.bw_calc_min_cdclk(state);
@@@ -15765,6 -15751,14 +15759,6 @@@ static void intel_atomic_track_fbs(stru
                                        plane->frontbuffer_bit);
  }
  
 -static void assert_global_state_locked(struct drm_i915_private *dev_priv)
 -{
 -      struct intel_crtc *crtc;
 -
 -      for_each_intel_crtc(&dev_priv->drm, crtc)
 -              drm_modeset_lock_assert_held(&crtc->base.mutex);
 -}
 -
  static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *_state,
                               bool nonblock)
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);
  
 -      if (state->global_state_changed) {
 -              assert_global_state_locked(dev_priv);
 -
 -              dev_priv->active_pipes = state->active_pipes;
 -      }
 -
        drm_atomic_state_get(&state->base);
        INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
  
@@@ -16886,7 -16886,7 +16880,7 @@@ static void intel_setup_outputs(struct 
  
        intel_pps_init(dev_priv);
  
 -      if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
 +      if (!HAS_DISPLAY(dev_priv))
                return;
  
        if (IS_ROCKETLAKE(dev_priv)) {
@@@ -17872,27 -17872,6 +17866,27 @@@ int intel_modeset_init_noirq(struct drm
  {
        int ret;
  
 +      if (i915_inject_probe_failure(i915))
 +              return -ENODEV;
 +
 +      if (HAS_DISPLAY(i915)) {
 +              ret = drm_vblank_init(&i915->drm,
 +                                    INTEL_NUM_PIPES(i915));
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      intel_bios_init(i915);
 +
 +      ret = intel_vga_register(i915);
 +      if (ret)
 +              goto cleanup_bios;
 +
 +      /* FIXME: completely on the wrong abstraction layer */
 +      intel_power_domains_init_hw(i915, false);
 +
 +      intel_csr_ucode_init(i915);
 +
        i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
        i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
                                        WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
  
        ret = intel_cdclk_init(i915);
        if (ret)
 -              return ret;
 +              goto cleanup_vga_client_pw_domain_csr;
  
        ret = intel_dbuf_init(i915);
        if (ret)
 -              return ret;
 +              goto cleanup_vga_client_pw_domain_csr;
  
        ret = intel_bw_init(i915);
        if (ret)
 -              return ret;
 +              goto cleanup_vga_client_pw_domain_csr;
  
        init_llist_head(&i915->atomic_helper.free_list);
        INIT_WORK(&i915->atomic_helper.free_work,
        intel_fbc_init(i915);
  
        return 0;
 +
 +cleanup_vga_client_pw_domain_csr:
 +      intel_csr_ucode_fini(i915);
 +      intel_power_domains_driver_remove(i915);
 +      intel_vga_unregister(i915);
 +cleanup_bios:
 +      intel_bios_driver_remove(i915);
 +
 +      return ret;
  }
  
 -/* part #2: call after irq install */
 -int intel_modeset_init(struct drm_i915_private *i915)
 +/* part #2: call after irq install, but before gem init */
 +int intel_modeset_init_nogem(struct drm_i915_private *i915)
  {
        struct drm_device *dev = &i915->drm;
        enum pipe pipe;
                    INTEL_NUM_PIPES(i915),
                    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
  
 -      if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
 +      if (HAS_DISPLAY(i915)) {
                for_each_pipe(i915, pipe) {
                        ret = intel_crtc_init(i915, pipe);
                        if (ret) {
        return 0;
  }
  
 +/* part #3: call after gem init */
 +int intel_modeset_init(struct drm_i915_private *i915)
 +{
 +      int ret;
 +
 +      intel_overlay_setup(i915);
 +
 +      if (!HAS_DISPLAY(i915))
 +              return 0;
 +
 +      ret = intel_fbdev_init(&i915->drm);
 +      if (ret)
 +              return ret;
 +
 +      /* Only enable hotplug handling once the fbdev is fully set up. */
 +      intel_hpd_init(i915);
 +
 +      intel_init_ipc(i915);
 +
 +      intel_psr_set_force_mode_changed(i915->psr.dp);
 +
 +      return 0;
 +}
 +
  void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
  {
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
@@@ -18939,18 -18885,6 +18933,18 @@@ void intel_modeset_driver_remove_noirq(
        intel_fbc_cleanup_cfb(i915);
  }
  
 +/* part #3: call after gem init */
 +void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
 +{
 +      intel_csr_ucode_fini(i915);
 +
 +      intel_power_domains_driver_remove(i915);
 +
 +      intel_vga_unregister(i915);
 +
 +      intel_bios_driver_remove(i915);
 +}
 +
  #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
  
  struct intel_display_error_state {
@@@ -19011,7 -18945,7 +19005,7 @@@ intel_display_capture_error_state(struc
  
        BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
  
 -      if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
 +      if (!HAS_DISPLAY(dev_priv))
                return NULL;
  
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
index 397c313a8b6929da9a8ad360484a8566ae365850,c207d22397916f35406840786c7a3b5e5e6582c6..b6c42fd872adecb2881e9a933e15dbd83b447962
@@@ -79,8 -79,6 +79,6 @@@ static void mock_device_release(struct 
  
  out:
        i915_params_free(&i915->params);
-       put_device(&i915->drm.pdev->dev);
-       i915->drm.pdev = NULL;
  }
  
  static struct drm_driver mock_driver = {
@@@ -118,22 -116,15 +116,15 @@@ static struct dev_pm_domain pm_domain 
  
  struct drm_i915_private *mock_gem_device(void)
  {
 -      struct drm_i915_private *i915;
 -      struct pci_dev *pdev;
  #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
 -      struct dev_iommu iommu;
 +      static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
  #endif
-       int err;
 +      struct drm_i915_private *i915;
 +      struct pci_dev *pdev;
  
        pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
                return NULL;
-       i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
-       if (!i915) {
-               kfree(pdev);
-               return NULL;
-       }
        device_initialize(&pdev->dev);
        pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
        pdev->dev.release = release_dev;
        dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  
  #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
 -      /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */
 -      memset(&iommu, 0, sizeof(iommu));
 -      iommu.priv = (void *)-1;
 -      pdev->dev.iommu = &iommu;
 +      /* HACK to disable iommu for the fake device; force identity mapping */
 +      pdev->dev.iommu = &fake_iommu;
  #endif
+       if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+               put_device(&pdev->dev);
+               return NULL;
+       }
+       i915 = devm_drm_dev_alloc(&pdev->dev, &mock_driver,
+                                 struct drm_i915_private, drm);
+       if (IS_ERR(i915)) {
+               pr_err("Failed to allocate mock GEM device: err=%ld\n", PTR_ERR(i915));
+               devres_release_group(&pdev->dev, NULL);
+               put_device(&pdev->dev);
+               return NULL;
+       }
  
        pci_set_drvdata(pdev, i915);
+       i915->drm.pdev = pdev;
  
        dev_pm_domain_set(&pdev->dev, &pm_domain);
        pm_runtime_enable(&pdev->dev);
        if (pm_runtime_enabled(&pdev->dev))
                WARN_ON(pm_runtime_get_sync(&pdev->dev));
  
-       err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
-       if (err) {
-               pr_err("Failed to initialise mock GEM device: err=%d\n", err);
-               put_device(&pdev->dev);
-               kfree(i915);
-               return NULL;
-       }
-       i915->drm.pdev = pdev;
-       drmm_add_final_kfree(&i915->drm, i915);
  
        i915_params_copy(&i915->params, &i915_modparams);
  
@@@ -222,7 -220,15 +218,15 @@@ err_drv
        intel_gt_driver_late_release(&i915->gt);
        intel_memory_regions_driver_release(i915);
        drm_mode_config_cleanup(&i915->drm);
-       drm_dev_put(&i915->drm);
+       mock_destroy_device(i915);
  
        return NULL;
  }
+ 
+ void mock_destroy_device(struct drm_i915_private *i915)
+ {
+       struct device *dev = i915->drm.dev;
+       devres_release_group(dev, NULL);
+       put_device(dev);
+ }
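
mock_gem_device() now relies on devm_drm_dev_alloc(), one of the devm_drm conversions in this pull. A generic sketch of that allocation pattern follows; the example_device structure and probe function are hypothetical, and the header plus the non-const driver pointer reflect the 5.10-era API as far as this diff shows.

#include <linux/err.h>
#include <drm/drm_drv.h>

/* Hypothetical driver private data embedding struct drm_device as "drm". */
struct example_device {
	struct drm_device drm;
};

static int example_probe(struct device *parent, struct drm_driver *drv)
{
	struct example_device *edev;

	/*
	 * Allocates example_device, initialises edev->drm and ties both
	 * lifetimes to the parent device, replacing the old drm_dev_init() +
	 * drmm_add_final_kfree() pair removed above.
	 */
	edev = devm_drm_dev_alloc(parent, drv, struct example_device, drm);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	return drm_dev_register(&edev->drm, 0);
}
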
index 8c7ae812b813179c85af514cb9642eb8802312e7,b4553caaa196b1965b6b701334101d6004cdf8ec..e47958c3704abf823752e84ab7843c3053cf9991
@@@ -53,10 -53,11 +53,10 @@@ static void sync_for_device(struct msm_
        struct device *dev = msm_obj->base.dev->dev;
  
        if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
 -              dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
 -                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 +              dma_sync_sgtable_for_device(dev, msm_obj->sgt,
 +                                          DMA_BIDIRECTIONAL);
        } else {
 -              dma_map_sg(dev, msm_obj->sgt->sgl,
 -                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 +              dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
        }
  }
  
@@@ -65,9 -66,11 +65,9 @@@ static void sync_for_cpu(struct msm_gem
        struct device *dev = msm_obj->base.dev->dev;
  
        if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
 -              dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
 -                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 +              dma_sync_sgtable_for_cpu(dev, msm_obj->sgt, DMA_BIDIRECTIONAL);
        } else {
 -              dma_unmap_sg(dev, msm_obj->sgt->sgl,
 -                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 +              dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
        }
  }
  
@@@ -123,7 -126,7 +123,7 @@@ static struct page **get_pages(struct d
  
                msm_obj->pages = p;
  
-               msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+               msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
                if (IS_ERR(msm_obj->sgt)) {
                        void *ptr = ERR_CAST(msm_obj->sgt);
  
index cb50f2ba2e46640cabc67b1757063fc619276748,0055d86576f79fe65c394ed85ab732df44c8338d..62e5d0970525e8b6b2fefd9f86e8aec091238d8c
@@@ -36,8 -36,8 +36,8 @@@ static int rockchip_gem_iommu_map(struc
  
        rk_obj->dma_addr = rk_obj->mm.start;
  
 -      ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
 -                         rk_obj->sgt->nents, prot);
 +      ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
 +                              prot);
        if (ret < rk_obj->base.size) {
                DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
                          ret, rk_obj->base.size);
@@@ -85,7 -85,8 +85,8 @@@ static int rockchip_gem_get_pages(struc
  
        rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
  
-       rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+       rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
+                                           rk_obj->pages, rk_obj->num_pages);
        if (IS_ERR(rk_obj->sgt)) {
                ret = PTR_ERR(rk_obj->sgt);
                goto err_put_pages;
         * TODO: Replace this by drm_clflush_sg() once it can be implemented
         * without relying on symbols that are not exported.
         */
 -      for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
 +      for_each_sgtable_sg(rk_obj->sgt, s, i)
                sg_dma_address(s) = sg_phys(s);
  
 -      dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
 -                             DMA_TO_DEVICE);
 +      dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);
  
        return 0;
  
@@@ -349,8 -351,8 +350,8 @@@ void rockchip_gem_free_object(struct dr
                if (private->domain) {
                        rockchip_gem_iommu_unmap(rk_obj);
                } else {
 -                      dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
 -                                   rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
 +                      dma_unmap_sgtable(drm->dev, rk_obj->sgt,
 +                                        DMA_BIDIRECTIONAL, 0);
                }
                drm_prime_gem_destroy(obj, rk_obj->sgt);
        } else {
@@@ -441,7 -443,7 +442,7 @@@ struct sg_table *rockchip_gem_prime_get
        int ret;
  
        if (rk_obj->pages)
-               return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+               return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
  
        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
        return sgt;
  }
  
 -static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
 -                                                   int count)
 -{
 -      struct scatterlist *s;
 -      dma_addr_t expected = sg_dma_address(sgt->sgl);
 -      unsigned int i;
 -      unsigned long size = 0;
 -
 -      for_each_sg(sgt->sgl, s, count, i) {
 -              if (sg_dma_address(s) != expected)
 -                      break;
 -              expected = sg_dma_address(s) + sg_dma_len(s);
 -              size += sg_dma_len(s);
 -      }
 -      return size;
 -}
 -
  static int
  rockchip_gem_iommu_map_sg(struct drm_device *drm,
                          struct dma_buf_attachment *attach,
@@@ -475,13 -494,15 +476,13 @@@ rockchip_gem_dma_map_sg(struct drm_devi
                        struct sg_table *sg,
                        struct rockchip_gem_object *rk_obj)
  {
 -      int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
 -                             DMA_BIDIRECTIONAL);
 -      if (!count)
 -              return -EINVAL;
 +      int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
 +      if (err)
 +              return err;
  
 -      if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
 +      if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
                DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
 -              dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
 -                           DMA_BIDIRECTIONAL);
 +              dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
                return -EINVAL;
        }
  
index 01d94befab11f1683adb36127bbc2d988d4f4fe4,47e2935b8c68131eead6e6df54d286d124c220fa..a2bac20ff19ddb73852f590b8605ff0514b84e8f
@@@ -98,8 -98,8 +98,8 @@@ static struct sg_table *tegra_bo_pin(st
                 * the SG table needs to be copied to avoid overwriting any
                 * other potential users of the original SG table.
                 */
 -              err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
 -                                           GFP_KERNEL);
 +              err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
 +                                           obj->sgt->orig_nents, GFP_KERNEL);
                if (err < 0)
                        goto free;
        } else {
@@@ -196,7 -196,8 +196,7 @@@ static int tegra_bo_iommu_map(struct te
  
        bo->iova = bo->mm->start;
  
 -      bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
 -                              bo->sgt->nents, prot);
 +      bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
        if (!bo->size) {
                dev_err(tegra->drm->dev, "failed to map buffer\n");
                err = -ENOMEM;
@@@ -263,7 -264,8 +263,7 @@@ free
  static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
  {
        if (bo->pages) {
 -              dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
 -                           DMA_FROM_DEVICE);
 +              dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
                drm_gem_put_pages(&bo->gem, bo->pages, true, true);
                sg_free_table(bo->sgt);
                kfree(bo->sgt);
@@@ -282,15 -284,18 +282,15 @@@ static int tegra_bo_get_pages(struct dr
  
        bo->num_pages = bo->gem.size >> PAGE_SHIFT;
  
-       bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+       bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto put_pages;
        }
  
 -      err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
 -                       DMA_FROM_DEVICE);
 -      if (err == 0) {
 -              err = -EFAULT;
 +      err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
 +      if (err)
                goto free_sgt;
 -      }
  
        return 0;
  
@@@ -566,7 -571,7 +566,7 @@@ tegra_gem_prime_map_dma_buf(struct dma_
                        goto free;
        }
  
 -      if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
 +      if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                goto free;
  
        return sgt;
@@@ -585,7 -590,7 +585,7 @@@ static void tegra_gem_prime_unmap_dma_b
        struct tegra_bo *bo = to_tegra_bo(gem);
  
        if (bo->pages)
 -              dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
 +              dma_unmap_sgtable(attach->dev, sgt, dir, 0);
  
        sg_free_table(sgt);
        kfree(sgt);
@@@ -604,7 -609,8 +604,7 @@@ static int tegra_gem_prime_begin_cpu_ac
        struct drm_device *drm = gem->dev;
  
        if (bo->pages)
 -              dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
 -                                  DMA_FROM_DEVICE);
 +              dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
  
        return 0;
  }
@@@ -617,7 -623,8 +617,7 @@@ static int tegra_gem_prime_end_cpu_acce
        struct drm_device *drm = gem->dev;
  
        if (bo->pages)
 -              dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
 -                                     DMA_TO_DEVICE);
 +              dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
  
        return 0;
  }
index 651d1b0e8e8dfce443551aeaa3a3e73df0933299,a7550044b8b2e666539e4085d0864a7a7046e119..07945ca238e2d93741a7df8db1c21c56f56e7f74
@@@ -302,7 -302,7 +302,7 @@@ static struct sg_table *vmalloc_to_sgt(
                return NULL;
        }
  
 -      for_each_sg(sgt->sgl, sg, *sg_ents, i) {
 +      for_each_sgtable_sg(sgt, sg, i) {
                pg = vmalloc_to_page(data);
                if (!pg) {
                        sg_free_table(sgt);
        return sgt;
  }
  
- static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
-                                     struct virtio_gpu_vbuffer *vbuf,
-                                     struct virtio_gpu_fence *fence,
-                                     int elemcnt,
-                                     struct scatterlist **sgs,
-                                     int outcnt,
-                                     int incnt)
+ static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+                                    struct virtio_gpu_vbuffer *vbuf,
+                                    struct virtio_gpu_fence *fence,
+                                    int elemcnt,
+                                    struct scatterlist **sgs,
+                                    int outcnt,
+                                    int incnt)
  {
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int ret, idx;
                if (fence && vbuf->objs)
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                free_vbuf(vgdev, vbuf);
-               return;
+               return -1;
        }
  
        if (vgdev->has_indirect)
@@@ -373,15 -373,16 +373,16 @@@ again
        spin_unlock(&vgdev->ctrlq.qlock);
  
        drm_dev_exit(idx);
+       return 0;
  }
  
- static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
-                                               struct virtio_gpu_vbuffer *vbuf,
-                                               struct virtio_gpu_fence *fence)
+ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+                                              struct virtio_gpu_vbuffer *vbuf,
+                                              struct virtio_gpu_fence *fence)
  {
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        struct sg_table *sgt = NULL;
-       int elemcnt = 0, outcnt = 0, incnt = 0;
+       int elemcnt = 0, outcnt = 0, incnt = 0, ret;
  
        /* set up vcmd */
        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
                        if (!sgt) {
                                if (fence && vbuf->objs)
                                        virtio_gpu_array_unlock_resv(vbuf->objs);
-                               return;
+                               return -1;
                        }
  
                        elemcnt += sg_ents;
                incnt++;
        }
  
-       virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
-                                 incnt);
+       ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
+                                       incnt);
  
        if (sgt) {
                sg_free_table(sgt);
                kfree(sgt);
        }
+       return ret;
  }
  
  void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
                virtqueue_notify(vgdev->ctrlq.vq);
  }
  
- static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
-                                        struct virtio_gpu_vbuffer *vbuf)
+ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+                                       struct virtio_gpu_vbuffer *vbuf)
  {
-       virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
+       return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
  }
  
  static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
@@@ -534,6 -536,7 +536,7 @@@ void virtio_gpu_cmd_unref_resource(stru
  {
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
+       int ret;
  
        cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
                                        virtio_gpu_cmd_unref_cb);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
  
        vbuf->resp_cb_data = bo;
-       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+       ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+       if (ret < 0)
+               virtio_gpu_cleanup_object(bo);
  }
  
  void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@@ -603,8 -608,9 +608,8 @@@ void virtio_gpu_cmd_transfer_to_host_2d
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
  
        if (use_dma_api)
 -              dma_sync_sg_for_device(vgdev->vdev->dev.parent,
 -                                     shmem->pages->sgl, shmem->pages->nents,
 -                                     DMA_TO_DEVICE);
 +              dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
 +                                          shmem->pages, DMA_TO_DEVICE);
  
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
@@@ -683,9 -689,13 +688,13 @@@ static void virtio_gpu_cmd_get_capset_i
        int i = le32_to_cpu(cmd->capset_index);
  
        spin_lock(&vgdev->display_info_lock);
-       vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
-       vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
-       vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
+       if (vgdev->capsets) {
+               vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
+               vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
+               vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
+       } else {
+               DRM_ERROR("invalid capset memory.");
+       }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
  }
@@@ -1018,8 -1028,9 +1027,8 @@@ void virtio_gpu_cmd_transfer_to_host_3d
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
  
        if (use_dma_api)
 -              dma_sync_sg_for_device(vgdev->vdev->dev.parent,
 -                                     shmem->pages->sgl, shmem->pages->nents,
 -                                     DMA_TO_DEVICE);
 +              dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
 +                                          shmem->pages, DMA_TO_DEVICE);
  
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
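
The virtio hunks above, like the msm, etnaviv and tegra ones earlier, also convert dma_sync_sg_for_*() calls to the sgtable variants. A small sketch of the usual bracket around CPU access; the helper, callback and direction are illustrative only.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical CPU-access bracket around a DMA-mapped sg_table. */
static void example_cpu_access(struct device *dev, struct sg_table *sgt,
			       void (*touch)(struct sg_table *sgt))
{
	/* Hand ownership of the whole table to the CPU... */
	dma_sync_sgtable_for_cpu(dev, sgt, DMA_BIDIRECTIONAL);

	touch(sgt);	/* CPU reads/writes the backing pages here. */

	/* ...and give it back to the device before the next DMA. */
	dma_sync_sgtable_for_device(dev, sgt, DMA_BIDIRECTIONAL);
}
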
index 13c31e2d72541017f4d90b7fc160ed37811612b9,7454f797d37b10b0a2a534dade37d7edd232d767..7f0310441da1348f3b5325eb3924b9bc82f54251
  static const struct ttm_place vram_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
-       .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+       .mem_type = TTM_PL_VRAM,
+       .flags = TTM_PL_FLAG_CACHED
  };
  
  static const struct ttm_place vram_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
-       .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+       .mem_type = TTM_PL_VRAM,
+       .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
  };
  
  static const struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
-       .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+       .mem_type = TTM_PL_SYSTEM,
+       .flags = TTM_PL_FLAG_CACHED
  };
  
  static const struct ttm_place sys_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
-       .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+       .mem_type = TTM_PL_SYSTEM,
+       .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
  };
  
  static const struct ttm_place gmr_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
-       .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+       .mem_type = VMW_PL_GMR,
+       .flags = TTM_PL_FLAG_CACHED
  };
  
  static const struct ttm_place gmr_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
-       .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+       .mem_type = VMW_PL_GMR,
+       .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
  };
  
  static const struct ttm_place mob_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
-       .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+       .mem_type = VMW_PL_MOB,
+       .flags = TTM_PL_FLAG_CACHED
  };
  
  static const struct ttm_place mob_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
-       .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+       .mem_type = VMW_PL_MOB,
+       .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
  };
  
  struct ttm_placement vmw_vram_placement = {
@@@ -89,11 -97,13 +97,13 @@@ static const struct ttm_place vram_gmr_
        {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+               .mem_type = TTM_PL_VRAM,
+               .flags = TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+               .mem_type = VMW_PL_GMR,
+               .flags = TTM_PL_FLAG_CACHED
        }
  };
  
@@@ -101,11 -111,13 +111,13 @@@ static const struct ttm_place gmr_vram_
        {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+               .mem_type = VMW_PL_GMR,
+               .flags = TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+               .mem_type = TTM_PL_VRAM,
+               .flags = TTM_PL_FLAG_CACHED
        }
  };
  
@@@ -120,12 -132,14 +132,14 @@@ static const struct ttm_place vram_gmr_
        {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
+               .mem_type = TTM_PL_VRAM,
+               .flags = TTM_PL_FLAG_CACHED |
                         TTM_PL_FLAG_NO_EVICT
        }, {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
+               .mem_type = VMW_PL_GMR,
+               .flags = TTM_PL_FLAG_CACHED |
                         TTM_PL_FLAG_NO_EVICT
        }
  };
@@@ -169,19 -183,23 +183,23 @@@ static const struct ttm_place evictable
        {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+               .mem_type = TTM_PL_SYSTEM,
+               .flags = TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+               .mem_type = TTM_PL_VRAM,
+               .flags = TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+               .mem_type = VMW_PL_GMR,
+               .flags = TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+               .mem_type = VMW_PL_MOB,
+               .flags = TTM_PL_FLAG_CACHED
        }
  };
  
@@@ -189,15 -207,18 +207,18 @@@ static const struct ttm_place nonfixed_
        {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+               .mem_type = TTM_PL_SYSTEM,
+               .flags = TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+               .mem_type = VMW_PL_GMR,
+               .flags = TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
-               .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+               .mem_type = VMW_PL_MOB,
+               .flags = TTM_PL_FLAG_CACHED
        }
  };
  
@@@ -246,6 -267,7 +267,7 @@@ struct vmw_ttm_tt 
        struct vmw_sg_table vsgt;
        uint64_t sg_alloc_size;
        bool mapped;
+       bool bound;
  };
  
  const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
@@@ -362,7 -384,8 +384,7 @@@ static void vmw_ttm_unmap_from_dma(stru
  {
        struct device *dev = vmw_tt->dev_priv->dev->dev;
  
 -      dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
 -              DMA_BIDIRECTIONAL);
 +      dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
  }
  
  static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
  {
        struct device *dev = vmw_tt->dev_priv->dev->dev;
 -      int ret;
  
 -      ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
 -                       DMA_BIDIRECTIONAL);
 -      if (unlikely(ret == 0))
 -              return -ENOMEM;
 -
 -      vmw_tt->sgt.nents = ret;
 -
 -      return 0;
 +      return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  /**
@@@ -440,10 -471,10 +462,10 @@@ static int vmw_ttm_map_dma(struct vmw_t
                if (unlikely(ret != 0))
                        goto out_sg_alloc_fail;
  
 -              if (vsgt->num_pages > vmw_tt->sgt.nents) {
 +              if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
 -                                          vmw_tt->sgt.nents);
 +                                          vmw_tt->sgt.orig_nents);
  
                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
@@@ -530,11 -561,18 +552,18 @@@ const struct vmw_sg_table *vmw_bo_sg_ta
  }
  
  
- static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
+ static int vmw_ttm_bind(struct ttm_bo_device *bdev,
+                       struct ttm_tt *ttm, struct ttm_resource *bo_mem)
  {
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-       int ret;
+       int ret = 0;
+       if (!bo_mem)
+               return -EINVAL;
+       if (vmw_be->bound)
+               return 0;
  
        ret = vmw_ttm_map_dma(vmw_be);
        if (unlikely(ret != 0))
  
        switch (bo_mem->mem_type) {
        case VMW_PL_GMR:
-               return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+               ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
                                    ttm->num_pages, vmw_be->gmr_id);
+               break;
        case VMW_PL_MOB:
                if (unlikely(vmw_be->mob == NULL)) {
                        vmw_be->mob =
                                return -ENOMEM;
                }
  
-               return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+               ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
                                    &vmw_be->vsgt, ttm->num_pages,
                                    vmw_be->gmr_id);
+               break;
        default:
                BUG();
        }
-       return 0;
+       vmw_be->bound = true;
+       return ret;
  }
  
- static void vmw_ttm_unbind(struct ttm_tt *ttm)
+ static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
+                          struct ttm_tt *ttm)
  {
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
  
+       if (!vmw_be->bound)
+               return;
        switch (vmw_be->mem_type) {
        case VMW_PL_GMR:
                vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
  
        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);
+       vmw_be->bound = false;
  }
  
  
- static void vmw_ttm_destroy(struct ttm_tt *ttm)
+ static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
  {
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
  
+       vmw_ttm_unbind(bdev, ttm);
+       ttm_tt_destroy_common(bdev, ttm);
        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ttm_dma_tt_fini(&vmw_be->dma_ttm);
  }
  
  
- static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+ static int vmw_ttm_populate(struct ttm_bo_device *bdev,
+                           struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
  {
        struct vmw_ttm_tt *vmw_tt =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        int ret;
  
-       if (ttm->state != tt_unpopulated)
+       if (ttm_tt_is_populated(ttm))
                return 0;
  
        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
        return ret;
  }
  
- static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
+ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
+                              struct ttm_tt *ttm)
  {
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm.ttm);
                ttm_pool_unpopulate(ttm);
  }
  
- static struct ttm_backend_func vmw_ttm_func = {
-       .bind = vmw_ttm_bind,
-       .unbind = vmw_ttm_unbind,
-       .destroy = vmw_ttm_destroy,
- };
  static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
                                        uint32_t page_flags)
  {
        if (!vmw_be)
                return NULL;
  
-       vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
        vmw_be->mob = NULL;
  
@@@ -712,8 -755,8 +746,8 @@@ static int vmw_ttm_io_mem_reserve(struc
        case VMW_PL_MOB:
                return 0;
        case TTM_PL_VRAM:
-               mem->bus.offset = mem->start << PAGE_SHIFT;
-               mem->bus.base = dev_priv->vram_start;
+               mem->bus.offset = (mem->start << PAGE_SHIFT) +
+                       dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
@@@ -757,6 -800,9 +791,9 @@@ struct ttm_bo_driver vmw_bo_driver = 
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
+       .ttm_tt_bind = &vmw_ttm_bind,
+       .ttm_tt_unbind = &vmw_ttm_unbind,
+       .ttm_tt_destroy = &vmw_ttm_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = vmw_evict_flags,
        .move = NULL,
@@@ -787,7 -833,7 +824,7 @@@ int vmw_bo_create_and_populate(struct v
  
        ret = ttm_bo_reserve(bo, false, true, NULL);
        BUG_ON(ret != 0);
-       ret = vmw_ttm_populate(bo->ttm, &ctx);
+       ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
        if (likely(ret == 0)) {
                struct vmw_ttm_tt *vmw_tt =
                        container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
index a487384d5cb7fc93e85606e4e8c78976c3093e3a,a8aefaa38bd360c3969212b93be11a8b072666ff..2f464ef2d53e0a802ef04cbc2ad6d37eee796a58
@@@ -180,7 -180,8 +180,8 @@@ struct sg_table *xen_drm_front_gem_get_
        if (!xen_obj->pages)
                return ERR_PTR(-ENOMEM);
  
-       return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
+       return drm_prime_pages_to_sg(gem_obj->dev,
+                                    xen_obj->pages, xen_obj->num_pages);
  }
  
  struct drm_gem_object *
@@@ -217,7 -218,7 +218,7 @@@ xen_drm_front_gem_import_sg_table(struc
                return ERR_PTR(ret);
  
        DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
 -                size, sgt->nents);
 +                size, sgt->orig_nents);
  
        return &xen_obj->base;
  }
diff --combined include/drm/drm_prime.h
index 47ef11614627c6356cf1837ff5c0a4da494f397c,bf141e74a1c229061e9e60b60b9302c987246c37..0f69f9fbf12cd9ff4fa610ad5d046af147268426
@@@ -88,12 -88,11 +88,13 @@@ void drm_gem_dmabuf_vunmap(struct dma_b
  int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
  int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
  
- struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
+ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+                                      struct page **pages, unsigned int nr_pages);
  struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
                                     int flags);
  
 +unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt);
 +
  /* helper functions for importing */
  struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
                                                struct dma_buf *dma_buf,