Git Repo - J-linux.git/commitdiff
Merge tag 'drm-misc-next-2022-11-10-1' of git://anongit.freedesktop.org/drm/drm-misc...
authorDave Airlie <[email protected]>
Tue, 15 Nov 2022 07:29:49 +0000 (17:29 +1000)
committerDave Airlie <[email protected]>
Tue, 15 Nov 2022 21:17:32 +0000 (07:17 +1000)
drm-misc-next for 6.2:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
- atomic-helper: Add begin_fb_access and end_fb_access hooks
- fb-helper: Rework to move fb emulation into helpers
- scheduler: rework entity flush, kill and fini
- ttm: Optimize pool allocations

Driver Changes:
- amdgpu: scheduler rework
- hdlcd: Switch to DRM-managed resources
- ingenic: Fix registration error path
- lcdif: FIFO threshold tuning
- meson: Fix return type of cvbs' mode_valid
- ofdrm: multiple fixes (kconfig, types, endianness)
- sun4i: A100 and D1 support
- panel:
  - New Panel: Jadard JD9365DA-H3

Signed-off-by: Dave Airlie <[email protected]>
From: Maxime Ripard <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/20221110083612.g63eaocoaa554soh@houat
1  2 
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/nouveau/nouveau_drm.c

index 8816853e50c0a3d149444b46a06760a71e845cca,046d466b4ee40a729c0586259ec2867c19607c80..f99d4873bf223da6d3e286f2c29a07a6e31fcb37
@@@ -195,7 -195,7 +195,7 @@@ void amdgpu_amdkfd_device_init(struct a
                }
  
                adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
 -                                              adev_to_drm(adev), &gpu_resources);
 +                                                      &gpu_resources);
  
                amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
  
@@@ -673,7 -673,7 +673,7 @@@ int amdgpu_amdkfd_submit_ib(struct amdg
                goto err;
        }
  
-       ret = amdgpu_job_alloc(adev, 1, &job, NULL);
+       ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
        if (ret)
                goto err;
  
@@@ -706,13 -706,6 +706,13 @@@ err
  
  void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
  {
 +      /* Temporary workaround to fix issues observed in some
 +       * compute applications when GFXOFF is enabled on GFX11.
 +       */
 +      if (IP_VERSION_MAJ(adev->ip_versions[GC_HWIP][0]) == 11) {
 +              pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
 +              amdgpu_gfx_off_ctrl(adev, idle);
 +      }
        amdgpu_dpm_switch_power_profile(adev,
                                        PP_SMC_POWER_PROFILE_COMPUTE,
                                        !idle);
@@@ -760,7 -753,9 +760,7 @@@ bool amdgpu_amdkfd_have_atomics_support
  
  void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
  {
 -      struct ras_err_data err_data = {0, 0, 0, NULL};
 -
 -      amdgpu_umc_poison_handler(adev, &err_data, reset);
 +      amdgpu_umc_poison_handler(adev, reset);
  }
  
  bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
index 5b9f992e4607457fedc518b25f0fb3e97d32c902,cc65df9f241964fb8b852063386ec85c0acc254e..ad4e78728733e8ef823e6878ba6dd3f6b1714e67
@@@ -37,6 -37,7 +37,7 @@@
  #include <linux/pci-p2pdma.h>
  
  #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_fb_helper.h>
  #include <drm/drm_probe_helper.h>
  #include <drm/amdgpu_drm.h>
  #include <linux/vgaarb.h>
@@@ -1568,7 -1569,7 +1569,7 @@@ static int amdgpu_device_check_argument
   * @pdev: pci dev pointer
   * @state: vga_switcheroo state
   *
 - * Callback for the switcheroo driver.  Suspends or resumes the
 + * Callback for the switcheroo driver.  Suspends or resumes
   * the asics before or after it is powered up using ACPI methods.
   */
  static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
@@@ -2397,7 -2398,7 +2398,7 @@@ static int amdgpu_device_ip_init(struc
                        adev->ip_blocks[i].status.hw = true;
  
                        /* right after GMC hw init, we create CSA */
 -                      if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 +                      if (amdgpu_mcbp) {
                                r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
                                                                AMDGPU_GEM_DOMAIN_VRAM,
                                                                AMDGPU_CSA_SIZE);
@@@ -3210,15 -3211,6 +3211,15 @@@ static int amdgpu_device_ip_resume_phas
                        return r;
                }
                adev->ip_blocks[i].status.hw = true;
 +
 +              if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
 +                      /* disable gfxoff for IP resume. The gfxoff will be re-enabled in
 +                       * amdgpu_device_resume() after IP resume.
 +                       */
 +                      amdgpu_gfx_off_ctrl(adev, false);
 +                      DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
 +              }
 +
        }
  
        return 0;
@@@ -4060,18 -4052,15 +4061,18 @@@ void amdgpu_device_fini_sw(struct amdgp
   * at suspend time.
   *
   */
 -static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
 +static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
  {
 +      int ret;
 +
        /* No need to evict vram on APUs for suspend to ram or s2idle */
        if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
 -              return;
 +              return 0;
  
 -      if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
 +      ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
 +      if (ret)
                DRM_WARN("evicting device resources failed\n");
 -
 +      return ret;
  }
  
  /*
@@@ -4121,9 -4110,7 +4122,9 @@@ int amdgpu_device_suspend(struct drm_de
        if (!adev->in_s0ix)
                amdgpu_amdkfd_suspend(adev, adev->in_runpm);
  
 -      amdgpu_device_evict_resources(adev);
 +      r = amdgpu_device_evict_resources(adev);
 +      if (r)
 +              return r;
  
        amdgpu_fence_driver_hw_fini(adev);
  
@@@ -4197,17 -4184,8 +4198,17 @@@ int amdgpu_device_resume(struct drm_dev
        }
  
        /* Make sure IB tests flushed */
 +      if (amdgpu_sriov_vf(adev))
 +              amdgpu_irq_gpu_reset_resume_helper(adev);
        flush_delayed_work(&adev->delayed_init_work);
  
 +      if (adev->in_s0ix) {
 +              /* re-enable gfxoff after IP resume. This re-enables gfxoff after
 +               * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
 +               */
 +              amdgpu_gfx_off_ctrl(adev, true);
 +              DRM_DEBUG("will enable gfxoff for the mission mode\n");
 +      }
        if (fbcon)
                drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
  
@@@ -5404,7 -5382,7 +5405,7 @@@ skip_hw_reset
                        drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
                }
  
 -              if (adev->enable_mes)
 +              if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
                        amdgpu_mes_self_test(tmp_adev);
  
                if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
index bf2d50c8c92ad5e1f64ce125f6793a1b3131e88a,ca96ee2c2c962696b6b0aca1ab30862f53f89722..6b35bb948f96321dc08c59a69de34a23bccc2381
@@@ -25,6 -25,7 +25,7 @@@
  #include <drm/amdgpu_drm.h>
  #include <drm/drm_aperture.h>
  #include <drm/drm_drv.h>
+ #include <drm/drm_fbdev_generic.h>
  #include <drm/drm_gem.h>
  #include <drm/drm_vblank.h>
  #include <drm/drm_managed.h>
@@@ -2201,8 -2202,7 +2202,8 @@@ amdgpu_pci_remove(struct pci_dev *pdev
                pm_runtime_forbid(dev->dev);
        }
  
 -      if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) {
 +      if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
 +          !amdgpu_sriov_vf(adev)) {
                bool need_to_reset_gpu = false;
  
                if (adev->gmc.xgmi.num_physical_nodes > 1) {
index d75e0370a07438be412e144a616ce8cf20781ce6,b495eff635a329d00aabfc8a1ee6267e6f307c7e..ba348046fcec98791a7d812f713dcf3692b6eab2
@@@ -29,6 -29,7 +29,7 @@@
  #include "amdgpu.h"
  #include <drm/amdgpu_drm.h>
  #include <drm/drm_drv.h>
+ #include <drm/drm_fb_helper.h>
  #include "amdgpu_uvd.h"
  #include "amdgpu_vce.h"
  #include "atom.h"
@@@ -337,17 -338,11 +338,17 @@@ static int amdgpu_firmware_info(struct 
                fw_info->feature = adev->psp.cap_feature_version;
                break;
        case AMDGPU_INFO_FW_MES_KIQ:
 -              fw_info->ver = adev->mes.ucode_fw_version[0];
 -              fw_info->feature = 0;
 +              fw_info->ver = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK;
 +              fw_info->feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK)
 +                                      >> AMDGPU_MES_FEAT_VERSION_SHIFT;
                break;
        case AMDGPU_INFO_FW_MES:
 -              fw_info->ver = adev->mes.ucode_fw_version[1];
 +              fw_info->ver = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
 +              fw_info->feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK)
 +                                      >> AMDGPU_MES_FEAT_VERSION_SHIFT;
 +              break;
 +      case AMDGPU_INFO_FW_IMU:
 +              fw_info->ver = adev->gfx.imu_fw_version;
                fw_info->feature = 0;
                break;
        default:
@@@ -796,7 -791,7 +797,7 @@@ int amdgpu_info_ioctl(struct drm_devic
                dev_info->ids_flags = 0;
                if (adev->flags & AMD_IS_APU)
                        dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
 -              if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
 +              if (amdgpu_mcbp)
                        dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
                if (amdgpu_is_tmz(adev))
                        dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
@@@ -1172,7 -1167,7 +1173,7 @@@ int amdgpu_driver_open_kms(struct drm_d
                goto error_vm;
        }
  
 -      if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 +      if (amdgpu_mcbp) {
                uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
  
                r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
@@@ -1236,7 -1231,7 +1237,7 @@@ void amdgpu_driver_postclose_kms(struc
        if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
                amdgpu_vce_free_handles(adev, file_priv);
  
 -      if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 +      if (amdgpu_mcbp) {
                /* TODO: how to handle reserve failure */
                BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
                amdgpu_vm_bo_del(adev, fpriv->csa_va);
@@@ -1526,15 -1521,6 +1527,15 @@@ static int amdgpu_debugfs_firmware_info
                           fw_info.feature, fw_info.ver);
        }
  
 +      /* IMU */
 +      query_fw.fw_type = AMDGPU_INFO_FW_IMU;
 +      query_fw.index = 0;
 +      ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
 +      if (ret)
 +              return ret;
 +      seq_printf(m, "IMU feature version: %u, firmware version: 0x%08x\n",
 +                 fw_info.feature, fw_info.ver);
 +
        /* PSP SOS */
        query_fw.fw_type = AMDGPU_INFO_FW_SOS;
        ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
index 8c00a7a06c32dc71e5c625084303b8f5b2424397,aea8d26b1724926b252cf7bea9831eef6f4b049a..0ba87ca6f318b978f65187bb19ae04a87f67897d
@@@ -189,7 -189,6 +189,6 @@@ static int amdgpu_ttm_map_buffer(struc
        struct amdgpu_device *adev = ring->adev;
        unsigned offset, num_pages, num_dw, num_bytes;
        uint64_t src_addr, dst_addr;
-       struct dma_fence *fence;
        struct amdgpu_job *job;
        void *cpu_addr;
        uint64_t flags;
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
        num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
  
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+       r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+                                    AMDGPU_FENCE_OWNER_UNDEFINED,
+                                    num_dw * 4 + num_bytes,
                                     AMDGPU_IB_POOL_DELAYED, &job);
        if (r)
                return r;
                }
        }
  
-       r = amdgpu_job_submit(job, &adev->mman.entity,
-                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-       if (r)
-               goto error_free;
-       dma_fence_put(fence);
-       return r;
- error_free:
-       amdgpu_job_free(job);
-       return r;
+       dma_fence_put(amdgpu_job_submit(job));
+       return 0;
  }
  
  /**
@@@ -1417,7 -1408,8 +1408,8 @@@ static void amdgpu_ttm_vram_mm_access(s
  }
  
  static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
-                                       unsigned long offset, void *buf, int len, int write)
+                                       unsigned long offset, void *buf,
+                                       int len, int write)
  {
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
                memcpy(adev->mman.sdma_access_ptr, buf, len);
  
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
+       r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+                                    AMDGPU_FENCE_OWNER_UNDEFINED,
+                                    num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+                                    &job);
        if (r)
                goto out;
  
        amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
-       src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
+       src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
+               src_mm.start;
        dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
        if (write)
                swap(src_addr, dst_addr);
  
-       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
+       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
+                               PAGE_SIZE, false);
  
        amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
  
-       r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-       if (r) {
-               amdgpu_job_free(job);
-               goto out;
-       }
+       fence = amdgpu_job_submit(job);
  
        if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
                r = -ETIMEDOUT;
@@@ -1959,7 -1952,9 +1952,9 @@@ static int amdgpu_ttm_prepare_job(struc
                AMDGPU_IB_POOL_DELAYED;
        int r;
  
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
+       r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+                                    AMDGPU_FENCE_OWNER_UNDEFINED,
+                                    num_dw * 4, pool, job);
        if (r)
                return r;
  
                                                        adev->gart.bo);
                (*job)->vm_needs_flush = true;
        }
-       if (resv) {
-               r = amdgpu_sync_resv(adev, &(*job)->sync, resv,
-                                    AMDGPU_SYNC_ALWAYS,
-                                    AMDGPU_FENCE_OWNER_UNDEFINED);
-               if (r) {
-                       DRM_ERROR("sync failed (%d).\n", r);
-                       amdgpu_job_free(*job);
-                       return r;
-               }
-       }
-       return 0;
+       if (!resv)
+               return 0;
+       return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
+                                                  DMA_RESV_USAGE_BOOKKEEP);
  }
  
  int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        if (direct_submit)
                r = amdgpu_job_submit_direct(job, ring, fence);
        else
-               r = amdgpu_job_submit(job, &adev->mman.entity,
-                                     AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+               *fence = amdgpu_job_submit(job);
        if (r)
                goto error_free;
  
@@@ -2070,16 -2058,8 +2058,8 @@@ static int amdgpu_ttm_fill_mem(struct a
  
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
-       r = amdgpu_job_submit(job, &adev->mman.entity,
-                             AMDGPU_FENCE_OWNER_UNDEFINED, fence);
-       if (r)
-               goto error_free;
+       *fence = amdgpu_job_submit(job);
        return 0;
- error_free:
-       amdgpu_job_free(job);
-       return r;
  }
  
  int amdgpu_fill_buffer(struct amdgpu_bo *bo,
@@@ -2295,9 -2275,9 +2275,9 @@@ static ssize_t amdgpu_iomem_read(struc
                if (p->mapping != adev->mman.bdev.dev_mapping)
                        return -EPERM;
  
 -              ptr = kmap(p);
 +              ptr = kmap_local_page(p);
                r = copy_to_user(buf, ptr + off, bytes);
 -              kunmap(p);
 +              kunmap_local(ptr);
                if (r)
                        return -EFAULT;
  
@@@ -2346,9 -2326,9 +2326,9 @@@ static ssize_t amdgpu_iomem_write(struc
                if (p->mapping != adev->mman.bdev.dev_mapping)
                        return -EPERM;
  
 -              ptr = kmap(p);
 +              ptr = kmap_local_page(p);
                r = copy_from_user(ptr + off, buf, bytes);
 -              kunmap(p);
 +              kunmap_local(ptr);
                if (r)
                        return -EFAULT;
  
index 3723e90e3a9046bb9450a8cce43d70ee9f238675,79069c485386f4831133eeb58d4dc910c3c5ad93..c9a95a816912d1c6f0387d8819c307ef281330c1
@@@ -65,8 -65,11 +65,11 @@@ svm_migrate_gart_map(struct amdgpu_rin
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
        num_bytes = npages * 8;
  
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
-                                    AMDGPU_IB_POOL_DELAYED, &job);
+       r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+                                    AMDGPU_FENCE_OWNER_UNDEFINED,
+                                    num_dw * 4 + num_bytes,
+                                    AMDGPU_IB_POOL_DELAYED,
+                                    &job);
        if (r)
                return r;
  
        cpu_addr = &job->ibs[0].ptr[num_dw];
  
        amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
-       r = amdgpu_job_submit(job, &adev->mman.entity,
-                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-       if (r)
-               goto error_free;
+       fence = amdgpu_job_submit(job);
        dma_fence_put(fence);
  
        return r;
- error_free:
-       amdgpu_job_free(job);
-       return r;
  }
  
  /**
@@@ -529,8 -524,8 +524,8 @@@ svm_migrate_ram_to_vram(struct svm_rang
        for (addr = start; addr < end;) {
                unsigned long next;
  
 -              vma = find_vma(mm, addr);
 -              if (!vma || addr < vma->vm_start)
 +              vma = vma_lookup(mm, addr);
 +              if (!vma)
                        break;
  
                next = min(vma->vm_end, end);
@@@ -798,8 -793,8 +793,8 @@@ int svm_migrate_vram_to_ram(struct svm_
        for (addr = start; addr < end;) {
                unsigned long next;
  
 -              vma = find_vma(mm, addr);
 -              if (!vma || addr < vma->vm_start) {
 +              vma = vma_lookup(mm, addr);
 +              if (!vma) {
                        pr_debug("failed to find vma for prange %p\n", prange);
                        r = -EFAULT;
                        break;
@@@ -973,10 -968,12 +968,10 @@@ out_unlock_prange
  out_unlock_svms:
        mutex_unlock(&p->svms.lock);
  out_unref_process:
 +      pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
        kfd_unref_process(p);
  out_mmput:
        mmput(mm);
 -
 -      pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
 -
        return r ? VM_FAULT_SIGBUS : 0;
  }
  
index 3db79f0b5a8faae06a30e6f97f19ba37aac15ff2,d58dd916488a15a95b1a9b69e811dc1cd05de094..441810277cb5e1c1d0e81cd315d325d7fc9a2f86
@@@ -82,7 -82,6 +82,6 @@@
  #include <drm/drm_atomic_uapi.h>
  #include <drm/drm_atomic_helper.h>
  #include <drm/drm_blend.h>
- #include <drm/drm_fb_helper.h>
  #include <drm/drm_fourcc.h>
  #include <drm/drm_edid.h>
  #include <drm/drm_vblank.h>
@@@ -1399,6 -1398,7 +1398,6 @@@ static int amdgpu_dm_init(struct amdgpu
  
        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
 -      spin_lock_init(&adev->dm.vblank_lock);
  
        if(amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
  
        adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
  
 +      /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
 +      adev->dm.dc->debug.ignore_cable_id = true;
 +
        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@@ -2812,7 -2809,6 +2811,6 @@@ const struct amdgpu_ip_block_version dm
  static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
        .fb_create = amdgpu_display_user_framebuffer_create,
        .get_format_info = amd_get_format_info,
-       .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = amdgpu_dm_atomic_check,
        .atomic_commit = drm_atomic_helper_commit,
  };
@@@ -5604,14 -5600,16 +5602,14 @@@ static void apply_dsc_policy_for_stream
  {
        struct drm_connector *drm_connector = &aconnector->base;
        uint32_t link_bandwidth_kbps;
 -      uint32_t max_dsc_target_bpp_limit_override = 0;
        struct dc *dc = sink->ctx->dc;
        uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
        uint32_t dsc_max_supported_bw_in_kbps;
 +      uint32_t max_dsc_target_bpp_limit_override =
 +              drm_connector->display_info.max_dsc_bpp;
  
        link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
                                                        dc_link_get_link_cap(aconnector->dc_link));
 -      if (stream->link && stream->link->local_sink)
 -              max_dsc_target_bpp_limit_override =
 -                      stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
  
        /* Set DSC policy according to dsc_clock_en */
        dc_dsc_policy_set_enable_dsc_when_not_needed(
@@@ -5692,7 -5690,6 +5690,7 @@@ create_stream_for_sink(struct amdgpu_dm
        bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
        int mode_refresh;
        int preferred_refresh = 0;
 +      enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
  #if defined(CONFIG_DRM_AMD_DC_DCN)
        struct dsc_dec_dpcd_caps dsc_caps;
  #endif
                        if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
                                stream->use_vsc_sdp_for_colorimetry = true;
                }
 -              mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
 +              if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
 +                      tf = TRANSFER_FUNC_GAMMA_22;
 +              mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
                aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
  
        }
@@@ -6148,70 -6143,6 +6146,70 @@@ static void handle_edid_mgmt(struct amd
        create_eml_sink(aconnector);
  }
  
 +static enum dc_status dm_validate_stream_and_context(struct dc *dc,
 +                                              struct dc_stream_state *stream)
 +{
 +      enum dc_status dc_result = DC_ERROR_UNEXPECTED;
 +      struct dc_plane_state *dc_plane_state = NULL;
 +      struct dc_state *dc_state = NULL;
 +
 +      if (!stream)
 +              goto cleanup;
 +
 +      dc_plane_state = dc_create_plane_state(dc);
 +      if (!dc_plane_state)
 +              goto cleanup;
 +
 +      dc_state = dc_create_state(dc);
 +      if (!dc_state)
 +              goto cleanup;
 +
 +      /* populate stream to plane */
 +      dc_plane_state->src_rect.height  = stream->src.height;
 +      dc_plane_state->src_rect.width   = stream->src.width;
 +      dc_plane_state->dst_rect.height  = stream->src.height;
 +      dc_plane_state->dst_rect.width   = stream->src.width;
 +      dc_plane_state->clip_rect.height = stream->src.height;
 +      dc_plane_state->clip_rect.width  = stream->src.width;
 +      dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
 +      dc_plane_state->plane_size.surface_size.height = stream->src.height;
 +      dc_plane_state->plane_size.surface_size.width  = stream->src.width;
 +      dc_plane_state->plane_size.chroma_size.height  = stream->src.height;
 +      dc_plane_state->plane_size.chroma_size.width   = stream->src.width;
 +      dc_plane_state->tiling_info.gfx9.swizzle =  DC_SW_UNKNOWN;
 +      dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
 +      dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
 +      dc_plane_state->rotation = ROTATION_ANGLE_0;
 +      dc_plane_state->is_tiling_rotated = false;
 +      dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
 +
 +      dc_result = dc_validate_stream(dc, stream);
 +      if (dc_result == DC_OK)
 +              dc_result = dc_validate_plane(dc, dc_plane_state);
 +
 +      if (dc_result == DC_OK)
 +              dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
 +
 +      if (dc_result == DC_OK && !dc_add_plane_to_context(
 +                                              dc,
 +                                              stream,
 +                                              dc_plane_state,
 +                                              dc_state))
 +              dc_result = DC_FAIL_ATTACH_SURFACES;
 +
 +      if (dc_result == DC_OK)
 +              dc_result = dc_validate_global_state(dc, dc_state, true);
 +
 +cleanup:
 +      if (dc_state)
 +              dc_release_state(dc_state);
 +
 +      if (dc_plane_state)
 +              dc_plane_state_release(dc_plane_state);
 +
 +      return dc_result;
 +}
 +
  struct dc_stream_state *
  create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                                const struct drm_display_mode *drm_mode,
                if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                        dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
  
 +              if (dc_result == DC_OK)
 +                      dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
 +
                if (dc_result != DC_OK) {
                        DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
                                      drm_mode->hdisplay,
@@@ -7902,9 -7830,6 +7900,9 @@@ static void amdgpu_dm_commit_planes(str
                         */
                        if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
                            acrtc_attach->dm_irq_params.allow_psr_entry &&
 +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
 +                          !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
 +#endif
                            !acrtc_state->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_enable(acrtc_state->stream);
                } else {
@@@ -8366,8 -8291,8 +8364,8 @@@ static void amdgpu_dm_atomic_commit_tai
  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
                                if (amdgpu_dm_crc_window_is_activated(crtc)) {
                                        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 -                                      acrtc->dm_irq_params.crc_window.update_win = true;
 -                                      acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
 +                                      acrtc->dm_irq_params.window_param.update_win = true;
 +                                      acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
                                        spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
                                        crc_rd_wrk->crtc = crtc;
                                        spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
index a19f18b251f315aa151b343570f7a5395b7c8e2f,fd99ec0f4257a6a78308f5d2e0e8c0b192e4efb4..80f154b6adabd684495f739a9c17c5b629049c57
@@@ -33,7 -33,6 +33,8 @@@
  #include <drm/drm_aperture.h>
  #include <drm/drm_crtc_helper.h>
  #include <drm/drm_drv.h>
 +#include <drm/drm_fb_helper.h>
++#include <drm/drm_fbdev_generic.h>
  #include <drm/drm_gem_ttm_helper.h>
  #include <drm/drm_ioctl.h>
  #include <drm/drm_vblank.h>
@@@ -50,6 -49,7 +51,6 @@@
  
  #include <nvif/class.h>
  #include <nvif/cl0002.h>
 -#include <nvif/cla06f.h>
  
  #include "nouveau_drv.h"
  #include "nouveau_dma.h"
@@@ -62,6 -62,7 +63,6 @@@
  #include "nouveau_bios.h"
  #include "nouveau_ioctl.h"
  #include "nouveau_abi16.h"
 -#include "nouveau_fbcon.h"
  #include "nouveau_fence.h"
  #include "nouveau_debugfs.h"
  #include "nouveau_usif.h"
@@@ -315,19 -316,28 +316,19 @@@ static voi
  nouveau_accel_ce_init(struct nouveau_drm *drm)
  {
        struct nvif_device *device = &drm->client.device;
 +      u64 runm;
        int ret = 0;
  
        /* Allocate channel that has access to a (preferably async) copy
         * engine, to use for TTM buffer moves.
         */
 -      if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
 -              ret = nouveau_channel_new(drm, device,
 -                                        nvif_fifo_runlist_ce(device), 0,
 -                                        true, &drm->cechan);
 -      } else
 -      if (device->info.chipset >= 0xa3 &&
 -          device->info.chipset != 0xaa &&
 -          device->info.chipset != 0xac) {
 -              /* Prior to Kepler, there's only a single runlist, so all
 -               * engines can be accessed from any channel.
 -               *
 -               * We still want to use a separate channel though.
 -               */
 -              ret = nouveau_channel_new(drm, device, NvDmaFB, NvDmaTT, false,
 -                                        &drm->cechan);
 +      runm = nvif_fifo_runlist_ce(device);
 +      if (!runm) {
 +              NV_DEBUG(drm, "no ce runlist\n");
 +              return;
        }
  
 +      ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
        if (ret)
                NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
  }
@@@ -345,17 -355,23 +346,17 @@@ static voi
  nouveau_accel_gr_init(struct nouveau_drm *drm)
  {
        struct nvif_device *device = &drm->client.device;
 -      u32 arg0, arg1;
 +      u64 runm;
        int ret;
  
 -      if (device->info.family >= NV_DEVICE_INFO_V0_AMPERE)
 -              return;
 -
        /* Allocate channel that has access to the graphics engine. */
 -      if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
 -              arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
 -              arg1 = 1;
 -      } else {
 -              arg0 = NvDmaFB;
 -              arg1 = NvDmaTT;
 +      runm = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
 +      if (!runm) {
 +              NV_DEBUG(drm, "no gr runlist\n");
 +              return;
        }
  
 -      ret = nouveau_channel_new(drm, device, arg0, arg1, false,
 -                                &drm->channel);
 +      ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
        if (ret) {
                NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
                nouveau_accel_gr_fini(drm);
@@@ -420,7 -436,6 +421,7 @@@ nouveau_accel_fini(struct nouveau_drm *
        nouveau_accel_gr_fini(drm);
        if (drm->fence)
                nouveau_fence(drm)->dtor(drm);
 +      nouveau_channels_fini(drm);
  }
  
  static void
@@@ -470,7 -485,6 +471,7 @@@ nouveau_accel_init(struct nouveau_drm *
                case PASCAL_CHANNEL_GPFIFO_A:
                case VOLTA_CHANNEL_GPFIFO_A:
                case TURING_CHANNEL_GPFIFO_A:
 +              case AMPERE_CHANNEL_GPFIFO_A:
                case AMPERE_CHANNEL_GPFIFO_B:
                        ret = nvc0_fence_create(drm);
                        break;
@@@ -597,6 -611,7 +598,6 @@@ nouveau_drm_device_init(struct drm_devi
        nouveau_hwmon_init(dev);
        nouveau_svm_init(drm);
        nouveau_dmem_init(drm);
 -      nouveau_fbcon_init(dev);
        nouveau_led_init(dev);
  
        if (nouveau_pmops_runtime()) {
@@@ -640,6 -655,7 +641,6 @@@ nouveau_drm_device_fini(struct drm_devi
        }
  
        nouveau_led_fini(dev);
 -      nouveau_fbcon_fini(dev);
        nouveau_dmem_fini(drm);
        nouveau_svm_fini(drm);
        nouveau_hwmon_fini(dev);
@@@ -793,11 -809,6 +794,11 @@@ static int nouveau_drm_probe(struct pci
        if (ret)
                goto fail_drm_dev_init;
  
 +      if (nouveau_drm(drm_dev)->client.device.info.ram_size <= 32 * 1024 * 1024)
 +              drm_fbdev_generic_setup(drm_dev, 8);
 +      else
 +              drm_fbdev_generic_setup(drm_dev, 32);
 +
        quirk_broken_nv_runpm(pdev);
        return 0;
  
@@@ -854,6 -865,8 +855,6 @@@ nouveau_do_suspend(struct drm_device *d
        nouveau_led_suspend(dev);
  
        if (dev->mode_config.num_crtc) {
 -              NV_DEBUG(drm, "suspending console...\n");
 -              nouveau_fbcon_set_suspend(dev, 1);
                NV_DEBUG(drm, "suspending display...\n");
                ret = nouveau_display_suspend(dev, runtime);
                if (ret)
@@@ -927,6 -940,8 +928,6 @@@ nouveau_do_resume(struct drm_device *de
        if (dev->mode_config.num_crtc) {
                NV_DEBUG(drm, "resuming display...\n");
                nouveau_display_resume(dev, runtime);
 -              NV_DEBUG(drm, "resuming console...\n");
 -              nouveau_fbcon_set_suspend(dev, 0);
        }
  
        nouveau_led_resume(dev);
@@@ -1281,6 -1296,7 +1282,6 @@@ static void nouveau_display_options(voi
        DRM_DEBUG_DRIVER("... tv_disable   : %d\n", nouveau_tv_disable);
        DRM_DEBUG_DRIVER("... ignorelid    : %d\n", nouveau_ignorelid);
        DRM_DEBUG_DRIVER("... duallink     : %d\n", nouveau_duallink);
 -      DRM_DEBUG_DRIVER("... nofbaccel    : %d\n", nouveau_nofbaccel);
        DRM_DEBUG_DRIVER("... config       : %s\n", nouveau_config);
        DRM_DEBUG_DRIVER("... debug        : %s\n", nouveau_debug);
        DRM_DEBUG_DRIVER("... noaccel      : %d\n", nouveau_noaccel);
This page took 0.206395 seconds and 4 git commands to generate.