Merge tag 'drm-misc-next-2022-11-24' of git://anongit.freedesktop.org/drm/drm-misc...
author      Daniel Vetter <[email protected]>
Tue, 3 Jan 2023 08:48:03 +0000 (09:48 +0100)
committer   Daniel Vetter <[email protected]>
Tue, 3 Jan 2023 08:48:04 +0000 (09:48 +0100)
drm-misc-next for 6.2:

Cross-subsystem Changes:
- fbdev: Make fb_modesetting_disabled() static
- udmabuf: Add vmap and vunmap methods to udmabuf_ops

Core Changes:
- doc: make drm-uapi igt-tests more readable
- fb-helper: Revert the damage worker removal
- fourcc: Add missing big-endian XRGB1555 and RGB565 formats
- gem-shmem: Fix for resource leakage in __drm_gem_shmem_create()
- scheduler: Fix lockup in drm_sched_entity_kill()

Signed-off-by: Daniel Vetter <[email protected]>
From: Maxime Ripard <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/20221124074615.ahflw5q5ktfdsr7k@houat
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/scheduler/sched_main.c

index b602cd72a12059f124640475f64eb0e271073dc5,db73234edcbe8627cc648819e296b38704bd35ef..f21f47737817a67e4114157d2a18d42c3efc10a8
@@@ -79,8 -79,10 +79,10 @@@ __drm_gem_shmem_create(struct drm_devic
        } else {
                ret = drm_gem_object_init(dev, obj, size);
        }
-       if (ret)
+       if (ret) {
+               drm_gem_private_object_fini(obj);
                goto err_free;
+       }
  
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
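
The hunk above stops the leak by releasing whatever the failed init step had already set up, via drm_gem_private_object_fini(), before the object is freed. A rough standalone illustration of that unwind pattern — hypothetical names, plain C, not the DRM helpers:

/*
 * Hypothetical sketch (not the DRM helpers): an init step sets up internal
 * state and can still fail afterwards, so the create function has to undo
 * that state on its error path.
 */
#include <stdlib.h>

struct obj {
        void *resv;     /* set up early by obj_init() */
        void *backing;  /* allocated last, may fail */
};

static void obj_fini(struct obj *o)
{
        free(o->resv);
        o->resv = NULL;
}

static int obj_init(struct obj *o)
{
        o->resv = malloc(64);
        if (!o->resv)
                return -1;

        o->backing = malloc(4096);
        if (!o->backing)
                return -1;      /* fails, but o->resv is already live */

        return 0;
}

struct obj *obj_create(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return NULL;

        if (obj_init(o)) {
                obj_fini(o);    /* release what the failed init set up */
                free(o);
                return NULL;
        }
        return o;
}
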
@@@ -571,20 -573,12 +573,20 @@@ static void drm_gem_shmem_vm_open(struc
  {
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 -      int ret;
  
        WARN_ON(shmem->base.import_attach);
  
 -      ret = drm_gem_shmem_get_pages(shmem);
 -      WARN_ON_ONCE(ret != 0);
 +      mutex_lock(&shmem->pages_lock);
 +
 +      /*
 +       * We should have already pinned the pages when the buffer was first
 +       * mmap'd, vm_open() just grabs an additional reference for the new
 +       * mm the vma is getting copied into (ie. on fork()).
 +       */
 +      if (!WARN_ON_ONCE(!shmem->pages_use_count))
 +              shmem->pages_use_count++;
 +
 +      mutex_unlock(&shmem->pages_lock);
  
        drm_gem_vm_open(vma);
  }
@@@ -630,8 -624,10 +632,8 @@@ int drm_gem_shmem_mmap(struct drm_gem_s
        }
  
        ret = drm_gem_shmem_get_pages(shmem);
 -      if (ret) {
 -              drm_gem_vm_close(vma);
 +      if (ret)
                return ret;
 -      }
  
        vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
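
In the vm_open() hunk, the fallible drm_gem_shmem_get_pages() call is replaced by a plain reference-count bump under pages_lock: as the new comment notes, the pages were already pinned at the first mmap, so a copied VMA (e.g. on fork()) only needs another reference. A rough standalone sketch of that pattern — hypothetical names, pthreads standing in for the kernel mutex:

/*
 * Hypothetical sketch of the vm_open() pattern above: the backing pages were
 * pinned when the buffer was first mmap'd, so a copied VMA only needs another
 * reference, never a fresh, fallible pin.
 */
#include <assert.h>
#include <pthread.h>

struct shmem_buf {
        pthread_mutex_t pages_lock;
        unsigned int pages_use_count;   /* > 0 once the buffer is mapped */
};

static void buf_vm_open(struct shmem_buf *buf)
{
        pthread_mutex_lock(&buf->pages_lock);

        /* A zero count here would mean the pages were never pinned. */
        assert(buf->pages_use_count > 0);
        buf->pages_use_count++;

        pthread_mutex_unlock(&buf->pages_lock);
}
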
index 31f3a1267be44fbf87562b2d0c1b54398b005feb,857ec20be9e8e5d7edfbbc9e8a452903dbc24ad8..fd22d753b4ed0ca40ade22549b042f767e1bab30
@@@ -417,6 -417,27 +417,6 @@@ static void drm_sched_job_timedout(stru
        }
  }
  
 - /**
 -  * drm_sched_increase_karma - Update sched_entity guilty flag
 -  *
 -  * @bad: The job guilty of time out
 -  *
 -  * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 -  * limit of the scheduler then the respective sched entity is marked guilty and
 -  * jobs from it will not be scheduled further
 -  */
 -void drm_sched_increase_karma(struct drm_sched_job *bad)
 -{
 -      drm_sched_increase_karma_ext(bad, 1);
 -}
 -EXPORT_SYMBOL(drm_sched_increase_karma);
 -
 -void drm_sched_reset_karma(struct drm_sched_job *bad)
 -{
 -      drm_sched_increase_karma_ext(bad, 0);
 -}
 -EXPORT_SYMBOL(drm_sched_reset_karma);
 -
  /**
   * drm_sched_stop - stop the scheduler
   *
@@@ -557,15 -578,32 +557,15 @@@ EXPORT_SYMBOL(drm_sched_start)
   *
   */
  void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 -{
 -      drm_sched_resubmit_jobs_ext(sched, INT_MAX);
 -}
 -EXPORT_SYMBOL(drm_sched_resubmit_jobs);
 -
 -/**
 - * drm_sched_resubmit_jobs_ext - helper to relunch certain number of jobs from mirror ring list
 - *
 - * @sched: scheduler instance
 - * @max: job numbers to relaunch
 - *
 - */
 -void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
  {
        struct drm_sched_job *s_job, *tmp;
        uint64_t guilty_context;
        bool found_guilty = false;
        struct dma_fence *fence;
 -      int i = 0;
  
        list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
                struct drm_sched_fence *s_fence = s_job->s_fence;
  
 -              if (i >= max)
 -                      break;
 -
                if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
                        found_guilty = true;
                        guilty_context = s_job->s_fence->scheduled.context;
                        dma_fence_set_error(&s_fence->finished, -ECANCELED);
  
                fence = sched->ops->run_job(s_job);
 -              i++;
  
                if (IS_ERR_OR_NULL(fence)) {
                        if (IS_ERR(fence))
                }
        }
  }
 -EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
 +EXPORT_SYMBOL(drm_sched_resubmit_jobs);
  
  /**
   * drm_sched_job_init - init a scheduler job
@@@ -987,7 -1026,7 +987,7 @@@ static int drm_sched_main(void *param
                sched_job = drm_sched_entity_pop_job(entity);
  
                if (!sched_job) {
-                       complete(&entity->entity_idle);
+                       complete_all(&entity->entity_idle);
                        continue;
                }
  
  
                trace_drm_run_job(sched_job, entity);
                fence = sched->ops->run_job(sched_job);
-               complete(&entity->entity_idle);
+               complete_all(&entity->entity_idle);
                drm_sched_fence_scheduled(s_fence);
  
                if (!IS_ERR_OR_NULL(fence)) {
@@@ -1126,15 -1165,13 +1126,15 @@@ void drm_sched_fini(struct drm_gpu_sche
  EXPORT_SYMBOL(drm_sched_fini);
  
  /**
 - * drm_sched_increase_karma_ext - Update sched_entity guilty flag
 + * drm_sched_increase_karma - Update sched_entity guilty flag
   *
   * @bad: The job guilty of time out
 - * @type: type for increase/reset karma
   *
 + * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 + * limit of the scheduler then the respective sched entity is marked guilty and
 + * jobs from it will not be scheduled further
   */
 -void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
 +void drm_sched_increase_karma(struct drm_sched_job *bad)
  {
        int i;
        struct drm_sched_entity *tmp;
         * corrupt but keep in mind that kernel jobs always considered good.
         */
        if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
 -              if (type == 0)
 -                      atomic_set(&bad->karma, 0);
 -              else if (type == 1)
 -                      atomic_inc(&bad->karma);
 +              atomic_inc(&bad->karma);
  
                for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
                     i++) {
                                if (bad->s_fence->scheduled.context ==
                                    entity->fence_context) {
                                        if (entity->guilty)
 -                                              atomic_set(entity->guilty, type);
 +                                              atomic_set(entity->guilty, 1);
                                        break;
                                }
                        }
                }
        }
  }
 -EXPORT_SYMBOL(drm_sched_increase_karma_ext);
 +EXPORT_SYMBOL(drm_sched_increase_karma);
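
The drm_sched_main() hunks above switch entity_idle from complete() to complete_all(): complete() lets a single waiter through, while complete_all() latches the completion so every current and future waiter observes that the entity went idle. A rough userspace analogy — hypothetical names, plain pthreads, not the kernel completion API:

/*
 * Userspace analogy for complete() vs complete_all(). Broadcasting and
 * latching the "done" state lets every waiter, including late arrivals,
 * proceed; a single wake-up would release only one of them.
 */
#include <pthread.h>
#include <stdbool.h>

struct completion_like {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;
};

static void complete_all_like(struct completion_like *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = true;                     /* latch the state ... */
        pthread_cond_broadcast(&c->cond);   /* ... and wake every waiter */
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion_like(struct completion_like *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}
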