/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 *
 * Note that once a job was taken from the entity's queue and pushed to the
 * hardware, i.e. the pending queue, the entity must not be referenced anymore
 * through the job's entity pointer.
 */
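/*
 * For illustration only: a minimal driver-side submission flow on top of this
 * API might look like the sketch below. The job allocation (my_alloc_job())
 * and the pre-initialized entity are assumptions of this sketch, not part of
 * this file.
 *
 *	struct drm_sched_job *job = my_alloc_job();
 *	int ret;
 *
 *	ret = drm_sched_job_init(job, &entity, 1, NULL);
 *	if (ret)
 *		return ret;
 *	drm_sched_job_arm(job);
 *	drm_sched_entity_push_job(job);
 */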
/**
 * DOC: Flow Control
 *
 * The DRM GPU scheduler provides a flow control mechanism to regulate the
 * rate at which the jobs fetched from scheduler entities are executed.
 *
 * In this context the &drm_gpu_scheduler keeps track of a driver specified
 * credit limit representing the capacity of this scheduler and a credit count;
 * every &drm_sched_job carries a driver specified number of credits.
 *
 * Once a job is executed (but not yet finished), the job's credits contribute
 * to the scheduler's credit count until the job is finished. If by executing
 * one more job the scheduler's credit count would exceed the scheduler's
 * credit limit, the job won't be executed. Instead, the scheduler will wait
 * until the credit count has decreased enough to not overflow its credit
 * limit. This implies waiting for previously executed jobs.
 *
 * Optionally, drivers may register a callback (update_job_credits) provided by
 * &struct drm_sched_backend_ops to update the job's credits dynamically. The
 * scheduler executes this callback every time the scheduler considers a job
 * for execution and subsequently checks whether the job fits the scheduler's
 * credit limit.
 */
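/*
 * A hedged sketch of such a callback; my_job and its remaining_units field
 * are illustrative driver state, not defined in this file. Returning at
 * least 1 avoids the zero-credits warning in drm_sched_can_queue().
 *
 *	static u32 my_update_job_credits(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job = container_of(sched_job, struct my_job, base);
 *
 *		return job->remaining_units ?: 1;
 *	}
 */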
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)
int drm_sched_policy = DRM_SCHED_POLICY_FIFO;

/**
 * DOC: sched_policy (int)
 * Used to override the default scheduling policy for entities in a run queue.
 */
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);
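/*
 * For example, assuming the scheduler is built as the gpu_sched module (the
 * module name is an assumption of this note, not something this file
 * defines), round-robin selection can be requested on the kernel command line
 * with gpu_sched.sched_policy=0, matching the value of DRM_SCHED_POLICY_RR.
 */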
static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
{
	u32 credits;

	drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
					      atomic_read(&sched->credit_count),
					      &credits));

	return credits;
}

/**
 * drm_sched_can_queue - can we queue more jobs to the hardware?
 * @sched: scheduler instance
 * @entity: the scheduler entity
 *
 * Return true if we can push at least one more job from @entity, false
 * otherwise.
 */
static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
				struct drm_sched_entity *entity)
{
	struct drm_sched_job *s_job;

	s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!s_job)
		return false;

	if (sched->ops->update_job_credits) {
		s_job->credits = sched->ops->update_job_credits(s_job);

		drm_WARN(sched, !s_job->credits,
			 "Jobs with zero credits bypass job-flow control.\n");
	}

	/* If a job exceeds the credit limit, truncate it to the credit limit
	 * itself to guarantee forward progress.
	 */
	if (drm_WARN(sched, s_job->credits > sched->credit_limit,
		     "Jobs may not exceed the credit limit, truncate.\n"))
		s_job->credits = sched->credit_limit;

	return drm_sched_available_credits(sched) >= s_job->credits;
}
static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							    const struct rb_node *b)
{
	struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
	struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);

	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}

static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = entity->rq;

	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
		RB_CLEAR_NODE(&entity->rb_tree_node);
	}
}
void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
	/*
	 * Both locks need to be grabbed, one to protect from entity->rq change
	 * for entity from within concurrent drm_sched_entity_select_rq and the
	 * other to update the rb tree structure.
	 */
	spin_lock(&entity->rq_lock);
	spin_lock(&entity->rq->lock);

	drm_sched_rq_remove_fifo_locked(entity);

	entity->oldest_job_waiting = ts;

	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
		      drm_sched_entity_compare_before);

	spin_unlock(&entity->rq->lock);
	spin_unlock(&entity->rq_lock);
}
/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}
/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);

	spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);

	if (rq->current_entity == entity)
		rq->current_entity = NULL;

	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_remove_fifo_locked(entity);

	spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 *
 * @sched: the gpu scheduler
 * @rq: scheduler run queue to check.
 *
 * Try to find the next ready entity.
 *
 * Return an entity if one is found; return an error-pointer (!NULL) if an
 * entity was ready, but the scheduler had insufficient credits to accommodate
 * its job; return NULL, if no ready entity was found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				/* If we can't queue yet, preserve the current
				 * entity in terms of fairness.
				 */
				if (!drm_sched_can_queue(sched, entity)) {
					spin_unlock(&rq->lock);
					return ERR_PTR(-ENOSPC);
				}

				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		if (drm_sched_entity_is_ready(entity)) {
			/* If we can't queue yet, preserve the current entity in
			 * terms of fairness.
			 */
			if (!drm_sched_can_queue(sched, entity)) {
				spin_unlock(&rq->lock);
				return ERR_PTR(-ENOSPC);
			}

			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}
/**
 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 *
 * @sched: the gpu scheduler
 * @rq: scheduler run queue to check.
 *
 * Find the oldest waiting ready entity.
 *
 * Return an entity if one is found; return an error-pointer (!NULL) if an
 * entity was ready, but the scheduler had insufficient credits to accommodate
 * its job; return NULL, if no ready entity was found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
				struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	spin_lock(&rq->lock);
	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity;

		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
		if (drm_sched_entity_is_ready(entity)) {
			/* If we can't queue yet, preserve the current entity in
			 * terms of fairness.
			 */
			if (!drm_sched_can_queue(sched, entity)) {
				spin_unlock(&rq->lock);
				return ERR_PTR(-ENOSPC);
			}

			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			break;
		}
	}
	spin_unlock(&rq->lock);

	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}
/**
 * drm_sched_run_job_queue - enqueue run-job work
 * @sched: scheduler instance
 */
static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
{
	if (!READ_ONCE(sched->pause_submit))
		queue_work(sched->submit_wq, &sched->work_run_job);
}

/**
 * __drm_sched_run_free_queue - enqueue free-job work
 * @sched: scheduler instance
 */
static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
{
	if (!READ_ONCE(sched->pause_submit))
		queue_work(sched->submit_wq, &sched->work_free_job);
}

/**
 * drm_sched_run_free_queue - enqueue free-job work if ready
 * @sched: scheduler instance
 */
static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job;

	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);
	if (job && dma_fence_is_signaled(&job->s_fence->finished))
		__drm_sched_run_free_queue(sched);
	spin_unlock(&sched->job_list_lock);
}
/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 * @result: the job's result
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_sub(s_job->credits, &sched->credit_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence, result);
	dma_fence_put(&s_fence->finished);
	__drm_sched_run_free_queue(sched);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job, f->error);
}
/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	lockdep_assert_held(&sched->job_list_lock);

	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}

static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
{
	spin_lock(&sched->job_list_lock);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}
/**
 * drm_sched_tdr_queue_imm - immediately start job timeout handler
 *
 * @sched: scheduler for which the timeout handling should be started.
 *
 * Start timeout handling immediately for the named scheduler.
 */
void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
{
	spin_lock(&sched->job_list_lock);
	sched->timeout = 0;
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_tdr_queue_imm);
/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout_wq)
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrary large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining.
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);
/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
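/*
 * Drivers typically pair these two calls around a window in which job
 * timeouts must not fire; a hedged sketch, where my_hw_suspend() and
 * my_hw_resume() stand in for driver-specific work:
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(sched);
 *	my_hw_suspend();
 *	...
 *	my_hw_resume();
 *	drm_sched_resume_timeout(sched, remaining);
 */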
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_finished_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
		 * is parked, at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually removed.
		 * See drm_sched_stop doc.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV)
		drm_sched_start_timeout_unlocked(sched);
}
/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it's the
 * caller's responsibility to release it manually if it's not part of the
 * pending list any more.
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	drm_sched_wqueue_stop(sched);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_finished_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_finished_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_sub(s_job->credits, &sched->credit_count);
		} else {
			/*
			 * Remove job from pending_list.
			 * Locking here is for concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for job's HW fence callback to finish using s_job
			 * before releasing it.
			 *
			 * Job is still alive so fence refcount at least 1.
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start. This
	 * avoids the pending timeout work in progress firing right away after
	 * this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);
/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_add(s_job->credits, &sched->credit_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else
			drm_sched_job_done(s_job, -ECANCELED);
	}

	if (full_recovery)
		drm_sched_start_timeout_unlocked(sched);

	drm_sched_wqueue_start(sched);
}
EXPORT_SYMBOL(drm_sched_start);
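/*
 * A sketch of how a driver's &drm_sched_backend_ops.timedout_job handler can
 * be built on drm_sched_stop()/drm_sched_start(); my_hw_reset() is an
 * illustrative placeholder for the driver's actual reset path:
 *
 *	static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *job)
 *	{
 *		struct drm_gpu_scheduler *sched = job->sched;
 *
 *		drm_sched_stop(sched, job);
 *		my_hw_reset();
 *		drm_sched_start(sched, true);
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */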
/**
 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
 *
 * @sched: scheduler instance
 *
 * Re-submitting jobs was a concept AMD came up with as a cheap way to
 * implement recovery after a job timeout.
 *
 * This turned out to be not working very well. First of all there are many
 * problems with the dma_fence implementation and requirements. Either the
 * implementation is risking deadlocks with core memory management or violating
 * documented implementation details of the dma_fence object.
 *
 * Drivers can still save and restore their state for recovery operations, but
 * we shouldn't make this a general scheduler feature around the dma_fence
 * interface.
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		fence = sched->ops->run_job(s_job);

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {
			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @credits: the number of credits this job contributes to the scheduler's
 *	     credit limit
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for a @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO as
 * a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       u32 credits, void *owner)
{
	if (!entity->rq) {
		/* This will most likely be followed by missing frames
		 * or worse--a blank screen--leave a trail in the
		 * logs, so this can be debugged easier.
		 */
		drm_err(job->sched, "%s: entity has no rq!\n", __func__);
		return -ENOENT;
	}

	if (unlikely(!credits)) {
		pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);
		return -EINVAL;
	}

	job->entity = entity;
	job->credits = credits;
	job->s_fence = drm_sched_fence_alloc(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->s_priority = entity->priority;
	job->id = atomic64_inc_return(&sched->job_id_count);

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);
/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);
/**
 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @file: drm file private pointer
 * @handle: syncobj handle to lookup
 * @point: timeline point
 *
 * This adds the fence matching the given syncobj to @job.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point)
{
	struct dma_fence *fence;
	int ret;

	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
	if (ret)
		return ret;

	return drm_sched_job_add_dependency(job, fence);
}
EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
/**
 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 * @job: scheduler job to add the dependencies to
 * @resv: the dma_resv object to get the fences from
 * @usage: the dma_resv_usage to use to filter the fences
 *
 * This adds all fences matching the given usage from @resv to @job.
 * Must be called with the @resv lock held.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 *   dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	return drm_sched_job_add_resv_dependencies(job, obj->resv,
						   dma_resv_usage_rw(write));
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
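/*
 * Taken together, dependency setup sits between drm_sched_job_init() and
 * drm_sched_job_arm(); a hedged sketch for a job that writes one GEM object
 * (bo, the err_cleanup label and the surrounding locking are illustrative;
 * the error path is expected to end in drm_sched_job_cleanup()):
 *
 *	dma_resv_lock(bo->resv, NULL);
 *	ret = drm_sched_job_add_implicit_dependencies(job, bo, true);
 *	dma_resv_unlock(bo->resv);
 *	if (ret)
 *		goto err_cleanup;
 *	drm_sched_job_arm(job);
 *	drm_sched_entity_push_job(job);
 */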
/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * After that point of no return @job is committed to be executed by the
 * scheduler, and this function should be called from the
 * &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* drm_sched_job_arm() has been called */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* aborted job before committing to run it */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);
}
EXPORT_SYMBOL(drm_sched_job_cleanup);
/**
 * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
 * @sched: scheduler instance
 * @entity: the scheduler entity
 *
 * Wake up the scheduler if we can queue jobs.
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
		      struct drm_sched_entity *entity)
{
	if (drm_sched_can_queue(sched, entity))
		drm_sched_run_job_queue(sched);
}
/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Return an entity to process or NULL if none are found.
 *
 * Note that we break out of the for-loop when "entity" is non-null, which can
 * also be an error-pointer--this assures we don't process lower priority
 * run-queues. See comments in the respectively called functions.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	/* Start with the highest priority.
	 */
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
			drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
			drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
		if (entity)
			break;
	}

	return IS_ERR(entity) ? NULL : entity;
}
/**
 * drm_sched_get_finished_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next) {
			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
				     &next->s_fence->scheduled.flags))
				next->s_fence->scheduled.timestamp =
					dma_fence_timestamp(&job->s_fence->finished);
			/* start TO timer for next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}
/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready.
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);
/**
 * drm_sched_free_job_work - worker to call free_job
 *
 * @w: free job work
 */
static void drm_sched_free_job_work(struct work_struct *w)
{
	struct drm_gpu_scheduler *sched =
		container_of(w, struct drm_gpu_scheduler, work_free_job);
	struct drm_sched_job *job;

	if (READ_ONCE(sched->pause_submit))
		return;

	job = drm_sched_get_finished_job(sched);
	if (job)
		sched->ops->free_job(job);

	drm_sched_run_free_queue(sched);
	drm_sched_run_job_queue(sched);
}
/**
 * drm_sched_run_job_work - worker to call run_job
 *
 * @w: run job work
 */
static void drm_sched_run_job_work(struct work_struct *w)
{
	struct drm_gpu_scheduler *sched =
		container_of(w, struct drm_gpu_scheduler, work_run_job);
	struct drm_sched_entity *entity;
	struct dma_fence *fence;
	struct drm_sched_fence *s_fence;
	struct drm_sched_job *sched_job;
	int r;

	if (READ_ONCE(sched->pause_submit))
		return;

	/* Find entity with a ready job */
	entity = drm_sched_select_entity(sched);
	if (!entity)
		return;	/* No more work */

	sched_job = drm_sched_entity_pop_job(entity);
	if (!sched_job) {
		complete_all(&entity->entity_idle);
		return;	/* No more work */
	}

	s_fence = sched_job->s_fence;

	atomic_add(sched_job->credits, &sched->credit_count);
	drm_sched_job_begin(sched_job);

	trace_drm_run_job(sched_job, entity);
	fence = sched->ops->run_job(sched_job);
	complete_all(&entity->entity_idle);
	drm_sched_fence_scheduled(s_fence, fence);

	if (!IS_ERR_OR_NULL(fence)) {
		/* Drop for original kref_init of the fence */
		dma_fence_put(fence);

		r = dma_fence_add_callback(fence, &sched_job->cb,
					   drm_sched_job_done_cb);
		if (r == -ENOENT)
			drm_sched_job_done(sched_job, fence->error);
		else if (r)
			DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
	} else {
		drm_sched_job_done(sched_job, IS_ERR(fence) ?
				   PTR_ERR(fence) : 0);
	}

	wake_up(&sched->job_scheduled);
	drm_sched_run_job_queue(sched);
}
/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
 *	       allocated and used
 * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
 * @credit_limit: the number of credits this scheduler can hold from all jobs
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 * @dev: target &struct device
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   struct workqueue_struct *submit_wq,
		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
{
	int i;

	sched->ops = ops;
	sched->credit_limit = credit_limit;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	sched->dev = dev;

	if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
		/* This is a gross violation--tell drivers what the problem is.
		 */
		drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
			__func__);
		return -EINVAL;
	} else if (sched->sched_rq) {
		/* Not an error, but warn anyway so drivers can
		 * fine-tune their DRM calling order, and return all
		 * is good.
		 */
		drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
		return 0;
	}

	if (submit_wq) {
		sched->submit_wq = submit_wq;
		sched->own_submit_wq = false;
	} else {
		sched->submit_wq = alloc_ordered_workqueue(name, 0);
		if (!sched->submit_wq)
			return -ENOMEM;

		sched->own_submit_wq = true;
	}

	sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
					GFP_KERNEL | __GFP_ZERO);
	if (!sched->sched_rq)
		goto Out_check_own;
	sched->num_rqs = num_rqs;
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
		if (!sched->sched_rq[i])
			goto Out_unroll;
		drm_sched_rq_init(sched, sched->sched_rq[i]);
	}

	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->credit_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
	INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);
	sched->pause_submit = false;

	sched->ready = true;
	return 0;
Out_unroll:
	for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
		kfree(sched->sched_rq[i]);

	kfree(sched->sched_rq);
	sched->sched_rq = NULL;
Out_check_own:
	if (sched->own_submit_wq)
		destroy_workqueue(sched->submit_wq);
	drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_sched_init);
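/*
 * A hedged example of a driver bringing up one scheduler per hardware ring.
 * my_sched_ops, the credit limit of 64, the hang limit of 3 and
 * MY_TIMEOUT_JIFFIES are illustrative values, not requirements of this API:
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops, NULL,
 *			     DRM_SCHED_PRIORITY_COUNT, 64, 3,
 *			     MY_TIMEOUT_JIFFIES, NULL, NULL,
 *			     ring->name, dev);
 */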
/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	drm_sched_wqueue_stop(sched);

	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		struct drm_sched_rq *rq = sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle,
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);
		kfree(sched->sched_rq[i]);
	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	if (sched->own_submit_wq)
		destroy_workqueue(sched->submit_wq);
	sched->ready = false;
	kfree(sched->sched_rq);
	sched->sched_rq = NULL;
}
EXPORT_SYMBOL(drm_sched_fini);
/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't change @bad's karma if it's from the KERNEL RQ,
	 * because sometimes a GPU hang would cause kernel jobs (like VM updating
	 * jobs) to corrupt, but keep in mind that kernel jobs are always
	 * considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
			struct drm_sched_rq *rq = sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);
/**
 * drm_sched_wqueue_ready - Is the scheduler ready for submission
 *
 * @sched: scheduler instance
 *
 * Returns true if submission is ready
 */
bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
{
	return sched->ready;
}
EXPORT_SYMBOL(drm_sched_wqueue_ready);

/**
 * drm_sched_wqueue_stop - stop scheduler submission
 *
 * @sched: scheduler instance
 */
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
{
	WRITE_ONCE(sched->pause_submit, true);
	cancel_work_sync(&sched->work_run_job);
	cancel_work_sync(&sched->work_free_job);
}
EXPORT_SYMBOL(drm_sched_wqueue_stop);

/**
 * drm_sched_wqueue_start - start scheduler submission
 *
 * @sched: scheduler instance
 */
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
{
	WRITE_ONCE(sched->pause_submit, false);
	queue_work(sched->submit_wq, &sched->work_run_job);
	queue_work(sched->submit_wq, &sched->work_free_job);
}
EXPORT_SYMBOL(drm_sched_wqueue_start);