/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects
 * entities from the run queue using a FIFO. The scheduler provides
 * dependency handling features among jobs. The driver is supposed to
 * provide callback functions for backend operations to the scheduler,
 * like submitting a job to the hardware run queue, returning the
 * dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled
 *    on the hardware
 *
 * The jobs in an entity are always scheduled in the order in which they
 * were pushed. A minimal driver-side flow is sketched in the comment below.
 */
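/*
 * A minimal sketch of the driver-side flow described above. All "my_*"
 * names are hypothetical driver code, not part of this file, and the exact
 * entity setup depends on the driver (see drm_sched_entity_init() in
 * sched_entity.c):
 *
 *	// once per hw ring: bind a scheduler to the hardware run queue
 *	drm_sched_init(&my_ring->sched, &my_sched_ops,
 *		       hw_submission_limit, hang_limit, timeout, "my-ring");
 *
 *	// per command submission: init the job, then push it to an entity
 *	drm_sched_job_init(&my_job->base, &my_ctx->entity, my_ctx);
 *	drm_sched_entity_push_job(&my_job->base, &my_ctx->entity);
 */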
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job);
/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance the run queue belongs to
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
	rq->sched = sched;
}
/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	/* continue the round-robin scan after the last selected entity */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}
		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);
	return NULL;
}
/**
 * drm_sched_dependency_optimized - test if the dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);
/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->ring_mirror_list))
		schedule_delayed_work(&sched->work_tdr, sched->timeout);
}
/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(system_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
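/*
 * Hedged usage sketch: a driver's fault interrupt handler might kick the
 * timeout handler immediately instead of waiting for the TDR period to
 * expire ("my_gpu" and my_fault_irq() are hypothetical driver names):
 *
 *	static irqreturn_t my_fault_irq(int irq, void *data)
 *	{
 *		struct my_gpu *gpu = data;
 *
 *		// hardware reported a fault; run timeout handling now
 *		drm_sched_fault(&gpu->ring->sched);
 *		return IRQ_HANDLED;
 *	}
 */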
/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
 * called from an IRQ context.
 *
 * Returns the timeout remaining
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also
	 * prevents the timeout from being restarted when new submissions
	 * arrive.
	 */
	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);
/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler. Note that
 * this function can be called from an IRQ context.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
{
	unsigned long flags;

	spin_lock_irqsave(&sched->job_list_lock, flags);

	if (list_empty(&sched->ring_mirror_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(system_wq, &sched->work_tdr, remaining);

	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
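/*
 * Hedged usage sketch: a driver that legitimately stalls the hardware (for
 * example while uploading firmware) might bracket that window with the two
 * helpers above so the stall is not misread as a job timeout. "my_gpu" and
 * the helper below are hypothetical:
 *
 *	static void my_do_slow_hw_maintenance(struct my_gpu *gpu)
 *	{
 *		unsigned long remaining;
 *
 *		remaining = drm_sched_suspend_timeout(&gpu->ring->sched);
 *		// ... poke the hardware; jobs make no progress here ...
 *		drm_sched_resume_timeout(&gpu->ring->sched, remaining);
 *	}
 */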
/* drm_sched_job_finish is called after the hw fence is signaled */
static void drm_sched_job_finish(struct work_struct *work)
{
	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
						   finish_work);
	struct drm_gpu_scheduler *sched = s_job->sched;
	unsigned long flags;

	/*
	 * Canceling the timeout without removing our job from the ring mirror
	 * list is safe, as we will only end up in this worker if our job's
	 * finished fence has been signaled. So even if another worker manages
	 * to find this job as the next job in the list, the fence signaled
	 * check below will prevent the timeout from being restarted.
	 */
	cancel_delayed_work_sync(&sched->work_tdr);

	spin_lock_irqsave(&sched->job_list_lock, flags);
	/* remove job from ring_mirror_list */
	list_del_init(&s_job->node);
	/* queue TDR for next job */
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);

	sched->ops->free_job(s_job);
}
static void drm_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	schedule_work(&job->finish_work);
}
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;
	unsigned long flags;

	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       drm_sched_job_finish_cb);

	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	unsigned long flags;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
	job = list_first_entry_or_null(&sched->ring_mirror_list,
				       struct drm_sched_job, node);

	if (job)
		job->sched->ops->timedout_job(job);

	spin_lock_irqsave(&sched->job_list_lock, flags);
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
/**
 * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
 *
 * @sched: scheduler instance
 * @bad: bad scheduler job
 *
 * Detaches the hardware fence callbacks from all jobs on the mirror list and,
 * if @bad exceeds the hang limit, marks its owning entity as guilty.
 */
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *entity, *tmp;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock_irqrestore(&sched->job_list_lock, flags);

	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
		/* Don't increase @bad's karma if it's from the KERNEL RQ,
		 * because a GPU hang can sometimes corrupt kernel jobs (like
		 * VM updates), but kernel jobs are always considered good.
		 */
		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context == entity->fence_context) {
					if (atomic_read(&bad->karma) > bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_hw_job_reset);
/**
 * drm_sched_job_recovery - recover jobs after a reset
 *
 * @sched: scheduler instance
 *
 * Re-submits the jobs on the ring mirror list; jobs belonging to the guilty
 * context have their finished fence set to -ECANCELED instead.
 */
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	bool found_guilty = false;
	unsigned long flags;
	int r;

	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;
		uint64_t guilty_context;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		spin_unlock_irqrestore(&sched->job_list_lock, flags);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			if (s_fence->finished.error < 0)
				drm_sched_expel_job_unlocked(s_job);
			drm_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock_irqsave(&sched->job_list_lock, flags);
	}
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
EXPORT_SYMBOL(drm_sched_job_recovery);
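/*
 * Hedged sketch of how a driver's timedout_job callback might use
 * drm_sched_hw_job_reset() and drm_sched_job_recovery() around a hardware
 * reset (modelled loosely on the amdgpu pattern; all "my_*" names are
 * hypothetical driver code):
 *
 *	static void my_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct my_gpu *gpu = to_my_gpu(bad->sched);
 *
 *		// detach hw fences and record the guilty context
 *		drm_sched_hw_job_reset(bad->sched, bad);
 *
 *		my_gpu_hw_reset(gpu);		// driver-specific reset
 *
 *		// re-submit surviving jobs; the guilty context gets -ECANCELED
 *		drm_sched_job_recovery(bad->sched);
 *	}
 */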
/**
 * drm_sched_job_init - init a scheduler job
 *
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	struct drm_gpu_scheduler *sched;

	drm_sched_entity_select_rq(entity);
	if (!entity->rq)
		return -ENOENT;

	sched = entity->rq->sched;

	job->sched = sched;
	job->entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, drm_sched_job_finish);
	INIT_LIST_HEAD(&job->node);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 *
 * @job: scheduler job to clean up
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	dma_fence_put(&job->s_fence->finished);
	job->s_fence = NULL;
}
EXPORT_SYMBOL(drm_sched_job_cleanup);
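/*
 * Hedged submission sketch: drm_sched_job_init() is typically paired with
 * drm_sched_entity_push_job() on success and drm_sched_job_cleanup() on any
 * error after init ("my_*" names are hypothetical driver code):
 *
 *	r = drm_sched_job_init(&my_job->base, &my_ctx->entity, my_ctx);
 *	if (r)
 *		return r;
 *
 *	r = my_prepare_job(my_job);		// driver-specific setup
 *	if (r) {
 *		drm_sched_job_cleanup(&my_job->base);
 *		return r;
 *	}
 *
 *	drm_sched_entity_push_job(&my_job->base, &my_ctx->entity);
 */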
/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}
/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}
/**
 * drm_sched_process_job - process a job
 *
 * @f: fence
 * @cb: fence callbacks
 *
 * Called after the job has finished execution.
 */
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_fence *s_fence =
		container_of(cb, struct drm_sched_fence, cb);
	struct drm_gpu_scheduler *sched = s_fence->sched;

	dma_fence_get(&s_fence->finished);
	atomic_dec(&sched->hw_rq_count);
	atomic_dec(&sched->num_jobs);
	drm_sched_fence_finished(s_fence);

	trace_drm_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}
/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		drm_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			if (s_fence->finished.error < 0)
				drm_sched_expel_job_unlocked(sched_job);
			drm_sched_process_job(NULL, &s_fence->cb);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}
static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	spin_unlock(&sched->job_list_lock);
}
/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @name: name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->num_jobs, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);
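/*
 * Hedged init sketch: a driver supplies drm_sched_backend_ops and one
 * scheduler per hardware ring ("my_*" names and the numeric limits below
 * are hypothetical):
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency = my_job_dependency,
 *		.run_job = my_job_run,
 *		.timedout_job = my_timedout_job,
 *		.free_job = my_job_free,
 *	};
 *
 *	r = drm_sched_init(&my_ring->sched, &my_sched_ops,
 *			   16,				// hw_submission
 *			   3,				// hang_limit
 *			   msecs_to_jiffies(1000),	// timeout
 *			   "my-ring");
 */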
/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);