/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>

#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}
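/* Add an entity to the tail of the run queue; a no-op if it is already queued */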
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}
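/* Remove an entity from the run queue; also drops it as the current round-robin pick */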
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
/**
 * Select an entity which could provide a job to run
 * @rq		The run queue to check.
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}
		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);
	return NULL;
}
/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Returns 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->rq_lock);
	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Returns true if the entity was initialized for @sched, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}
/**
 * Check if entity is idle
 * @entity	The pointer to a valid scheduler entity
 * Returns true if the entity has no unscheduled jobs left.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;
	return false;
}
/**
 * Check if entity is ready
 * @entity	The pointer to a valid scheduler entity
 * Returns true if the entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;
	if (READ_ONCE(entity->dependency))
		return false;
	return true;
}
/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	int r;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
		r = -ERESTARTSYS;
	else
		r = wait_event_killable(sched->job_scheduled,
					amd_sched_entity_is_idle(entity));
	amd_sched_entity_set_rq(entity, NULL);
	if (r) {
		struct amd_sched_job *job;

		/* Park the kernel thread for a moment to make sure it isn't
		 * processing our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
		while (kfifo_out(&entity->job_queue, &job, sizeof(job))) {
			struct amd_sched_fence *s_fence = job->s_fence;

			amd_sched_fence_scheduled(s_fence);
			dma_fence_set_error(&s_fence->finished, -ESRCH);
			amd_sched_fence_finished(s_fence);
			dma_fence_put(&s_fence->finished);
			sched->ops->free_job(job);
		}
	}
	kfifo_free(&entity->job_queue);
}
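/* Dependency fence callback: clear the dependency and wake up the scheduler */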
static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
	amd_sched_wakeup(entity->sched);
}
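/* Dependency fence callback for same-scheduler fences: just clear the dependency */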
static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}
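/* Move an entity to a different run queue, or take it out of scheduling entirely when rq is NULL */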
void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
			     struct amd_sched_rq *rq)
{
	if (entity->rq == rq)
		return;

	spin_lock(&entity->rq_lock);
	if (entity->rq)
		amd_sched_rq_remove_entity(entity->rq, entity);
	entity->rq = rq;
	if (rq)
		amd_sched_rq_add_entity(rq, entity);
	spin_unlock(&entity->rq_lock);
}
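/* Returns true if waiting for @fence can be optimized, i.e. it is an
 * unsignaled fence from the same entity or from the same scheduler.
 */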
bool amd_sched_dependency_optimized(struct dma_fence *fence,
				    struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
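/* Install a callback on the entity's current dependency. Returns true if a
 * callback was installed and the entity must keep waiting, false if the
 * dependency could be dropped right away.
 */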
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    amd_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}
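/* Peek at the entity's next job without dequeuing it; returns NULL if the
 * queue is empty or the job still has an unresolved dependency.
 */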
static struct amd_sched_job *
amd_sched_entity_peek_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}
/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to the job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		amd_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		amd_sched_wakeup(sched);
	}
	return added;
}
/* job_finish is called after the hw fence has signaled */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);
		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	dma_fence_put(&s_job->s_fence->finished);
	sched->ops->free_job(s_job);
}
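/* Finished-fence callback: defer the real cleanup work to process context */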
static void amd_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);

	schedule_work(&job->finish_work);
}
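/* Track the job on the mirror list and arm the timeout handler before the
 * job is handed to the hardware.
 */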
static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       amd_sched_job_finish_cb);

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}
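/* Timeout (TDR) work handler: the hw fence didn't signal in time, let the
 * driver decide how to handle the hang.
 */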
static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}
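/* Detach the hw fence callbacks from all jobs in flight, typically done
 * before a GPU reset.
 */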
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock(&sched->job_list_lock);
}
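/* Drop a guilty job from the mirror list so recovery won't resubmit it */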
void amd_sched_job_kickout(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	spin_unlock(&sched->job_list_lock);
}
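/* Resubmit all jobs on the mirror list after a GPU reset and re-arm the timeout */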
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}
/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job required to submit
 *
 * Blocks until the job could be pushed to the entity's queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}
/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}
/**
 * Returns true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}
/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}
	return entity;
}
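/* hw fence callback: free a hw slot, signal the finished fence and wake up
 * the scheduler thread.
 */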
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	dma_fence_get(&s_fence->finished);
	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}
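/* Park the scheduler thread when requested; returns true if it was parked */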
static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}
	return false;
}
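/* Main scheduler thread: pick a ready entity, run its job on the hw and
 * install the completion callback.
 */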
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_peek_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}
/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions to do.
 * @timeout		Timeout in jiffies for the time-out handler.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}