/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)
/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @rq_list: the list of run queues on which jobs from this
 *           entity can be submitted
 * @num_rq_list: number of run queues in rq_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the rq_list should have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  struct drm_sched_rq **rq_list,
			  unsigned int num_rq_list,
			  atomic_t *guilty)
{
	unsigned int i;

	if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_rq_list = num_rq_list;
	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
				  GFP_KERNEL);
	if (!entity->rq_list)
		return -ENOMEM;

	init_completion(&entity->entity_idle);

	for (i = 0; i < num_rq_list; ++i)
		entity->rq_list[i] = rq_list[i];

	if (num_rq_list)
		entity->rq = rq_list[0];

	entity->last_scheduled = NULL;

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
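/*
 * Example: a minimal init sketch. The driver context below (my_ctx and its
 * fields) is hypothetical and only illustrates the calling convention; real
 * drivers typically pick one run queue per HW ring and priority level.
 *
 *	struct my_ctx {
 *		struct drm_sched_entity entity;
 *		atomic_t guilty;
 *	};
 *
 *	static int my_ctx_init(struct my_ctx *ctx,
 *			       struct drm_gpu_scheduler *sched)
 *	{
 *		struct drm_sched_rq *rq =
 *			&sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *
 *		return drm_sched_entity_init(&ctx->entity, &rq, 1,
 *					     &ctx->guilty);
 *	}
 */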
/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}
/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Return true if entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}
/**
 * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
 *
 * @entity: scheduler entity
 *
 * Return the pointer to the rq with the least load.
 */
static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_jobs = UINT_MAX, num_jobs;
	unsigned int i;

	for (i = 0; i < entity->num_rq_list; ++i) {
		struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_jobs = atomic_read(&sched->num_jobs);
		if (num_jobs < min_jobs) {
			min_jobs = num_jobs;
			rq = entity->rq_list[i];
		}
	}

	return rq;
}
/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions; this first part does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
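/*
 * Example: a minimal flush sketch at file-descriptor close time. The my_ctx
 * struct and the timeout budget are hypothetical; drivers usually derive
 * the timeout from how long outstanding work may reasonably take.
 *
 *	static void my_ctx_flush(struct my_ctx *ctx)
 *	{
 *		long timeout = msecs_to_jiffies(1000);	// hypothetical budget
 *
 *		timeout = drm_sched_entity_flush(&ctx->entity, timeout);
 *		if (!timeout)
 *			DRM_WARN("entity still busy after flush\n");
 *	}
 */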
/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}
/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, the new entity
		 * might not even have had a chance to submit its first job
		 * to the HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}
/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/*
	 * Consumption of existing IBs wasn't completed. Forcefully remove
	 * them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the scheduler thread to idle to make
			 * sure it isn't processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
	kfree(entity->rq_list);
}
EXPORT_SYMBOL(drm_sched_entity_fini);
/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
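/*
 * Example: a minimal teardown sketch pairing init with destroy; the my_ctx
 * struct is hypothetical (see the init example above).
 *
 *	static void my_ctx_fini(struct my_ctx *ctx)
 *	{
 *		// Flushes with MAX_WAIT_SCHED_ENTITY_Q_EMPTY, then finalizes.
 *		drm_sched_entity_destroy(&ctx->entity);
 *	}
 */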
/*
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}
/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}
/**
 * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
 */
static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
					     enum drm_sched_priority priority)
{
	*rq = &(*rq)->sched->sched_rq[priority];
}
/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of the runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	unsigned int i;

	spin_lock(&entity->rq_lock);

	for (i = 0; i < entity->num_rq_list; ++i)
		drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);

	if (entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		drm_sched_entity_set_rq_priority(&entity->rq, priority);
		drm_sched_rq_add_entity(entity->rq, entity);
	}

	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
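/*
 * Example: a minimal priority-change sketch, e.g. from a hypothetical
 * driver ioctl that maps a userspace priority onto enum drm_sched_priority.
 *
 *	static void my_ctx_set_high_priority(struct my_ctx *ctx)
 *	{
 *		drm_sched_entity_set_priority(&ctx->entity,
 *					      DRM_SCHED_PRIORITY_HIGH_HW);
 *	}
 */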
/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity; we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler; we only need to wait
		 * for it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}
/**
 * drm_sched_entity_pop_job - get a ready-to-be-scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* Skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}
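/*
 * Example: a rough sketch of how the scheduler's main loop consumes jobs
 * from an entity. This is a simplification for illustration, not the actual
 * drm_sched_main() body; drm_sched_select_entity() stands in for whatever
 * picks the next runnable entity.
 *
 *	entity = drm_sched_select_entity(sched);
 *	if (entity) {
 *		sched_job = drm_sched_entity_pop_job(entity);
 *		if (sched_job)
 *			// hand the job's IBs to the HW ring via run_job()
 *			fence = sched->ops->run_job(sched_job);
 *	}
 */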
/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	rq = drm_sched_entity_get_free_sched(entity);
	if (rq == entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	drm_sched_rq_remove_entity(entity->rq, entity);
	entity->rq = rq;
	spin_unlock(&entity->rq_lock);
}
/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion to the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->num_jobs);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
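/*
 * Example: a minimal submission sketch. The surrounding pieces (my_ctx, its
 * submit_lock, and the job allocation) are hypothetical; the key contract is
 * that drm_sched_job_init() and drm_sched_entity_push_job() happen under the
 * same lock so queue order matches fence sequence order.
 *
 *	static int my_submit(struct my_ctx *ctx, struct drm_sched_job *job)
 *	{
 *		int r;
 *
 *		mutex_lock(&ctx->submit_lock);	// hypothetical common lock
 *		r = drm_sched_job_init(job, &ctx->entity, NULL);
 *		if (r) {
 *			mutex_unlock(&ctx->submit_lock);
 *			return r;
 *		}
 *		drm_sched_entity_push_job(job, &ctx->entity);
 *		mutex_unlock(&ctx->submit_lock);
 *		return 0;
 *	}
 */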