drivers/gpu/drm/scheduler/sched_entity.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)             \
                container_of((sched_job), struct drm_sched_job, queue_node)
/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to the HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
                          enum drm_sched_priority priority,
                          struct drm_gpu_scheduler **sched_list,
                          unsigned int num_sched_list,
                          atomic_t *guilty)
{
        if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
                return -EINVAL;

        memset(entity, 0, sizeof(struct drm_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = NULL;
        entity->guilty = guilty;
        entity->num_sched_list = num_sched_list;
        entity->priority = priority;
        entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
        RCU_INIT_POINTER(entity->last_scheduled, NULL);
        RB_CLEAR_NODE(&entity->rb_tree_node);

        if (num_sched_list)
                entity->rq = &sched_list[0]->sched_rq[entity->priority];

        init_completion(&entity->entity_idle);

        /* We start in an idle state. */
        complete_all(&entity->entity_idle);

        spin_lock_init(&entity->rq_lock);
        spsc_queue_init(&entity->job_queue);

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = dma_fence_context_alloc(2);

        return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
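
/*
 * Example usage (hypothetical driver code, a minimal sketch; "gpu" and
 * "ctx" are assumptions, not part of this API): a single-scheduler entity
 * may pass a stack array because the list is not retained when
 * num_sched_list == 1, whereas a load-balancing list (num_sched_list > 1)
 * is kept by the entity and must stay alive as long as the entity does.
 *
 *      struct drm_gpu_scheduler *sched_list[] = { &gpu->sched };
 *      int ret;
 *
 *      ret = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *                                  sched_list, ARRAY_SIZE(sched_list), NULL);
 *      if (ret)
 *              return ret;
 */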

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *               existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
                                    struct drm_gpu_scheduler **sched_list,
                                    unsigned int num_sched_list)
{
        WARN_ON(!num_sched_list || !sched_list);

        entity->sched_list = sched_list;
        entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
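
/*
 * Example usage (hypothetical, a minimal sketch; "ctx", "submit_lock" and
 * "new_sched_list" are assumptions): switch the entity to a different
 * scheduler set under the same lock that serializes drm_sched_job_arm()
 * and drm_sched_entity_push_job() for this entity.
 *
 *      mutex_lock(&ctx->submit_lock);
 *      drm_sched_entity_modify_sched(&ctx->entity, new_sched_list,
 *                                    ARRAY_SIZE(new_sched_list));
 *      mutex_unlock(&ctx->submit_lock);
 */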

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
        rmb(); /* for list_empty to work without lock */

        if (list_empty(&entity->list) ||
            spsc_queue_count(&entity->job_queue) == 0 ||
            entity->stopped)
                return true;

        return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
        if (spsc_queue_peek(&entity->job_queue) == NULL)
                return false;

        if (READ_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. The result
 * can change at any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
        struct dma_fence *fence;
        int r;

        rcu_read_lock();
        fence = rcu_dereference(entity->last_scheduled);
        r = fence ? fence->error : 0;
        rcu_read_unlock();

        return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
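
/*
 * Example usage (hypothetical, a minimal sketch; "ctx" is an assumption):
 * fail fast in the submission path once a previous job on this entity has
 * failed.
 *
 *      int r = drm_sched_entity_error(&ctx->entity);
 *
 *      if (r)
 *              return r;
 */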

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
        struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

        drm_sched_fence_finished(job->s_fence, -ESRCH);
        WARN_ON(job->s_fence->parent);
        job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
                                          struct dma_fence_cb *cb)
{
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
        int r;

        dma_fence_put(f);

        /* Wait for all dependencies to avoid data corruption */
        while (!xa_empty(&job->dependencies)) {
                f = xa_erase(&job->dependencies, job->last_dependency++);
                r = dma_fence_add_callback(f, &job->finish_cb,
                                           drm_sched_entity_kill_jobs_cb);
                if (!r)
                        return;

                dma_fence_put(f);
        }

        INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
        schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
        struct drm_sched_job *job;
        struct dma_fence *prev;

        if (!entity->rq)
                return;

        spin_lock(&entity->rq_lock);
        entity->stopped = true;
        drm_sched_rq_remove_entity(entity->rq, entity);
        spin_unlock(&entity->rq_lock);

        /* Make sure this entity is not used by the scheduler at the moment */
        wait_for_completion(&entity->entity_idle);

        /* The entity is guaranteed to not be used by the scheduler */
        prev = rcu_dereference_check(entity->last_scheduled, true);
        dma_fence_get(prev);
        while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
                struct drm_sched_fence *s_fence = job->s_fence;

                dma_fence_get(&s_fence->finished);
                if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
                                           drm_sched_entity_kill_jobs_cb))
                        drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

                prev = &s_fence->finished;
        }
        dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait for the queue to become empty, in jiffies
 *
 * drm_sched_entity_fini() is split into two functions. This first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
        struct drm_gpu_scheduler *sched;
        struct task_struct *last_user;
        long ret = timeout;

        if (!entity->rq)
                return 0;

        sched = entity->rq->sched;
        /*
         * The client will not queue more IBs during this fini, consume existing
         * queued IBs or discard them on SIGKILL
         */
        if (current->flags & PF_EXITING) {
                if (timeout)
                        ret = wait_event_timeout(
                                        sched->job_scheduled,
                                        drm_sched_entity_is_idle(entity),
                                        timeout);
        } else {
                wait_event_killable(sched->job_scheduled,
                                    drm_sched_entity_is_idle(entity));
        }

        /* For a killed process disable any more IBs enqueue right now */
        last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
        if ((!last_user || last_user == current->group_leader) &&
            (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
                drm_sched_entity_kill(entity);

        return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
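
/*
 * Example usage (hypothetical, a minimal sketch; "ctx" and the timeout
 * value are assumptions): drivers that want a different timeout than
 * drm_sched_entity_destroy()'s default can pair flush and fini manually.
 *
 *      drm_sched_entity_flush(&ctx->entity, msecs_to_jiffies(100));
 *      drm_sched_entity_fini(&ctx->entity);
 */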

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued,
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
        /*
         * If consumption of existing IBs wasn't completed, forcefully remove
         * them here. Also makes sure that the scheduler won't touch this entity
         * any more.
         */
        drm_sched_entity_kill(entity);

        if (entity->dependency) {
                dma_fence_remove_callback(entity->dependency, &entity->cb);
                dma_fence_put(entity->dependency);
                entity->dependency = NULL;
        }

        dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
        RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
        drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
        drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
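
/*
 * Example usage (hypothetical teardown path, a minimal sketch; "ctx" is
 * an assumption): pairs with the drm_sched_entity_init() example above
 * and waits up to MAX_WAIT_SCHED_ENTITY_Q_EMPTY for queued jobs.
 *
 *      drm_sched_entity_destroy(&ctx->entity);
 */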

/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
                                       struct dma_fence_cb *cb)
{
        struct drm_sched_entity *entity =
                container_of(cb, struct drm_sched_entity, cb);

        entity->dependency = NULL;
        dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
{
        struct drm_sched_entity *entity =
                container_of(cb, struct drm_sched_entity, cb);

        drm_sched_entity_clear_dep(f, cb);
        drm_sched_wakeup_if_can_queue(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                   enum drm_sched_priority priority)
{
        spin_lock(&entity->rq_lock);
        entity->priority = priority;
        spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
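
/*
 * Example usage (hypothetical, a minimal sketch; "ctx" is an assumption):
 * bump an entity in response to a userspace priority request.
 *
 *      drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH);
 */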

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct dma_fence *fence = entity->dependency;
        struct drm_sched_fence *s_fence;

        if (fence->context == entity->fence_context ||
            fence->context == entity->fence_context + 1) {
                /*
                 * Fence is a scheduled/finished fence from a job
                 * which belongs to the same entity; we can ignore
                 * fences from ourselves.
                 */
                dma_fence_put(entity->dependency);
                return false;
        }

        s_fence = to_drm_sched_fence(fence);
        if (s_fence && s_fence->sched == sched &&
            !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

                /*
                 * Fence is from the same scheduler, we only need to wait for
                 * it to be scheduled.
                 */
                fence = dma_fence_get(&s_fence->scheduled);
                dma_fence_put(entity->dependency);
                entity->dependency = fence;
                if (!dma_fence_add_callback(fence, &entity->cb,
                                            drm_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                dma_fence_put(fence);
                return false;
        }

        if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                    drm_sched_entity_wakeup))
                return true;

        dma_fence_put(entity->dependency);
        return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
                         struct drm_sched_entity *entity)
{
        if (!xa_empty(&job->dependencies))
                return xa_erase(&job->dependencies, job->last_dependency++);

        if (job->sched->ops->prepare_job)
                return job->sched->ops->prepare_job(job, entity);

        return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
        struct drm_sched_job *sched_job;

        sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
        if (!sched_job)
                return NULL;

        while ((entity->dependency =
                        drm_sched_job_dependency(sched_job, entity))) {
                trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

                if (drm_sched_entity_add_dependency_cb(entity))
                        return NULL;
        }

        /* Skip jobs from an entity that was marked guilty */
        if (entity->guilty && atomic_read(entity->guilty))
                dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

        dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
        rcu_assign_pointer(entity->last_scheduled,
                           dma_fence_get(&sched_job->s_fence->finished));

        /*
         * If the queue is empty we allow drm_sched_entity_select_rq() to
         * locklessly access ->last_scheduled. This only works if we set the
         * pointer before we dequeue and if we have a write barrier here.
         */
        smp_wmb();

        spsc_queue_pop(&entity->job_queue);

        /*
         * Update the entity's location in the min heap according to
         * the timestamp of the next job, if any.
         */
        if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
                struct drm_sched_job *next;

                next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
                if (next)
                        drm_sched_rq_update_fifo(entity, next->submit_ts);
        }

        /*
         * Jobs and entities might have different lifecycles. Since we're
         * removing the job from the entity's queue, set the job's entity
         * pointer to NULL to prevent any future access of the entity through
         * this job.
         */
        sched_job->entity = NULL;

        return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
        struct dma_fence *fence;
        struct drm_gpu_scheduler *sched;
        struct drm_sched_rq *rq;

        /* single possible engine and already selected */
        if (!entity->sched_list)
                return;

        /* queue non-empty, stay on the same engine */
        if (spsc_queue_count(&entity->job_queue))
                return;

        /*
         * Only when the queue is empty are we guaranteed that the scheduler
         * thread cannot change ->last_scheduled. To enforce ordering we need
         * a read barrier here. See drm_sched_entity_pop_job() for the other
         * side.
         */
        smp_rmb();

        fence = rcu_dereference_check(entity->last_scheduled, true);

        /* stay on the same engine if the previous job hasn't finished */
        if (fence && !dma_fence_is_signaled(fence))
                return;

        spin_lock(&entity->rq_lock);
        sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
        rq = sched ? &sched->sched_rq[entity->priority] : NULL;
        if (rq != entity->rq) {
                drm_sched_rq_remove_entity(entity->rq, entity);
                entity->rq = rq;
        }
        spin_unlock(&entity->rq_lock);

        if (entity->num_sched_list == 1)
                entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number this function should be called with drm_sched_job_arm()
 * under common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
        struct drm_sched_entity *entity = sched_job->entity;
        bool first;
        ktime_t submit_ts;

        trace_drm_sched_job(sched_job, entity);
        atomic_inc(entity->rq->sched->score);
        WRITE_ONCE(entity->last_user, current->group_leader);

        /*
         * After the sched_job is pushed into the entity queue, it may be
         * completed and freed up at any time. We can no longer access it.
         * Make sure to set the submit_ts first, to avoid a race.
         */
        sched_job->submit_ts = submit_ts = ktime_get();
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                spin_lock(&entity->rq_lock);
                if (entity->stopped) {
                        spin_unlock(&entity->rq_lock);

                        DRM_ERROR("Trying to push to a killed entity\n");
                        return;
                }

                drm_sched_rq_add_entity(entity->rq, entity);
                spin_unlock(&entity->rq_lock);

                if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
                        drm_sched_rq_update_fifo(entity, submit_ts);

                drm_sched_wakeup_if_can_queue(entity->rq->sched);
        }
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
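
/*
 * Example usage (hypothetical driver submission path, a minimal sketch;
 * "ctx", "submit_lock" and the job wrapper are assumptions): initialize
 * the job against the entity, then arm and push it under one common lock
 * so fence sequence numbers match queue order.
 *
 *      ret = drm_sched_job_init(&job->base, &ctx->entity, ctx);
 *      if (ret)
 *              return ret;
 *
 *      mutex_lock(&ctx->submit_lock);
 *      drm_sched_job_arm(&job->base);
 *      drm_sched_entity_push_job(&job->base);
 *      mutex_unlock(&ctx->submit_lock);
 */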