drivers/gpu/drm/scheduler/sched_entity.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/completion.h>
27
28 #include <drm/drm_print.h>
29 #include <drm/gpu_scheduler.h>
30
31 #include "gpu_scheduler_trace.h"
32
33 #define to_drm_sched_job(sched_job)             \
34                 container_of((sched_job), struct drm_sched_job, queue_node)
35
36 /**
37  * drm_sched_entity_init - Init a context entity used by the scheduler when
38  * submitting to a HW ring.
39  *
40  * @entity: scheduler entity to init
41  * @priority: priority of the entity
42  * @sched_list: the list of drm scheds on which jobs from this
43  *           entity can be submitted
44  * @num_sched_list: number of drm scheds in @sched_list
45  * @guilty: atomic_t set to 1 when a job on this queue
46  *          is found to be guilty causing a timeout
47  *
48  * Note that the &sched_list must have at least one element to schedule the entity.
49  *
50  * For changing @priority later on at runtime see
51  * drm_sched_entity_set_priority(). For changing the set of schedulers
52  * @sched_list at runtime see drm_sched_entity_modify_sched().
53  *
54  * An entity is cleaned up by calling drm_sched_entity_fini(). See also
55  * drm_sched_entity_destroy().
56  *
57  * Returns 0 on success or a negative error code on failure.
58  */
59 int drm_sched_entity_init(struct drm_sched_entity *entity,
60                           enum drm_sched_priority priority,
61                           struct drm_gpu_scheduler **sched_list,
62                           unsigned int num_sched_list,
63                           atomic_t *guilty)
64 {
65         if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
66                 return -EINVAL;
67
68         memset(entity, 0, sizeof(struct drm_sched_entity));
69         INIT_LIST_HEAD(&entity->list);
70         entity->rq = NULL;
71         entity->guilty = guilty;
72         entity->num_sched_list = num_sched_list;
73         entity->priority = priority;
74         entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
75         RCU_INIT_POINTER(entity->last_scheduled, NULL);
76         RB_CLEAR_NODE(&entity->rb_tree_node);
77
78         if (!sched_list[0]->sched_rq) {
79                 /* Warn drivers not to do this and to fix their DRM
80                  * calling order.
81                  */
82                 pr_warn("%s: called with uninitialized scheduler\n", __func__);
83         } else if (num_sched_list) {
84                 /* The "priority" of an entity cannot exceed the number
85                  * of run-queues of a scheduler.
86                  */
87                 if (entity->priority >= sched_list[0]->num_rqs)
88                         entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
89                                                  (s32) DRM_SCHED_PRIORITY_MIN);
90                 entity->rq = sched_list[0]->sched_rq[entity->priority];
91         }
92
93         init_completion(&entity->entity_idle);
94
95         /* We start in an idle state. */
96         complete_all(&entity->entity_idle);
97
98         spin_lock_init(&entity->rq_lock);
99         spsc_queue_init(&entity->job_queue);
100
101         atomic_set(&entity->fence_seq, 0);
102         entity->fence_context = dma_fence_context_alloc(2);
103
104         return 0;
105 }
106 EXPORT_SYMBOL(drm_sched_entity_init);
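
/*
 * Example: a minimal init sketch. The "my_dev"/"my_ctx" objects and the
 * single "my_sched" scheduler are hypothetical driver state; only the
 * drm_sched_entity_init() call itself is taken from this file.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_dev->my_sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&my_ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		return r;
 */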
107
108 /**
109  * drm_sched_entity_modify_sched - Modify sched of an entity
110  * @entity: scheduler entity to modify
111  * @sched_list: the list of new drm scheds which will replace
112  *               existing entity->sched_list
113  * @num_sched_list: number of drm scheds in @sched_list
114  *
115  * Note that this must be called under the same common lock for @entity as
116  * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
117  * guarantee through some other means that this is never called while new jobs
118  * can be pushed to @entity.
119  */
120 void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
121                                     struct drm_gpu_scheduler **sched_list,
122                                     unsigned int num_sched_list)
123 {
124         WARN_ON(!num_sched_list || !sched_list);
125
126         entity->sched_list = sched_list;
127         entity->num_sched_list = num_sched_list;
128 }
129 EXPORT_SYMBOL(drm_sched_entity_modify_sched);
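
/*
 * Example: a sketch of switching an entity to a new scheduler set, held
 * under the same lock that serializes drm_sched_job_arm() and
 * drm_sched_entity_push_job() for this entity. "my_ctx" and "my_lock"
 * are hypothetical driver state.
 *
 *	mutex_lock(&my_ctx->my_lock);
 *	drm_sched_entity_modify_sched(&my_ctx->entity, new_sched_list,
 *				      new_num_sched);
 *	mutex_unlock(&my_ctx->my_lock);
 */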
130
131 static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
132 {
133         rmb(); /* for list_empty to work without lock */
134
135         if (list_empty(&entity->list) ||
136             spsc_queue_count(&entity->job_queue) == 0 ||
137             entity->stopped)
138                 return true;
139
140         return false;
141 }
142
143 /* Return true if entity could provide a job. */
144 bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
145 {
146         if (spsc_queue_peek(&entity->job_queue) == NULL)
147                 return false;
148
149         if (READ_ONCE(entity->dependency))
150                 return false;
151
152         return true;
153 }
154
155 /**
156  * drm_sched_entity_error - return error of last scheduled job
157  * @entity: scheduler entity to check
158  *
159  * Opportunistically return the error of the last scheduled job. The result
160  * can change at any time when new jobs are pushed to the hw.
161  */
162 int drm_sched_entity_error(struct drm_sched_entity *entity)
163 {
164         struct dma_fence *fence;
165         int r;
166
167         rcu_read_lock();
168         fence = rcu_dereference(entity->last_scheduled);
169         r = fence ? fence->error : 0;
170         rcu_read_unlock();
171
172         return r;
173 }
174 EXPORT_SYMBOL(drm_sched_entity_error);
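
/*
 * Example: a sketch of rejecting new submissions on a faulted entity;
 * "my_ctx" is hypothetical. A non-zero result means the last scheduled
 * job failed, which a driver might surface to userspace as a lost
 * context.
 *
 *	int r = drm_sched_entity_error(&my_ctx->entity);
 *
 *	if (r)
 *		return r;
 */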
175
176 static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
177 {
178         struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
179
180         drm_sched_fence_finished(job->s_fence, -ESRCH);
181         WARN_ON(job->s_fence->parent);
182         job->sched->ops->free_job(job);
183 }
184
185 /* Signal the scheduler finished fence when the entity in question is killed. */
186 static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
187                                           struct dma_fence_cb *cb)
188 {
189         struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
190                                                  finish_cb);
191         unsigned long index;
192
193         dma_fence_put(f);
194
195         /* Wait for all dependencies to avoid data corruption */
196         xa_for_each(&job->dependencies, index, f) {
197                 struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
198
199                 if (s_fence && f == &s_fence->scheduled) {
200                         /* The dependencies array had a reference on the scheduled
201                          * fence, and the finished fence refcount might have
202                          * dropped to zero. Use dma_fence_get_rcu() so we get
203                          * a NULL fence in that case.
204                          */
205                         f = dma_fence_get_rcu(&s_fence->finished);
206
207                         /* Now that we have a reference on the finished fence,
208                          * we can release the reference the dependencies array
209                          * had on the scheduled fence.
210                          */
211                         dma_fence_put(&s_fence->scheduled);
212                 }
213
214                 xa_erase(&job->dependencies, index);
215                 if (f && !dma_fence_add_callback(f, &job->finish_cb,
216                                                  drm_sched_entity_kill_jobs_cb))
217                         return;
218
219                 dma_fence_put(f);
220         }
221
222         INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
223         schedule_work(&job->work);
224 }
225
226 /* Remove the entity from the scheduler and kill all pending jobs */
227 static void drm_sched_entity_kill(struct drm_sched_entity *entity)
228 {
229         struct drm_sched_job *job;
230         struct dma_fence *prev;
231
232         if (!entity->rq)
233                 return;
234
235         spin_lock(&entity->rq_lock);
236         entity->stopped = true;
237         drm_sched_rq_remove_entity(entity->rq, entity);
238         spin_unlock(&entity->rq_lock);
239
240         /* Make sure this entity is not used by the scheduler at the moment */
241         wait_for_completion(&entity->entity_idle);
242
243         /* The entity is guaranteed to not be used by the scheduler */
244         prev = rcu_dereference_check(entity->last_scheduled, true);
245         dma_fence_get(prev);
246         while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
247                 struct drm_sched_fence *s_fence = job->s_fence;
248
249                 dma_fence_get(&s_fence->finished);
250                 if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
251                                            drm_sched_entity_kill_jobs_cb))
252                         drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
253
254                 prev = &s_fence->finished;
255         }
256         dma_fence_put(prev);
257 }
258
259 /**
260  * drm_sched_entity_flush - Flush a context entity
261  *
262  * @entity: scheduler entity
263  * @timeout: time to wait for the queue to become empty, in jiffies.
264  *
265  * The first part of the split-up drm_sched_entity_fini(): it does the
266  * waiting, removes the entity from the runqueue and returns an error when
267  * the process was killed.
268  *
269  * Returns the remaining time in jiffies left from the input timeout.
270  */
271 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
272 {
273         struct drm_gpu_scheduler *sched;
274         struct task_struct *last_user;
275         long ret = timeout;
276
277         if (!entity->rq)
278                 return 0;
279
280         sched = entity->rq->sched;
281         /*
282          * The client will not queue more IBs during this fini, consume existing
283          * queued IBs or discard them on SIGKILL.
284          */
285         if (current->flags & PF_EXITING) {
286                 if (timeout)
287                         ret = wait_event_timeout(
288                                         sched->job_scheduled,
289                                         drm_sched_entity_is_idle(entity),
290                                         timeout);
291         } else {
292                 wait_event_killable(sched->job_scheduled,
293                                     drm_sched_entity_is_idle(entity));
294         }
295
296         /* For a killed process, disallow any further IB enqueues right now */
297         last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
298         if ((!last_user || last_user == current->group_leader) &&
299             (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
300                 drm_sched_entity_kill(entity);
301
302         return ret;
303 }
304 EXPORT_SYMBOL(drm_sched_entity_flush);
305
306 /**
307  * drm_sched_entity_fini - Destroy a context entity
308  *
309  * @entity: scheduler entity
310  *
311  * Cleans up @entity which has been initialized by drm_sched_entity_init().
312  *
313  * If there are potentially jobs still in flight or getting newly queued,
314  * drm_sched_entity_flush() must be called first. This function then goes over
315  * the entity and signals all jobs with an error code if the process was killed.
316  */
317 void drm_sched_entity_fini(struct drm_sched_entity *entity)
318 {
319         /*
320          * If consumption of existing IBs wasn't completed, forcefully remove
321          * them here. Also makes sure that the scheduler won't touch this entity
322          * any more.
323          */
324         drm_sched_entity_kill(entity);
325
326         if (entity->dependency) {
327                 dma_fence_remove_callback(entity->dependency, &entity->cb);
328                 dma_fence_put(entity->dependency);
329                 entity->dependency = NULL;
330         }
331
332         dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
333         RCU_INIT_POINTER(entity->last_scheduled, NULL);
334 }
335 EXPORT_SYMBOL(drm_sched_entity_fini);
336
337 /**
338  * drm_sched_entity_destroy - Destroy a context entity
339  * @entity: scheduler entity
340  *
341  * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
342  * convenience wrapper.
343  */
344 void drm_sched_entity_destroy(struct drm_sched_entity *entity)
345 {
346         drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
347         drm_sched_entity_fini(entity);
348 }
349 EXPORT_SYMBOL(drm_sched_entity_destroy);
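
/*
 * Example: a teardown sketch. A driver that wants its own timeout calls
 * the two halves itself instead of drm_sched_entity_destroy(); the
 * 100 ms value and "my_ctx" are hypothetical.
 *
 *	drm_sched_entity_flush(&my_ctx->entity, msecs_to_jiffies(100));
 *	drm_sched_entity_fini(&my_ctx->entity);
 */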
350
351 /* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
352 static void drm_sched_entity_clear_dep(struct dma_fence *f,
353                                        struct dma_fence_cb *cb)
354 {
355         struct drm_sched_entity *entity =
356                 container_of(cb, struct drm_sched_entity, cb);
357
358         entity->dependency = NULL;
359         dma_fence_put(f);
360 }
361
362 /*
363  * drm_sched_entity_wakeup - callback to clear the entity's dependency and
364  * wake up the scheduler
365  */
366 static void drm_sched_entity_wakeup(struct dma_fence *f,
367                                     struct dma_fence_cb *cb)
368 {
369         struct drm_sched_entity *entity =
370                 container_of(cb, struct drm_sched_entity, cb);
371
372         drm_sched_entity_clear_dep(f, cb);
373         drm_sched_wakeup_if_can_queue(entity->rq->sched);
374 }
375
376 /**
377  * drm_sched_entity_set_priority - Sets priority of the entity
378  *
379  * @entity: scheduler entity
380  * @priority: scheduler priority
381  *
382  * Update the priority of runqueues used for the entity.
383  */
384 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
385                                    enum drm_sched_priority priority)
386 {
387         spin_lock(&entity->rq_lock);
388         entity->priority = priority;
389         spin_unlock(&entity->rq_lock);
390 }
391 EXPORT_SYMBOL(drm_sched_entity_set_priority);
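
/*
 * Example: bumping an entity to a higher run-queue at runtime; "my_ctx"
 * is hypothetical.
 *
 *	drm_sched_entity_set_priority(&my_ctx->entity,
 *				      DRM_SCHED_PRIORITY_HIGH);
 */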
392
393 /*
394  * Add a callback to the current dependency of the entity to wake up the
395  * scheduler when the entity becomes available.
396  */
397 static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
398 {
399         struct drm_gpu_scheduler *sched = entity->rq->sched;
400         struct dma_fence *fence = entity->dependency;
401         struct drm_sched_fence *s_fence;
402
403         if (fence->context == entity->fence_context ||
404             fence->context == entity->fence_context + 1) {
405                 /*
406                  * Fence is a scheduled/finished fence from a job
407                  * which belongs to the same entity; we can ignore
408                  * fences from ourselves.
409                  */
410                 dma_fence_put(entity->dependency);
411                 return false;
412         }
413
414         s_fence = to_drm_sched_fence(fence);
415         if (!fence->error && s_fence && s_fence->sched == sched &&
416             !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {
417
418                 /*
419                  * Fence is from the same scheduler, only need to wait for
420                  * it to be scheduled
421                  */
422                 fence = dma_fence_get(&s_fence->scheduled);
423                 dma_fence_put(entity->dependency);
424                 entity->dependency = fence;
425                 if (!dma_fence_add_callback(fence, &entity->cb,
426                                             drm_sched_entity_clear_dep))
427                         return true;
428
429                 /* Ignore it when it is already scheduled */
430                 dma_fence_put(fence);
431                 return false;
432         }
433
434         if (!dma_fence_add_callback(entity->dependency, &entity->cb,
435                                     drm_sched_entity_wakeup))
436                 return true;
437
438         dma_fence_put(entity->dependency);
439         return false;
440 }
441
442 static struct dma_fence *
443 drm_sched_job_dependency(struct drm_sched_job *job,
444                          struct drm_sched_entity *entity)
445 {
446         struct dma_fence *f;
447
448         /* We keep the fence around, so we can iterate over all dependencies
449          * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
450          * before killing the job.
451          */
452         f = xa_load(&job->dependencies, job->last_dependency);
453         if (f) {
454                 job->last_dependency++;
455                 return dma_fence_get(f);
456         }
457
458         if (job->sched->ops->prepare_job)
459                 return job->sched->ops->prepare_job(job, entity);
460
461         return NULL;
462 }
463
464 struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
465 {
466         struct drm_sched_job *sched_job;
467
468         sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
469         if (!sched_job)
470                 return NULL;
471
472         while ((entity->dependency =
473                         drm_sched_job_dependency(sched_job, entity))) {
474                 trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
475
476                 if (drm_sched_entity_add_dependency_cb(entity))
477                         return NULL;
478         }
479
480         /* skip jobs from an entity that was marked guilty */
481         if (entity->guilty && atomic_read(entity->guilty))
482                 dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
483
484         dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
485         rcu_assign_pointer(entity->last_scheduled,
486                            dma_fence_get(&sched_job->s_fence->finished));
487
488         /*
489          * If the queue is empty we allow drm_sched_entity_select_rq() to
490          * locklessly access ->last_scheduled. This only works if we set the
491          * pointer before we dequeue and if we have a write barrier here.
492          */
493         smp_wmb();
494
495         spsc_queue_pop(&entity->job_queue);
496
497         /*
498          * Update the entity's location in the min heap according to
499          * the timestamp of the next job, if any.
500          */
501         if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
502                 struct drm_sched_job *next;
503
504                 next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
505                 if (next)
506                         drm_sched_rq_update_fifo(entity, next->submit_ts);
507         }
508
509         /* Jobs and entities might have different lifecycles. Since we're
510          * removing the job from the entity's queue, set the job's entity pointer
511          * to NULL to prevent any future access of the entity through this job.
512          */
513         sched_job->entity = NULL;
514
515         return sched_job;
516 }
517
518 void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
519 {
520         struct dma_fence *fence;
521         struct drm_gpu_scheduler *sched;
522         struct drm_sched_rq *rq;
523
524         /* single possible engine and already selected */
525         if (!entity->sched_list)
526                 return;
527
528         /* queue non-empty, stay on the same engine */
529         if (spsc_queue_count(&entity->job_queue))
530                 return;
531
532         /*
533          * Only when the queue is empty are we guaranteed that the scheduler
534          * thread cannot change ->last_scheduled. To enforce ordering we need
535          * a read barrier here. See drm_sched_entity_pop_job() for the other
536          * side.
537          */
538         smp_rmb();
539
540         fence = rcu_dereference_check(entity->last_scheduled, true);
541
542         /* stay on the same engine if the previous job hasn't finished */
543         if (fence && !dma_fence_is_signaled(fence))
544                 return;
545
546         spin_lock(&entity->rq_lock);
547         sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
548         rq = sched ? sched->sched_rq[entity->priority] : NULL;
549         if (rq != entity->rq) {
550                 drm_sched_rq_remove_entity(entity->rq, entity);
551                 entity->rq = rq;
552         }
553         spin_unlock(&entity->rq_lock);
554
555         if (entity->num_sched_list == 1)
556                 entity->sched_list = NULL;
557 }
558
559 /**
560  * drm_sched_entity_push_job - Submit a job to the entity's job queue
561  * @sched_job: job to submit
562  *
563  * Note: To guarantee that the order of insertion to queue matches the job's
564  * fence sequence number this function should be called with drm_sched_job_arm()
565  * under common lock for the struct drm_sched_entity that was set up for
566  * @sched_job in drm_sched_job_init().
569  */
570 void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
571 {
572         struct drm_sched_entity *entity = sched_job->entity;
573         bool first;
574         ktime_t submit_ts;
575
576         trace_drm_sched_job(sched_job, entity);
577         atomic_inc(entity->rq->sched->score);
578         WRITE_ONCE(entity->last_user, current->group_leader);
579
580         /*
581          * After the sched_job is pushed into the entity queue, it may be
582          * completed and freed up at any time. We can no longer access it.
583          * Make sure to set the submit_ts first, to avoid a race.
584          */
585         sched_job->submit_ts = submit_ts = ktime_get();
586         first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
587
588         /* first job wakes up scheduler */
589         if (first) {
590                 /* Add the entity to the run queue */
591                 spin_lock(&entity->rq_lock);
592                 if (entity->stopped) {
593                         spin_unlock(&entity->rq_lock);
594
595                         DRM_ERROR("Trying to push to a killed entity\n");
596                         return;
597                 }
598
599                 drm_sched_rq_add_entity(entity->rq, entity);
600                 spin_unlock(&entity->rq_lock);
601
602                 if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
603                         drm_sched_rq_update_fifo(entity, submit_ts);
604
605                 drm_sched_wakeup_if_can_queue(entity->rq->sched);
606         }
607 }
608 EXPORT_SYMBOL(drm_sched_entity_push_job);
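
/*
 * Example: the submit flow this function completes, as a sketch. "my_job",
 * "my_ctx", "my_lock" and "my_owner" are hypothetical driver state; the
 * init/arm/push ordering follows the note above.
 *
 *	r = drm_sched_job_init(&my_job->base, &my_ctx->entity, my_owner);
 *	if (r)
 *		return r;
 *
 *	mutex_lock(&my_ctx->my_lock);
 *	drm_sched_job_arm(&my_job->base);
 *	fence = dma_fence_get(&my_job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&my_job->base);
 *	mutex_unlock(&my_ctx->my_lock);
 */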