linux.git: drivers/gpu/drm/scheduler/sched_main.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 /**
25  * DOC: Overview
26  *
27  * The GPU scheduler provides entities which allow userspace to push jobs
28  * into software queues which are then scheduled on a hardware run queue.
29  * The software queues have a priority among them. The scheduler selects the
30  * entities from the run queue using a FIFO. The scheduler provides dependency
31  * handling features among jobs. The driver is supposed to provide callback
32  * functions to the scheduler for backend operations, like submitting a job to
33  * the hardware run queue or returning the dependencies of a job.
34  *
35  * The organisation of the scheduler is as follows:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
39  *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
44  * The jobs in an entity are always scheduled in the order in which they were
 * pushed. A hypothetical driver-side usage sketch follows this comment block.
45  */
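/*
 * Illustrative usage sketch (hypothetical driver code, not scheduler API):
 * a driver typically creates one scheduler per hardware run queue and wires
 * its backend callbacks into a struct drm_sched_backend_ops. All "my_*" and
 * "MY_*" names below are assumptions for illustration only.
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		return my_ring_submit(to_my_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency   = my_job_dependency,
 *		.run_job      = my_run_job,
 *		.timedout_job = my_timedout_job,
 *		.free_job     = my_free_job,
 *	};
 *
 *	r = drm_sched_init(&my_ring->sched, &my_sched_ops,
 *			   MY_HW_SUBMISSION_LIMIT, MY_HANG_LIMIT,
 *			   msecs_to_jiffies(MY_TIMEOUT_MS), "my-ring");
 */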
46
47 #include <linux/kthread.h>
48 #include <linux/wait.h>
49 #include <linux/sched.h>
50 #include <uapi/linux/sched/types.h>
51 #include <drm/drmP.h>
52 #include <drm/gpu_scheduler.h>
53 #include <drm/spsc_queue.h>
54
55 #define CREATE_TRACE_POINTS
56 #include "gpu_scheduler_trace.h"
57
58 #define to_drm_sched_job(sched_job)             \
59                 container_of((sched_job), struct drm_sched_job, queue_node)
60
61 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
62
63 static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job);
64
65 /**
66  * drm_sched_rq_init - initialize a given run queue struct
67  *
 * @sched: scheduler instance the run queue belongs to
68  * @rq: scheduler run queue
69  *
70  * Initializes a scheduler runqueue.
71  */
72 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
73                               struct drm_sched_rq *rq)
74 {
75         spin_lock_init(&rq->lock);
76         INIT_LIST_HEAD(&rq->entities);
77         rq->current_entity = NULL;
78         rq->sched = sched;
79 }
80
81 /**
82  * drm_sched_rq_add_entity - add an entity
83  *
84  * @rq: scheduler run queue
85  * @entity: scheduler entity
86  *
87  * Adds a scheduler entity to the run queue.
88  */
89 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
90                              struct drm_sched_entity *entity)
91 {
92         if (!list_empty(&entity->list))
93                 return;
94         spin_lock(&rq->lock);
95         list_add_tail(&entity->list, &rq->entities);
96         spin_unlock(&rq->lock);
97 }
98
99 /**
100  * drm_sched_rq_remove_entity - remove an entity
101  *
102  * @rq: scheduler run queue
103  * @entity: scheduler entity
104  *
105  * Removes a scheduler entity from the run queue.
106  */
107 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
108                                 struct drm_sched_entity *entity)
109 {
110         if (list_empty(&entity->list))
111                 return;
112         spin_lock(&rq->lock);
113         list_del_init(&entity->list);
114         if (rq->current_entity == entity)
115                 rq->current_entity = NULL;
116         spin_unlock(&rq->lock);
117 }
118
119 /**
120  * drm_sched_rq_select_entity - Select an entity which could provide a job to run
121  *
122  * @rq: scheduler run queue to check.
123  *
124  * Try to find a ready entity; returns NULL if none is found.
125  */
126 static struct drm_sched_entity *
127 drm_sched_rq_select_entity(struct drm_sched_rq *rq)
128 {
129         struct drm_sched_entity *entity;
130
131         spin_lock(&rq->lock);
132
133         entity = rq->current_entity;
134         if (entity) {
135                 list_for_each_entry_continue(entity, &rq->entities, list) {
136                         if (drm_sched_entity_is_ready(entity)) {
137                                 rq->current_entity = entity;
138                                 spin_unlock(&rq->lock);
139                                 return entity;
140                         }
141                 }
142         }
143
144         list_for_each_entry(entity, &rq->entities, list) {
145
146                 if (drm_sched_entity_is_ready(entity)) {
147                         rq->current_entity = entity;
148                         spin_unlock(&rq->lock);
149                         return entity;
150                 }
151
152                 if (entity == rq->current_entity)
153                         break;
154         }
155
156         spin_unlock(&rq->lock);
157
158         return NULL;
159 }
160
161 /**
162  * drm_sched_dependency_optimized - check whether a dependency fence can be optimized away
163  *
164  * @fence: the dependency fence
165  * @entity: the entity which depends on the above fence
166  *
167  * Returns true if the dependency can be optimized and false otherwise
168  */
169 bool drm_sched_dependency_optimized(struct dma_fence *fence,
170                                     struct drm_sched_entity *entity)
171 {
172         struct drm_gpu_scheduler *sched = entity->rq->sched;
173         struct drm_sched_fence *s_fence;
174
175         if (!fence || dma_fence_is_signaled(fence))
176                 return false;
177         if (fence->context == entity->fence_context)
178                 return true;
179         s_fence = to_drm_sched_fence(fence);
180         if (s_fence && s_fence->sched == sched)
181                 return true;
182
183         return false;
184 }
185 EXPORT_SYMBOL(drm_sched_dependency_optimized);
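/*
 * Hypothetical usage sketch: while resolving the dependencies of a job, a
 * driver could use this helper to decide whether an explicit wait or
 * pipeline sync is still required. my_ring_emit_pipeline_sync() is an
 * assumed driver function, not part of the scheduler.
 *
 *	if (!drm_sched_dependency_optimized(fence, s_entity))
 *		my_ring_emit_pipeline_sync(ring);
 */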
186
187 /**
188  * drm_sched_start_timeout - start timeout for reset worker
189  *
190  * @sched: scheduler instance to start the worker for
191  *
192  * Start the timeout for the given scheduler.
193  */
194 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
195 {
196         if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
197             !list_empty(&sched->ring_mirror_list))
198                 schedule_delayed_work(&sched->work_tdr, sched->timeout);
199 }
200
201 /**
202  * drm_sched_fault - immediately start timeout handler
203  *
204  * @sched: scheduler where the timeout handling should be started.
205  *
206  * Start timeout handling immediately when the driver detects a hardware fault.
207  */
208 void drm_sched_fault(struct drm_gpu_scheduler *sched)
209 {
210         mod_delayed_work(system_wq, &sched->work_tdr, 0);
211 }
212 EXPORT_SYMBOL(drm_sched_fault);
213
214 /**
215  * drm_sched_suspend_timeout - Suspend scheduler job timeout
216  *
217  * @sched: scheduler instance for which to suspend the timeout
218  *
219  * Suspend the delayed work timeout for the scheduler. This is done by
220  * modifying the delayed work timeout to an arbitrary large value,
221  * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
222  * called from an IRQ context.
223  *
224  * Returns the timeout remaining
225  *
226  */
227 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
228 {
229         unsigned long sched_timeout, now = jiffies;
230
231         sched_timeout = sched->work_tdr.timer.expires;
232
233         /*
234          * Modify the timeout to an arbitrarily large value. This also prevents
235          * the timeout from being restarted when new submissions arrive
236          */
237         if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
238                         && time_after(sched_timeout, now))
239                 return sched_timeout - now;
240         else
241                 return sched->timeout;
242 }
243 EXPORT_SYMBOL(drm_sched_suspend_timeout);
244
245 /**
246  * drm_sched_resume_timeout - Resume scheduler job timeout
247  *
248  * @sched: scheduler instance for which to resume the timeout
249  * @remaining: remaining timeout
250  *
251  * Resume the delayed work timeout for the scheduler. Note that
252  * this function can be called from an IRQ context.
253  */
254 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
255                 unsigned long remaining)
256 {
257         unsigned long flags;
258
259         spin_lock_irqsave(&sched->job_list_lock, flags);
260
261         if (list_empty(&sched->ring_mirror_list))
262                 cancel_delayed_work(&sched->work_tdr);
263         else
264                 mod_delayed_work(system_wq, &sched->work_tdr, remaining);
265
266         spin_unlock_irqrestore(&sched->job_list_lock, flags);
267 }
268 EXPORT_SYMBOL(drm_sched_resume_timeout);
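/*
 * Hedged pairing sketch: a driver can park the timeout around an operation
 * that legitimately stalls job completion and then restore whatever time
 * was left on the timer. my_do_slow_maintenance() and my_dev are
 * hypothetical driver names used only for illustration.
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(sched);
 *	my_do_slow_maintenance(my_dev);
 *	drm_sched_resume_timeout(sched, remaining);
 */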
269
270 /* drm_sched_job_finish() is called after the hw fence has signaled
271  */
272 static void drm_sched_job_finish(struct work_struct *work)
273 {
274         struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
275                                                    finish_work);
276         struct drm_gpu_scheduler *sched = s_job->sched;
277         unsigned long flags;
278
279         /*
280          * Canceling the timeout without removing our job from the ring mirror
281          * list is safe, as we will only end up in this worker if our job's
282          * finished fence has been signaled. So even if another worker
283          * manages to find this job as the next job in the list, the fence
284          * signaled check below will prevent the timeout from being restarted.
285          */
286         cancel_delayed_work_sync(&sched->work_tdr);
287
288         spin_lock_irqsave(&sched->job_list_lock, flags);
289         /* remove job from ring_mirror_list */
290         list_del_init(&s_job->node);
291         /* queue TDR for next job */
292         drm_sched_start_timeout(sched);
293         spin_unlock_irqrestore(&sched->job_list_lock, flags);
294
295         sched->ops->free_job(s_job);
296 }
297
298 static void drm_sched_job_finish_cb(struct dma_fence *f,
299                                     struct dma_fence_cb *cb)
300 {
301         struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
302                                                  finish_cb);
303         schedule_work(&job->finish_work);
304 }
305
306 static void drm_sched_job_begin(struct drm_sched_job *s_job)
307 {
308         struct drm_gpu_scheduler *sched = s_job->sched;
309         unsigned long flags;
310
311         dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
312                                drm_sched_job_finish_cb);
313
314         spin_lock_irqsave(&sched->job_list_lock, flags);
315         list_add_tail(&s_job->node, &sched->ring_mirror_list);
316         drm_sched_start_timeout(sched);
317         spin_unlock_irqrestore(&sched->job_list_lock, flags);
318 }
319
320 static void drm_sched_job_timedout(struct work_struct *work)
321 {
322         struct drm_gpu_scheduler *sched;
323         struct drm_sched_job *job;
324         unsigned long flags;
325
326         sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
327         job = list_first_entry_or_null(&sched->ring_mirror_list,
328                                        struct drm_sched_job, node);
329
330         if (job)
331                 job->sched->ops->timedout_job(job);
332
333         spin_lock_irqsave(&sched->job_list_lock, flags);
334         drm_sched_start_timeout(sched);
335         spin_unlock_irqrestore(&sched->job_list_lock, flags);
336 }
337
338 /**
339  * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
340  *
341  * @sched: scheduler instance
342  * @bad: bad scheduler job
343  *
 * Removes the HW fence callbacks from all jobs on the mirror list and, if
 * @bad is not a kernel-priority job, increases its karma and marks its
 * entity guilty once the hang limit is exceeded.
344  */
345 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
346 {
347         struct drm_sched_job *s_job;
348         struct drm_sched_entity *entity, *tmp;
349         unsigned long flags;
350         int i;
351
352         spin_lock_irqsave(&sched->job_list_lock, flags);
353         list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
354                 if (s_job->s_fence->parent &&
355                     dma_fence_remove_callback(s_job->s_fence->parent,
356                                               &s_job->s_fence->cb)) {
357                         dma_fence_put(s_job->s_fence->parent);
358                         s_job->s_fence->parent = NULL;
359                         atomic_dec(&sched->hw_rq_count);
360                 }
361         }
362         spin_unlock_irqrestore(&sched->job_list_lock, flags);
363
364         if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
365                 atomic_inc(&bad->karma);
366                 /* don't increase @bad's karma if it's from the KERNEL RQ, because a
367                  * GPU hang can sometimes cause kernel jobs (like VM updating jobs) to be
368                  * corrupted; keep in mind that kernel jobs are always considered good.
369                  */
370                 for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++ ) {
371                         struct drm_sched_rq *rq = &sched->sched_rq[i];
372
373                         spin_lock(&rq->lock);
374                         list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
375                                 if (bad->s_fence->scheduled.context == entity->fence_context) {
376                                         if (atomic_read(&bad->karma) > bad->sched->hang_limit)
377                                                 if (entity->guilty)
378                                                         atomic_set(entity->guilty, 1);
379                                         break;
380                                 }
381                         }
382                         spin_unlock(&rq->lock);
383                         if (&entity->list != &rq->entities)
384                                 break;
385                 }
386         }
387 }
388 EXPORT_SYMBOL(drm_sched_hw_job_reset);
389
390 /**
391  * drm_sched_job_recovery - recover jobs after a reset
392  *
393  * @sched: scheduler instance
394  *
 * Resubmits the jobs on the mirror list to the hardware after a reset,
 * cancelling jobs whose context was found guilty, and restarts the timeout.
395  */
396 void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
397 {
398         struct drm_sched_job *s_job, *tmp;
399         bool found_guilty = false;
400         unsigned long flags;
401         int r;
402
403         spin_lock_irqsave(&sched->job_list_lock, flags);
404         list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
405                 struct drm_sched_fence *s_fence = s_job->s_fence;
406                 struct dma_fence *fence;
407                 uint64_t guilty_context;
408
409                 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
410                         found_guilty = true;
411                         guilty_context = s_job->s_fence->scheduled.context;
412                 }
413
414                 if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
415                         dma_fence_set_error(&s_fence->finished, -ECANCELED);
416
417                 spin_unlock_irqrestore(&sched->job_list_lock, flags);
418                 fence = sched->ops->run_job(s_job);
419                 atomic_inc(&sched->hw_rq_count);
420
421                 if (fence) {
422                         s_fence->parent = dma_fence_get(fence);
423                         r = dma_fence_add_callback(fence, &s_fence->cb,
424                                                    drm_sched_process_job);
425                         if (r == -ENOENT)
426                                 drm_sched_process_job(fence, &s_fence->cb);
427                         else if (r)
428                                 DRM_ERROR("fence add callback failed (%d)\n",
429                                           r);
430                         dma_fence_put(fence);
431                 } else {
432                         if (s_fence->finished.error < 0)
433                                 drm_sched_expel_job_unlocked(s_job);
434                         drm_sched_process_job(NULL, &s_fence->cb);
435                 }
436                 spin_lock_irqsave(&sched->job_list_lock, flags);
437         }
438         drm_sched_start_timeout(sched);
439         spin_unlock_irqrestore(&sched->job_list_lock, flags);
440 }
441 EXPORT_SYMBOL(drm_sched_job_recovery);
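/*
 * Illustrative recovery flow, assuming a driver-side hardware reset routine:
 * a driver's timedout_job callback commonly stops fence processing with
 * drm_sched_hw_job_reset(), resets the hardware, and then resubmits the
 * remaining jobs with drm_sched_job_recovery(). "my_*" names are
 * hypothetical.
 *
 *	static void my_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *		drm_sched_hw_job_reset(sched, sched_job);
 *		my_hw_reset(my_dev);
 *		drm_sched_job_recovery(sched);
 *	}
 */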
442
443 /**
444  * drm_sched_job_init - init a scheduler job
445  *
446  * @job: scheduler job to init
447  * @entity: scheduler entity to use
448  * @owner: job owner for debugging
449  *
450  * Refer to drm_sched_entity_push_job() documentation
451  * for locking considerations.
452  *
453  * Returns 0 for success, negative error code otherwise.
454  */
455 int drm_sched_job_init(struct drm_sched_job *job,
456                        struct drm_sched_entity *entity,
457                        void *owner)
458 {
459         struct drm_gpu_scheduler *sched;
460
461         drm_sched_entity_select_rq(entity);
462         if (!entity->rq)
463                 return -ENOENT;
464
465         sched = entity->rq->sched;
466
467         job->sched = sched;
468         job->entity = entity;
469         job->s_priority = entity->rq - sched->sched_rq;
470         job->s_fence = drm_sched_fence_create(entity, owner);
471         if (!job->s_fence)
472                 return -ENOMEM;
473         job->id = atomic64_inc_return(&sched->job_id_count);
474
475         INIT_WORK(&job->finish_work, drm_sched_job_finish);
476         INIT_LIST_HEAD(&job->node);
477
478         return 0;
479 }
480 EXPORT_SYMBOL(drm_sched_job_init);
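/*
 * Submission sketch, with hypothetical "my_*" driver names: once
 * drm_sched_job_init() succeeds, the job is handed to its entity with
 * drm_sched_entity_push_job(), after which the scheduler processes it.
 *
 *	r = drm_sched_job_init(&my_job->base, entity, my_file_priv);
 *	if (r)
 *		goto err_free_job;
 *
 *	drm_sched_entity_push_job(&my_job->base, entity);
 */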
481
482 /**
483  * drm_sched_job_cleanup - clean up scheduler job resources
484  *
485  * @job: scheduler job to clean up
486  */
487 void drm_sched_job_cleanup(struct drm_sched_job *job)
488 {
489         dma_fence_put(&job->s_fence->finished);
490         job->s_fence = NULL;
491 }
492 EXPORT_SYMBOL(drm_sched_job_cleanup);
493
494 /**
495  * drm_sched_ready - is the scheduler ready
496  *
497  * @sched: scheduler instance
498  *
499  * Return true if we can push more jobs to the hw, otherwise false.
500  */
501 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
502 {
503         return atomic_read(&sched->hw_rq_count) <
504                 sched->hw_submission_limit;
505 }
506
507 /**
508  * drm_sched_wakeup - Wake up the scheduler when it is ready
509  *
510  * @sched: scheduler instance
511  *
512  */
513 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
514 {
515         if (drm_sched_ready(sched))
516                 wake_up_interruptible(&sched->wake_up_worker);
517 }
518
519 /**
520  * drm_sched_select_entity - Select next entity to process
521  *
522  * @sched: scheduler instance
523  *
524  * Returns the entity to process or NULL if none are found.
525  */
526 static struct drm_sched_entity *
527 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
528 {
529         struct drm_sched_entity *entity;
530         int i;
531
532         if (!drm_sched_ready(sched))
533                 return NULL;
534
535         /* Kernel run queue has higher priority than normal run queue */
536         for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
537                 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
538                 if (entity)
539                         break;
540         }
541
542         return entity;
543 }
544
545 /**
546  * drm_sched_process_job - process a job
547  *
548  * @f: fence
549  * @cb: fence callback
550  *
551  * Called after the job has finished execution.
552  */
553 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
554 {
555         struct drm_sched_fence *s_fence =
556                 container_of(cb, struct drm_sched_fence, cb);
557         struct drm_gpu_scheduler *sched = s_fence->sched;
558
559         dma_fence_get(&s_fence->finished);
560         atomic_dec(&sched->hw_rq_count);
561         atomic_dec(&sched->num_jobs);
562         drm_sched_fence_finished(s_fence);
563
564         trace_drm_sched_process_job(s_fence);
565         dma_fence_put(&s_fence->finished);
566         wake_up_interruptible(&sched->wake_up_worker);
567 }
568
569 /**
570  * drm_sched_blocked - check if the scheduler is blocked
571  *
572  * @sched: scheduler instance
573  *
574  * Returns true if blocked, otherwise false.
575  */
576 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
577 {
578         if (kthread_should_park()) {
579                 kthread_parkme();
580                 return true;
581         }
582
583         return false;
584 }
585
586 /**
587  * drm_sched_main - main scheduler thread
588  *
589  * @param: scheduler instance
590  *
591  * Returns 0.
592  */
593 static int drm_sched_main(void *param)
594 {
595         struct sched_param sparam = {.sched_priority = 1};
596         struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
597         int r;
598
599         sched_setscheduler(current, SCHED_FIFO, &sparam);
600
601         while (!kthread_should_stop()) {
602                 struct drm_sched_entity *entity = NULL;
603                 struct drm_sched_fence *s_fence;
604                 struct drm_sched_job *sched_job;
605                 struct dma_fence *fence;
606
607                 wait_event_interruptible(sched->wake_up_worker,
608                                          (!drm_sched_blocked(sched) &&
609                                           (entity = drm_sched_select_entity(sched))) ||
610                                          kthread_should_stop());
611
612                 if (!entity)
613                         continue;
614
615                 sched_job = drm_sched_entity_pop_job(entity);
616                 if (!sched_job)
617                         continue;
618
619                 s_fence = sched_job->s_fence;
620
621                 atomic_inc(&sched->hw_rq_count);
622                 drm_sched_job_begin(sched_job);
623
624                 fence = sched->ops->run_job(sched_job);
625                 drm_sched_fence_scheduled(s_fence);
626
627                 if (fence) {
628                         s_fence->parent = dma_fence_get(fence);
629                         r = dma_fence_add_callback(fence, &s_fence->cb,
630                                                    drm_sched_process_job);
631                         if (r == -ENOENT)
632                                 drm_sched_process_job(fence, &s_fence->cb);
633                         else if (r)
634                                 DRM_ERROR("fence add callback failed (%d)\n",
635                                           r);
636                         dma_fence_put(fence);
637                 } else {
638                         if (s_fence->finished.error < 0)
639                                 drm_sched_expel_job_unlocked(sched_job);
640                         drm_sched_process_job(NULL, &s_fence->cb);
641                 }
642
643                 wake_up(&sched->job_scheduled);
644         }
645         return 0;
646 }
647
648 static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
649 {
650         struct drm_gpu_scheduler *sched = s_job->sched;
651
652         spin_lock(&sched->job_list_lock);
653         list_del_init(&s_job->node);
654         spin_unlock(&sched->job_list_lock);
655 }
656
657 /**
658  * drm_sched_init - Init a gpu scheduler instance
659  *
660  * @sched: scheduler instance
661  * @ops: backend operations for this scheduler
662  * @hw_submission: number of hw submissions that can be in flight
663  * @hang_limit: number of times to allow a job to hang before dropping it
664  * @timeout: timeout value in jiffies for the scheduler
665  * @name: name used for debugging
666  *
667  * Return 0 on success, otherwise error code.
668  */
669 int drm_sched_init(struct drm_gpu_scheduler *sched,
670                    const struct drm_sched_backend_ops *ops,
671                    unsigned hw_submission,
672                    unsigned hang_limit,
673                    long timeout,
674                    const char *name)
675 {
676         int i, ret;
677         sched->ops = ops;
678         sched->hw_submission_limit = hw_submission;
679         sched->name = name;
680         sched->timeout = timeout;
681         sched->hang_limit = hang_limit;
682         for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
683                 drm_sched_rq_init(sched, &sched->sched_rq[i]);
684
685         init_waitqueue_head(&sched->wake_up_worker);
686         init_waitqueue_head(&sched->job_scheduled);
687         INIT_LIST_HEAD(&sched->ring_mirror_list);
688         spin_lock_init(&sched->job_list_lock);
689         atomic_set(&sched->hw_rq_count, 0);
690         INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
691         atomic_set(&sched->num_jobs, 0);
692         atomic64_set(&sched->job_id_count, 0);
693
694         /* Each scheduler will run on a separate kernel thread */
695         sched->thread = kthread_run(drm_sched_main, sched, sched->name);
696         if (IS_ERR(sched->thread)) {
697                 ret = PTR_ERR(sched->thread);
698                 sched->thread = NULL;
699                 DRM_ERROR("Failed to create scheduler for %s.\n", name);
700                 return ret;
701         }
702
703         sched->ready = true;
704         return 0;
705 }
706 EXPORT_SYMBOL(drm_sched_init);
707
708 /**
709  * drm_sched_fini - Destroy a gpu scheduler
710  *
711  * @sched: scheduler instance
712  *
713  * Tears down and cleans up the scheduler.
714  */
715 void drm_sched_fini(struct drm_gpu_scheduler *sched)
716 {
717         if (sched->thread)
718                 kthread_stop(sched->thread);
719
720         sched->ready = false;
721 }
722 EXPORT_SYMBOL(drm_sched_fini);