/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words, we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS

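/*
 * Illustrative sketch (not taken from the original header): a driver can set
 * this bit on a scheduler fence's &dma_fence.flags, e.g. on the finished
 * fence of an armed job, to force the CPU round trip for everything that
 * depends on it.
 *
 *      struct dma_fence *fence = &job->s_fence->finished;
 *
 *      set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags);
 */
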
struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
        DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_NORMAL,
        DRM_SCHED_PRIORITY_HIGH,
        DRM_SCHED_PRIORITY_KERNEL,

        DRM_SCHED_PRIORITY_COUNT,
        DRM_SCHED_PRIORITY_UNSET = -2
};

/* Used to choose between FIFO and RR job scheduling */
extern int drm_sched_policy;

#define DRM_SCHED_POLICY_RR   0
#define DRM_SCHED_POLICY_FIFO 1

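/*
 * Illustrative sketch (not taken from the original header), mirroring how the
 * scheduler core consumes this knob: code that must branch on the active
 * policy compares drm_sched_policy against the defines above.
 *
 *      if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
 *              drm_sched_rq_update_fifo(entity, ktime_get());
 */
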
/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
        /**
         * @list:
         *
         * Used to append this struct to the list of entities in the runqueue
         * @rq under &drm_sched_rq.entities.
         *
         * Protected by &drm_sched_rq.lock of @rq.
         */
        struct list_head                list;

        /**
         * @rq:
         *
         * Runqueue on which this entity is currently scheduled.
         *
         * FIXME: Locking is very unclear for this. Writers are protected by
         * @rq_lock, but readers are generally lockless and seem to just race
         * with not even a READ_ONCE.
         */
        struct drm_sched_rq             *rq;

        /**
         * @sched_list:
         *
         * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can
         * be scheduled on any scheduler on this list.
         *
         * This can be modified by calling drm_sched_entity_modify_sched().
         * Locking is entirely up to the driver, see the above function for more
         * details.
         *
         * This will be set to NULL if &num_sched_list equals 1 and @rq has been
         * set already.
         *
         * FIXME: This means priority changes through
         * drm_sched_entity_set_priority() will be lost henceforth in this case.
         */
        struct drm_gpu_scheduler        **sched_list;

        /**
         * @num_sched_list:
         *
         * Number of drm_gpu_schedulers in the @sched_list.
         */
        unsigned int                    num_sched_list;

        /**
         * @priority:
         *
         * Priority of the entity. This can be modified by calling
         * drm_sched_entity_set_priority(). Protected by &rq_lock.
         */
        enum drm_sched_priority         priority;

        /**
         * @rq_lock:
         *
         * Lock to modify the runqueue to which this entity belongs.
         */
        spinlock_t                      rq_lock;

        /**
         * @job_queue: the list of jobs of this entity.
         */
        struct spsc_queue               job_queue;

        /**
         * @fence_seq:
         *
         * A linearly increasing seqno incremented with each new
         * &drm_sched_fence which is part of the entity.
         *
         * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
         * this doesn't need to be atomic.
         */
        atomic_t                        fence_seq;

        /**
         * @fence_context:
         *
         * A unique context for all the fences which belong to this entity. The
         * &drm_sched_fence.scheduled uses the fence_context but
         * &drm_sched_fence.finished uses fence_context + 1.
         */
        uint64_t                        fence_context;

        /**
         * @dependency:
         *
         * The dependency fence of the job which is on the top of the job queue.
         */
        struct dma_fence                *dependency;

        /**
         * @cb:
         *
         * Callback for the dependency fence above.
         */
        struct dma_fence_cb             cb;

        /**
         * @guilty:
         *
         * Points to the entity's guilty flag.
         */
        atomic_t                        *guilty;

        /**
         * @last_scheduled:
         *
         * Points to the finished fence of the last scheduled job. Only written
         * by the scheduler thread, can be accessed locklessly from
         * drm_sched_job_arm() iff the queue is empty.
         */
        struct dma_fence                *last_scheduled;

        /**
         * @last_user: last group leader pushing a job into the entity.
         */
        struct task_struct              *last_user;

        /**
         * @stopped:
         *
         * Marks the entity as removed from the rq and destined for
         * termination. This is set by calling drm_sched_entity_flush() and by
         * drm_sched_fini().
         */
        bool                            stopped;

        /**
         * @entity_idle:
         *
         * Signals when the entity is not in use, used to sequence entity
         * cleanup in drm_sched_entity_fini().
         */
        struct completion               entity_idle;

        /**
         * @oldest_job_waiting:
         *
         * Marks the earliest job waiting in the SW queue.
         */
        ktime_t                         oldest_job_waiting;

        /**
         * @rb_tree_node:
         *
         * The node used to insert this entity into the time-based priority
         * queue.
         */
        struct rb_node                  rb_tree_node;
};

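/*
 * Illustrative sketch (hypothetical "foo" driver, not taken from the original
 * header): typical entity setup picks a priority and the set of schedulers
 * the entity may run on, and tears the entity down when the context goes
 * away.
 *
 *      struct drm_gpu_scheduler *sched_list[] = { &foo->sched };
 *      int ret;
 *
 *      ret = drm_sched_entity_init(&fctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *                                  sched_list, ARRAY_SIZE(sched_list), NULL);
 *      if (ret)
 *              return ret;
 *
 *      // ... push jobs with drm_sched_entity_push_job() ...
 *
 *      drm_sched_entity_destroy(&fctx->entity);
 */
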
/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 * @rb_tree_root: root of the time-based priority queue of entities
 *                used for FIFO scheduling
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
        spinlock_t                      lock;
        struct drm_gpu_scheduler        *sched;
        struct list_head                entities;
        struct drm_sched_entity         *current_entity;
        struct rb_root_cached           rb_tree_root;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
        /**
         * @scheduled: this fence is what will be signaled by the scheduler
         * when the job is scheduled.
         */
        struct dma_fence                scheduled;

        /**
         * @finished: this fence is what will be signaled by the scheduler
         * when the job is completed.
         *
         * When setting up an out fence for the job, you should use
         * this, since it's available immediately upon
         * drm_sched_job_init(), and the fence returned by the driver
         * from run_job() won't be created until the dependencies have
         * resolved.
         */
        struct dma_fence                finished;

        /**
         * @parent: the fence returned by &drm_sched_backend_ops.run_job
         * when scheduling the job on hardware. We signal the
         * &drm_sched_fence.finished fence once parent is signalled.
         */
        struct dma_fence                *parent;

        /**
         * @sched: the scheduler instance to which the job having this struct
         * belongs.
         */
        struct drm_gpu_scheduler        *sched;

        /**
         * @lock: the lock used by the scheduled and the finished fences.
         */
        spinlock_t                      lock;

        /**
         * @owner: job owner for debugging
         */
        void                            *owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the scheduler's "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @work: Helper to reschedule the job kill to a different context.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job; a sketch of this flow follows the struct below.
 */
struct drm_sched_job {
        struct spsc_node                queue_node;
        struct list_head                list;
        struct drm_gpu_scheduler        *sched;
        struct drm_sched_fence          *s_fence;

        /*
         * work is used only after finish_cb has been used and will not be
         * scheduled again before then.
         */
        union {
                struct dma_fence_cb     finish_cb;
                struct work_struct      work;
        };

        uint64_t                        id;
        atomic_t                        karma;
        enum drm_sched_priority         s_priority;
        struct drm_sched_entity         *entity;
        struct dma_fence_cb             cb;

        /**
         * @dependencies:
         *
         * Contains the dependencies as struct dma_fence for this job, see
         * drm_sched_job_add_dependency() and
         * drm_sched_job_add_implicit_dependencies().
         */
        struct xarray                   dependencies;

        /** @last_dependency: tracks @dependencies as they signal */
        unsigned long                   last_dependency;

        /**
         * @submit_ts:
         *
         * When the job was pushed into the entity queue.
         */
        ktime_t                         submit_ts;
};

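/*
 * Illustrative sketch (hypothetical "foo" driver, not taken from the original
 * header) of the submission flow described above: initialize the job against
 * an entity, gather dependencies, arm it, then push it.
 *
 *      ret = drm_sched_job_init(&fjob->base, &fctx->entity, fctx);
 *      if (ret)
 *              return ret;
 *
 *      ret = drm_sched_job_add_implicit_dependencies(&fjob->base, obj, true);
 *      if (ret) {
 *              drm_sched_job_cleanup(&fjob->base);
 *              return ret;
 *      }
 *
 *      drm_sched_job_arm(&fjob->base);
 *      fence = dma_fence_get(&fjob->base.s_fence->finished);
 *      drm_sched_entity_push_job(&fjob->base);
 */
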
/* Increment the job's karma and check it against the given hang threshold. */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
                                            int threshold)
{
        return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

enum drm_gpu_sched_stat {
        DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
        DRM_GPU_SCHED_STAT_NOMINAL,
        DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops - Define the backend operations
 *      called by the scheduler
 *
 * These functions should be implemented on the driver side.
 */
struct drm_sched_backend_ops {
        /**
         * @prepare_job:
         *
         * Called when the scheduler is considering scheduling this job next, to
         * get another struct dma_fence for this job to block on. Once it
         * returns NULL, run_job() may be called.
         *
         * Can be NULL if no additional preparation of the dependencies is
         * necessary. Skipped when jobs are killed instead of run.
         */
        struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
                                         struct drm_sched_entity *s_entity);

        /**
         * @run_job: Called to execute the job once all of the dependencies
         * have been resolved. This may be called multiple times, if
         * timedout_job() has happened and drm_sched_job_recovery()
         * decides to try it again.
         */
        struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

        /**
         * @timedout_job: Called when a job has taken too long to execute,
         * to trigger GPU recovery.
         *
         * This method is called in a workqueue context.
         *
         * Drivers typically issue a reset to recover from GPU hangs, and this
         * procedure usually follows this workflow (a sketch follows the
         * struct below):
         *
         * 1. Stop the scheduler using drm_sched_stop(). This will park the
         *    scheduler thread and cancel the timeout work, guaranteeing that
         *    nothing is queued while we reset the hardware queue
         * 2. Try to gracefully stop non-faulty jobs (optional)
         * 3. Issue a GPU reset (driver-specific)
         * 4. Re-submit jobs using drm_sched_resubmit_jobs()
         * 5. Restart the scheduler using drm_sched_start(). At that point, new
         *    jobs can be queued, and the scheduler thread is unblocked
         *
         * Note that some GPUs have distinct hardware queues but need to reset
         * the GPU globally, which requires extra synchronization between the
         * timeout handlers of the different &drm_gpu_scheduler instances. One
         * way to achieve this synchronization is to create an ordered
         * workqueue (using alloc_ordered_workqueue()) at the driver level, and
         * pass this queue to drm_sched_init(), to guarantee that timeout
         * handlers are executed sequentially. The above workflow needs to be
         * slightly adjusted in that case:
         *
         * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
         * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
         *    the reset (optional)
         * 3. Issue a GPU reset on all faulty queues (driver-specific)
         * 4. Re-submit jobs on all schedulers impacted by the reset using
         *    drm_sched_resubmit_jobs()
         * 5. Restart all schedulers that were stopped in step #1 using
         *    drm_sched_start()
         *
         * Return DRM_GPU_SCHED_STAT_NOMINAL when all is normal and the
         * underlying driver has started or completed recovery.
         *
         * Return DRM_GPU_SCHED_STAT_ENODEV if the device is no longer
         * available, i.e. has been unplugged.
         */
        enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

        /**
         * @free_job: Called once the job's finished fence has been signaled
         * and it's time to clean it up.
         */
        void (*free_job)(struct drm_sched_job *sched_job);
};

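/*
 * Illustrative sketch (hypothetical "foo" driver; foo_hw_submit(),
 * foo_hw_reset() and to_foo_job() are assumed driver helpers): a minimal ops
 * table. foo_job_run() kicks the hardware and returns the HW fence,
 * foo_job_timedout() follows the single-queue recovery workflow described
 * above, and foo_job_free() drops the job.
 *
 *      static struct dma_fence *foo_job_run(struct drm_sched_job *sched_job)
 *      {
 *              // Hand the job to the hardware; returns the HW fence.
 *              return foo_hw_submit(to_foo_job(sched_job));
 *      }
 *
 *      static enum drm_gpu_sched_stat
 *      foo_job_timedout(struct drm_sched_job *sched_job)
 *      {
 *              struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *              drm_sched_stop(sched, sched_job);   // 1. park the scheduler
 *              foo_hw_reset(sched);                // 2./3. driver-specific reset
 *              drm_sched_resubmit_jobs(sched);     // 4. re-submit jobs
 *              drm_sched_start(sched, true);       // 5. unblock the scheduler
 *
 *              return DRM_GPU_SCHED_STAT_NOMINAL;
 *      }
 *
 *      static void foo_job_free(struct drm_sched_job *sched_job)
 *      {
 *              drm_sched_job_cleanup(sched_job);
 *              kfree(to_foo_job(sched_job));
 *      }
 *
 *      static const struct drm_sched_backend_ops foo_sched_ops = {
 *              .run_job = foo_job_run,
 *              .timedout_job = foo_job_timedout,
 *              .free_job = foo_job_free,
 *      };
 */
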
/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called, the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and is no longer considered for
 *              scheduling.
 * @score: score to help the load balancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
        const struct drm_sched_backend_ops *ops;
        uint32_t                        hw_submission_limit;
        long                            timeout;
        const char                      *name;
        struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_COUNT];
        wait_queue_head_t               wake_up_worker;
        wait_queue_head_t               job_scheduled;
        atomic_t                        hw_rq_count;
        atomic64_t                      job_id_count;
        struct workqueue_struct         *timeout_wq;
        struct delayed_work             work_tdr;
        struct task_struct              *thread;
        struct list_head                pending_list;
        spinlock_t                      job_list_lock;
        int                             hang_limit;
        atomic_t                        *score;
        atomic_t                        _score;
        bool                            ready;
        bool                            free_guilty;
        struct device                   *dev;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   uint32_t hw_submission, unsigned hang_limit,
                   long timeout, struct workqueue_struct *timeout_wq,
                   atomic_t *score, const char *name, struct device *dev);

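/*
 * Illustrative sketch (hypothetical "foo" driver): one scheduler per ring.
 * Passing a NULL @timeout_wq falls back to the system workqueue; the other
 * values here are arbitrary example numbers.
 *
 *      ret = drm_sched_init(&foo->sched, &foo_sched_ops,
 *                           64,                        // hw_submission
 *                           2,                         // hang_limit
 *                           msecs_to_jiffies(500),     // timeout
 *                           NULL,                      // timeout_wq
 *                           NULL,                      // score
 *                           "foo-ring0", foo->dev);
 *      if (ret)
 *              return ret;
 */
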
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_sched_entity *entity,
                       void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
                                 struct dma_fence *fence);
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
                                        struct dma_resv *resv,
                                        enum dma_resv_usage usage);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
                                            struct drm_gem_object *obj,
                                            bool write);

void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
                                   struct drm_gpu_scheduler **sched_list,
                                   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
                                    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
                             struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                struct drm_sched_entity *entity);

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);

int drm_sched_entity_init(struct drm_sched_entity *entity,
                          enum drm_sched_priority priority,
                          struct drm_gpu_scheduler **sched_list,
                          unsigned int num_sched_list,
                          atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_alloc(
        struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
                          struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
                              unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
                    unsigned int num_sched_list);
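
/*
 * Illustrative sketch (hypothetical "foo" names, not taken from the original
 * header): drivers with several identical rings can use
 * drm_sched_pick_best() to load-balance new entities; it returns the ready
 * scheduler with the least score from @sched_list.
 *
 *      struct drm_gpu_scheduler *best;
 *
 *      best = drm_sched_pick_best(foo->ring_scheds, foo->num_rings);
 *      if (best)
 *              drm_sched_entity_modify_sched(&fctx->entity, &best, 1);
 */

#endif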