/* drivers/gpu/drm/amd/scheduler/gpu_scheduler.h (linux.git) */
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
/* NOTE(review): leading-underscore-uppercase guard names are reserved to the
 * implementation per the C standard; kept as-is to match kernel convention. */
#ifndef _GPU_SCHEDULER_H_
#define _GPU_SCHEDULER_H_

#include <linux/kfifo.h>

/* Upper bound on concurrently active hardware submissions per scheduler;
 * presumably sizes the active_hw_rq kfifo — confirm in gpu_scheduler.c. */
#define AMD_MAX_ACTIVE_HW_SUBMISSION            2

/* Reserved context and process IDs used for kernel-internal submissions. */
#define AMD_KERNEL_CONTEXT_ID                   0
#define AMD_KERNEL_PROCESS_ID                   0

/* Timeout, in milliseconds, when waiting for the GPU to go idle. */
#define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS         3000

/* Forward declarations; full definitions appear below. */
struct amd_gpu_scheduler;
struct amd_run_queue;
/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to the corresponding hardware ring, based on the
 * scheduling policy.
 */
struct amd_sched_entity {
        struct list_head                list;          /* link in the owning run queue's entity list */
        struct amd_run_queue            *belongto_rq;  /* run queue this entity is attached to */
        struct amd_sched_entity         *parent;       /* enclosing group entity; presumably NULL at top level — TODO confirm */
};
50
/**
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct amd_run_queue {
        struct mutex                    lock;            /* presumably serializes entity list changes — confirm in gpu_scheduler.c */
        atomic_t                        nr_entity;       /* number of entities currently on this run queue */
        struct amd_sched_entity         head;            /* sentinel head of the entity list */
        struct amd_sched_entity         *current_entity; /* entity most recently selected for scheduling */
        /**
         * Return 0 means this entity can be scheduled
         * Return -1 means this entity cannot be scheduled for some
         * reason, e.g. it is the head, or there is no job, etc.
         */
        int (*check_entity_status)(struct amd_sched_entity *entity);
};
68
/**
 * Context based scheduler entity: there can be multiple entities for
 * each context, and one entity per ring.
 */
struct amd_context_entity {
        struct amd_sched_entity         generic_entity;  /* embedded base entity (first member) */
        spinlock_t                      lock;
        /* the virtual_seq is unique per context per ring */
        atomic64_t                      last_queued_v_seq;   /* presumably last seq assigned at submission — confirm */
        atomic64_t                      last_emitted_v_seq;  /* presumably last seq emitted to the HW ring — confirm */
        atomic64_t                      last_signaled_v_seq; /* presumably last seq completed/signaled — confirm */
        pid_t                           tgid;            /* thread-group id of the owning process */
        uint32_t                        context_id;      /* id of the context this entity belongs to */
        /* the job_queue maintains the jobs submitted by clients */
        struct kfifo                    job_queue;
        spinlock_t                      queue_lock;      /* guards job_queue accesses — TODO confirm */
        struct amd_gpu_scheduler        *scheduler;      /* scheduler (ring) this entity submits to */
        wait_queue_head_t               wait_queue;      /* waiters on seq signaling; see amd_sched_wait_signal() */
        wait_queue_head_t               wait_emit;       /* waiters on seq emission; see amd_sched_wait_emit() */
        bool                            is_pending;
};
90
/**
 * Define the backend operations called by the scheduler;
 * these functions should be implemented on the driver side.
 */
struct amd_sched_backend_ops {
        /* Prepare a job before execution; nonzero return presumably
         * rejects/defers the job — confirm against the scheduler core. */
        int (*prepare_job)(struct amd_gpu_scheduler *sched,
                           struct amd_context_entity *c_entity,
                           void *job);
        /* Submit the job to the hardware ring for execution. */
        void (*run_job)(struct amd_gpu_scheduler *sched,
                        struct amd_context_entity *c_entity,
                        void *job);
        /* Post-execution processing of a completed job. */
        void (*process_job)(struct amd_gpu_scheduler *sched, void *job);
};
104
/**
 * One scheduler is implemented for each hardware ring.
 */
struct amd_gpu_scheduler {
        void                            *device;          /* opaque driver/device handle passed at create time */
        struct task_struct              *thread;          /* kernel thread running the scheduler main loop */
        struct amd_run_queue            sched_rq;         /* run queue for normal (userspace) entities */
        struct amd_run_queue            kernel_rq;        /* run queue for kernel-context entities */
        struct kfifo                    active_hw_rq;     /* jobs currently active on the hardware */
        struct amd_sched_backend_ops    *ops;             /* driver-supplied backend callbacks */
        uint32_t                        ring_id;          /* hardware ring this scheduler drives */
        uint32_t                        granularity; /* in ms unit */
        uint32_t                        preemption;       /* preemption setting — semantics not visible here; TODO confirm */
        uint64_t                        last_handled_seq; /* last sequence number processed; see amd_sched_get_handled_seq() */
        wait_queue_head_t               wait_queue;       /* scheduler thread sleeps here waiting for work */
        struct amd_context_entity       *current_entity;  /* entity whose job is currently being handled */
        struct mutex                    sched_lock;
        spinlock_t                      queue_lock;
};
124
125
/* Create and start a scheduler instance for one hardware ring.
 * @device is an opaque driver handle stored in the scheduler;
 * @granularity is in milliseconds (see struct amd_gpu_scheduler). */
struct amd_gpu_scheduler *amd_sched_create(void *device,
                                struct amd_sched_backend_ops *ops,
                                uint32_t ring,
                                uint32_t granularity,
                                uint32_t preemption);

/* Stop and tear down a scheduler created by amd_sched_create(). */
int amd_sched_destroy(struct amd_gpu_scheduler *sched);

/* Queue a job from @c_entity onto @sched for later execution. */
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
                       struct amd_context_entity *c_entity,
                       void *job);

/* Check the status of sequence number @seq for @c_entity —
 * presumably whether it has signaled; confirm in gpu_scheduler.c. */
int amd_sched_check_ts(struct amd_context_entity *c_entity, uint64_t seq);

/* Block until sequence number @seq is signaled (completed).
 * @intr selects interruptible sleep; @timeout bounds the wait. */
int amd_sched_wait_signal(struct amd_context_entity *c_entity,
                          uint64_t seq, bool intr, long timeout);
/* Block until sequence number @seq has been emitted to the HW ring.
 * @intr selects interruptible sleep; @timeout bounds the wait. */
int amd_sched_wait_emit(struct amd_context_entity *c_entity,
                        uint64_t seq,
                        bool intr,
                        long timeout);

/* Interrupt-time hook: notify the scheduler of job completion. */
void amd_sched_isr(struct amd_gpu_scheduler *sched);
/* Return the last sequence number the scheduler has processed. */
uint64_t amd_sched_get_handled_seq(struct amd_gpu_scheduler *sched);

/* Tear down a context entity previously set up with
 * amd_context_entity_init(). */
int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
                            struct amd_context_entity *entity);

/* Initialize @entity, attach it to run queue @rq under @parent, and
 * size its job queue for @jobs entries — exact sizing semantics
 * live in the implementation; TODO confirm. */
int amd_context_entity_init(struct amd_gpu_scheduler *sched,
                            struct amd_context_entity *entity,
                            struct amd_sched_entity *parent,
                            struct amd_run_queue *rq,
                            uint32_t context_id,
                            uint32_t jobs);

#endif /* _GPU_SCHEDULER_H_ */