/* drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h */
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24
25 #ifndef KFD_DEVICE_QUEUE_MANAGER_H_
26 #define KFD_DEVICE_QUEUE_MANAGER_H_
27
28 #include <linux/rwsem.h>
29 #include <linux/list.h>
30 #include <linux/mutex.h>
31 #include <linux/sched/mm.h>
32 #include "kfd_priv.h"
33 #include "kfd_mqd_manager.h"
34
35
36 #define VMID_NUM 16
37
38 #define KFD_MES_PROCESS_QUANTUM         100000
39 #define KFD_MES_GANG_QUANTUM            10000
40 #define USE_DEFAULT_GRACE_PERIOD 0xffffffff
41
/*
 * One registered process on this device. Nodes are presumably linked into
 * device_queue_manager.queues — TODO confirm against the .c file.
 */
struct device_process_node {
	struct qcm_process_device *qpd;	/* per-process, per-device queue state */
	struct list_head list;		/* list linkage for the DQM's process list */
};
46
/*
 * Overlay of the SQ_CMD register used to send commands to compute waves.
 * NOTE(review): bit positions below assume LSB-first bitfield allocation
 * (the kernel's convention on supported ABIs); confirm the layout against
 * the ASIC register headers.
 */
union SQ_CMD_BITS {
	struct {
		uint32_t cmd:3;			/* [2:0]   command opcode */
		uint32_t:1;			/* [3]     reserved */
		uint32_t mode:3;		/* [6:4] */
		uint32_t check_vmid:1;		/* [7]     restrict cmd to vm_id below */
		uint32_t trap_id:3;		/* [10:8] */
		uint32_t:5;			/* [15:11] reserved */
		uint32_t wave_id:4;		/* [19:16] */
		uint32_t simd_id:2;		/* [21:20] */
		uint32_t:2;			/* [23:22] reserved */
		uint32_t queue_id:3;		/* [26:24] */
		uint32_t:1;			/* [27]    reserved */
		uint32_t vm_id:4;		/* [31:28] */
	} bitfields, bits;
	uint32_t u32All;	/* whole-register access */
	signed int i32All;
	float f32All;
};
66
/*
 * Overlay of the GRBM_GFX_INDEX register, which selects (or broadcasts to)
 * a shader engine / shader array / instance for subsequent register access.
 * NOTE(review): bit positions assume LSB-first bitfield allocation; confirm
 * against the ASIC register headers.
 */
union GRBM_GFX_INDEX_BITS {
	struct {
		uint32_t instance_index:8;		/* [7:0] */
		uint32_t sh_index:8;			/* [15:8] */
		uint32_t se_index:8;			/* [23:16] */
		uint32_t:5;				/* [28:24] reserved */
		uint32_t sh_broadcast_writes:1;		/* [29] */
		uint32_t instance_broadcast_writes:1;	/* [30] */
		uint32_t se_broadcast_writes:1;		/* [31] */
	} bitfields, bits;
	uint32_t u32All;	/* whole-register access */
	signed int i32All;
	float f32All;
};
81
82 /**
83  * struct device_queue_manager_ops
84  *
85  * @create_queue: Queue creation routine.
86  *
87  * @destroy_queue: Queue destruction routine.
88  *
89  * @update_queue: Queue update routine.
90  *
 * @execute_queues: Dispatches the queues list to the H/W.
92  *
93  * @register_process: This routine associates a specific process with device.
94  *
95  * @unregister_process: destroys the associations between process to device.
96  *
97  * @initialize: Initializes the pipelines and memory module for that device.
98  *
99  * @start: Initializes the resources/modules the device needs for queues
100  * execution. This function is called on device initialization and after the
101  * system woke up after suspension.
102  *
103  * @stop: This routine stops execution of all the active queue running on the
104  * H/W and basically this function called on system suspend.
105  *
106  * @uninitialize: Destroys all the device queue manager resources allocated in
107  * initialize routine.
108  *
109  * @create_kernel_queue: Creates kernel queue. Used for debug queue.
110  *
111  * @destroy_kernel_queue: Destroys kernel queue. Used for debug queue.
112  *
113  * @set_cache_memory_policy: Sets memory policy (cached/ non cached) for the
114  * memory apertures.
115  *
116  * @process_termination: Clears all process queues belongs to that device.
117  *
118  * @evict_process_queues: Evict all active queues of a process
119  *
120  * @restore_process_queues: Restore all evicted queues of a process
121  *
122  * @get_wave_state: Retrieves context save state and optionally copies the
123  * control stack, if kept in the MQD, to the given userspace address.
124  *
125  * @reset_queues: reset queues which consume RAS poison
126  * @get_queue_checkpoint_info: Retrieves queue size information for CRIU checkpoint.
127  *
128  * @checkpoint_mqd: checkpoint queue MQD contents for CRIU.
129  */
130
/*
 * Scheduler-mode-specific entry points of the device queue manager.
 * Member semantics are documented in the kerneldoc comment above.
 */
struct device_queue_manager_ops {
	int	(*create_queue)(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				const struct kfd_criu_queue_priv_data *qd,
				const void *restore_mqd,
				const void *restore_ctl_stack);

	int	(*destroy_queue)(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q);

	int	(*update_queue)(struct device_queue_manager *dqm,
				struct queue *q, struct mqd_update_info *minfo);

	int	(*register_process)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);

	int	(*unregister_process)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);

	int	(*initialize)(struct device_queue_manager *dqm);
	int	(*start)(struct device_queue_manager *dqm);
	int	(*stop)(struct device_queue_manager *dqm);
	/*
	 * NOTE(review): pre_reset is not covered by the kerneldoc above;
	 * presumably invoked before a GPU reset — confirm against callers.
	 */
	void	(*pre_reset)(struct device_queue_manager *dqm);
	void	(*uninitialize)(struct device_queue_manager *dqm);
	int	(*create_kernel_queue)(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd);

	void	(*destroy_kernel_queue)(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd);

	bool	(*set_cache_memory_policy)(struct device_queue_manager *dqm,
					   struct qcm_process_device *qpd,
					   enum cache_policy default_policy,
					   enum cache_policy alternate_policy,
					   void __user *alternate_aperture_base,
					   uint64_t alternate_aperture_size);

	int (*process_termination)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);

	int (*evict_process_queues)(struct device_queue_manager *dqm,
				    struct qcm_process_device *qpd);
	int (*restore_process_queues)(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd);

	int	(*get_wave_state)(struct device_queue_manager *dqm,
				  struct queue *q,
				  void __user *ctl_stack,
				  u32 *ctl_stack_used_size,
				  u32 *save_area_used_size);

	int (*reset_queues)(struct device_queue_manager *dqm,
					uint16_t pasid);
	void	(*get_queue_checkpoint_info)(struct device_queue_manager *dqm,
				  const struct queue *q, u32 *mqd_size,
				  u32 *ctl_stack_size);

	int	(*checkpoint_mqd)(struct device_queue_manager *dqm,
				  const struct queue *q,
				  void *mqd,
				  void *ctl_stack);
};
197
/*
 * ASIC-generation-specific hooks, filled in by one of the
 * device_queue_manager_init_*() functions declared below.
 */
struct device_queue_manager_asic_ops {
	/* Program per-process, per-device H/W state (e.g. apertures). */
	int	(*update_qpd)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);
	/* ASIC-specific cache policy setup; returns false on invalid config. */
	bool	(*set_cache_memory_policy)(struct device_queue_manager *dqm,
					   struct qcm_process_device *qpd,
					   enum cache_policy default_policy,
					   enum cache_policy alternate_policy,
					   void __user *alternate_aperture_base,
					   uint64_t alternate_aperture_size);
	/* Initialize VM state of an SDMA queue's MQD for this ASIC. */
	void	(*init_sdma_vm)(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd);
	/* Create the MQD manager for the given queue type on this ASIC. */
	struct mqd_manager *	(*mqd_manager_init)(enum KFD_MQD_TYPE type,
				 struct kfd_node *dev);
};
213
214 /**
215  * struct device_queue_manager
216  *
217  * This struct is a base class for the kfd queues scheduler in the
218  * device level. The device base class should expose the basic operations
219  * for queue creation and queue destruction. This base class hides the
220  * scheduling mode of the driver and the specific implementation of the
221  * concrete device. This class is the only class in the queues scheduler
222  * that configures the H/W.
223  *
224  */
225
struct device_queue_manager {
	struct device_queue_manager_ops ops;		/* scheduler-mode specific ops */
	struct device_queue_manager_asic_ops asic_ops;	/* ASIC-generation hooks */

	struct mqd_manager	*mqd_mgrs[KFD_MQD_TYPE_MAX]; /* one per MQD type */
	struct packet_manager	packet_mgr;
	struct kfd_node		*dev;		/* owning KFD node */
	struct mutex		lock_hidden; /* use dqm_lock/unlock(dqm) */
	struct list_head	queues;		/* registered processes; see device_process_node */
	unsigned int		saved_flags;	/* memalloc flags saved by dqm_lock() */
	unsigned int		processes_count;
	unsigned int		active_queue_count;
	unsigned int		active_cp_queue_count;
	unsigned int		gws_queue_count;
	unsigned int		total_queue_count;
	unsigned int		next_pipe_to_allocate;
	unsigned int		*allocated_queues;
	/* free/used tracking for regular and XGMI SDMA queues */
	DECLARE_BITMAP(sdma_bitmap, KFD_MAX_SDMA_QUEUES);
	DECLARE_BITMAP(xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
	/* the pasid mapping for each kfd vmid */
	uint16_t		vmid_pasid[VMID_NUM];
	uint64_t		pipelines_addr;
	uint64_t		fence_gpu_addr;	/* GPU VA of the scheduler fence */
	uint64_t		*fence_addr;	/* CPU mapping of the same fence */
	struct kfd_mem_obj	*fence_mem;
	bool			active_runlist;
	int			sched_policy;
	uint32_t		trap_debug_vmid;

	/* hw exception  */
	bool			is_hws_hang;	/* HW scheduler detected as hung */
	bool			is_resetting;
	struct work_struct	hw_exception_work;
	struct kfd_mem_obj	hiq_sdma_mqd;
	bool			sched_running;

	/* used for GFX 9.4.3 only */
	uint32_t		current_logical_xcc_start;

	uint32_t		wait_times;

	wait_queue_head_t	destroy_wait;
};
269
/* Fill in asic_ops for the matching GPU generation (CIK/VI/GFX9/10/11). */
void device_queue_manager_init_cik(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v9(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v10(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v11(
		struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);

/* Topology queries: queue/pipe/SDMA counts for this device. */
unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);

/* Debugger support: trap VMID reservation and queue suspend/resume. */
int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);
int release_debug_trap_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);
int suspend_queues(struct kfd_process *p,
			uint32_t num_queues,
			uint32_t grace_period,
			uint64_t exception_clear_mask,
			uint32_t *usr_queue_id_array);
int resume_queues(struct kfd_process *p,
		uint32_t num_queues,
		uint32_t *usr_queue_id_array);
void set_queue_snapshot_entry(struct queue *q,
			      uint64_t exception_clear_mask,
			      struct kfd_queue_snapshot_entry *qss_entry);
int debug_lock_and_unmap(struct device_queue_manager *dqm);
int debug_map_and_unlock(struct device_queue_manager *dqm);
int debug_refresh_runlist(struct device_queue_manager *dqm);
305
/*
 * Value for the 32-bit SH_MEM_BASES field: bits [23:16] of the process's
 * LDS aperture base. NOTE(review): assumes pdd->lds_base encodes the
 * aperture base as defined in kfd_priv.h — confirm there.
 */
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
	return (pdd->lds_base >> 16) & 0xFF;
}
310
/*
 * Value for the 64-bit SH_MEM_BASES field: the top nybble (bits [63:60])
 * of the LDS aperture base, with its lowest bit masked off (0x0E, not 0x0F).
 * NOTE(review): the low bit presumably distinguishes paired apertures that
 * share a nybble — confirm against the aperture layout in kfd_priv.h.
 */
static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
	return (pdd->lds_base >> 60) & 0x0E;
}
316
/* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void dqm_lock(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock_hidden);
	/* Must follow mutex_lock(): flags are saved per lock-hold section. */
	dqm->saved_flags = memalloc_noreclaim_save();
}
/* Counterpart of dqm_lock(): restore memalloc flags, then drop the mutex. */
static inline void dqm_unlock(struct device_queue_manager *dqm)
{
	/* Restore before unlocking, mirroring the save order in dqm_lock(). */
	memalloc_noreclaim_restore(dqm->saved_flags);
	mutex_unlock(&dqm->lock_hidden);
}
331
/*
 * Read the SDMA activity counter from a user-space queue.
 * Returns 0 on success or -EFAULT from get_user() on a bad address.
 */
static inline int read_sdma_queue_counter(uint64_t __user *q_rptr, uint64_t *val)
{
	/* SDMA activity counter is stored at queue's RPTR + 0x8 location. */
	return get_user(*val, q_rptr + 1);
}
337 #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */