1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
25 #ifndef KFD_DEVICE_QUEUE_MANAGER_H_
26 #define KFD_DEVICE_QUEUE_MANAGER_H_
28 #include <linux/rwsem.h>
29 #include <linux/list.h>
30 #include <linux/mutex.h>
31 #include <linux/sched/mm.h>
33 #include "kfd_mqd_manager.h"
38 #define KFD_MES_PROCESS_QUANTUM 100000
39 #define KFD_MES_GANG_QUANTUM 10000
40 #define USE_DEFAULT_GRACE_PERIOD 0xffffffff
42 struct device_process_node {
43 struct qcm_process_device *qpd;
44 struct list_head list;
52 uint32_t check_vmid:1;
67 union GRBM_GFX_INDEX_BITS {
69 uint32_t instance_index:8;
73 uint32_t sh_broadcast_writes:1;
74 uint32_t instance_broadcast_writes:1;
75 uint32_t se_broadcast_writes:1;
/**
 * struct device_queue_manager_ops
 *
 * @create_queue: Queue creation routine.
 *
 * @destroy_queue: Queue destruction routine.
 *
 * @update_queue: Queue update routine.
 *
 * @execute_queues: Dispatches the queues list to the H/W.
 *
 * @register_process: This routine associates a specific process with device.
 *
 * @unregister_process: destroys the associations between process to device.
 *
 * @initialize: Initializes the pipelines and memory module for that device.
 *
 * @start: Initializes the resources/modules the device needs for queues
 * execution. This function is called on device initialization and after the
 * system woke up after suspension.
 *
 * @stop: This routine stops execution of all the active queues running on the
 * H/W; basically this function is called on system suspend.
 *
 * @uninitialize: Destroys all the device queue manager resources allocated in
 * initialize routine.
 *
 * @create_kernel_queue: Creates kernel queue. Used for debug queue.
 *
 * @destroy_kernel_queue: Destroys kernel queue. Used for debug queue.
 *
 * @set_cache_memory_policy: Sets memory policy (cached/non-cached) for the
 * memory apertures.
 *
 * @process_termination: Clears all process queues belongs to that device.
 *
 * @evict_process_queues: Evict all active queues of a process
 *
 * @restore_process_queues: Restore all evicted queues of a process
 *
 * @get_wave_state: Retrieves context save state and optionally copies the
 * control stack, if kept in the MQD, to the given userspace address.
 *
 * @reset_queues: reset queues which consume RAS poison
 *
 * @get_queue_checkpoint_info: Retrieves queue size information for CRIU checkpoint.
 *
 * @checkpoint_mqd: checkpoint queue MQD contents for CRIU.
 */
131 struct device_queue_manager_ops {
132 int (*create_queue)(struct device_queue_manager *dqm,
134 struct qcm_process_device *qpd,
135 const struct kfd_criu_queue_priv_data *qd,
136 const void *restore_mqd,
137 const void *restore_ctl_stack);
139 int (*destroy_queue)(struct device_queue_manager *dqm,
140 struct qcm_process_device *qpd,
143 int (*update_queue)(struct device_queue_manager *dqm,
144 struct queue *q, struct mqd_update_info *minfo);
146 int (*register_process)(struct device_queue_manager *dqm,
147 struct qcm_process_device *qpd);
149 int (*unregister_process)(struct device_queue_manager *dqm,
150 struct qcm_process_device *qpd);
152 int (*initialize)(struct device_queue_manager *dqm);
153 int (*start)(struct device_queue_manager *dqm);
154 int (*stop)(struct device_queue_manager *dqm);
155 void (*pre_reset)(struct device_queue_manager *dqm);
156 void (*uninitialize)(struct device_queue_manager *dqm);
157 int (*create_kernel_queue)(struct device_queue_manager *dqm,
158 struct kernel_queue *kq,
159 struct qcm_process_device *qpd);
161 void (*destroy_kernel_queue)(struct device_queue_manager *dqm,
162 struct kernel_queue *kq,
163 struct qcm_process_device *qpd);
165 bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
166 struct qcm_process_device *qpd,
167 enum cache_policy default_policy,
168 enum cache_policy alternate_policy,
169 void __user *alternate_aperture_base,
170 uint64_t alternate_aperture_size);
172 int (*process_termination)(struct device_queue_manager *dqm,
173 struct qcm_process_device *qpd);
175 int (*evict_process_queues)(struct device_queue_manager *dqm,
176 struct qcm_process_device *qpd);
177 int (*restore_process_queues)(struct device_queue_manager *dqm,
178 struct qcm_process_device *qpd);
180 int (*get_wave_state)(struct device_queue_manager *dqm,
182 void __user *ctl_stack,
183 u32 *ctl_stack_used_size,
184 u32 *save_area_used_size);
186 int (*reset_queues)(struct device_queue_manager *dqm,
188 void (*get_queue_checkpoint_info)(struct device_queue_manager *dqm,
189 const struct queue *q, u32 *mqd_size,
190 u32 *ctl_stack_size);
192 int (*checkpoint_mqd)(struct device_queue_manager *dqm,
193 const struct queue *q,
198 struct device_queue_manager_asic_ops {
199 int (*update_qpd)(struct device_queue_manager *dqm,
200 struct qcm_process_device *qpd);
201 bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
202 struct qcm_process_device *qpd,
203 enum cache_policy default_policy,
204 enum cache_policy alternate_policy,
205 void __user *alternate_aperture_base,
206 uint64_t alternate_aperture_size);
207 void (*init_sdma_vm)(struct device_queue_manager *dqm,
209 struct qcm_process_device *qpd);
210 struct mqd_manager * (*mqd_manager_init)(enum KFD_MQD_TYPE type,
211 struct kfd_node *dev);
/**
 * struct device_queue_manager
 *
 * This struct is a base class for the kfd queues scheduler in the
 * device level. The device base class should expose the basic operations
 * for queue creation and queue destruction. This base class hides the
 * scheduling mode of the driver and the specific implementation of the
 * concrete device. This class is the only class in the queues scheduler
 * that configures the H/W.
 */
226 struct device_queue_manager {
227 struct device_queue_manager_ops ops;
228 struct device_queue_manager_asic_ops asic_ops;
230 struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
231 struct packet_manager packet_mgr;
232 struct kfd_node *dev;
233 struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
234 struct list_head queues;
235 unsigned int saved_flags;
236 unsigned int processes_count;
237 unsigned int active_queue_count;
238 unsigned int active_cp_queue_count;
239 unsigned int gws_queue_count;
240 unsigned int total_queue_count;
241 unsigned int next_pipe_to_allocate;
242 unsigned int *allocated_queues;
243 DECLARE_BITMAP(sdma_bitmap, KFD_MAX_SDMA_QUEUES);
244 DECLARE_BITMAP(xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
245 /* the pasid mapping for each kfd vmid */
246 uint16_t vmid_pasid[VMID_NUM];
247 uint64_t pipelines_addr;
248 uint64_t fence_gpu_addr;
249 uint64_t *fence_addr;
250 struct kfd_mem_obj *fence_mem;
253 uint32_t trap_debug_vmid;
258 struct work_struct hw_exception_work;
259 struct kfd_mem_obj hiq_sdma_mqd;
262 /* used for GFX 9.4.3 only */
263 uint32_t current_logical_xcc_start;
267 wait_queue_head_t destroy_wait;
270 void device_queue_manager_init_cik(
271 struct device_queue_manager_asic_ops *asic_ops);
272 void device_queue_manager_init_vi(
273 struct device_queue_manager_asic_ops *asic_ops);
274 void device_queue_manager_init_v9(
275 struct device_queue_manager_asic_ops *asic_ops);
276 void device_queue_manager_init_v10(
277 struct device_queue_manager_asic_ops *asic_ops);
278 void device_queue_manager_init_v11(
279 struct device_queue_manager_asic_ops *asic_ops);
280 void program_sh_mem_settings(struct device_queue_manager *dqm,
281 struct qcm_process_device *qpd);
282 unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
283 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
284 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
285 unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
286 unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);
287 int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
288 struct qcm_process_device *qpd);
289 int release_debug_trap_vmid(struct device_queue_manager *dqm,
290 struct qcm_process_device *qpd);
291 int suspend_queues(struct kfd_process *p,
293 uint32_t grace_period,
294 uint64_t exception_clear_mask,
295 uint32_t *usr_queue_id_array);
296 int resume_queues(struct kfd_process *p,
298 uint32_t *usr_queue_id_array);
299 void set_queue_snapshot_entry(struct queue *q,
300 uint64_t exception_clear_mask,
301 struct kfd_queue_snapshot_entry *qss_entry);
302 int debug_lock_and_unmap(struct device_queue_manager *dqm);
303 int debug_map_and_unlock(struct device_queue_manager *dqm);
304 int debug_refresh_runlist(struct device_queue_manager *dqm);
306 static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
308 return (pdd->lds_base >> 16) & 0xFF;
311 static inline unsigned int
312 get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
314 return (pdd->lds_base >> 60) & 0x0E;
/* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
321 static inline void dqm_lock(struct device_queue_manager *dqm)
323 mutex_lock(&dqm->lock_hidden);
324 dqm->saved_flags = memalloc_noreclaim_save();
326 static inline void dqm_unlock(struct device_queue_manager *dqm)
328 memalloc_noreclaim_restore(dqm->saved_flags);
329 mutex_unlock(&dqm->lock_hidden);
332 static inline int read_sdma_queue_counter(uint64_t __user *q_rptr, uint64_t *val)
334 /* SDMA activity counter is stored at queue's RPTR + 0x8 location. */
335 return get_user(*val, q_rptr + 1);
337 #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */