// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_reset.h"
#include "mes_v11_api_def.h"
#include "kfd_debug.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

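/* Worked example of the sizing above: with CIK_HPD_EOP_BYTES_LOG2 == 11,
 * each pipe gets a 1 << 11 == 2048-byte EOP buffer. Keeping the constant
 * in log2 form lets the size be programmed directly wherever a
 * log2-encoded buffer size is expected.
 */
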
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
				  u32 pasid, unsigned int vmid);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param,
				uint32_t grace_period);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param,
				uint32_t grace_period,
				bool reset);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q);

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q);
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q, const uint32_t *restore_sdma_id);
static void kfd_process_hw_exception(struct work_struct *work);

enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec
		+ pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			     dqm->dev->kfd->shared_resources.cp_queue_bitmap))
			return true;
	return false;
}

unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap,
				AMDGPU_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->kfd->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->kfd->shared_resources.num_pipe_per_mec;
}

static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
{
	return kfd_get_num_sdma_engines(dqm->dev) +
		kfd_get_num_xgmi_sdma_engines(dqm->dev);
}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
	return kfd_get_num_sdma_engines(dqm->dev) *
		dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
}

unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
	return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
		dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
}

static void init_sdma_bitmaps(struct device_queue_manager *dqm)
{
	bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES);
	bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm));

	bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
	bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm));

	/* Mask out the reserved queues */
	bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap,
		      dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap,
		      KFD_MAX_SDMA_QUEUES);
}

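/* Worked example (hypothetical device with 2 PCIe-optimized SDMA engines,
 * 8 queues per engine and no XGMI engines): get_num_sdma_queues() returns
 * 2 * 8 == 16, so bits 0..15 of dqm->sdma_bitmap are set, xgmi_sdma_bitmap
 * stays clear, and the bitmap_andnot() above then clears any reserved
 * queues before allocate_sdma_queue() can ever hand them out.
 */
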
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	int xcc_id;

	for_each_inst(xcc_id, xcc_mask)
		dqm->dev->kfd2kgd->program_sh_mem_settings(
			dqm->dev->adev, qpd->vmid, qpd->sh_mem_config,
			qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit,
			qpd->sh_mem_bases, xcc_id);
}

static void kfd_hws_hang(struct device_queue_manager *dqm)
{
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;

	/* Mark all device queues as reset. */
	list_for_each_entry(cur, &dqm->queues, list) {
		qpd = cur->qpd;
		list_for_each_entry(q, &qpd->queues_list, list) {
			struct kfd_process_device *pdd = qpd_to_pdd(qpd);

			pdd->has_reset_queue = true;
		}
	}

	/*
	 * Issue a GPU reset if HWS is unresponsive
	 */
	schedule_work(&dqm->hw_exception_work);
}

static int convert_to_mes_queue_type(int queue_type)
{
	int mes_queue_type;

	switch (queue_type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		mes_queue_type = MES_QUEUE_TYPE_COMPUTE;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		mes_queue_type = MES_QUEUE_TYPE_SDMA;
		break;
	default:
		WARN(1, "Invalid queue type %d", queue_type);
		mes_queue_type = -EINVAL;
		break;
	}

	return mes_queue_type;
}

static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
			 struct qcm_process_device *qpd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
	struct mes_add_queue_input queue_input;
	int r, queue_type;
	uint64_t wptr_addr_off;

	if (!dqm->sched_running || dqm->sched_halt)
		return 0;
	if (!down_read_trylock(&adev->reset_domain->sem))
		return -EIO;

	if (!pdd->proc_ctx_cpu_ptr) {
		r = amdgpu_amdkfd_alloc_gtt_mem(adev,
				AMDGPU_MES_PROC_CTX_SIZE,
				&pdd->proc_ctx_bo,
				&pdd->proc_ctx_gpu_addr,
				&pdd->proc_ctx_cpu_ptr,
				false);
		if (r) {
			dev_err(adev->dev,
				"failed to allocate process context bo\n");
			up_read(&adev->reset_domain->sem);
			return r;
		}
		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
	}

	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
	queue_input.process_id = qpd->pqm->process->pasid;
	queue_input.page_table_base_addr = qpd->page_table_base;
	queue_input.process_va_start = 0;
	queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
	/* MES unit for quantum is 100ns */
	queue_input.process_quantum = KFD_MES_PROCESS_QUANTUM;  /* Equivalent to 10ms. */
	queue_input.process_context_addr = pdd->proc_ctx_gpu_addr;
	queue_input.gang_quantum = KFD_MES_GANG_QUANTUM; /* Equivalent to 1ms */
	queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = q->properties.priority;
	queue_input.gang_global_priority_level =
					AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	queue_input.doorbell_offset = q->properties.doorbell_off;
	queue_input.mqd_addr = q->gart_mqd_addr;
	queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;

	wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
	queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->properties.wptr_bo) + wptr_addr_off;

	queue_input.is_kfd_process = 1;
	queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
	queue_input.queue_size = q->properties.queue_size >> 2;

	queue_input.paging = false;
	queue_input.tba_addr = qpd->tba_addr;
	queue_input.tma_addr = qpd->tma_addr;
	queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
	queue_input.skip_process_ctx_clear =
		qpd->pqm->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED &&
						(qpd->pqm->process->debug_trap_enabled ||
						 kfd_dbg_has_ttmps_always_setup(q->device));

	queue_type = convert_to_mes_queue_type(q->properties.type);
	if (queue_type < 0) {
		dev_err(adev->dev, "Queue type not supported with MES, queue:%d\n",
			q->properties.type);
		up_read(&adev->reset_domain->sem);
		return -EINVAL;
	}
	queue_input.queue_type = (uint32_t)queue_type;

	queue_input.exclusively_scheduled = q->properties.is_gws;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	up_read(&adev->reset_domain->sem);
	if (r) {
		dev_err(adev->dev, "failed to add hardware queue to MES, doorbell=0x%x\n",
			q->properties.doorbell_off);
		dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
		kfd_hws_hang(dqm);
	}

	return r;
}

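/* Note on the locking pattern above: add_queue_mes() and its
 * remove/suspend/resume counterparts below take adev->reset_domain->sem
 * with down_read_trylock() rather than a blocking read lock, so MES
 * requests fail fast with -EIO while a GPU reset holds the domain
 * write-locked, instead of deadlocking against the reset path.
 */
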
static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
	int r;
	struct mes_remove_queue_input queue_input;

	if (!dqm->sched_running || dqm->sched_halt)
		return 0;
	if (!down_read_trylock(&adev->reset_domain->sem))
		return -EIO;

	memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
	queue_input.doorbell_offset = q->properties.doorbell_off;
	queue_input.gang_context_addr = q->gang_ctx_gpu_addr;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	up_read(&adev->reset_domain->sem);

	if (r) {
		dev_err(adev->dev, "failed to remove hardware queue from MES, doorbell=0x%x\n",
			q->properties.doorbell_off);
		dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
		kfd_hws_hang(dqm);
	}

	return r;
}

static int remove_all_kfd_queues_mes(struct device_queue_manager *dqm)
{
	struct device_process_node *cur;
	struct device *dev = dqm->dev->adev->dev;
	struct qcm_process_device *qpd;
	struct queue *q;
	int retval = 0;

	list_for_each_entry(cur, &dqm->queues, list) {
		qpd = cur->qpd;
		list_for_each_entry(q, &qpd->queues_list, list) {
			if (q->properties.is_active) {
				retval = remove_queue_mes(dqm, q, qpd);
				if (retval) {
					dev_err(dev, "%s: Failed to remove queue %d for dev %d",
						__func__,
						q->properties.queue_id,
						dqm->dev->id);
					return retval;
				}
			}
		}
	}

	return retval;
}

static int add_all_kfd_queues_mes(struct device_queue_manager *dqm)
{
	struct device_process_node *cur;
	struct device *dev = dqm->dev->adev->dev;
	struct qcm_process_device *qpd;
	struct queue *q;
	int retval = 0;

	list_for_each_entry(cur, &dqm->queues, list) {
		qpd = cur->qpd;
		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;
			retval = add_queue_mes(dqm, q, qpd);
			if (retval) {
				dev_err(dev, "%s: Failed to add queue %d for dev %d",
					__func__,
					q->properties.queue_id,
					dqm->dev->id);
				return retval;
			}
		}
	}

	return retval;
}

static int suspend_all_queues_mes(struct device_queue_manager *dqm)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
	int r = 0;

	if (!down_read_trylock(&adev->reset_domain->sem))
		return -EIO;

	r = amdgpu_mes_suspend(adev);
	up_read(&adev->reset_domain->sem);

	if (r) {
		dev_err(adev->dev, "failed to suspend gangs from MES\n");
		dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
		kfd_hws_hang(dqm);
	}

	return r;
}

static int resume_all_queues_mes(struct device_queue_manager *dqm)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
	int r = 0;

	if (!down_read_trylock(&adev->reset_domain->sem))
		return -EIO;

	r = amdgpu_mes_resume(adev);
	up_read(&adev->reset_domain->sem);

	if (r) {
		dev_err(adev->dev, "failed to resume gangs from MES\n");
		dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
		kfd_hws_hang(dqm);
	}

	return r;
}

static void increment_queue_count(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd,
				  struct queue *q)
{
	dqm->active_queue_count++;
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count++;

	if (q->properties.is_gws) {
		dqm->gws_queue_count++;
		qpd->mapped_gws_queue = true;
	}
}

static void decrement_queue_count(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd,
				  struct queue *q)
{
	dqm->active_queue_count--;
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count--;

	if (q->properties.is_gws) {
		dqm->gws_queue_count--;
		qpd->mapped_gws_queue = false;
	}
}

/*
 * Allocate a doorbell ID to this queue.
 * If doorbell_id is passed in, make sure requested ID is valid then allocate it.
 */
static int allocate_doorbell(struct qcm_process_device *qpd,
			     struct queue *q,
			     uint32_t const *restore_id)
{
	struct kfd_node *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev)) {
		/* On pre-SOC15 chips we need to use the queue ID to
		 * preserve the user mode ABI.
		 */
		if (restore_id && *restore_id != q->properties.queue_id)
			return -EINVAL;

		q->doorbell_id = q->properties.queue_id;
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
			q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		/* For SDMA queues on SOC15 with 8-byte doorbell, use static
		 * doorbell assignments based on the engine and queue id.
		 * The doorbell index distance between RLC (2*i) and (2*i+1)
		 * for a SDMA engine is 512.
		 */
		uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx;

		/*
		 * q->properties.sdma_engine_id corresponds to the virtual
		 * sdma engine number. However, for doorbell allocation,
		 * we need the physical sdma engine id in order to get the
		 * correct doorbell offset.
		 */
		uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id *
					       get_num_all_sdma_engines(qpd->dqm) +
					       q->properties.sdma_engine_id]
						+ (q->properties.sdma_queue_id & 1)
						* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
						+ (q->properties.sdma_queue_id >> 1);

		if (restore_id && *restore_id != valid_id)
			return -EINVAL;
		q->doorbell_id = valid_id;
	} else {
		/* For CP queues on SOC15 */
		if (restore_id) {
			/* make sure that ID is free */
			if (__test_and_set_bit(*restore_id, qpd->doorbell_bitmap))
				return -EINVAL;

			q->doorbell_id = *restore_id;
		} else {
			/* or reserve a free doorbell ID */
			unsigned int found;

			found = find_first_zero_bit(qpd->doorbell_bitmap,
						    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
			if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
				pr_debug("No doorbells available");
				return -EBUSY;
			}
			set_bit(found, qpd->doorbell_bitmap);
			q->doorbell_id = found;
		}
	}

	q->properties.doorbell_off = amdgpu_doorbell_index_on_bar(dev->adev,
								  qpd->proc_doorbells,
								  q->doorbell_id,
								  dev->kfd->device_info.doorbell_size);
	return 0;
}

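/* Worked example for the SOC15 SDMA doorbell math above (hypothetical
 * idx_offset[] entry of 0x10 for the queue's engine): for
 * sdma_queue_id == 3, valid_id = 0x10 + (3 & 1) *
 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET + (3 >> 1), i.e. odd queue IDs land
 * in the mirrored doorbell range 512 slots away (per the comment above)
 * from the even ones, matching the RLC (2*i) / (2*i+1) layout.
 */
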
static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{
	unsigned int old;
	struct kfd_node *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev) ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return;

	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
	WARN_ON(!old);
}

static void program_trap_handler_settings(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd)
{
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	int xcc_id;

	if (dqm->dev->kfd2kgd->program_trap_handler_settings)
		for_each_inst(xcc_id, xcc_mask)
			dqm->dev->kfd2kgd->program_trap_handler_settings(
				dqm->dev->adev, qpd->vmid, qpd->tba_addr,
				qpd->tma_addr, xcc_id);
}

static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	struct device *dev = dqm->dev->adev->dev;
	int allocated_vmid = -1, i;

	for (i = dqm->dev->vm_info.first_vmid_kfd;
			i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
		if (!dqm->vmid_pasid[i]) {
			allocated_vmid = i;
			break;
		}
	}

	if (allocated_vmid < 0) {
		dev_err(dev, "no more vmid to allocate\n");
		return -ENOSPC;
	}

	pr_debug("vmid allocated: %d\n", allocated_vmid);

	dqm->vmid_pasid[allocated_vmid] = q->process->pasid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);

	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	program_sh_mem_settings(dqm, qpd);

	if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled)
		program_trap_handler_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);

	if (dqm->dev->kfd2kgd->set_scratch_backing_va)
		dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
				qpd->sh_hidden_private_base, qpd->vmid);

	return 0;
}

static int flush_texture_cache_nocpsch(struct kfd_node *kdev,
				struct qcm_process_device *qpd)
{
	const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
	int ret;

	if (!qpd->ib_kaddr)
		return -ENOMEM;

	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
	if (ret)
		return ret;

	return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid,
				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
				pmf->release_mem_size / sizeof(uint32_t));
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	struct device *dev = dqm->dev->adev->dev;

	/* On GFX v7, CP doesn't flush TC at dequeue */
	if (q->device->adev->asic_type == CHIP_HAWAII)
		if (flush_texture_cache_nocpsch(q->device, qpd))
			dev_err(dev, "Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
	dqm->vmid_pasid[qpd->vmid] = 0;

	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				const struct kfd_criu_queue_priv_data *qd,
				const void *restore_mqd, const void *restore_ctl_stack)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		retval = allocate_hqd(dqm, q);
		if (retval)
			goto deallocate_vmid;
		pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
		if (retval)
			goto deallocate_vmid;
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	}

	retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
	if (retval)
		goto out_deallocate_hqd;

	/* Temporarily release dqm lock to avoid a circular lock dependency */
	dqm_unlock(dqm);
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	dqm_lock(dqm);

	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	if (qd)
		mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
				     &q->properties, restore_mqd, restore_ctl_stack,
				     qd->ctl_stack_size);
	else
		mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
					&q->gart_mqd_addr, &q->properties);

	if (q->properties.is_active) {
		if (!dqm->sched_running) {
			WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
			goto add_queue_to_list;
		}

		if (WARN(q->process->mm != current->mm,
					"should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
					q->queue, &q->properties, current->mm);
		if (retval)
			goto out_free_mqd;
	}

add_queue_to_list:
	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		increment_queue_count(dqm, qpd, q);

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	goto out_unlock;

out_free_mqd:
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_hqd:
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
			q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
deallocate_vmid:
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
out_unlock:
	dqm_unlock(dqm);
	return retval;
}

static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

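/* Example of the round-robin above: with 4 pipes per MEC and
 * next_pipe_to_allocate == 2, the scan order is pipes 2, 3, 0, 1. The
 * first enabled pipe with a free HQD bit wins, and next_pipe_to_allocate
 * then advances past it, so consecutive allocations spread horizontally
 * across pipes instead of filling one pipe first.
 */
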
static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}

#define SQ_IND_CMD_CMD_KILL		0x00000003
#define SQ_IND_CMD_MODE_BROADCAST	0x00000001

static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p)
{
	int status = 0;
	unsigned int vmid;
	uint16_t queried_pasid;
	union SQ_CMD_BITS reg_sq_cmd;
	union GRBM_GFX_INDEX_BITS reg_gfx_index;
	struct kfd_process_device *pdd;
	int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
	int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
	uint32_t xcc_mask = dev->xcc_mask;
	int xcc_id;

	reg_sq_cmd.u32All = 0;
	reg_gfx_index.u32All = 0;

	pr_debug("Killing all process wavefronts\n");

	if (!dev->kfd2kgd->get_atc_vmid_pasid_mapping_info) {
		dev_err(dev->adev->dev, "no vmid pasid mapping supported\n");
		return -EOPNOTSUPP;
	}

	/* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
	 * ATC_VMID15_PASID_MAPPING
	 * to check which VMID the current process is mapped to.
	 */
	for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
		status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
				(dev->adev, vmid, &queried_pasid);

		if (status && queried_pasid == p->pasid) {
			pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
					vmid, p->pasid);
			break;
		}
	}

	if (vmid > last_vmid_to_scan) {
		dev_err(dev->adev->dev, "Didn't find vmid for pasid 0x%x\n", p->pasid);
		return -EFAULT;
	}

	/* taking the VMID for that process on the safe way using PDD */
	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd)
		return -EFAULT;

	reg_gfx_index.bits.sh_broadcast_writes = 1;
	reg_gfx_index.bits.se_broadcast_writes = 1;
	reg_gfx_index.bits.instance_broadcast_writes = 1;
	reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
	reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
	reg_sq_cmd.bits.vm_id = vmid;

	for_each_inst(xcc_id, xcc_mask)
		dev->kfd2kgd->wave_control_execute(
			dev->adev, reg_gfx_index.u32All,
			reg_sq_cmd.u32All, xcc_id);

	return 0;
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		deallocate_sdma_queue(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
	else
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);

	dqm->total_queue_count--;

	deallocate_doorbell(qpd, q);

	if (!dqm->sched_running) {
		WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
		return 0;
	}

	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active)
		decrement_queue_count(dqm, qpd, q);

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	uint64_t sdma_val = 0;
	struct device *dev = dqm->dev->adev->dev;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
	struct mqd_manager *mqd_mgr =
		dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];

	/* Get the SDMA queue stats */
	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
							&sdma_val);
		if (retval)
			dev_err(dev, "Failed to read SDMA queue counter for queue: %d\n",
				q->properties.queue_id);
	}

	dqm_lock(dqm);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	if (!retval)
		pdd->sdma_past_activity_counter += sdma_val;
	dqm_unlock(dqm);

	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	return retval;
}

static int update_queue(struct device_queue_manager *dqm, struct queue *q,
			struct mqd_update_info *minfo)
{
	int retval = 0;
	struct device *dev = dqm->dev->adev->dev;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	dqm_lock(dqm);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		if (!dqm->dev->kfd->shared_resources.enable_mes)
			retval = unmap_queues_cpsch(dqm,
					KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
		else if (prev_active)
			retval = remove_queue_mes(dqm, q, &pdd->qpd);

		/* queue is reset so inaccessible */
		if (pdd->has_reset_queue) {
			retval = -EACCES;
			goto out_unlock;
		}

		if (retval) {
			dev_err(dev, "unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {

		if (!dqm->sched_running) {
			WARN_ONCE(1, "Update non-HWS queue while stopped\n");
			goto out_unlock;
		}

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				(dqm->dev->kfd->cwsr_enabled ?
				 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
				 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			dev_err(dev, "destroy mqd failed\n");
			goto out_unlock;
		}
	}

	mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->active_queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active) {
		increment_queue_count(dqm, &pdd->qpd, q);
	} else if (!q->properties.is_active && prev_active) {
		decrement_queue_count(dqm, &pdd->qpd, q);
	} else if (q->gws && !q->properties.is_gws) {
		if (q->properties.is_active) {
			dqm->gws_queue_count++;
			pdd->qpd.mapped_gws_queue = true;
		}
		q->properties.is_gws = true;
	} else if (!q->gws && q->properties.is_gws) {
		if (q->properties.is_active) {
			dqm->gws_queue_count--;
			pdd->qpd.mapped_gws_queue = false;
		}
		q->properties.is_gws = false;
	}

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		if (!dqm->dev->kfd->shared_resources.enable_mes)
			retval = map_queues_cpsch(dqm);
		else if (q->properties.is_active)
			retval = add_queue_mes(dqm, q, &pdd->qpd);
	} else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		if (WARN(q->process->mm != current->mm,
			 "should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
					q->pipe, q->queue,
					&q->properties, current->mm);
	}

out_unlock:
	dqm_unlock(dqm);
	return retval;
}

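/* Note on the sequence in update_queue() above: the MQD is only safe to
 * rewrite while the queue is off the hardware, so the function follows an
 * unmap -> update_mqd -> remap pattern. HWS configurations unmap via the
 * packet manager (or remove the MES queue), non-HWS configurations
 * destroy the HQD with a CWSR save or drain, and in both cases the queue
 * is mapped or loaded again only after update_mqd() completes.
 */
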
/* suspend_single_queue does not lock the dqm like
 * evict_process_queues_cpsch or evict_process_queues_nocpsch do. You
 * should lock the dqm before calling, and unlock after calling.
 *
 * The reason we don't lock the dqm is because this function may be
 * called on multiple queues in a loop, so rather than locking/unlocking
 * multiple times, we will just keep the dqm locked for all of the calls.
 */
static int suspend_single_queue(struct device_queue_manager *dqm,
				      struct kfd_process_device *pdd,
				      struct queue *q)
{
	bool is_new;

	if (q->properties.is_suspended)
		return 0;

	pr_debug("Suspending PASID %u queue [%i]\n",
			pdd->process->pasid,
			q->properties.queue_id);

	is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW);

	if (is_new || q->properties.is_being_destroyed) {
		pr_debug("Suspend: skip %s queue id %i\n",
				is_new ? "new" : "destroyed",
				q->properties.queue_id);
		return -EBUSY;
	}

	q->properties.is_suspended = true;
	if (q->properties.is_active) {
		if (dqm->dev->kfd->shared_resources.enable_mes) {
			int r = remove_queue_mes(dqm, q, &pdd->qpd);

			if (r)
				return r;
		}

		decrement_queue_count(dqm, &pdd->qpd, q);
		q->properties.is_active = false;
	}

	return 0;
}

/* resume_single_queue does not lock the dqm like the functions
 * restore_process_queues_cpsch or restore_process_queues_nocpsch do. You
 * should lock the dqm before calling, and unlock after calling.
 *
 * The reason we don't lock the dqm is because this function may be
 * called on multiple queues in a loop, so rather than locking/unlocking
 * multiple times, we will just keep the dqm locked for all of the calls.
 */
static int resume_single_queue(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd,
				      struct queue *q)
{
	struct kfd_process_device *pdd;

	if (!q->properties.is_suspended)
		return 0;

	pdd = qpd_to_pdd(qpd);

	pr_debug("Restoring from suspend PASID %u queue [%i]\n",
			    pdd->process->pasid,
			    q->properties.queue_id);

	q->properties.is_suspended = false;

	if (QUEUE_IS_ACTIVE(q->properties)) {
		if (dqm->dev->kfd->shared_resources.enable_mes) {
			int r = add_queue_mes(dqm, q, &pdd->qpd);

			if (r)
				return r;
		}

		q->properties.is_active = true;
		increment_queue_count(dqm, qpd, q);
	}

	return 0;
}

static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	int retval, ret = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
			    pdd->process->pasid);

	pdd->last_evict_timestamp = get_jiffies_64();
	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = false;
		decrement_queue_count(dqm, qpd, q);

		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
			continue;

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				(dqm->dev->kfd->cwsr_enabled ?
				 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
				 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
	}

out:
	dqm_unlock(dqm);
	return ret;
}

static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd)
{
	struct queue *q;
	struct device *dev = dqm->dev->adev->dev;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);

	/* The debugger creates processes that temporarily have not acquired
	 * all VMs for all devices and has no VMs itself.
	 * Skip queue eviction on process eviction.
	 */
	if (!pdd->drm_priv)
		goto out;

	pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		q->properties.is_active = false;
		decrement_queue_count(dqm, qpd, q);

		if (dqm->dev->kfd->shared_resources.enable_mes) {
			retval = remove_queue_mes(dqm, q, qpd);
			if (retval) {
				dev_err(dev, "Failed to evict queue %d\n",
					q->properties.queue_id);
				goto out;
			}
		}
	}
	pdd->last_evict_timestamp = get_jiffies_64();
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		retval = execute_queues_cpsch(dqm,
					      qpd->is_debug ?
					      KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
					      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
					      USE_DEFAULT_GRACE_PERIOD);

out:
	dqm_unlock(dqm);
	return retval;
}

static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
	struct mm_struct *mm = NULL;
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	uint64_t eviction_duration;
	int retval, ret = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	if (!list_empty(&qpd->queues_list)) {
		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
				dqm->dev->adev,
				qpd->vmid,
				qpd->page_table_base);
		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
	}

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm) {
		ret = -EFAULT;
		goto out;
	}

	/* Remove the eviction flags. Activate queues that are not
	 * inactive for other reasons.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = true;
		increment_queue_count(dqm, qpd, q);

		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
			continue;

		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
				       q->queue, &q->properties, mm);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
	}
	qpd->evicted = 0;
	eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
	atomic64_add(eviction_duration, &pdd->evict_duration_counter);
out:
	if (mm)
		mmput(mm);
	dqm_unlock(dqm);
	return ret;
}

static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct device *dev = dqm->dev->adev->dev;
	struct kfd_process_device *pdd;
	uint64_t eviction_duration;
	int retval = 0;

	dqm_lock(dqm);
	pdd = qpd_to_pdd(qpd);

	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	/* The debugger creates processes that temporarily have not acquired
	 * all VMs for all devices and has no VMs itself.
	 * Skip queue restore on process restore.
	 */
	if (!pdd->drm_priv)
		goto vm_not_acquired;

	pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
	pr_debug("Updated PD address to 0x%llx\n", qpd->page_table_base);

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		q->properties.is_active = true;
		increment_queue_count(dqm, &pdd->qpd, q);

		if (dqm->dev->kfd->shared_resources.enable_mes) {
			retval = add_queue_mes(dqm, q, qpd);
			if (retval) {
				dev_err(dev, "Failed to restore queue %d\n",
					q->properties.queue_id);
				goto out;
			}
		}
	}
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		retval = execute_queues_cpsch(dqm,
					      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
	eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
	atomic64_add(eviction_duration, &pdd->evict_duration_counter);
vm_not_acquired:
	qpd->evicted = 0;
out:
	dqm_unlock(dqm);
	return retval;
}

static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);

	dqm_lock(dqm);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	kfd_inc_compute_active(dqm->dev);

	return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	dqm_lock(dqm);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (!retval)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
			unsigned int vmid)
{
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	int xcc_id, ret;

	for_each_inst(xcc_id, xcc_mask) {
		ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
			dqm->dev->adev, pasid, vmid, xcc_id);
		if (ret)
			break;
	}

	return ret;
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	unsigned int i, xcc_id;

	for_each_inst(xcc_id, xcc_mask) {
		for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) {
			if (is_pipe_enabled(dqm, 0, i)) {
				dqm->dev->kfd2kgd->init_interrupts(
					dqm->dev->adev, i, xcc_id);
			}
		}
	}
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->active_cp_queue_count = 0;
	dqm->gws_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->kfd->shared_resources.cp_queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));

	init_sdma_bitmaps(dqm);

	return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqd_mgrs[i]);
	mutex_destroy(&dqm->lock_hidden);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	int r = 0;

	pr_info("SW scheduler is used");
	init_interrupts(dqm);

	if (dqm->dev->adev->asic_type == CHIP_HAWAII)
		r = pm_init(&dqm->packet_mgr, dqm);
	dqm->sched_running = true;

	return r;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	dqm_lock(dqm);
	if (!dqm->sched_running) {
		dqm_unlock(dqm);
		return 0;
	}

	if (dqm->dev->adev->asic_type == CHIP_HAWAII)
		pm_uninit(&dqm->packet_mgr);
	dqm->sched_running = false;
	dqm_unlock(dqm);

	return 0;
}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q, const uint32_t *restore_sdma_id)
{
	struct device *dev = dqm->dev->adev->dev;
	int bit;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
			dev_err(dev, "No more SDMA queue to allocate\n");
			return -ENOMEM;
		}

		if (restore_sdma_id) {
			/* Re-use existing sdma_id */
			if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) {
				dev_err(dev, "SDMA queue already in use\n");
				return -EBUSY;
			}
			clear_bit(*restore_sdma_id, dqm->sdma_bitmap);
			q->sdma_id = *restore_sdma_id;
		} else {
			/* Find first available sdma_id */
			bit = find_first_bit(dqm->sdma_bitmap,
					     get_num_sdma_queues(dqm));
			clear_bit(bit, dqm->sdma_bitmap);
			q->sdma_id = bit;
		}

		q->properties.sdma_engine_id =
			q->sdma_id % kfd_get_num_sdma_engines(dqm->dev);
		q->properties.sdma_queue_id = q->sdma_id /
				kfd_get_num_sdma_engines(dqm->dev);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
			dev_err(dev, "No more XGMI SDMA queue to allocate\n");
			return -ENOMEM;
		}
		if (restore_sdma_id) {
			/* Re-use existing sdma_id */
			if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) {
				dev_err(dev, "SDMA queue already in use\n");
				return -EBUSY;
			}
			clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap);
			q->sdma_id = *restore_sdma_id;
		} else {
			bit = find_first_bit(dqm->xgmi_sdma_bitmap,
					     get_num_xgmi_sdma_queues(dqm));
			clear_bit(bit, dqm->xgmi_sdma_bitmap);
			q->sdma_id = bit;
		}
		/* sdma_engine_id is sdma id including
		 * both PCIe-optimized SDMAs and XGMI-
		 * optimized SDMAs. The calculation below
		 * assumes the first N engines are always
		 * PCIe-optimized ones
		 */
		q->properties.sdma_engine_id =
			kfd_get_num_sdma_engines(dqm->dev) +
			q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
		q->properties.sdma_queue_id = q->sdma_id /
			kfd_get_num_xgmi_sdma_engines(dqm->dev);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
		int i, num_queues, num_engines, eng_offset = 0, start_engine;
		bool free_bit_found = false, is_xgmi = false;

		if (q->properties.sdma_engine_id < kfd_get_num_sdma_engines(dqm->dev)) {
			num_queues = get_num_sdma_queues(dqm);
			num_engines = kfd_get_num_sdma_engines(dqm->dev);
			q->properties.type = KFD_QUEUE_TYPE_SDMA;
		} else {
			num_queues = get_num_xgmi_sdma_queues(dqm);
			num_engines = kfd_get_num_xgmi_sdma_engines(dqm->dev);
			eng_offset = kfd_get_num_sdma_engines(dqm->dev);
			q->properties.type = KFD_QUEUE_TYPE_SDMA_XGMI;
			is_xgmi = true;
		}

		/* Scan available bit based on target engine ID. */
		start_engine = q->properties.sdma_engine_id - eng_offset;
		for (i = start_engine; i < num_queues; i += num_engines) {
			if (!test_bit(i, is_xgmi ? dqm->xgmi_sdma_bitmap : dqm->sdma_bitmap))
				continue;

			clear_bit(i, is_xgmi ? dqm->xgmi_sdma_bitmap : dqm->sdma_bitmap);
			q->sdma_id = i;
			q->properties.sdma_queue_id = q->sdma_id / num_engines;
			free_bit_found = true;
			break;
		}

		if (!free_bit_found) {
			dev_err(dev, "No more SDMA queue to allocate for target ID %i\n",
				q->properties.sdma_engine_id);
			return -ENOMEM;
		}
	}

	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);

	return 0;
}

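/* Worked example of the sdma_id mapping above (hypothetical device with
 * 2 PCIe-optimized SDMA engines): sdma_id 5 maps to
 * sdma_engine_id = 5 % 2 = 1 and sdma_queue_id = 5 / 2 = 2, i.e. queue
 * IDs stripe round-robin across engines before advancing to the next
 * queue slot on each engine; XGMI queues use the same scheme offset past
 * the PCIe-optimized engines.
 */
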
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (q->sdma_id >= get_num_sdma_queues(dqm))
			return;
		set_bit(q->sdma_id, dqm->sdma_bitmap);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
			return;
		set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap);
	}
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;
	struct device *dev = dqm->dev->adev->dev;

	res.vmid_mask = dqm->dev->compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < AMDGPU_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
			/ dqm->dev->kfd->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			dev_err(dev, "Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= 1ull
			<< amdgpu_queue_mask_bit_to_set_resource_bit(
				dqm->dev->adev, i);
	}
	res.gws_mask = ~0ull;
	res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
		"vmid mask: 0x%8X\n"
		"queue mask: 0x%8llX\n",
		res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packet_mgr, &res);
}

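/* Note: res.queue_mask is a 64-bit mask with one bit per HQD slot on
 * MEC 0, which is why the loop above warns and stops once a queue index
 * reaches sizeof(res.queue_mask) * 8 == 64;
 * amdgpu_queue_mask_bit_to_set_resource_bit() translates the driver's
 * linear queue index into the bit layout the SET_RESOURCES packet
 * expects.
 */
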
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->active_queue_count = dqm->processes_count = 0;
	dqm->active_cp_queue_count = 0;
	dqm->gws_queue_count = 0;
	dqm->active_runlist = false;
	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
	dqm->trap_debug_vmid = 0;

	init_sdma_bitmaps(dqm);

	if (dqm->dev->kfd2kgd->get_iq_wait_times)
		dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
					&dqm->wait_times,
					ffs(dqm->dev->xcc_mask) - 1);
	return 0;
}

/* halt_cpsch:
 * Unmap queues so the scheduler doesn't continue remaining jobs in the queue.
 * Then set dqm->sched_halt so queues don't map to runlist until unhalt_cpsch
 * is called.
 */
static int halt_cpsch(struct device_queue_manager *dqm)
{
	int ret = 0;

	dqm_lock(dqm);
	if (!dqm->sched_running) {
		dqm_unlock(dqm);
		return 0;
	}

	WARN_ONCE(dqm->sched_halt, "Scheduling is already on halt\n");

	if (!dqm->is_hws_hang) {
		if (!dqm->dev->kfd->shared_resources.enable_mes)
			ret = unmap_queues_cpsch(dqm,
						 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
						 USE_DEFAULT_GRACE_PERIOD, false);
		else
			ret = remove_all_kfd_queues_mes(dqm);
	}
	dqm->sched_halt = true;
	dqm_unlock(dqm);

	return ret;
}

/* unhalt_cpsch:
 * Unset dqm->sched_halt and map queues back to runlist
 */
static int unhalt_cpsch(struct device_queue_manager *dqm)
{
	int ret = 0;

	dqm_lock(dqm);
	if (!dqm->sched_running || !dqm->sched_halt) {
		WARN_ONCE(!dqm->sched_halt, "Scheduling is not on halt.\n");
		dqm_unlock(dqm);
		return 0;
	}
	dqm->sched_halt = false;
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		ret = execute_queues_cpsch(dqm,
					   KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
					   0, USE_DEFAULT_GRACE_PERIOD);
	else
		ret = add_all_kfd_queues_mes(dqm);
	dqm_unlock(dqm);

	return ret;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	struct device *dev = dqm->dev->adev->dev;
	int retval, num_hw_queue_slots;

	retval = 0;

	dqm_lock(dqm);

	if (!dqm->dev->kfd->shared_resources.enable_mes) {
		retval = pm_init(&dqm->packet_mgr, dqm);
		if (retval)
			goto fail_packet_manager_init;

		retval = set_sched_resources(dqm);
		if (retval)
			goto fail_set_sched_resources;
	}
	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
				&dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	/* clear hang status when driver try to start the hw scheduler */
	dqm->sched_running = true;

	if (!dqm->dev->kfd->shared_resources.enable_mes)
		execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);

	/* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */
	if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu &&
	    (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) {
		uint32_t reg_offset = 0;
		uint32_t grace_period = 1;

		retval = pm_update_grace_period(&dqm->packet_mgr,
						grace_period);
		if (retval)
			dev_err(dev, "Setting grace timeout failed\n");
		else if (dqm->dev->kfd2kgd->build_grace_period_packet_info)
			/* Update dqm->wait_times maintained in software */
			dqm->dev->kfd2kgd->build_grace_period_packet_info(
					dqm->dev->adev, dqm->wait_times,
					grace_period, &reg_offset,
					&dqm->wait_times);
	}

	/* setup per-queue reset detection buffer */
	num_hw_queue_slots = dqm->dev->kfd->shared_resources.num_queue_per_pipe *
			     dqm->dev->kfd->shared_resources.num_pipe_per_mec *
			     NUM_XCC(dqm->dev->xcc_mask);

	dqm->detect_hang_info_size = num_hw_queue_slots * sizeof(struct dqm_detect_hang_info);
	dqm->detect_hang_info = kzalloc(dqm->detect_hang_info_size, GFP_KERNEL);

	if (!dqm->detect_hang_info) {
		retval = -ENOMEM;
		goto fail_detect_hang_buffer;
	}

	dqm_unlock(dqm);

	return 0;

fail_detect_hang_buffer:
	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
fail_allocate_vidmem:
fail_set_sched_resources:
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		pm_uninit(&dqm->packet_mgr);
fail_packet_manager_init:
	dqm_unlock(dqm);
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	dqm_lock(dqm);
	if (!dqm->sched_running) {
		dqm_unlock(dqm);
		return 0;
	}

	if (!dqm->dev->kfd->shared_resources.enable_mes)
		unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
	else
		remove_all_kfd_queues_mes(dqm);

	dqm->sched_running = false;

	if (!dqm->dev->kfd->shared_resources.enable_mes)
		pm_release_ib(&dqm->packet_mgr);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		pm_uninit(&dqm->packet_mgr);
	kfree(dqm->detect_hang_info);
	dqm->detect_hang_info = NULL;
	dqm_unlock(dqm);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		dqm_unlock(dqm);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	increment_queue_count(dqm, qpd, kq->queue);
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
			USE_DEFAULT_GRACE_PERIOD);
	dqm_unlock(dqm);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	list_del(&kq->list);
	decrement_queue_count(dqm, qpd, kq->queue);
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
			USE_DEFAULT_GRACE_PERIOD);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	dqm_unlock(dqm);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd,
			const struct kfd_criu_queue_priv_data *qd,
			const void *restore_mqd, const void *restore_ctl_stack)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
		dqm_lock(dqm);
		retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
		dqm_unlock(dqm);
		if (retval)
			goto out;
	}

	retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
	if (retval)
		goto out_deallocate_sdma_queue;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	dqm_lock(dqm);
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;
	q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
				  kfd_dbg_has_cwsr_workaround(q->device);

	if (qd)
		mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
				     &q->properties, restore_mqd, restore_ctl_stack,
				     qd->ctl_stack_size);
	else
		mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
					&q->gart_mqd_addr, &q->properties);

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;

	if (q->properties.is_active) {
		increment_queue_count(dqm, qpd, q);

		if (!dqm->dev->kfd->shared_resources.enable_mes)
			retval = execute_queues_cpsch(dqm,
					KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
		else
			retval = add_queue_mes(dqm, q, qpd);
		if (retval)
			goto cleanup_queue;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);
	return retval;

cleanup_queue:
	qpd->queue_count--;
	list_del(&q->list);
	if (q->properties.is_active)
		decrement_queue_count(dqm, qpd, q);
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
	dqm_unlock(dqm);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		deallocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
	}
out:
	return retval;
}

int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
			      uint64_t fence_value,
			      unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
	struct device *dev = dqm->dev->adev->dev;
	uint64_t *fence_addr = dqm->fence_addr;

	while (*fence_addr != fence_value) {
		/* Fatal err detected, this response won't come */
		if (amdgpu_amdkfd_is_fed(dqm->dev->adev))
			return -EIO;

		if (time_after(jiffies, end_jiffies)) {
			dev_err(dev, "qcm fence wait loop timeout expired\n");
			/* In HWS case, this is used to halt the driver thread
			 * in order not to mess up CP states before doing
			 * scandumps for FW debugging.
			 */
			while (halt_if_hws_hang)
				schedule();

			return -ETIME;
		}
		schedule();
	}

	return 0;
}

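/* Note on the fence handshake polled above: unmap_queues_cpsch() writes
 * KFD_FENCE_INIT into the GART fence slot, sends a QUERY_STATUS packet
 * asking the scheduler to write back KFD_FENCE_COMPLETED, then waits here
 * until the value flips or queue_preemption_timeout_ms expires.
 */
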
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	struct device *dev = dqm->dev->adev->dev;
	int retval;

	if (!dqm->sched_running || dqm->sched_halt)
		return 0;
	if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
		return 0;
	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues);
	pr_debug("%s sent runlist\n", __func__);
	if (retval) {
		dev_err(dev, "failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}

static void set_queue_as_reset(struct device_queue_manager *dqm, struct queue *q,
			       struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);

	dev_err(dqm->dev->adev->dev, "queue id 0x%0x at pasid 0x%0x is reset\n",
		q->properties.queue_id, q->process->pasid);

	pdd->has_reset_queue = true;
	if (q->properties.is_active) {
		q->properties.is_active = false;
		decrement_queue_count(dqm, qpd, q);
	}
}

static int detect_queue_hang(struct device_queue_manager *dqm)
{
	int i;

	/* detect should be used only in dqm locked queue reset */
	if (WARN_ON(dqm->detect_hang_count > 0))
		return 0;

	memset(dqm->detect_hang_info, 0, dqm->detect_hang_info_size);

	for (i = 0; i < AMDGPU_MAX_QUEUES; ++i) {
		uint32_t mec, pipe, queue;
		int xcc_id;

		mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
			/ dqm->dev->kfd->shared_resources.num_pipe_per_mec;

		if (mec || !test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(dqm->dev->adev, i, &mec, &pipe, &queue);

		for_each_inst(xcc_id, dqm->dev->xcc_mask) {
			uint64_t queue_addr = dqm->dev->kfd2kgd->hqd_get_pq_addr(
						dqm->dev->adev, pipe, queue, xcc_id);
			struct dqm_detect_hang_info hang_info;

			if (!queue_addr)
				continue;

			hang_info.pipe_id = pipe;
			hang_info.queue_id = queue;
			hang_info.xcc_id = xcc_id;
			hang_info.queue_address = queue_addr;

			dqm->detect_hang_info[dqm->detect_hang_count] = hang_info;
			dqm->detect_hang_count++;
		}
	}

	return dqm->detect_hang_count;
}

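/* Note: detect_queue_hang() snapshots every MEC 0 HQD slot whose ring
 * buffer address is still programmed; a non-zero hqd_get_pq_addr() after
 * an unmap attempt means the slot failed to preempt. Each such slot is
 * recorded so reset_queues_on_hws_hang() can match it back to a software
 * queue by ring buffer address and reset just that queue.
 */
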
static struct queue *find_queue_by_address(struct device_queue_manager *dqm, uint64_t queue_address)
{
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;

	list_for_each_entry(cur, &dqm->queues, list) {
		qpd = cur->qpd;
		list_for_each_entry(q, &qpd->queues_list, list) {
			if (queue_address == q->properties.queue_address)
				return q;
		}
	}

	return NULL;
}

/* only for compute queue */
static int reset_queues_on_hws_hang(struct device_queue_manager *dqm)
{
	int r = 0, reset_count = 0, i;

	if (!dqm->detect_hang_info || dqm->is_hws_hang)
		return -EIO;

	/* assume dqm locked. */
	if (!detect_queue_hang(dqm))
		return -ENOTRECOVERABLE;

	for (i = 0; i < dqm->detect_hang_count; i++) {
		struct dqm_detect_hang_info hang_info = dqm->detect_hang_info[i];
		struct queue *q = find_queue_by_address(dqm, hang_info.queue_address);
		struct kfd_process_device *pdd;
		uint64_t queue_addr = 0;

		if (!q) {
			r = -ENOTRECOVERABLE;
			goto reset_fail;
		}

		pdd = kfd_get_process_device_data(dqm->dev, q->process);
		if (!pdd) {
			r = -ENOTRECOVERABLE;
			goto reset_fail;
		}

		queue_addr = dqm->dev->kfd2kgd->hqd_reset(dqm->dev->adev,
				hang_info.pipe_id, hang_info.queue_id, hang_info.xcc_id,
				KFD_UNMAP_LATENCY_MS);

		/* either reset failed or we reset an unexpected queue. */
		if (queue_addr != q->properties.queue_address) {
			r = -ENOTRECOVERABLE;
			goto reset_fail;
		}

		set_queue_as_reset(dqm, q, &pdd->qpd);
		reset_count++;
	}

	if (reset_count == dqm->detect_hang_count)
		kfd_signal_reset_event(dqm->dev);
	else
		r = -ENOTRECOVERABLE;

reset_fail:
	dqm->detect_hang_count = 0;

	return r;
}

/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
			      enum kfd_unmap_queues_filter filter,
			      uint32_t filter_param,
			      uint32_t grace_period,
			      bool reset)
{
	struct device *dev = dqm->dev->adev->dev;
	struct mqd_manager *mqd_mgr;
	int retval = 0;

	if (!dqm->sched_running)
		return 0;
	if (!dqm->active_runlist)
		return retval;
	if (!down_read_trylock(&dqm->dev->adev->reset_domain->sem))
		return -EIO;

	if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
		retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
		if (retval)
			goto out;
	}

	retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
	if (retval)
		goto out;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
			     KFD_FENCE_COMPLETED);
	/* this wait times out if the preemption never completes */
	retval = amdkfd_fence_wait_timeout(dqm, KFD_FENCE_COMPLETED,
					   queue_preemption_timeout_ms);
	if (retval) {
		dev_err(dev, "The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		kfd_hws_hang(dqm);
		goto out;
	}

	/* In the current MEC firmware implementation, if a compute queue
	 * doesn't respond to the preemption request in time, HIQ will
	 * abandon the unmap request without returning any timeout error
	 * to the driver. Instead, MEC firmware will log the doorbell of the
	 * unresponsive compute queue to the HIQ.MQD.queue_doorbell_id fields.
	 * To make sure the queue unmap was successful, the driver needs to
	 * check those fields.
	 */
	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
	if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
		while (halt_if_hws_hang)
			schedule();
		if (reset_queues_on_hws_hang(dqm)) {
			dqm->is_hws_hang = true;
			kfd_hws_hang(dqm);
			retval = -ETIME;
			goto out;
		}
	}

	/* We need to reset the grace period value for this device */
	if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
		if (pm_update_grace_period(&dqm->packet_mgr,
					   USE_DEFAULT_GRACE_PERIOD))
			dev_err(dev, "Failed to reset grace period\n");
	}

	pm_release_ib(&dqm->packet_mgr);
	dqm->active_runlist = false;

out:
	up_read(&dqm->dev->adev->reset_domain->sem);
	return retval;
}

/* only for compute queue */
static int reset_queues_cpsch(struct device_queue_manager *dqm, uint16_t pasid)
{
	int retval;

	dqm_lock(dqm);

	retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
			pasid, USE_DEFAULT_GRACE_PERIOD, true);

	dqm_unlock(dqm);
	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param,
				uint32_t grace_period)
{
	int retval;

	if (!down_read_trylock(&dqm->dev->adev->reset_domain->sem))
		return -EIO;

	retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false);
	if (!retval)
		retval = map_queues_cpsch(dqm);
	up_read(&dqm->dev->adev->reset_domain->sem);
	return retval;
}

static int wait_on_destroy_queue(struct device_queue_manager *dqm,
				 struct queue *q)
{
	struct kfd_process_device *pdd = kfd_get_process_device_data(q->device,
								      q->process);
	int ret = 0;

	if (pdd->qpd.is_debug)
		return ret;

	q->properties.is_being_destroyed = true;

	if (pdd->process->debug_trap_enabled && q->properties.is_suspended) {
		dqm_unlock(dqm);
		mutex_unlock(&q->process->mutex);
		ret = wait_event_interruptible(dqm->destroy_wait,
					       !q->properties.is_suspended);

		mutex_lock(&q->process->mutex);
		dqm_lock(dqm);
	}

	return ret;
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
			       struct qcm_process_device *qpd,
			       struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;
	uint64_t sdma_val = 0;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
	struct device *dev = dqm->dev->adev->dev;

	/* Get the SDMA queue stats */
	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
						 &sdma_val);
		if (retval)
			dev_err(dev, "Failed to read SDMA queue counter for queue: %d\n",
				q->properties.queue_id);
	}

	/* remove queue from list to prevent rescheduling after preemption */
	dqm_lock(dqm);

	retval = wait_on_destroy_queue(dqm, q);
	if (retval) {
		dqm_unlock(dqm);
		return retval;
	}

	if (qpd->is_debug) {
		/*
		 * error: we currently don't allow destroying a queue
		 * of a process that is being debugged
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	deallocate_doorbell(qpd, q);

	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		deallocate_sdma_queue(dqm, q);
		pdd->sdma_past_activity_counter += sdma_val;
	}

	list_del(&q->list);
	qpd->queue_count--;
	if (q->properties.is_active) {
		decrement_queue_count(dqm, qpd, q);
		q->properties.is_active = false;
		if (!dqm->dev->kfd->shared_resources.enable_mes) {
			retval = execute_queues_cpsch(dqm,
						      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
						      USE_DEFAULT_GRACE_PERIOD);
			if (retval == -ETIME)
				qpd->reset_wavefronts = true;
		} else {
			retval = remove_queue_mes(dqm, q, qpd);
		}
	}

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
		 dqm->total_queue_count);

	dqm_unlock(dqm);

	/*
	 * Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid
	 * circular locking.
	 */
	kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE),
			 qpd->pqm->process, q->device,
			 -1, false, NULL, 0);

	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	return retval;

failed_try_destroy_debugged_queue:

	dqm_unlock(dqm);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

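/*
 * Worked example (illustrative values): for base = 0x1_0000_0000 and
 * size = 0x10000, limit = base + size - 1 = 0x1_0000_FFFF. Both pass the
 * fixed-bits check below (base's low 16 bits are 0x0000, limit's low
 * 16 bits are 0xFFFF, and the user-mode high bits are clear), so the
 * registers receive base >> 16 = 0x10000 and limit >> 16 = 0x10000.
 */
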
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				    struct qcm_process_device *qpd,
				    enum cache_policy default_policy,
				    enum cache_policy alternate_policy,
				    void __user *alternate_aperture_base,
				    uint64_t alternate_aperture_size)
{
	bool retval = true;

	if (!dqm->asic_ops.set_cache_memory_policy)
		return retval;

	dqm_lock(dqm);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		    (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		 qpd->sh_mem_ape1_limit);

out:
	dqm_unlock(dqm);
	return retval;
}

static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;
	bool found = false;

	dqm_lock(dqm);

	/* Clear all user mode queues */
	while (!list_empty(&qpd->queues_list)) {
		struct mqd_manager *mqd_mgr;
		int ret;

		q = list_first_entry(&qpd->queues_list, struct queue, list);
		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
		dqm_unlock(dqm);
		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
		dqm_lock(dqm);
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			found = true;
			break;
		}
	}

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (found)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}

static int get_wave_state(struct device_queue_manager *dqm,
			  struct queue *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct mqd_manager *mqd_mgr;

	dqm_lock(dqm);

	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];

	if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.is_active || !q->device->kfd->cwsr_enabled ||
	    !mqd_mgr->get_wave_state) {
		dqm_unlock(dqm);
		return -EINVAL;
	}

	dqm_unlock(dqm);

	/*
	 * get_wave_state is outside the dqm lock to prevent circular locking,
	 * and the queue should be protected against destruction by the process
	 * lock.
	 */
	return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties,
			ctl_stack, ctl_stack_used_size, save_area_used_size);
}

static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
			const struct queue *q,
			u32 *mqd_size,
			u32 *ctl_stack_size)
{
	struct mqd_manager *mqd_mgr;
	enum KFD_MQD_TYPE mqd_type =
			get_mqd_type_from_queue_type(q->properties.type);

	dqm_lock(dqm);
	mqd_mgr = dqm->mqd_mgrs[mqd_type];
	*mqd_size = mqd_mgr->mqd_size;
	*ctl_stack_size = 0;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
		mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);

	dqm_unlock(dqm);
}

static int checkpoint_mqd(struct device_queue_manager *dqm,
			  const struct queue *q,
			  void *mqd,
			  void *ctl_stack)
{
	struct mqd_manager *mqd_mgr;
	int r = 0;
	enum KFD_MQD_TYPE mqd_type =
			get_mqd_type_from_queue_type(q->properties.type);

	dqm_lock(dqm);

	if (q->properties.is_active || !q->device->kfd->cwsr_enabled) {
		r = -EINVAL;
		goto dqm_unlock;
	}

	mqd_mgr = dqm->mqd_mgrs[mqd_type];
	if (!mqd_mgr->checkpoint_mqd) {
		r = -EOPNOTSUPP;
		goto dqm_unlock;
	}

	mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack);

dqm_unlock:
	dqm_unlock(dqm);
	return r;
}

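/*
 * Sketch of how a caller (e.g. the CRIU checkpoint path) is expected to
 * combine the two helpers above; buffer allocation details are assumptions
 * for illustration only:
 *
 *	u32 mqd_size, ctl_stack_size;
 *
 *	dqm->ops.get_queue_checkpoint_info(dqm, q, &mqd_size, &ctl_stack_size);
 *	// allocate mqd_size and ctl_stack_size bytes, then:
 *	r = dqm->ops.checkpoint_mqd(dqm, q, mqd_buf, ctl_stack_buf);
 */
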
static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	int retval = 0;
	struct queue *q;
	struct device *dev = dqm->dev->adev->dev;
	struct kernel_queue *kq, *kq_next;
	struct mqd_manager *mqd_mgr;
	struct device_process_node *cur, *next_dpn;
	enum kfd_unmap_queues_filter filter =
		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
	bool found = false;

	dqm_lock(dqm);

	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		decrement_queue_count(dqm, qpd, kq->queue);
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
	}

	/* Clear all user mode queues */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
			deallocate_sdma_queue(dqm, q);
		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
			deallocate_sdma_queue(dqm, q);

		if (q->properties.is_active) {
			decrement_queue_count(dqm, qpd, q);

			if (dqm->dev->kfd->shared_resources.enable_mes) {
				retval = remove_queue_mes(dqm, q, qpd);
				if (retval)
					dev_err(dev, "Failed to remove queue %d\n",
						q->properties.queue_id);
			}
		}

		dqm->total_queue_count--;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			found = true;
			break;
		}
	}

	if (!dqm->dev->kfd->shared_resources.enable_mes)
		retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD);

	if ((retval || qpd->reset_wavefronts) &&
	    down_read_trylock(&dqm->dev->adev->reset_domain->sem)) {
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
		qpd->reset_wavefronts = false;
		up_read(&dqm->dev->adev->reset_domain->sem);
	}

	/* Lastly, free mqd resources.
	 * Do free_mqd() after dqm_unlock to avoid circular locking.
	 */
	while (!list_empty(&qpd->queues_list)) {
		q = list_first_entry(&qpd->queues_list, struct queue, list);
		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		list_del(&q->list);
		qpd->queue_count--;
		dqm_unlock(dqm);
		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
		dqm_lock(dqm);
	}
	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (found)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}

static int init_mqd_managers(struct device_queue_manager *dqm)
{
	int i, j;
	struct device *dev = dqm->dev->adev->dev;
	struct mqd_manager *mqd_mgr;

	for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
		mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
		if (!mqd_mgr) {
			dev_err(dev, "mqd manager [%d] initialization failed\n", i);
			goto out_free;
		}
		dqm->mqd_mgrs[i] = mqd_mgr;
	}

	return 0;

out_free:
	for (j = 0; j < i; j++) {
		kfree(dqm->mqd_mgrs[j]);
		dqm->mqd_mgrs[j] = NULL;
	}

	return -ENOMEM;
}

/* Allocate one hiq mqd (HWS) and all SDMA mqd in a contiguous chunk */
static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
{
	int retval;
	struct kfd_node *dev = dqm->dev;
	struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
	uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
		get_num_all_sdma_engines(dqm) *
		dev->kfd->device_info.num_sdma_queues_per_engine +
		(dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
		NUM_XCC(dqm->dev->xcc_mask));

	retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
			&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
			(void *)&(mem_obj->cpu_ptr), false);

	return retval;
}

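/*
 * Illustrative sizing (hypothetical numbers only): with 8 SDMA engines at
 * 8 queues each, a 512-byte SDMA MQD, a 512-byte HIQ MQD and 1 XCC,
 * size = 512 * 8 * 8 + 512 * 1 = 33280 bytes in a single GTT allocation.
 */
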
struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	switch (dev->adev->asic_type) {
	/* HWS is not available on Hawaii. */
	case CHIP_HAWAII:
	/* HWS depends on CWSR for timely dequeue. CWSR is not
	 * available on Tonga.
	 *
	 * FIXME: This argument also applies to Kaveri.
	 */
	case CHIP_TONGA:
		dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
		break;
	default:
		dqm->sched_policy = sched_policy;
		break;
	}

	dqm->dev = dev;
	switch (dqm->sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.halt = halt_cpsch;
		dqm->ops.unhalt = unhalt_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.process_termination = process_termination_cpsch;
		dqm->ops.evict_process_queues = evict_process_queues_cpsch;
		dqm->ops.restore_process_queues = restore_process_queues_cpsch;
		dqm->ops.get_wave_state = get_wave_state;
		dqm->ops.reset_queues = reset_queues_cpsch;
		dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
		dqm->ops.checkpoint_mqd = checkpoint_mqd;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.process_termination = process_termination_nocpsch;
		dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
		dqm->ops.restore_process_queues =
			restore_process_queues_nocpsch;
		dqm->ops.get_wave_state = get_wave_state;
		dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
		dqm->ops.checkpoint_mqd = checkpoint_mqd;
		break;
	default:
		dev_err(dev->adev->dev, "Invalid scheduling policy %d\n", dqm->sched_policy);
		goto out_free;
	}

	switch (dev->adev->asic_type) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		device_queue_manager_init_cik(&dqm->asic_ops);
		break;

	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		device_queue_manager_init_vi(&dqm->asic_ops);
		break;

	default:
		if (KFD_GC_VERSION(dev) >= IP_VERSION(12, 0, 0))
			device_queue_manager_init_v12(&dqm->asic_ops);
		else if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0))
			device_queue_manager_init_v11(&dqm->asic_ops);
		else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
			device_queue_manager_init_v10(&dqm->asic_ops);
		else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
			device_queue_manager_init_v9(&dqm->asic_ops);
		else {
			WARN(1, "Unexpected ASIC family %u",
			     dev->adev->asic_type);
			goto out_free;
		}
	}

	if (init_mqd_managers(dqm))
		goto out_free;

	if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
		dev_err(dev->adev->dev, "Failed to allocate hiq sdma mqd trunk buffer\n");
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm)) {
		init_waitqueue_head(&dqm->destroy_wait);
		return dqm;
	}

out_free:
	kfree(dqm);
	return NULL;
}

static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
				    struct kfd_mem_obj *mqd)
{
	WARN(!mqd, "No hiq sdma mqd trunk to free");

	amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
	kfree(dqm);
}

int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct device_queue_manager *dqm = knode->dqm;
	struct device *dev = dqm->dev->adev->dev;
	struct qcm_process_device *qpd;
	struct queue *q = NULL;
	int ret = 0;

	if (!p)
		return -EINVAL;

	dqm_lock(dqm);

	pdd = kfd_get_process_device_data(dqm->dev, p);
	if (pdd) {
		qpd = &pdd->qpd;

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (q->doorbell_id == doorbell_id && q->properties.is_active) {
				ret = suspend_all_queues_mes(dqm);
				if (ret) {
					dev_err(dev, "Suspending all queues failed");
					goto out;
				}

				q->properties.is_evicted = true;
				q->properties.is_active = false;
				decrement_queue_count(dqm, qpd, q);

				ret = remove_queue_mes(dqm, q, qpd);
				if (ret) {
					dev_err(dev, "Removing bad queue failed");
					goto out;
				}

				ret = resume_all_queues_mes(dqm);
				if (ret)
					dev_err(dev, "Resuming all queues failed");

				break;
			}
		}
	}

out:
	dqm_unlock(dqm);
	kfd_unref_process(p);
	return ret;
}

static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd)
{
	struct device *dev = dqm->dev->adev->dev;
	int ret = 0;

	/* Check if process is already evicted */
	dqm_lock(dqm);
	if (qpd->evicted) {
		/* Increment the evicted count to make sure the
		 * process stays evicted before it is terminated.
		 */
		qpd->evicted++;
		dqm_unlock(dqm);
		goto out;
	}
	dqm_unlock(dqm);

	ret = suspend_all_queues_mes(dqm);
	if (ret) {
		dev_err(dev, "Suspending all queues failed");
		goto out;
	}

	ret = dqm->ops.evict_process_queues(dqm, qpd);
	if (ret) {
		dev_err(dev, "Evicting process queues failed");
		goto out;
	}

	ret = resume_all_queues_mes(dqm);
	if (ret)
		dev_err(dev, "Resuming all queues failed");

out:
	return ret;
}

int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	int ret = 0;

	if (!p)
		return -EINVAL;

	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	pdd = kfd_get_process_device_data(dqm->dev, p);
	if (pdd) {
		if (dqm->dev->kfd->shared_resources.enable_mes)
			ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd);
		else
			ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
	}

	kfd_unref_process(p);

	return ret;
}

static void kfd_process_hw_exception(struct work_struct *work)
{
	struct device_queue_manager *dqm = container_of(work,
			struct device_queue_manager, hw_exception_work);

	amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
}

int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
			    struct qcm_process_device *qpd)
{
	int r;
	struct device *dev = dqm->dev->adev->dev;
	int updated_vmid_mask;

	if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
		return -EINVAL;
	}

	dqm_lock(dqm);

	if (dqm->trap_debug_vmid != 0) {
		dev_err(dev, "Trap debug id already reserved\n");
		r = -EBUSY;
		goto out_unlock;
	}

	r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
			USE_DEFAULT_GRACE_PERIOD, false);
	if (r)
		goto out_unlock;

	updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
	updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd);

	dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
	dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd;
	r = set_sched_resources(dqm);
	if (r)
		goto out_unlock;

	r = map_queues_cpsch(dqm);
	if (r)
		goto out_unlock;

	pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid);

out_unlock:
	dqm_unlock(dqm);
	return r;
}

/*
 * Releases vmid for the trap debugger
 */
int release_debug_trap_vmid(struct device_queue_manager *dqm,
			    struct qcm_process_device *qpd)
{
	struct device *dev = dqm->dev->adev->dev;
	int r;
	int updated_vmid_mask;
	uint32_t trap_debug_vmid;

	if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
		return -EINVAL;
	}

	dqm_lock(dqm);
	trap_debug_vmid = dqm->trap_debug_vmid;
	if (dqm->trap_debug_vmid == 0) {
		dev_err(dev, "Trap debug id is not reserved\n");
		r = -EINVAL;
		goto out_unlock;
	}

	r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
			USE_DEFAULT_GRACE_PERIOD, false);
	if (r)
		goto out_unlock;

	updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
	updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd);

	dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
	dqm->trap_debug_vmid = 0;
	r = set_sched_resources(dqm);
	if (r)
		goto out_unlock;

	r = map_queues_cpsch(dqm);
	if (r)
		goto out_unlock;

	pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid);

out_unlock:
	dqm_unlock(dqm);
	return r;
}

#define QUEUE_NOT_FOUND -1
/* invalidate queue operation in array */
static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids)
{
	int i;

	for (i = 0; i < num_queues; i++)
		queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK;
}

/* find queue index in array */
static int q_array_get_index(unsigned int queue_id,
		uint32_t num_queues,
		uint32_t *queue_ids)
{
	int i;

	for (i = 0; i < num_queues; i++)
		if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK))
			return i;

	return QUEUE_NOT_FOUND;
}

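/*
 * The masks turn each user-supplied queue id into a small status word
 * (sketch; the exact bit layout comes from KFD_DBG_QUEUE_INVALID_MASK
 * and KFD_DBG_QUEUE_ERROR_MASK in the KFD headers):
 *
 *	q_array_invalidate(n, ids);              // mark all "not found yet"
 *	idx = q_array_get_index(qid, n, ids);    // match ignores the flag bit
 *	ids[idx] &= ~KFD_DBG_QUEUE_INVALID_MASK; // request succeeded
 *	ids[idx] |= KFD_DBG_QUEUE_ERROR_MASK;    // or: request failed
 */
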
struct copy_context_work_handler_workarea {
	struct work_struct copy_context_work;
	struct kfd_process *p;
};

static void copy_context_work_handler(struct work_struct *work)
{
	struct copy_context_work_handler_workarea *workarea;
	struct mqd_manager *mqd_mgr;
	struct queue *q;
	struct mm_struct *mm;
	struct kfd_process *p;
	uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size;
	int i;

	workarea = container_of(work,
			struct copy_context_work_handler_workarea,
			copy_context_work);

	p = workarea->p;
	mm = get_task_mm(p->lead_thread);
	if (!mm)
		return;

	kthread_use_mm(mm);
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct device_queue_manager *dqm = pdd->dev->dqm;
		struct qcm_process_device *qpd = &pdd->qpd;

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE)
				continue;

			mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];

			/* We ignore the return value from get_wave_state
			 * because
			 * i) right now, it always returns 0, and
			 * ii) if we hit an error, we would continue to the
			 *     next queue anyway.
			 */
			mqd_mgr->get_wave_state(mqd_mgr,
					q->mqd,
					&q->properties,
					(void __user *)q->properties.ctx_save_restore_area_address,
					&tmp_ctl_stack_used_size,
					&tmp_save_area_used_size);
		}
	}
	kthread_unuse_mm(mm);
	mmput(mm);
}

static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array)
{
	size_t array_size = num_queues * sizeof(uint32_t);

	if (!usr_queue_id_array)
		return NULL;

	return memdup_user(usr_queue_id_array, array_size);
}

int resume_queues(struct kfd_process *p,
		uint32_t num_queues,
		uint32_t *usr_queue_id_array)
{
	uint32_t *queue_ids = NULL;
	int total_resumed = 0;
	int i;

	if (usr_queue_id_array) {
		queue_ids = get_queue_ids(num_queues, usr_queue_id_array);

		if (IS_ERR(queue_ids))
			return PTR_ERR(queue_ids);

		/* mask all queues as invalid. unmask per successful request */
		q_array_invalidate(num_queues, queue_ids);
	}

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct device_queue_manager *dqm = pdd->dev->dqm;
		struct device *dev = dqm->dev->adev->dev;
		struct qcm_process_device *qpd = &pdd->qpd;
		struct queue *q;
		int r, per_device_resumed = 0;

		dqm_lock(dqm);

		/* unmask queues that resume or already resumed as valid */
		list_for_each_entry(q, &qpd->queues_list, list) {
			int q_idx = QUEUE_NOT_FOUND;

			if (queue_ids)
				q_idx = q_array_get_index(
						q->properties.queue_id,
						num_queues,
						queue_ids);

			if (!queue_ids || q_idx != QUEUE_NOT_FOUND) {
				int err = resume_single_queue(dqm, &pdd->qpd, q);

				if (queue_ids) {
					if (!err) {
						queue_ids[q_idx] &=
							~KFD_DBG_QUEUE_INVALID_MASK;
					} else {
						queue_ids[q_idx] |=
							KFD_DBG_QUEUE_ERROR_MASK;
						break;
					}
				}

				if (dqm->dev->kfd->shared_resources.enable_mes) {
					wake_up_all(&dqm->destroy_wait);
					if (!err)
						total_resumed++;
				} else {
					per_device_resumed++;
				}
			}
		}

		if (!per_device_resumed) {
			dqm_unlock(dqm);
			continue;
		}

		r = execute_queues_cpsch(dqm,
					KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
					0,
					USE_DEFAULT_GRACE_PERIOD);
		if (r) {
			dev_err(dev, "Failed to resume process queues\n");
			if (queue_ids) {
				list_for_each_entry(q, &qpd->queues_list, list) {
					int q_idx = q_array_get_index(
							q->properties.queue_id,
							num_queues,
							queue_ids);

					/* mask queue as error on resume fail */
					if (q_idx != QUEUE_NOT_FOUND)
						queue_ids[q_idx] |=
							KFD_DBG_QUEUE_ERROR_MASK;
				}
			}
		} else {
			wake_up_all(&dqm->destroy_wait);
			total_resumed += per_device_resumed;
		}

		dqm_unlock(dqm);
	}

	if (queue_ids) {
		if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
				num_queues * sizeof(uint32_t)))
			pr_err("copy_to_user failed on queue resume\n");

		kfree(queue_ids);
	}

	return total_resumed;
}

int suspend_queues(struct kfd_process *p,
			uint32_t num_queues,
			uint32_t grace_period,
			uint64_t exception_clear_mask,
			uint32_t *usr_queue_id_array)
{
	uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array);
	int total_suspended = 0;
	int i;

	if (IS_ERR(queue_ids))
		return PTR_ERR(queue_ids);

	/* mask all queues as invalid. unmask on successful request */
	q_array_invalidate(num_queues, queue_ids);

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct device_queue_manager *dqm = pdd->dev->dqm;
		struct device *dev = dqm->dev->adev->dev;
		struct qcm_process_device *qpd = &pdd->qpd;
		struct queue *q;
		int r, per_device_suspended = 0;

		mutex_lock(&p->event_mutex);
		dqm_lock(dqm);

		/* unmask queues that suspend or already suspended */
		list_for_each_entry(q, &qpd->queues_list, list) {
			int q_idx = q_array_get_index(q->properties.queue_id,
							num_queues,
							queue_ids);

			if (q_idx != QUEUE_NOT_FOUND) {
				int err = suspend_single_queue(dqm, pdd, q);
				bool is_mes = dqm->dev->kfd->shared_resources.enable_mes;

				if (!err) {
					queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK;
					if (exception_clear_mask && is_mes)
						q->properties.exception_status &=
							~exception_clear_mask;

					if (is_mes)
						total_suspended++;
					else
						per_device_suspended++;
				} else if (err != -EBUSY) {
					r = err;
					queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
					break;
				}
			}
		}

		if (!per_device_suspended) {
			dqm_unlock(dqm);
			mutex_unlock(&p->event_mutex);
			if (total_suspended)
				amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev);
			continue;
		}

		r = execute_queues_cpsch(dqm,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
			grace_period);

		if (r)
			dev_err(dev, "Failed to suspend process queues.\n");
		else
			total_suspended += per_device_suspended;

		list_for_each_entry(q, &qpd->queues_list, list) {
			int q_idx = q_array_get_index(q->properties.queue_id,
						num_queues, queue_ids);

			if (q_idx == QUEUE_NOT_FOUND)
				continue;

			/* mask queue as error on suspend fail */
			if (r)
				queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
			else if (exception_clear_mask)
				q->properties.exception_status &=
							~exception_clear_mask;
		}

		dqm_unlock(dqm);
		mutex_unlock(&p->event_mutex);
		amdgpu_device_flush_hdp(dqm->dev->adev, NULL);
	}

	if (total_suspended) {
		struct copy_context_work_handler_workarea copy_context_worker;

		INIT_WORK_ONSTACK(
				&copy_context_worker.copy_context_work,
				copy_context_work_handler);

		copy_context_worker.p = p;

		schedule_work(&copy_context_worker.copy_context_work);

		flush_work(&copy_context_worker.copy_context_work);
		destroy_work_on_stack(&copy_context_worker.copy_context_work);
	}

	if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
			num_queues * sizeof(uint32_t)))
		pr_err("copy_to_user failed on queue suspend\n");

	kfree(queue_ids);

	return total_suspended;
}

static uint32_t set_queue_type_for_user(struct queue_properties *q_props)
{
	switch (q_props->type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		return q_props->format == KFD_QUEUE_FORMAT_PM4
					? KFD_IOC_QUEUE_TYPE_COMPUTE
					: KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
	case KFD_QUEUE_TYPE_SDMA:
		return KFD_IOC_QUEUE_TYPE_SDMA;
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		return KFD_IOC_QUEUE_TYPE_SDMA_XGMI;
	default:
		WARN_ONCE(true, "queue type not recognized!");
		return 0xffffffff;
	}
}

void set_queue_snapshot_entry(struct queue *q,
			      uint64_t exception_clear_mask,
			      struct kfd_queue_snapshot_entry *qss_entry)
{
	qss_entry->ring_base_address = q->properties.queue_address;
	qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr;
	qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr;
	qss_entry->ctx_save_restore_address =
				q->properties.ctx_save_restore_area_address;
	qss_entry->ctx_save_restore_area_size =
				q->properties.ctx_save_restore_area_size;
	qss_entry->exception_status = q->properties.exception_status;
	qss_entry->queue_id = q->properties.queue_id;
	qss_entry->gpu_id = q->device->id;
	qss_entry->ring_size = (uint32_t)q->properties.queue_size;
	qss_entry->queue_type = set_queue_type_for_user(&q->properties);
	q->properties.exception_status &= ~exception_clear_mask;
}

int debug_lock_and_unmap(struct device_queue_manager *dqm)
{
	struct device *dev = dqm->dev->adev->dev;
	int r;

	if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
		return -EINVAL;
	}

	if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
		return 0;

	dqm_lock(dqm);

	r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false);
	if (r)
		dqm_unlock(dqm);

	return r;
}

int debug_map_and_unlock(struct device_queue_manager *dqm)
{
	struct device *dev = dqm->dev->adev->dev;
	int r;

	if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
		return -EINVAL;
	}

	if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
		return 0;

	r = map_queues_cpsch(dqm);

	dqm_unlock(dqm);

	return r;
}

int debug_refresh_runlist(struct device_queue_manager *dqm)
{
	int r = debug_lock_and_unmap(dqm);

	if (r)
		return r;

	return debug_map_and_unlock(dqm);
}

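/*
 * Intended pairing of the two halves above (illustrative sketch; the
 * debugger code built on kfd_debug.h is the real caller):
 *
 *	r = debug_lock_and_unmap(dqm);         // unmap queues, keep dqm locked
 *	if (!r) {
 *		// ... update per-VMID debug state here ...
 *		r = debug_map_and_unlock(dqm); // remap queues, drop the lock
 *	}
 *
 * debug_refresh_runlist() is the back-to-back convenience wrapper for
 * when nothing needs to change between the two halves.
 */
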
bool kfd_dqm_is_queue_in_process(struct device_queue_manager *dqm,
				 struct qcm_process_device *qpd,
				 int doorbell_off, u32 *queue_format)
{
	struct queue *q;
	bool r = false;

	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.doorbell_off == doorbell_off) {
			*queue_format = q->properties.format;
			r = true;
			goto out;
		}
	}

out:
	dqm_unlock(dqm);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
			seq_printf(m, "%s    %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
			count--;
		}
	}

	seq_puts(m, "\n");
}

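/*
 * Illustrative output (hypothetical register values): up to eight
 * consecutive registers share one line before a new address prefix
 * is printed:
 *
 *	0000c910: 00000001 00000000 00000000 00000000 00000000 00000000 00000000 00000000
 *	0000c930: 00000000 00000000
 */
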
int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
	struct device_queue_manager *dqm = data;
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	uint32_t (*dump)[2], n_regs;
	int pipe, queue;
	int r = 0, xcc_id;
	uint32_t sdma_engine_start;

	if (!dqm->sched_running) {
		seq_puts(m, " Device is stopped\n");
		return 0;
	}

	for_each_inst(xcc_id, xcc_mask) {
		r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
						KFD_CIK_HIQ_PIPE,
						KFD_CIK_HIQ_QUEUE, &dump,
						&n_regs, xcc_id);
		if (!r) {
			seq_printf(m,
				   " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n",
				   xcc_id,
				   KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1,
				   KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm),
				   KFD_CIK_HIQ_QUEUE);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}

		for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
			int pipe_offset = pipe * get_queues_per_pipe(dqm);

			for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
				if (!test_bit(pipe_offset + queue,
					      dqm->dev->kfd->shared_resources.cp_queue_bitmap))
					continue;

				r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
								pipe, queue,
								&dump, &n_regs,
								xcc_id);
				if (r)
					break;

				seq_printf(m,
					   " Inst %d, CP Pipe %d, Queue %d\n",
					   xcc_id, pipe, queue);
				seq_reg_dump(m, dump, n_regs);

				kfree(dump);
			}
		}
	}

	sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
	for (pipe = sdma_engine_start;
	     pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm));
	     pipe++) {
		for (queue = 0;
		     queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
		     queue++) {
			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
				dqm->dev->adev, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, " SDMA Engine %d, RLC %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	return r;
}

int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
{
	int r = 0;

	dqm_lock(dqm);
	r = pm_debugfs_hang_hws(&dqm->packet_mgr);
	if (r) {
		dqm_unlock(dqm);
		return r;
	}
	dqm->active_runlist = true;
	r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
				 0, USE_DEFAULT_GRACE_PERIOD);
	dqm_unlock(dqm);

	return r;
}

#endif