/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);

static void kfd_process_hw_exception(struct work_struct *work);
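
/* Map a KFD queue type to the MQD format the HW expects: SDMA queues use
 * the SDMA MQD manager, every other queue type is handled as a CP MQD.
 */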
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			     dqm->dev->shared_resources.queue_bitmap))
			return true;

	return false;
}
unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines;
}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines
			* KFD_SDMA_QUEUES_PER_ENGINE;
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}
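
/* Doorbell assignment policy: pre-SOC15 ASICs derive the doorbell from the
 * queue ID to preserve the user-mode ABI, SOC15 SDMA queues use the static
 * per-engine doorbell table in shared_resources, and SOC15 CP queues take a
 * free slot from the per-process doorbell bitmap.
 */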
static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
		/* On pre-SOC15 chips we need to use the queue ID to
		 * preserve the user mode ABI.
		 */
		q->doorbell_id = q->properties.queue_id;
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		/* For SDMA queues on SOC15, use static doorbell
		 * assignments based on the engine and queue.
		 */
		q->doorbell_id = dev->shared_resources.sdma_doorbell
			[q->properties.sdma_engine_id]
			[q->properties.sdma_queue_id];
	} else {
		/* For CP queues on SOC15 reserve a free doorbell ID */
		unsigned int found;

		found = find_first_zero_bit(qpd->doorbell_bitmap,
					    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
		if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
			pr_debug("No doorbells available");
			return -EBUSY;
		}
		set_bit(found, qpd->doorbell_bitmap);
		q->doorbell_id = found;
	}

	q->properties.doorbell_off =
		kfd_doorbell_id_to_offset(dev, q->process,
					  q->doorbell_id);

	return 0;
}
static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{
	unsigned int old;
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA)
		return;

	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
	WARN_ON(!old);
}
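
/* VMID handling for the no-HWS path: a VMID from the KFD range is bound to
 * the process when its first queue is created and released again when its
 * last queue is destroyed.
 */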
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = ffs(dqm->vmid_bitmap) - 1;
	dqm->vmid_bitmap &= ~(1 << bit);

	allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
	pr_debug("vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd));

	return 0;
}
static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
				struct qcm_process_device *qpd)
{
	const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
	int ret;

	if (!qpd->ib_kaddr)
		return -ENOMEM;

	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
	if (ret)
		return ret;

	return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
				pmf->release_mem_size / sizeof(uint32_t));
}
static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

	/* On GFX v7, CP doesn't flush TC at dequeue */
	if (q->device->device_info->asic_family == CHIP_HAWAII)
		if (flush_texture_cache_nocpsch(q->device, qpd))
			pr_err("Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd));

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	dqm->vmid_bitmap |= (1 << bit);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd)
{
	int retval;

	print_queue(q);

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (qpd->evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval) {
		if (list_empty(&qpd->queues_list))
			deallocate_vmid(dqm, qpd, q);
		goto out_unlock;
	}

	list_add(&q->list, &qpd->queues_list);

	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out_unlock:
	dqm_unlock(dqm);
	return retval;
}
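
/* Reserve a HW queue slot (pipe/queue pair) for a compute queue. Allocation
 * starts at dqm->next_pipe_to_allocate and walks the enabled pipes
 * round-robin so queues are spread horizontally across all pipes.
 */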
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}
static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (!mqd_mgr)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval)
		return retval;

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_hqd;

	retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_doorbell;

	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);

	dqm->dev->kfd2kgd->set_scratch_backing_va(
			dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

	if (!q->properties.is_active)
		return 0;

	retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
			&q->properties, q->process->mm);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_hqd:
	deallocate_hqd(dqm, q);

	return retval;
}
/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid asynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
		get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd_mgr)
		return -ENOMEM;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	deallocate_doorbell(qpd, q);

	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}

	if (q->properties.is_active)
		dqm->queue_count--;

	return retval;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;

	dqm_lock(dqm);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	dqm_unlock(dqm);

	return retval;
}
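
/* Update a queue's MQD with new properties. The queue has to be taken off
 * the HW first (via the HWS runlist, or a direct HQD destroy in no-HWS
 * mode) before the MQD is touched, and is remapped afterwards if it is
 * still active.
 */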
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	dqm_lock(dqm);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd_mgr) {
		retval = -ENOMEM;
		goto out_unlock;
	}
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (pdd->qpd.evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	retval = mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
				&q->properties, q->process->mm);

out_unlock:
	dqm_unlock(dqm);
	return retval;
}
static struct mqd_manager *get_mqd_manager(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd_mgr;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	pr_debug("mqd type %d\n", type);

	mqd_mgr = dqm->mqd_mgrs[type];
	if (!mqd_mgr) {
		mqd_mgr = mqd_manager_init(type, dqm->dev);
		if (!mqd_mgr)
			pr_err("mqd manager is NULL");
		dqm->mqd_mgrs[type] = mqd_mgr;
	}

	return mqd_mgr;
}
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID %u queues\n",
			    pdd->process->pasid);

	/* unactivate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_active)
			continue;
		mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd_mgr) { /* should not be here */
			pr_err("Cannot evict queue, mqd mgr is NULL\n");
			retval = -ENOMEM;
			goto out;
		}
		q->properties.is_evicted = true;
		q->properties.is_active = false;
		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval)
			goto out;
		dqm->queue_count--;
	}

out:
	dqm_unlock(dqm);
	return retval;
}
static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID %u queues\n",
			    pdd->process->pasid);

	/* unactivate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_active)
			continue;
		q->properties.is_evicted = true;
		q->properties.is_active = false;
		dqm->queue_count--;
	}
	retval = execute_queues_cpsch(dqm,
				qpd->is_debug ?
				KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);

out:
	dqm_unlock(dqm);
	return retval;
}
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	uint32_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID %u queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%08x\n", pd_base);

	if (!list_empty(&qpd->queues_list)) {
		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
				dqm->dev->kgd,
				qpd->vmid,
				qpd->page_table_base);
		kfd_flush_tlb(pdd);
	}

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_evicted)
			continue;
		mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd_mgr) { /* should not be here */
			pr_err("Cannot restore queue, mqd mgr is NULL\n");
			retval = -ENOMEM;
			goto out;
		}
		q->properties.is_evicted = false;
		q->properties.is_active = true;
		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
				q->queue, &q->properties,
				q->process->mm);
		if (retval)
			goto out;
		dqm->queue_count++;
	}
	qpd->evicted = 0;
out:
	dqm_unlock(dqm);
	return retval;
}
static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	uint32_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID %u queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%08x\n", pd_base);

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_evicted)
			continue;
		q->properties.is_evicted = false;
		q->properties.is_active = true;
		dqm->queue_count++;
	}
	retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	qpd->evicted = 0;
out:
	dqm_unlock(dqm);
	return retval;
}
static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint32_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	if (dqm->processes_count++ == 0)
		dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);

	dqm_unlock(dqm);

	return retval;
}
static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	dqm_lock(dqm);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			if (--dqm->processes_count == 0)
				dqm->dev->kfd2kgd->set_compute_idle(
					dqm->dev->kgd, true);
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	dqm_unlock(dqm);
	return retval;
}
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid |
		ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}
static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
	dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;

	return 0;
}
static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqd_mgrs[i]);
	mutex_destroy(&dqm->lock_hidden);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return pm_init(&dqm->packets, dqm);
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	pm_uninit(&dqm->packets);
	return 0;
}
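
/* SDMA queue IDs come from a single bitmap covering all engines; callers
 * derive the engine and per-engine queue index from the allocated ID.
 */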
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = ffs(dqm->sdma_bitmap) - 1;
	dqm->sdma_bitmap &= ~(1 << bit);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= get_num_sdma_queues(dqm))
		return;
	dqm->sdma_bitmap |= (1 << sdma_queue_id);
}
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd_mgr)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id / get_num_sdma_engines(dqm);
	q->properties.sdma_engine_id = q->sdma_id % get_num_sdma_engines(dqm);

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	pr_debug("SDMA id is:     %d\n", q->sdma_id);
	pr_debug("SDMA queue id:  %d\n", q->properties.sdma_queue_id);
	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_doorbell;

	retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
				NULL);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	deallocate_sdma_queue(dqm, q->sdma_id);

	return retval;
}
/*
 * Device Queue Manager implementation for cp scheduler
 */
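
/* Tell the HW scheduler (HWS) firmware which VMIDs and HQD slots it may use.
 * Only queues on the first MEC are handed to the HWS, and the queue mask is
 * limited to 64 bits by the SET_RESOURCES packet format.
 */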
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
	res.queue_mask = 0;

	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
						res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;

	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);

	return 0;
}
static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	dqm_lock(dqm);
	/* clear hang status when the driver tries to start the hw scheduler */
	dqm->is_hws_hang = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
	dqm_lock(dqm);
	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	dqm_unlock(dqm);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		dqm_unlock(dqm);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
}
static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	dqm_unlock(dqm);
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	retval = 0;

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		retval = allocate_sdma_queue(dqm, &q->sdma_id);
		if (retval)
			goto out_unlock;
		q->properties.sdma_queue_id =
			q->sdma_id / get_num_sdma_engines(dqm);
		q->properties.sdma_engine_id =
			q->sdma_id % get_num_sdma_engines(dqm);
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));

	if (!mqd_mgr) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (qpd->evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_doorbell;

	list_add(&q->list, &qpd->queues_list);

	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);
	return retval;

out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		deallocate_sdma_queue(dqm, q->sdma_id);
out_unlock:
	dqm_unlock(dqm);

	return retval;
}
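
/* Busy-wait on a fence value written by the CP to GART memory. Used to
 * confirm that an unmap request has been processed by the HW scheduler.
 */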
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			/* In HWS case, this is used to halt the driver thread
			 * in order not to mess up CP states before doing
			 * scandumps for FW debugging.
			 */
			while (halt_if_hws_hang)
				schedule();

			return -ETIME;
		}
		schedule();
	}

	return 0;
}
static int unmap_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
		return 0;

	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (dqm->is_hws_hang)
		return -EIO;
	if (!dqm->active_runlist)
		return retval;

	pr_debug("Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		unmap_sdma_queues(dqm, 0);
		unmap_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval)
		return retval;

	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	if (dqm->is_hws_hang)
		return -EIO;
	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		dqm->is_hws_hang = true;
		schedule_work(&dqm->hw_exception_work);
		return retval;
	}

	return map_queues_cpsch(dqm);
}
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;
	bool preempt_all_queues;

	preempt_all_queues = false;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	dqm_lock(dqm);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow to destroy a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd_mgr) {
		retval = -ENOMEM;
		goto failed;
	}

	deallocate_doorbell(qpd, q);

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	}

	list_del(&q->list);

	if (q->properties.is_active) {
		dqm->queue_count--;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval == -ETIME)
			qpd->reset_wavefronts = true;
	}

	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);

	return retval;

failed:
failed_try_destroy_debugged_queue:

	dqm_unlock(dqm);
	return retval;
}
/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval = true;

	if (!dqm->asic_ops.set_cache_memory_policy)
		return retval;

	dqm_lock(dqm);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	dqm_unlock(dqm);
	return retval;
}
static int set_trap_handler(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				uint64_t tba_addr,
				uint64_t tma_addr)
{
	uint64_t *tma;

	if (dqm->dev->cwsr_enabled) {
		/* Jump from CWSR trap handler to user trap */
		tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}

	return 0;
}
static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q, *next;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;

	dqm_lock(dqm);

	/* Clear all user mode queues */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		int ret;

		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	dqm_unlock(dqm);
	return retval;
}
static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	int retval;
	struct queue *q, *next;
	struct kernel_queue *kq, *kq_next;
	struct mqd_manager *mqd_mgr;
	struct device_process_node *cur, *next_dpn;
	enum kfd_unmap_queues_filter filter =
		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;

	retval = 0;

	dqm_lock(dqm);

	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		dqm->queue_count--;
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
	}

	/* Clear all user mode queues */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
			dqm->sdma_queue_count--;
			deallocate_sdma_queue(dqm, q->sdma_id);
		}

		if (q->properties.is_active)
			dqm->queue_count--;

		dqm->total_queue_count--;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	retval = execute_queues_cpsch(dqm, filter, 0);
	if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
		qpd->reset_wavefronts = false;
	}

	/* lastly, free mqd resources */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd_mgr) {
			retval = -ENOMEM;
			goto out;
		}
		list_del(&q->list);
		mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
	}

out:
	dqm_unlock(dqm);
	return retval;
}
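
/* Create a device queue manager and wire up either the HWS (cp scheduler)
 * ops table or the no-HWS one, depending on the sched_policy setting and on
 * what the ASIC supports, then hook up the per-ASIC callbacks.
 */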
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	switch (dev->device_info->asic_family) {
	/* HWS is not available on Hawaii. */
	case CHIP_HAWAII:
	/* HWS depends on CWSR for timely dequeue. CWSR is not
	 * available on Tonga.
	 *
	 * FIXME: This argument also applies to Kaveri.
	 */
	case CHIP_TONGA:
		dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
		break;
	default:
		dqm->sched_policy = sched_policy;
		break;
	}

	dqm->dev = dev;
	switch (dqm->sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_cpsch;
		dqm->ops.evict_process_queues = evict_process_queues_cpsch;
		dqm->ops.restore_process_queues = restore_process_queues_cpsch;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_nocpsch;
		dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
		dqm->ops.restore_process_queues =
			restore_process_queues_nocpsch;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
		goto out_free;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->asic_ops);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->asic_ops);
		break;

	case CHIP_HAWAII:
		device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
		break;

	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
		break;

	case CHIP_VEGA10:
	case CHIP_RAVEN:
		device_queue_manager_init_v9(&dqm->asic_ops);
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->device_info->asic_family);
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}
int kfd_process_vm_fault(struct device_queue_manager *dqm,
			 unsigned int pasid)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	int ret = 0;

	if (!p)
		return -EINVAL;
	pdd = kfd_get_process_device_data(dqm->dev, p);
	if (pdd)
		ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
	kfd_unref_process(p);

	return ret;
}
static void kfd_process_hw_exception(struct work_struct *work)
{
	struct device_queue_manager *dqm = container_of(work,
			struct device_queue_manager, hw_exception_work);
	dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
}
#if defined(CONFIG_DEBUG_FS)
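
/* Print register dumps returned by the kfd2kgd HQD dump callbacks. Registers
 * with consecutive addresses are grouped into runs, one run per output line.
 */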
static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
			seq_printf(m, "%s    %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
			count--;
		}
	}

	seq_puts(m, "\n");
}
int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
	struct device_queue_manager *dqm = data;
	uint32_t (*dump)[2], n_regs;
	int pipe, queue;
	int r = 0;

	r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
		KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs);
	if (!r) {
		seq_printf(m, "  HIQ on MEC %d Pipe %d Queue %d\n",
				KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
				KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
				KFD_CIK_HIQ_QUEUE);
		seq_reg_dump(m, dump, n_regs);

		kfree(dump);
	}

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
			if (!test_bit(pipe_offset + queue,
				      dqm->dev->shared_resources.queue_bitmap))
				continue;

			r = dqm->dev->kfd2kgd->hqd_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, "  CP Pipe %d, Queue %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
		for (queue = 0; queue < KFD_SDMA_QUEUES_PER_ENGINE; queue++) {
			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, "  SDMA Engine %d, RLC %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	return r;
}
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
{
	int r = 0;

	dqm_lock(dqm);
	dqm->active_runlist = true;
	r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	dqm_unlock(dqm);

	return r;
}

#endif