/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

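/*
 * Each process owns a contiguous slice of the doorbell aperture:
 * AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS doorbells of
 * AMDGPU_ONE_DOORBELL_SIZE bytes each, rounded up to a full page.
 */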
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
        return roundup(AMDGPU_ONE_DOORBELL_SIZE *
                       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
                       PAGE_SIZE);
}

int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
                                       unsigned int *doorbell_index)
{
        int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
                               adev->mes.max_doorbell_slices,
                               GFP_KERNEL);
        if (r > 0)
                *doorbell_index = r;

        return r;
}

void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
                                       unsigned int doorbell_index)
{
        if (doorbell_index)
                ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
}

unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
                                        struct amdgpu_device *adev,
                                        uint32_t doorbell_index,
                                        unsigned int doorbell_id)
{
        return ((doorbell_index *
                 amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
                doorbell_id * 2);
}

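/*
 * Reserve one doorbell inside the process slice for a new queue. SDMA
 * queues start searching at the first SDMA engine's doorbell index;
 * all other IP types take the first free bit in the bitmap.
 */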
static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
                                         struct amdgpu_mes_process *process,
                                         int ip_type, uint64_t *doorbell_index)
{
        unsigned int offset, found;

        if (ip_type == AMDGPU_RING_TYPE_SDMA) {
                offset = adev->doorbell_index.sdma_engine[0];
                found = find_next_zero_bit(process->doorbell_bitmap,
                                           AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
                                           offset);
        } else {
                found = find_first_zero_bit(process->doorbell_bitmap,
                                            AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
        }

        if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
                DRM_WARN("No doorbell available\n");
                return -ENOSPC;
        }

        set_bit(found, process->doorbell_bitmap);

        *doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
                                process->doorbell_index, found);

        return 0;
}

static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
                                           struct amdgpu_mes_process *process,
                                           uint32_t doorbell_index)
{
        unsigned int old, doorbell_id;

        doorbell_id = doorbell_index -
                      (process->doorbell_index *
                       amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
        doorbell_id /= 2;

        old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
        WARN_ON(!old);
}

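/*
 * Carve the MES doorbell range out of whatever is left of the doorbell
 * aperture after the kernel's statically assigned doorbells.
 */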
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
        size_t doorbell_start_offset;
        size_t doorbell_aperture_size;
        size_t doorbell_process_limit;

        doorbell_start_offset = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
        doorbell_start_offset =
                roundup(doorbell_start_offset,
                        amdgpu_mes_doorbell_process_slice(adev));

        doorbell_aperture_size = adev->doorbell.size;
        doorbell_aperture_size =
                rounddown(doorbell_aperture_size,
                          amdgpu_mes_doorbell_process_slice(adev));

        if (doorbell_aperture_size > doorbell_start_offset)
                doorbell_process_limit =
                        (doorbell_aperture_size - doorbell_start_offset) /
                        amdgpu_mes_doorbell_process_slice(adev);
        else
                return -ENOSPC;

        adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
        adev->mes.max_doorbell_slices = doorbell_process_limit;

        DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
        return 0;
}

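/*
 * One-time MES bring-up: set up the id allocators, hqd masks and the
 * writeback slots used for the scheduler context, the query-status
 * fence and register reads.
 */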
int amdgpu_mes_init(struct amdgpu_device *adev)
{
        int i, r;

        adev->mes.adev = adev;

        idr_init(&adev->mes.pasid_idr);
        idr_init(&adev->mes.gang_id_idr);
        idr_init(&adev->mes.queue_id_idr);
        ida_init(&adev->mes.doorbell_ida);
        spin_lock_init(&adev->mes.queue_id_lock);
        mutex_init(&adev->mes.mutex_hidden);

        adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
        adev->mes.vmid_mask_mmhub = 0xffffff00;
        adev->mes.vmid_mask_gfxhub = 0xffffff00;

        for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
                /* use only 1st MEC pipes */
                if (i >= 4)
                        continue;
                adev->mes.compute_hqd_mask[i] = 0xc;
        }

        for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
                adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

        for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
                if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
                        adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
                else
                        adev->mes.sdma_hqd_mask[i] = 0xfc;
        }

        for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
                adev->mes.agreegated_doorbells[i] = 0xffffffff;

        r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
        if (r) {
                dev_err(adev->dev,
                        "(%d) ring trail_fence_offs wb alloc failed\n", r);
                goto error_ids;
        }
        adev->mes.sch_ctx_gpu_addr =
                adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
        adev->mes.sch_ctx_ptr =
                (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

        r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
        if (r) {
                amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
                dev_err(adev->dev,
                        "(%d) query_status_fence_offs wb alloc failed\n", r);
                goto error_ids;
        }
        adev->mes.query_status_fence_gpu_addr =
                adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
        adev->mes.query_status_fence_ptr =
                (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

        r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
        if (r) {
                amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
                amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
                dev_err(adev->dev,
                        "(%d) read_val_offs alloc failed\n", r);
                goto error_ids;
        }
        adev->mes.read_val_gpu_addr =
                adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
        adev->mes.read_val_ptr =
                (uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

        r = amdgpu_mes_doorbell_init(adev);
        if (r)
                goto error;

        return 0;

error:
        amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
        amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
        amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
        idr_destroy(&adev->mes.pasid_idr);
        idr_destroy(&adev->mes.gang_id_idr);
        idr_destroy(&adev->mes.queue_id_idr);
        ida_destroy(&adev->mes.doorbell_ida);
        mutex_destroy(&adev->mes.mutex_hidden);
        return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
        amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
        amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
        amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

        idr_destroy(&adev->mes.pasid_idr);
        idr_destroy(&adev->mes.gang_id_idr);
        idr_destroy(&adev->mes.queue_id_idr);
        ida_destroy(&adev->mes.doorbell_ida);
        mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
        amdgpu_bo_free_kernel(&q->mqd_obj,
                              &q->mqd_gpu_addr,
                              &q->mqd_cpu_ptr);
}

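/*
 * Create a MES process for the given pasid: allocate the per-process
 * doorbell slice and bitmap, the process context BO, and register the
 * process in the pasid idr.
 */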
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
                              struct amdgpu_vm *vm)
{
        struct amdgpu_mes_process *process;
        int r;

        /* allocate the mes process buffer */
        process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
        if (!process) {
                DRM_ERROR("no more memory to create mes process\n");
                return -ENOMEM;
        }

        process->doorbell_bitmap =
                kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
                                     BITS_PER_BYTE), GFP_KERNEL);
        if (!process->doorbell_bitmap) {
                DRM_ERROR("failed to allocate doorbell bitmap\n");
                kfree(process);
                return -ENOMEM;
        }

        /* allocate the process context bo and map it */
        r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_GTT,
                                    &process->proc_ctx_bo,
                                    &process->proc_ctx_gpu_addr,
                                    &process->proc_ctx_cpu_ptr);
        if (r) {
                DRM_ERROR("failed to allocate process context bo\n");
                goto clean_up_memory;
        }
        memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

        /*
         * Avoid taking any other locks under MES lock to avoid circular
         * lock dependencies.
         */
        amdgpu_mes_lock(&adev->mes);

        /* add the mes process to idr list */
        r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
                      GFP_KERNEL);
        if (r < 0) {
                DRM_ERROR("failed to lock pasid=%d\n", pasid);
                goto clean_up_ctx;
        }

        /* allocate the starting doorbell index of the process */
        r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
        if (r < 0) {
                DRM_ERROR("failed to allocate doorbell for process\n");
                goto clean_up_pasid;
        }

        DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);

        INIT_LIST_HEAD(&process->gang_list);
        process->vm = vm;
        process->pasid = pasid;
        process->process_quantum = adev->mes.default_process_quantum;
        process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

        amdgpu_mes_unlock(&adev->mes);
        return 0;

clean_up_pasid:
        idr_remove(&adev->mes.pasid_idr, pasid);
        amdgpu_mes_unlock(&adev->mes);
clean_up_ctx:
        amdgpu_bo_free_kernel(&process->proc_ctx_bo,
                              &process->proc_ctx_gpu_addr,
                              &process->proc_ctx_cpu_ptr);
clean_up_memory:
        kfree(process->doorbell_bitmap);
        kfree(process);
        return r;
}

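/*
 * Tear down a MES process: unmap every queue of every gang from the
 * hardware under the MES lock, then free all process memory outside
 * of it.
 */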
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
        struct amdgpu_mes_process *process;
        struct amdgpu_mes_gang *gang, *tmp1;
        struct amdgpu_mes_queue *queue, *tmp2;
        struct mes_remove_queue_input queue_input;
        unsigned long flags;
        int r;

        /*
         * Avoid taking any other locks under MES lock to avoid circular
         * lock dependencies.
         */
        amdgpu_mes_lock(&adev->mes);

        process = idr_find(&adev->mes.pasid_idr, pasid);
        if (!process) {
                DRM_WARN("pasid %d doesn't exist\n", pasid);
                amdgpu_mes_unlock(&adev->mes);
                return;
        }

        /* Remove all queues from hardware */
        list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
                list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
                        spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
                        idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
                        spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

                        queue_input.doorbell_offset = queue->doorbell_off;
                        queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

                        r = adev->mes.funcs->remove_hw_queue(&adev->mes,
                                                             &queue_input);
                        if (r)
                                DRM_WARN("failed to remove hardware queue\n");
                }

                idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
        }

        amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
        idr_remove(&adev->mes.pasid_idr, pasid);
        amdgpu_mes_unlock(&adev->mes);

        /* free all memory allocated by the process */
        list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
                /* free all queues in the gang */
                list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
                        amdgpu_mes_queue_free_mqd(queue);
                        list_del(&queue->list);
                        kfree(queue);
                }
                amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
                                      &gang->gang_ctx_gpu_addr,
                                      &gang->gang_ctx_cpu_ptr);
                list_del(&gang->list);
                kfree(gang);
        }

        amdgpu_bo_free_kernel(&process->proc_ctx_bo,
                              &process->proc_ctx_gpu_addr,
                              &process->proc_ctx_cpu_ptr);
        kfree(process->doorbell_bitmap);
        kfree(process);
}

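/*
 * Create a gang (a group of queues scheduled as one unit) inside an
 * existing process and publish it in the gang_id idr.
 */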
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
                        struct amdgpu_mes_gang_properties *gprops,
                        int *gang_id)
{
        struct amdgpu_mes_process *process;
        struct amdgpu_mes_gang *gang;
        int r;

        /* allocate the mes gang buffer */
        gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
        if (!gang) {
                return -ENOMEM;
        }

        /* allocate the gang context bo and map it to cpu space */
        r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_GTT,
                                    &gang->gang_ctx_bo,
                                    &gang->gang_ctx_gpu_addr,
                                    &gang->gang_ctx_cpu_ptr);
        if (r) {
                DRM_ERROR("failed to allocate gang context bo\n");
                goto clean_up_mem;
        }
        memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

        /*
         * Avoid taking any other locks under MES lock to avoid circular
         * lock dependencies.
         */
        amdgpu_mes_lock(&adev->mes);

        process = idr_find(&adev->mes.pasid_idr, pasid);
        if (!process) {
                DRM_ERROR("pasid %d doesn't exist\n", pasid);
                r = -EINVAL;
                goto clean_up_ctx;
        }

        /* add the mes gang to idr list */
        r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
                      GFP_KERNEL);
        if (r < 0) {
                DRM_ERROR("failed to allocate idr for gang\n");
                goto clean_up_ctx;
        }

        gang->gang_id = r;
        *gang_id = r;

        INIT_LIST_HEAD(&gang->queue_list);
        gang->process = process;
        gang->priority = gprops->priority;
        gang->gang_quantum = gprops->gang_quantum ?
                gprops->gang_quantum : adev->mes.default_gang_quantum;
        gang->global_priority_level = gprops->global_priority_level;
        gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
        list_add_tail(&gang->list, &process->gang_list);

        amdgpu_mes_unlock(&adev->mes);
        return 0;

clean_up_ctx:
        amdgpu_mes_unlock(&adev->mes);
        amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
                              &gang->gang_ctx_gpu_addr,
                              &gang->gang_ctx_cpu_ptr);
clean_up_mem:
        kfree(gang);
        return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
        struct amdgpu_mes_gang *gang;

        /*
         * Avoid taking any other locks under MES lock to avoid circular
         * lock dependencies.
         */
        amdgpu_mes_lock(&adev->mes);

        gang = idr_find(&adev->mes.gang_id_idr, gang_id);
        if (!gang) {
                DRM_ERROR("gang id %d doesn't exist\n", gang_id);
                amdgpu_mes_unlock(&adev->mes);
                return -EINVAL;
        }

        if (!list_empty(&gang->queue_list)) {
                DRM_ERROR("queue list is not empty\n");
                amdgpu_mes_unlock(&adev->mes);
                return -EBUSY;
        }

        idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
        list_del(&gang->list);
        amdgpu_mes_unlock(&adev->mes);

        amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
                              &gang->gang_ctx_gpu_addr,
                              &gang->gang_ctx_cpu_ptr);
        kfree(gang);

        return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
        struct idr *idp;
        struct amdgpu_mes_process *process;
        struct amdgpu_mes_gang *gang;
        struct mes_suspend_gang_input input;
        int r, pasid;

        /*
         * Avoid taking any other locks under MES lock to avoid circular
         * lock dependencies.
         */
        amdgpu_mes_lock(&adev->mes);

        idp = &adev->mes.pasid_idr;

        idr_for_each_entry(idp, process, pasid) {
                list_for_each_entry(gang, &process->gang_list, list) {
                        r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
                        if (r)
                                DRM_ERROR("failed to suspend pasid %d gangid %d",
                                          pasid, gang->gang_id);
                }
        }

        amdgpu_mes_unlock(&adev->mes);
        return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
        struct idr *idp;
        struct amdgpu_mes_process *process;
        struct amdgpu_mes_gang *gang;
        struct mes_resume_gang_input input;
        int r, pasid;

        /*
         * Avoid taking any other locks under MES lock to avoid circular
         * lock dependencies.
         */
        amdgpu_mes_lock(&adev->mes);

        idp = &adev->mes.pasid_idr;

        idr_for_each_entry(idp, process, pasid) {
                list_for_each_entry(gang, &process->gang_list, list) {
                        r = adev->mes.funcs->resume_gang(&adev->mes, &input);
                        if (r)
                                DRM_ERROR("failed to resume pasid %d gangid %d",
                                          pasid, gang->gang_id);
                }
        }

        amdgpu_mes_unlock(&adev->mes);
        return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
                                      struct amdgpu_mes_queue *q,
                                      struct amdgpu_mes_queue_properties *p)
{
        struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
        u32 mqd_size = mqd_mgr->mqd_size;
        int r;

        r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_GTT,
                                    &q->mqd_obj,
                                    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
        if (r) {
                dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
                return r;
        }
        memset(q->mqd_cpu_ptr, 0, mqd_size);

        r = amdgpu_bo_reserve(q->mqd_obj, false);
        if (unlikely(r != 0))
                goto clean_up;

        return 0;

clean_up:
        amdgpu_bo_free_kernel(&q->mqd_obj,
                              &q->mqd_gpu_addr,
                              &q->mqd_cpu_ptr);
        return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
                                      struct amdgpu_mes_queue *q,
                                      struct amdgpu_mes_queue_properties *p)
{
        struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
        struct amdgpu_mqd_prop mqd_prop = {0};

        mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
        mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
        mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
        mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
        mqd_prop.queue_size = p->queue_size;
        mqd_prop.use_doorbell = true;
        mqd_prop.doorbell_index = p->doorbell_off;
        mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
        mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
        mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
        mqd_prop.hqd_active = false;

        mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

        amdgpu_bo_unreserve(q->mqd_obj);
}

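/*
 * Create a hardware queue in an existing gang: allocate the MQD and a
 * doorbell, then hand the queue to the MES firmware via add_hw_queue.
 */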
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
                            struct amdgpu_mes_queue_properties *qprops,
                            int *queue_id)
{
        struct amdgpu_mes_queue *queue;
        struct amdgpu_mes_gang *gang;
        struct mes_add_queue_input queue_input;
        unsigned long flags;
        int r;

        /* allocate the mes queue buffer */
        queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
        if (!queue) {
                DRM_ERROR("Failed to allocate memory for queue\n");
                return -ENOMEM;
        }

        /* Allocate the queue mqd */
        r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
        if (r)
                goto clean_up_memory;

        /*
         * Avoid taking any other locks under MES lock to avoid circular
         * lock dependencies.
         */
        amdgpu_mes_lock(&adev->mes);

        gang = idr_find(&adev->mes.gang_id_idr, gang_id);
        if (!gang) {
                DRM_ERROR("gang id %d doesn't exist\n", gang_id);
                r = -EINVAL;
                goto clean_up_mqd;
        }

        /* add the mes gang to idr list */
        spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
        r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
                      GFP_ATOMIC);
        if (r < 0) {
                spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
                goto clean_up_mqd;
        }
        spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
        *queue_id = queue->queue_id = r;

        /* allocate a doorbell index for the queue */
        r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
                                          qprops->queue_type,
                                          &qprops->doorbell_off);
        if (r)
                goto clean_up_queue_id;

        /* initialize the queue mqd */
        amdgpu_mes_queue_init_mqd(adev, queue, qprops);

        /* add hw queue to mes */
        queue_input.process_id = gang->process->pasid;

        queue_input.page_table_base_addr =
                adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
                adev->gmc.vram_start;

        queue_input.process_va_start = 0;
        queue_input.process_va_end =
                (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
        queue_input.process_quantum = gang->process->process_quantum;
        queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
        queue_input.gang_quantum = gang->gang_quantum;
        queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
        queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
        queue_input.gang_global_priority_level = gang->global_priority_level;
        queue_input.doorbell_offset = qprops->doorbell_off;
        queue_input.mqd_addr = queue->mqd_gpu_addr;
        queue_input.wptr_addr = qprops->wptr_gpu_addr;
        queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
        queue_input.queue_type = qprops->queue_type;
        queue_input.paging = qprops->paging;
        queue_input.is_kfd_process = 0;

        r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
        if (r) {
                DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
                          qprops->doorbell_off);
                goto clean_up_doorbell;
        }

        DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
                  "queue type=%d, doorbell=0x%llx\n",
                  gang->process->pasid, gang_id, qprops->queue_type,
                  qprops->doorbell_off);

        queue->ring = qprops->ring;
        queue->doorbell_off = qprops->doorbell_off;
        queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
        queue->queue_type = qprops->queue_type;
        queue->paging = qprops->paging;
        queue->gang = gang;
        list_add_tail(&queue->list, &gang->queue_list);

        amdgpu_mes_unlock(&adev->mes);
        return 0;

clean_up_doorbell:
        amdgpu_mes_queue_doorbell_free(adev, gang->process,
                                       qprops->doorbell_off);
clean_up_queue_id:
        spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
        idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
        spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
        amdgpu_mes_unlock(&adev->mes);
        amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
        kfree(queue);
        return r;
}

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
        unsigned long flags;
        struct amdgpu_mes_queue *queue;
        struct amdgpu_mes_gang *gang;
        struct mes_remove_queue_input queue_input;
        int r;

        /*
         * Avoid taking any other locks under MES lock to avoid circular
         * lock dependencies.
         */
        amdgpu_mes_lock(&adev->mes);

        /* remove the mes gang from idr list */
        spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

        queue = idr_find(&adev->mes.queue_id_idr, queue_id);
        if (!queue) {
                spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
                amdgpu_mes_unlock(&adev->mes);
                DRM_ERROR("queue id %d doesn't exist\n", queue_id);
                return -EINVAL;
        }

        idr_remove(&adev->mes.queue_id_idr, queue_id);
        spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

        DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
                  queue->doorbell_off);

        gang = queue->gang;
        queue_input.doorbell_offset = queue->doorbell_off;
        queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

        r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
        if (r)
                DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
                          queue_id);

        list_del(&queue->list);
        amdgpu_mes_queue_doorbell_free(adev, gang->process,
                                       queue->doorbell_off);
        amdgpu_mes_unlock(&adev->mes);

        amdgpu_mes_queue_free_mqd(queue);
        kfree(queue);
        return 0;
}

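/*
 * Unmap a kernel ring that was mapped outside of MES (a "legacy"
 * queue). The firmware signals completion by writing trail_fence_data
 * to trail_fence_addr.
 */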
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring,
                                  enum amdgpu_unmap_queues_action action,
                                  u64 gpu_addr, u64 seq)
{
        struct mes_unmap_legacy_queue_input queue_input;
        int r;

        amdgpu_mes_lock(&adev->mes);

        queue_input.action = action;
        queue_input.queue_type = ring->funcs->type;
        queue_input.doorbell_offset = ring->doorbell_index;
        queue_input.pipe_id = ring->pipe;
        queue_input.queue_id = ring->queue;
        queue_input.trail_fence_addr = gpu_addr;
        queue_input.trail_fence_data = seq;

        r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
        if (r)
                DRM_ERROR("failed to unmap legacy queue\n");

        amdgpu_mes_unlock(&adev->mes);
        return r;
}

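/*
 * Register access helpers routed through the MES firmware's misc_op
 * interface instead of direct MMIO.
 */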
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        struct mes_misc_op_input op_input;
        int r, val = 0;

        amdgpu_mes_lock(&adev->mes);

        op_input.op = MES_MISC_OP_READ_REG;
        op_input.read_reg.reg_offset = reg;
        op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

        if (!adev->mes.funcs->misc_op) {
                DRM_ERROR("mes rreg is not supported!\n");
                goto error;
        }

        r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
        if (r)
                DRM_ERROR("failed to read reg (0x%x)\n", reg);
        else
                val = *(adev->mes.read_val_ptr);

error:
        amdgpu_mes_unlock(&adev->mes);
        return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
                    uint32_t reg, uint32_t val)
{
        struct mes_misc_op_input op_input;
        int r;

        amdgpu_mes_lock(&adev->mes);

        op_input.op = MES_MISC_OP_WRITE_REG;
        op_input.write_reg.reg_offset = reg;
        op_input.write_reg.reg_value = val;

        if (!adev->mes.funcs->misc_op) {
                DRM_ERROR("mes wreg is not supported!\n");
                r = -EINVAL;
                goto error;
        }

        r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
        if (r)
                DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
        amdgpu_mes_unlock(&adev->mes);
        return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
                                  uint32_t reg0, uint32_t reg1,
                                  uint32_t ref, uint32_t mask)
{
        struct mes_misc_op_input op_input;
        int r;

        amdgpu_mes_lock(&adev->mes);

        op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
        op_input.wrm_reg.reg0 = reg0;
        op_input.wrm_reg.reg1 = reg1;
        op_input.wrm_reg.ref = ref;
        op_input.wrm_reg.mask = mask;

        if (!adev->mes.funcs->misc_op) {
                DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
                r = -EINVAL;
                goto error;
        }

        r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
        if (r)
                DRM_ERROR("failed to reg_write_reg_wait\n");

error:
        amdgpu_mes_unlock(&adev->mes);
        return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
                        uint32_t val, uint32_t mask)
{
        struct mes_misc_op_input op_input;
        int r;

        amdgpu_mes_lock(&adev->mes);

        op_input.op = MES_MISC_OP_WRM_REG_WAIT;
        op_input.wrm_reg.reg0 = reg;
        op_input.wrm_reg.ref = val;
        op_input.wrm_reg.mask = mask;

        if (!adev->mes.funcs->misc_op) {
                DRM_ERROR("mes reg wait is not supported!\n");
                r = -EINVAL;
                goto error;
        }

        r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
        if (r)
                DRM_ERROR("failed to reg_wait\n");

error:
        amdgpu_mes_unlock(&adev->mes);
        return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
                               struct amdgpu_ring *ring,
                               struct amdgpu_mes_queue_properties *props)
{
        props->queue_type = ring->funcs->type;
        props->hqd_base_gpu_addr = ring->gpu_addr;
        props->rptr_gpu_addr = ring->rptr_gpu_addr;
        props->wptr_gpu_addr = ring->wptr_gpu_addr;
        props->wptr_mc_addr =
                ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
        props->queue_size = ring->ring_size;
        props->eop_gpu_addr = ring->eop_gpu_addr;
        props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
        props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
        props->paging = false;
        props->ring = ring;
}

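/*
 * Resolve the byte offset of a ring's slot inside the context meta data
 * BO; the macro below expands the per-engine cases for
 * amdgpu_mes_ctx_get_offs().
 */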
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)                        \
do {                                                                    \
        if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)                          \
                return offsetof(struct amdgpu_mes_ctx_meta_data,        \
                                _eng[ring->idx].slots[id_offs]);        \
        else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)                   \
                return offsetof(struct amdgpu_mes_ctx_meta_data,        \
                                _eng[ring->idx].ring);                  \
        else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)                     \
                return offsetof(struct amdgpu_mes_ctx_meta_data,        \
                                _eng[ring->idx].ib);                    \
        else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)                \
                return offsetof(struct amdgpu_mes_ctx_meta_data,        \
                                _eng[ring->idx].padding);               \
} while(0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
        switch (ring->funcs->type) {
        case AMDGPU_RING_TYPE_GFX:
                DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
                break;
        case AMDGPU_RING_TYPE_COMPUTE:
                DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
                break;
        case AMDGPU_RING_TYPE_SDMA:
                DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
                break;
        default:
                break;
        }

        WARN_ON(1);
        return -EINVAL;
}

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
                        int queue_type, int idx,
                        struct amdgpu_mes_ctx_data *ctx_data,
                        struct amdgpu_ring **out)
{
        struct amdgpu_ring *ring;
        struct amdgpu_mes_gang *gang;
        struct amdgpu_mes_queue_properties qprops = {0};
        int r, queue_id, pasid;

        /*
         * Avoid taking any other locks under MES lock to avoid circular
         * lock dependencies.
         */
        amdgpu_mes_lock(&adev->mes);
        gang = idr_find(&adev->mes.gang_id_idr, gang_id);
        if (!gang) {
                DRM_ERROR("gang id %d doesn't exist\n", gang_id);
                amdgpu_mes_unlock(&adev->mes);
                return -EINVAL;
        }
        pasid = gang->process->pasid;

        ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
        if (!ring) {
                amdgpu_mes_unlock(&adev->mes);
                return -ENOMEM;
        }

        ring->ring_obj = NULL;
        ring->use_doorbell = true;
        ring->is_mes_queue = true;
        ring->mes_ctx = ctx_data;
        ring->idx = idx;
        ring->no_scheduler = true;

        if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
                int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
                                      compute[ring->idx].mec_hpd);
                ring->eop_gpu_addr =
                        amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
        }

        switch (queue_type) {
        case AMDGPU_RING_TYPE_GFX:
                ring->funcs = adev->gfx.gfx_ring[0].funcs;
                break;
        case AMDGPU_RING_TYPE_COMPUTE:
                ring->funcs = adev->gfx.compute_ring[0].funcs;
                break;
        case AMDGPU_RING_TYPE_SDMA:
                ring->funcs = adev->sdma.instance[0].ring.funcs;
                break;
        default:
                BUG();
        }

        r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
                             AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                goto clean_up_memory;

        amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

        dma_fence_wait(gang->process->vm->last_update, false);
        dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
        amdgpu_mes_unlock(&adev->mes);

        r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
        if (r)
                goto clean_up_ring;

        ring->hw_queue_id = queue_id;
        ring->doorbell_index = qprops.doorbell_off;

        if (queue_type == AMDGPU_RING_TYPE_GFX)
                sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
        else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
                sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
                        queue_id);
        else if (queue_type == AMDGPU_RING_TYPE_SDMA)
                sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
                        queue_id);
        else
                BUG();

        *out = ring;
        return 0;

clean_up_ring:
        amdgpu_ring_fini(ring);
clean_up_memory:
        kfree(ring);
        amdgpu_mes_unlock(&adev->mes);
        return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
                            struct amdgpu_ring *ring)
{
        if (!ring)
                return;

        amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
        amdgpu_ring_fini(ring);
        kfree(ring);
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
                                   struct amdgpu_mes_ctx_data *ctx_data)
{
        int r;

        r = amdgpu_bo_create_kernel(adev,
                                    sizeof(struct amdgpu_mes_ctx_meta_data),
                                    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                    &ctx_data->meta_data_obj,
                                    &ctx_data->meta_data_mc_addr,
                                    &ctx_data->meta_data_ptr);
        if (!ctx_data->meta_data_obj)
                return -ENOMEM;

        memset(ctx_data->meta_data_ptr, 0,
               sizeof(struct amdgpu_mes_ctx_meta_data));

        return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
        if (ctx_data->meta_data_obj)
                amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
                                      &ctx_data->meta_data_mc_addr,
                                      &ctx_data->meta_data_ptr);
}

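/*
 * Map the context meta data BO into the process VM at
 * meta_data_gpu_addr and wait for the page table updates to land.
 */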
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm,
                                 struct amdgpu_mes_ctx_data *ctx_data)
{
        struct amdgpu_bo_va *bo_va;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        struct amdgpu_bo_list_entry pd;
        struct ttm_validate_buffer csa_tv;
        struct amdgpu_sync sync;
        int r;

        amdgpu_sync_create(&sync);
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&csa_tv.head);

        csa_tv.bo = &ctx_data->meta_data_obj->tbo;
        csa_tv.num_shared = 1;

        list_add(&csa_tv.head, &list);
        amdgpu_vm_get_pd_bo(vm, &list, &pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r) {
                DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
                return r;
        }

        bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
        if (!bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for meta data BO\n");
                return -ENOMEM;
        }

        r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
                             sizeof(struct amdgpu_mes_ctx_meta_data),
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
                             AMDGPU_PTE_EXECUTABLE);

        if (r) {
                DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
                goto error;
        }

        r = amdgpu_vm_bo_update(adev, bo_va, false);
        if (r) {
                DRM_ERROR("failed to do vm_bo_update on meta data\n");
                goto error;
        }
        amdgpu_sync_fence(&sync, bo_va->last_pt_update);

        r = amdgpu_vm_update_pdes(adev, vm, false);
        if (r) {
                DRM_ERROR("failed to update pdes on meta data\n");
                goto error;
        }
        amdgpu_sync_fence(&sync, vm->last_update);

        amdgpu_sync_wait(&sync, false);
        ttm_eu_backoff_reservation(&ticket, &list);

        amdgpu_sync_free(&sync);
        ctx_data->meta_data_va = bo_va;
        return 0;

error:
        amdgpu_vm_bo_del(adev, bo_va);
        ttm_eu_backoff_reservation(&ticket, &list);
        amdgpu_sync_free(&sync);
        return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
                                                  int pasid, int *gang_id,
                                                  int queue_type, int num_queue,
                                                  struct amdgpu_ring **added_rings,
                                                  struct amdgpu_mes_ctx_data *ctx_data)
{
        struct amdgpu_ring *ring;
        struct amdgpu_mes_gang_properties gprops = {0};
        int r, j;

        /* create a gang for the process */
        gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
        gprops.gang_quantum = adev->mes.default_gang_quantum;
        gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
        gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
        gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

        r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
        if (r) {
                DRM_ERROR("failed to add gang\n");
                return r;
        }

        /* create queues for the gang */
        for (j = 0; j < num_queue; j++) {
                r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
                                        ctx_data, &ring);
                if (r) {
                        DRM_ERROR("failed to add ring\n");
                        break;
                }

                DRM_INFO("ring %s was added\n", ring->name);
                added_rings[j] = ring;
        }

        return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
        struct amdgpu_ring *ring;
        int i, r;

        for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
                ring = added_rings[i];
                if (!ring)
                        continue;

                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        DRM_DEV_ERROR(ring->adev->dev,
                                      "ring %s test failed (%d)\n",
                                      ring->name, r);
                        return r;
                } else
                        DRM_INFO("ring %s test pass\n", ring->name);

                r = amdgpu_ring_test_ib(ring, 1000 * 10);
                if (r) {
                        DRM_DEV_ERROR(ring->adev->dev,
                                      "ring %s ib test failed (%d)\n",
                                      ring->name, r);
                        return r;
                } else
                        DRM_INFO("ring %s ib test pass\n", ring->name);
        }

        return 0;
}

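/*
 * Self test: create a process and gangs with gfx/compute/sdma queues,
 * run ring and IB tests on each of them, then tear everything down.
 */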
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
        struct amdgpu_vm *vm = NULL;
        struct amdgpu_mes_ctx_data ctx_data = {0};
        struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
        int gang_ids[3] = {0};
        int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX,
                                   AMDGPU_MES_CTX_MAX_GFX_RINGS},
                                 { AMDGPU_RING_TYPE_COMPUTE,
                                   AMDGPU_MES_CTX_MAX_COMPUTE_RINGS},
                                 { AMDGPU_RING_TYPE_SDMA,
                                   AMDGPU_MES_CTX_MAX_SDMA_RINGS } };
        int i, r, pasid, k = 0;

        pasid = amdgpu_pasid_alloc(16);
        if (pasid < 0) {
                dev_warn(adev->dev, "No more PASIDs available!");
                pasid = 0;
        }

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm) {
                r = -ENOMEM;
                goto error_pasid;
        }

        r = amdgpu_vm_init(adev, vm);
        if (r) {
                DRM_ERROR("failed to initialize vm\n");
                goto error_pasid;
        }

        r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
        if (r) {
                DRM_ERROR("failed to alloc ctx meta data\n");
                goto error_pasid;
        }

        ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
        r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
        if (r) {
                DRM_ERROR("failed to map ctx meta data\n");
                goto error_vm;
        }

        r = amdgpu_mes_create_process(adev, pasid, vm);
        if (r) {
                DRM_ERROR("failed to create MES process\n");
                goto error_vm;
        }

        for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
                /* On GFX v10.3, fw hasn't supported to map sdma queue. */
                if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
                    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
                    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
                        continue;

                r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
                                                           &gang_ids[i],
                                                           queue_types[i][0],
                                                           queue_types[i][1],
                                                           &added_rings[k],
                                                           &ctx_data);
                if (r)
                        goto error_queues;

                k += queue_types[i][1];
        }

        /* start ring test and ib test for MES queues */
        amdgpu_mes_test_queues(added_rings);

error_queues:
        /* remove all queues */
        for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
                if (!added_rings[i])
                        continue;
                amdgpu_mes_remove_ring(adev, added_rings[i]);
        }

        for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
                if (!gang_ids[i])
                        continue;
                amdgpu_mes_remove_gang(adev, gang_ids[i]);
        }

        amdgpu_mes_destroy_process(adev, pasid);

error_vm:
        BUG_ON(amdgpu_bo_reserve(ctx_data.meta_data_obj, true));
        amdgpu_vm_bo_del(adev, ctx_data.meta_data_va);
        amdgpu_bo_unreserve(ctx_data.meta_data_obj);
        amdgpu_vm_fini(adev, vm);

error_pasid:
        if (pasid)
                amdgpu_pasid_free(pasid);

        amdgpu_mes_ctx_free_meta_data(&ctx_data);
        kfree(vm);
        return 0;
}