/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

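/*
 * Each MES process gets a doorbell slice big enough for its maximum queue
 * count (1024 queues * 8 bytes per doorbell), rounded up to the page size.
 */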
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

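/*
 * Allocate one kernel doorbell from the MES doorbell bitmap. SDMA queues
 * start the search at the SDMA doorbell range; everything else searches
 * from the beginning of the page.
 */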
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  struct amdgpu_mes_process *process,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    struct amdgpu_mes_process *process,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

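/*
 * The first AMDGPU_MES_PRIORITY_NUM_LEVELS doorbells of the page are
 * reserved as per-priority aggregated doorbells; the remainder are handed
 * out dynamically by amdgpu_mes_kernel_doorbell_get().
 */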
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

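/*
 * One-time MES bring-up: ID allocators and locks, per-pipe HQD masks,
 * writeback slots for the scheduler context, status-query fence and
 * register reads, plus the doorbell and event-log resources.
 */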
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) ring trail_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

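/*
 * Processes are tracked by PASID in pasid_idr. The per-process context
 * buffer lives in GTT and its GPU address is later passed to the MES
 * firmware when queues are added.
 */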
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to lock pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}

	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

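/*
 * Suspend/resume iterate over every gang of every process and issue one
 * MES firmware call per gang.
 */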
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

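/*
 * Adding a hardware queue: allocate and initialize the MQD, pick a queue
 * id and a kernel doorbell, then describe the process, gang and queue to
 * the MES firmware through struct mes_add_queue_input.
 */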
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes gang to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes gang from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

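/*
 * Register accesses routed through the MES firmware; register reads land
 * in the read_val writeback slot set up in amdgpu_mes_init().
 */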
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while(0)

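/*
 * Translate a slot/ring/ib/padding offset id into the corresponding byte
 * offset inside the ring's engine entry of the ctx meta data.
 */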
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	return -EINVAL;
}

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	amdgpu_ring_fini(ring);
clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

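/*
 * Self-test helpers: create a test process with one gang per queue type,
 * then run ring and IB tests on every ring that was added.
 */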
static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1} };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_fini;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, fw hasn't supported to map sdma queue. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

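/*
 * Fetch the MES firmware for the given pipe. On GC 11+ the two-pipe
 * naming scheme is tried first; for the scheduler pipe the code can fall
 * back to the legacy single "_mes.bin" name.
 */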
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;

out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{

#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);

#endif
}