/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

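/*
 * The MES (MicroEngine Scheduler) firmware schedules hardware queues on
 * behalf of the driver.  Each process gets a fixed slice of the doorbell
 * BAR: one 8-byte doorbell per possible queue, rounded up to a whole
 * page by the helper below.
 */
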
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

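/*
 * Kernel doorbell bookkeeping: doorbells for MES-owned kernel queues are
 * handed out from a per-device bitmap.  SDMA queues start searching at
 * the first SDMA engine's doorbell index so they land in the range the
 * firmware expects; all other queue types search from slot 0.
 */
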
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  struct amdgpu_mes_process *process,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    struct amdgpu_mes_process *process,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

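/*
 * amdgpu_mes_init() prepares everything the MES front-end needs before
 * the IP-specific backend starts: the pasid/gang/queue IDR maps, the
 * scheduler-context and fence writeback slots, and the kernel doorbell
 * bitmap.  Errors unwind in reverse order of setup.
 */
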
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

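/*
 * MES objects form a three-level hierarchy: a process (one per PASID)
 * owns gangs, and a gang owns hardware queues.  Callers build them
 * top-down, roughly as follows (a sketch; see amdgpu_mes_self_test()
 * below for a complete example):
 *
 *	r = amdgpu_mes_create_process(adev, pasid, vm);
 *	r = amdgpu_mes_add_gang(adev, pasid, &gprops, &gang_id);
 *	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
 */
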
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to lock pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

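/*
 * Process teardown is two-phased: hardware queues are removed and IDR
 * entries dropped while holding the MES lock, then the backing memory
 * is freed only after the lock is released, so no allocator or BO
 * calls happen under the MES lock.
 */
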
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
	kfree(gang);

	return 0;
}

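/*
 * Suspend/resume walk every gang of every process and ask the firmware
 * to stop or restart scheduling it.  Note that the input structures are
 * currently passed to the backend without being filled in here.
 */
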
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

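/*
 * Adding a hardware queue stitches the helpers above together: allocate
 * the queue and its MQD, then under the MES lock reserve a queue id and
 * a doorbell, initialize the MQD, and hand the full description to the
 * firmware through mes->funcs->add_hw_queue().
 */
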
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes gang to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes gang from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

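/*
 * The helpers below tunnel privileged register accesses through the MES
 * firmware's misc_op interface, for cases where the CPU should not (or
 * cannot) touch the register directly.  Illustrative use (a sketch, not
 * a real call site):
 *
 *	uint32_t v = amdgpu_mes_rreg(adev, reg);
 *	amdgpu_mes_wreg(adev, reg, v | 0x1);
 *	amdgpu_mes_reg_wait(adev, reg, 0x1, 0x1);
 */
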
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;
	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

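/*
 * All per-ring MES state (fence slots, ring buffer, IB, padding) lives
 * in a single amdgpu_mes_ctx_meta_data BO.  The macro below converts a
 * (ring, offset id) pair into a byte offset within that structure.
 */
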
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while(0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

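/*
 * amdgpu_mes_add_ring() builds a software ring on top of a MES hardware
 * queue: the ring borrows funcs/me/pipe from the first ring of the
 * matching engine type, points its state at the shared ctx meta-data
 * BO, and is then registered with the firmware via
 * amdgpu_mes_add_hw_queue().
 */
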
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock has already been dropped on this path */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

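/*
 * Context meta-data lifecycle, as exercised by the self test below:
 * amdgpu_mes_ctx_alloc_meta_data() creates the BO,
 * amdgpu_mes_ctx_map_meta_data() maps it into the process VM at
 * ctx_data->meta_data_gpu_addr, and the unmap/free pair below tears
 * both steps down again.
 */
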
int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r = 0;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

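/*
 * The self test builds a throwaway process with one gang per engine
 * type (GFX, compute, SDMA), runs ring and IB tests on every queue,
 * then tears everything down.  It doubles as reference usage for the
 * API above.
 */
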
static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1} };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_fini;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_fini;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware cannot map SDMA queues yet. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_fini:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

	if (vm)
		amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

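/*
 * Firmware naming: on GC 11+ the scheduler pipe loads
 * "amdgpu/<prefix>_mes_2.bin" and the KIQ pipe "amdgpu/<prefix>_mes1.bin",
 * with a fallback to the legacy "amdgpu/<prefix>_mes.bin" name if the
 * _2 variant is missing; older parts use the legacy names directly.
 */
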
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[40];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}