/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
#include "soc15_common.h"
#include "soc21.h"
#include "gfx_v11_0.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
#include "v11_structs.h"
#include "mes_v11_api_def.h"

MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes1.bin");

static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block);
static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block);
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);

#define MES_EOP_SIZE 2048
#define GFX_MES_DRAM_SIZE 0x80000

static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
			     ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG();
	}
}

static u64 mes_v11_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	return *ring->rptr_cpu_addr;
}

static u64 mes_v11_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	u64 wptr;

	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
	else
		BUG();
	return wptr;
}

static const struct amdgpu_ring_funcs mes_v11_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_MES,
	.align_mask = 1,
	.nop = 0,
	.support_64bit_ptrs = true,
	.get_rptr = mes_v11_0_ring_get_rptr,
	.get_wptr = mes_v11_0_ring_get_wptr,
	.set_wptr = mes_v11_0_ring_set_wptr,
	.insert_nop = amdgpu_ring_insert_nop,
};

/* Indices mirror the MES_SCH_API_* opcode enum in mes_v11_api_def.h
 * ("CHANGE_GANG_PRORITY" intentionally matches the API's spelling). */
static const char *mes_v11_0_opcodes[] = {
	"SET_HW_RSRC",
	"SET_SCHEDULING_CONFIG",
	"ADD_QUEUE",
	"REMOVE_QUEUE",
	"PERFORM_YIELD",
	"SET_GANG_PRIORITY_LEVEL",
	"SUSPEND",
	"RESUME",
	"RESET",
	"SET_LOG_BUFFER",
	"CHANGE_GANG_PRORITY",
	"QUERY_SCHEDULER_STATUS",
	"PROGRAM_GDS",
	"SET_DEBUG_VMID",
	"MISC",
	"UPDATE_ROOT_PAGE_TABLE",
	"AMD_LOG",
	"unused",
	"unused",
	"SET_HW_RSRC_1",
};

static const char *mes_v11_0_misc_opcodes[] = {
	"WRITE_REG",
	"INQUIRE_MEM_SIZE",
	"READ_REG",
	"WAIT_REG_MEM",
	"SET_SHADER_DEBUGGER",
};

static const char *mes_v11_0_get_op_string(union MESAPI__MISC *x_pkt)
{
	const char *op_str = NULL;

	if (x_pkt->header.opcode < ARRAY_SIZE(mes_v11_0_opcodes))
		op_str = mes_v11_0_opcodes[x_pkt->header.opcode];

	return op_str;
}

static const char *mes_v11_0_get_misc_op_string(union MESAPI__MISC *x_pkt)
{
	const char *op_str = NULL;

	if ((x_pkt->header.opcode == MES_SCH_API_MISC) &&
	    (x_pkt->opcode < ARRAY_SIZE(mes_v11_0_misc_opcodes)))
		op_str = mes_v11_0_misc_opcodes[x_pkt->opcode];

	return op_str;
}

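/*
 * Submit a MES API packet and poll for its completion: the caller's packet
 * gets its api_status fence pointed at a scratch writeback slot, and a
 * QUERY_SCHEDULER_STATUS packet is chained behind it as a ring fence, so
 * both values can be polled until the firmware responds or the timeout
 * (stretched for emulation and SR-IOV) expires.
 */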
static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
						    void *pkt, int size,
						    int api_status_off)
{
	union MESAPI__QUERY_MES_STATUS mes_status_pkt;
	signed long timeout = 2100000; /* 2100 ms */
	struct amdgpu_device *adev = mes->adev;
	struct amdgpu_ring *ring = &mes->ring[0];
	struct MES_API_STATUS *api_status;
	union MESAPI__MISC *x_pkt = pkt;
	const char *op_str, *misc_op_str;
	unsigned long flags;
	u64 status_gpu_addr;
	u32 seq, status_offset;
	u64 *status_ptr;
	signed long r;
	int ret;

	if (x_pkt->header.opcode >= MES_SCH_API_MAX)
		return -EINVAL;

	if (amdgpu_emu_mode) {
		timeout *= 100;
	} else if (amdgpu_sriov_vf(adev)) {
		/* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */
		timeout = 15 * 600 * 1000;
	}

	ret = amdgpu_device_wb_get(adev, &status_offset);
	if (ret)
		return ret;

	status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4);
	status_ptr = (u64 *)&adev->wb.wb[status_offset];
	*status_ptr = 0;

	spin_lock_irqsave(&mes->ring_lock[0], flags);
	r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
	if (r)
		goto error_unlock_free;

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		goto error_undo;

	api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
	api_status->api_completion_fence_addr = status_gpu_addr;
	api_status->api_completion_fence_value = 1;

	amdgpu_ring_write_multiple(ring, pkt, size / 4);

	memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
	mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
	mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
	mes_status_pkt.api_status.api_completion_fence_addr =
		ring->fence_drv.gpu_addr;
	mes_status_pkt.api_status.api_completion_fence_value = seq;

	amdgpu_ring_write_multiple(ring, &mes_status_pkt,
				   sizeof(mes_status_pkt) / 4);

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&mes->ring_lock[0], flags);

	op_str = mes_v11_0_get_op_string(x_pkt);
	misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);

	if (misc_op_str)
		dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
			misc_op_str);
	else if (op_str)
		dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
	else
		dev_dbg(adev->dev, "MES msg=%d was emitted\n",
			x_pkt->header.opcode);

	r = amdgpu_fence_wait_polling(ring, seq, timeout);
	if (r < 1 || !*status_ptr) {
		if (misc_op_str)
			dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
				op_str, misc_op_str);
		else if (op_str)
			dev_err(adev->dev, "MES failed to respond to msg=%s\n",
				op_str);
		else
			dev_err(adev->dev, "MES failed to respond to msg=%d\n",
				x_pkt->header.opcode);

		while (halt_if_hws_hang)
			schedule();

		r = -ETIMEDOUT;
		goto error_wb_free;
	}

	amdgpu_device_wb_free(adev, status_offset);
	return 0;

error_undo:
	dev_err(adev->dev, "MES ring buffer is full.\n");
	amdgpu_ring_undo(ring);

error_unlock_free:
	spin_unlock_irqrestore(&mes->ring_lock[0], flags);

error_wb_free:
	amdgpu_device_wb_free(adev, status_offset);
	return r;
}

static int convert_to_mes_queue_type(int queue_type)
{
	if (queue_type == AMDGPU_RING_TYPE_GFX)
		return MES_QUEUE_TYPE_GFX;
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		return MES_QUEUE_TYPE_COMPUTE;
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		return MES_QUEUE_TYPE_SDMA;
	else
		BUG();
	return -1;
}

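/*
 * Translate a mes_add_queue_input into an ADD_QUEUE API packet: process and
 * gang context, scheduling quanta, doorbell, MQD and wptr addresses, so the
 * MES firmware schedules the new queue itself.
 */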
static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
				  struct mes_add_queue_input *input)
{
	struct amdgpu_device *adev = mes->adev;
	union MESAPI__ADD_QUEUE mes_add_queue_pkt;
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
	uint32_t vm_cntx_cntl = hub->vm_cntx_cntl;

	memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));

	mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE;
	mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_add_queue_pkt.process_id = input->process_id;
	mes_add_queue_pkt.page_table_base_addr = input->page_table_base_addr;
	mes_add_queue_pkt.process_va_start = input->process_va_start;
	mes_add_queue_pkt.process_va_end = input->process_va_end;
	mes_add_queue_pkt.process_quantum = input->process_quantum;
	mes_add_queue_pkt.process_context_addr = input->process_context_addr;
	mes_add_queue_pkt.gang_quantum = input->gang_quantum;
	mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
	mes_add_queue_pkt.inprocess_gang_priority =
		input->inprocess_gang_priority;
	mes_add_queue_pkt.gang_global_priority_level =
		input->gang_global_priority_level;
	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_add_queue_pkt.mqd_addr = input->mqd_addr;

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 2)
		mes_add_queue_pkt.wptr_addr = input->wptr_mc_addr;
	else
		mes_add_queue_pkt.wptr_addr = input->wptr_addr;

	mes_add_queue_pkt.queue_type =
		convert_to_mes_queue_type(input->queue_type);
	mes_add_queue_pkt.paging = input->paging;
	mes_add_queue_pkt.vm_context_cntl = vm_cntx_cntl;
	mes_add_queue_pkt.gws_base = input->gws_base;
	mes_add_queue_pkt.gws_size = input->gws_size;
	mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
	mes_add_queue_pkt.tma_addr = input->tma_addr;
	mes_add_queue_pkt.trap_en = input->trap_en;
	mes_add_queue_pkt.skip_process_ctx_clear = input->skip_process_ctx_clear;
	mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;

	/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
	mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
	mes_add_queue_pkt.gds_size = input->queue_size;

	mes_add_queue_pkt.exclusively_scheduled = input->exclusively_scheduled;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
			offsetof(union MESAPI__ADD_QUEUE, api_status));
}

static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
				     struct mes_remove_queue_input *input)
{
	union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;

	memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));

	mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
	mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
			offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}

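/*
 * Reset a queue directly through MMIO instead of a MES packet: request the
 * reset via the CP or SDMA registers under RLC safe mode and poll until the
 * queue reports inactive (or, for SDMA, until the reset request clears).
 */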
static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,
				      uint32_t me_id, uint32_t pipe_id,
				      uint32_t queue_id, uint32_t vmid)
{
	struct amdgpu_device *adev = mes->adev;
	uint32_t value, reg;
	int i, r = 0;

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	if (queue_type == AMDGPU_RING_TYPE_GFX) {
		dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n",
			 me_id, pipe_id, queue_id, vmid);

		mutex_lock(&adev->gfx.reset_sem_mutex);
		gfx_v11_0_request_gfx_index_mutex(adev, true);
		/* all se allow writes */
		WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX,
			     (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
		value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
		if (pipe_id == 0)
			value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
		else
			value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
		WREG32_SOC15(GC, 0, regCP_VMID_RESET, value);
		gfx_v11_0_request_gfx_index_mutex(adev, false);
		mutex_unlock(&adev->gfx.reset_sem_mutex);

		mutex_lock(&adev->srbm_mutex);
		soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
		/* wait till dequeue takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout) {
			dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
			r = -ETIMEDOUT;
		}

		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n",
			 me_id, pipe_id, queue_id);
		mutex_lock(&adev->srbm_mutex);
		soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
		WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);

		/* wait till dequeue takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout) {
			dev_err(adev->dev, "failed to wait on hqd deactivate\n");
			r = -ETIMEDOUT;
		}
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else if (queue_type == AMDGPU_RING_TYPE_SDMA) {
		dev_info(adev->dev, "reset sdma queue (%d:%d:%d)\n",
			 me_id, pipe_id, queue_id);
		switch (me_id) {
		case 1:
			reg = SOC15_REG_OFFSET(GC, 0, regSDMA1_QUEUE_RESET_REQ);
			break;
		case 0:
		default:
			reg = SOC15_REG_OFFSET(GC, 0, regSDMA0_QUEUE_RESET_REQ);
			break;
		}

		value = 1 << queue_id;
		WREG32(reg, value);
		/* wait for queue reset done */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32(reg) & value))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout) {
			dev_err(adev->dev, "failed to wait on sdma queue reset done\n");
			r = -ETIMEDOUT;
		}
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
	return r;
}

static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
				    struct mes_reset_queue_input *input)
{
	if (input->use_mmio)
		return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
						  input->me_id, input->pipe_id,
						  input->queue_id, input->vmid);

	union MESAPI__RESET mes_reset_queue_pkt;

	memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));

	mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
	mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
	/*mes_reset_queue_pkt.reset_queue_only = 1;*/

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
			offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}

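/*
 * Kernel ("legacy") queues keep their driver-initialized MQD; mapping one
 * through MES reuses the ADD_QUEUE packet with map_legacy_kq set so the
 * firmware only maps the queue instead of scheduling a new gang.
 */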
static int mes_v11_0_map_legacy_queue(struct amdgpu_mes *mes,
				      struct mes_map_legacy_queue_input *input)
{
	union MESAPI__ADD_QUEUE mes_add_queue_pkt;

	memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));

	mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE;
	mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_add_queue_pkt.pipe_id = input->pipe_id;
	mes_add_queue_pkt.queue_id = input->queue_id;
	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_add_queue_pkt.mqd_addr = input->mqd_addr;
	mes_add_queue_pkt.wptr_addr = input->wptr_addr;
	mes_add_queue_pkt.queue_type =
		convert_to_mes_queue_type(input->queue_type);
	mes_add_queue_pkt.map_legacy_kq = 1;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
			offsetof(union MESAPI__ADD_QUEUE, api_status));
}

static int mes_v11_0_unmap_legacy_queue(struct amdgpu_mes *mes,
					struct mes_unmap_legacy_queue_input *input)
{
	union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;

	memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));

	mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
	mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_remove_queue_pkt.gang_context_addr = 0;

	mes_remove_queue_pkt.pipe_id = input->pipe_id;
	mes_remove_queue_pkt.queue_id = input->queue_id;

	if (input->action == PREEMPT_QUEUES_NO_UNMAP) {
		mes_remove_queue_pkt.preempt_legacy_gfx_queue = 1;
		mes_remove_queue_pkt.tf_addr = input->trail_fence_addr;
		mes_remove_queue_pkt.tf_data =
			lower_32_bits(input->trail_fence_data);
	} else {
		mes_remove_queue_pkt.unmap_legacy_queue = 1;
		mes_remove_queue_pkt.queue_type =
			convert_to_mes_queue_type(input->queue_type);
	}

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
			offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}

static int mes_v11_0_suspend_gang(struct amdgpu_mes *mes,
				  struct mes_suspend_gang_input *input)
{
	union MESAPI__SUSPEND mes_suspend_gang_pkt;

	memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt));

	mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND;
	mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs;
	mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr;
	mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr;
	mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt),
			offsetof(union MESAPI__SUSPEND, api_status));
}

static int mes_v11_0_resume_gang(struct amdgpu_mes *mes,
				 struct mes_resume_gang_input *input)
{
	union MESAPI__RESUME mes_resume_gang_pkt;

	memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt));

	mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME;
	mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs;
	mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt),
			offsetof(union MESAPI__RESUME, api_status));
}

static int mes_v11_0_query_sched_status(struct amdgpu_mes *mes)
{
	union MESAPI__QUERY_MES_STATUS mes_status_pkt;

	memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));

	mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
	mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_status_pkt, sizeof(mes_status_pkt),
			offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
}

static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
			     struct mes_misc_op_input *input)
{
	union MESAPI__MISC misc_pkt;

	memset(&misc_pkt, 0, sizeof(misc_pkt));

	misc_pkt.header.type = MES_API_TYPE_SCHEDULER;
	misc_pkt.header.opcode = MES_SCH_API_MISC;
	misc_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	switch (input->op) {
	case MES_MISC_OP_READ_REG:
		misc_pkt.opcode = MESAPI_MISC__READ_REG;
		misc_pkt.read_reg.reg_offset = input->read_reg.reg_offset;
		misc_pkt.read_reg.buffer_addr = input->read_reg.buffer_addr;
		break;
	case MES_MISC_OP_WRITE_REG:
		misc_pkt.opcode = MESAPI_MISC__WRITE_REG;
		misc_pkt.write_reg.reg_offset = input->write_reg.reg_offset;
		misc_pkt.write_reg.reg_value = input->write_reg.reg_value;
		break;
	case MES_MISC_OP_WRM_REG_WAIT:
		misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM;
		misc_pkt.wait_reg_mem.op = WRM_OPERATION__WAIT_REG_MEM;
		misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref;
		misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask;
		misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
		misc_pkt.wait_reg_mem.reg_offset2 = 0;
		break;
	case MES_MISC_OP_WRM_REG_WR_WAIT:
		misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM;
		misc_pkt.wait_reg_mem.op = WRM_OPERATION__WR_WAIT_WR_REG;
		misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref;
		misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask;
		misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
		misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1;
		break;
	case MES_MISC_OP_SET_SHADER_DEBUGGER:
		misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER;
		misc_pkt.set_shader_debugger.process_context_addr =
			input->set_shader_debugger.process_context_addr;
		misc_pkt.set_shader_debugger.flags.u32all =
			input->set_shader_debugger.flags.u32all;
		misc_pkt.set_shader_debugger.spi_gdbg_per_vmid_cntl =
			input->set_shader_debugger.spi_gdbg_per_vmid_cntl;
		memcpy(misc_pkt.set_shader_debugger.tcp_watch_cntl,
		       input->set_shader_debugger.tcp_watch_cntl,
		       sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl));
		misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en;
		break;
	case MES_MISC_OP_CHANGE_CONFIG:
		if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
			dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support limit single process feature.\n");
			return -EINVAL;
		}
		misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
		misc_pkt.change_config.opcode =
			MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS;
		misc_pkt.change_config.option.bits.limit_single_process =
			input->change_config.option.limit_single_process;
		break;
	default:
		DRM_ERROR("unsupported misc op (%d)\n", input->op);
		return -EINVAL;
	}

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&misc_pkt, sizeof(misc_pkt),
			offsetof(union MESAPI__MISC, api_status));
}

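/*
 * Tell the MES firmware which hardware resources it owns: VMID masks for
 * both hubs, HQD masks per compute/gfx/SDMA pipe, aggregated doorbells,
 * register apertures, and the scheduler context and query-status fence
 * buffers, plus assorted feature knobs.
 */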
static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
{
	int i;
	struct amdgpu_device *adev = mes->adev;
	union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt;

	memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));

	mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC;
	mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
	mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
	mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
	mes_set_hw_res_pkt.paging_vmid = 0;
	mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr[0];
	mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
		mes->query_status_fence_gpu_addr[0];

	for (i = 0; i < MAX_COMPUTE_PIPES; i++)
		mes_set_hw_res_pkt.compute_hqd_mask[i] =
			mes->compute_hqd_mask[i];

	for (i = 0; i < MAX_GFX_PIPES; i++)
		mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];

	for (i = 0; i < MAX_SDMA_PIPES; i++)
		mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];

	for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
		mes_set_hw_res_pkt.aggregated_doorbells[i] =
			mes->aggregated_doorbells[i];

	for (i = 0; i < 5; i++) {
		mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
		mes_set_hw_res_pkt.mmhub_base[i] =
			adev->reg_offset[MMHUB_HWIP][0][i];
		mes_set_hw_res_pkt.osssys_base[i] =
			adev->reg_offset[OSSSYS_HWIP][0][i];
	}

	mes_set_hw_res_pkt.disable_reset = 1;
	mes_set_hw_res_pkt.disable_mes_log = 1;
	mes_set_hw_res_pkt.use_different_vmid_compute = 1;
	mes_set_hw_res_pkt.enable_reg_active_poll = 1;
	mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
	mes_set_hw_res_pkt.oversubscription_timer = 50;
	if (amdgpu_mes_log_enable) {
		mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
		mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
			mes->event_log_gpu_addr;
	}

	if (enforce_isolation)
		mes_set_hw_res_pkt.limit_single_process = 1;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
			offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}

static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
{
	int size = 128 * PAGE_SIZE;
	int ret = 0;
	struct amdgpu_device *adev = mes->adev;
	union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_pkt;
	memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));

	mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
	mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
	mes_set_hw_res_pkt.enable_mes_info_ctx = 1;

	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &mes->resource_1,
				      &mes->resource_1_gpu_addr,
				      &mes->resource_1_addr);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to create mes resource_1 bo\n", ret);
		return ret;
	}

	mes_set_hw_res_pkt.mes_info_ctx_mc_addr = mes->resource_1_gpu_addr;
	mes_set_hw_res_pkt.mes_info_ctx_size = mes->resource_1->tbo.base.size;
	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
			offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}

static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
					struct mes_reset_legacy_queue_input *input)
{
	union MESAPI__RESET mes_reset_queue_pkt;

	if (input->use_mmio)
		return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
						  input->me_id, input->pipe_id,
						  input->queue_id, input->vmid);

	memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));

	mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
	mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_reset_queue_pkt.queue_type =
		convert_to_mes_queue_type(input->queue_type);

	if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
		mes_reset_queue_pkt.reset_legacy_gfx = 1;
		mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
		mes_reset_queue_pkt.queue_id_lp = input->queue_id;
		mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr;
		mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset;
		mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr;
		mes_reset_queue_pkt.vmid_id_lp = input->vmid;
	} else {
		mes_reset_queue_pkt.reset_queue_only = 1;
		mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
	}

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
			offsetof(union MESAPI__RESET, api_status));
}

static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
	.add_hw_queue = mes_v11_0_add_hw_queue,
	.remove_hw_queue = mes_v11_0_remove_hw_queue,
	.map_legacy_queue = mes_v11_0_map_legacy_queue,
	.unmap_legacy_queue = mes_v11_0_unmap_legacy_queue,
	.suspend_gang = mes_v11_0_suspend_gang,
	.resume_gang = mes_v11_0_resume_gang,
	.misc_op = mes_v11_0_misc_op,
	.reset_legacy_queue = mes_v11_0_reset_legacy_queue,
	.reset_hw_queue = mes_v11_0_reset_hw_queue,
};

static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
					   enum admgpu_mes_pipe pipe)
{
	int r;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	const __le32 *fw_data;
	unsigned fw_size;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;

	fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
		   le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
	fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_size,
				      PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->mes.ucode_fw_obj[pipe],
				      &adev->mes.ucode_fw_gpu_addr[pipe],
				      (void **)&adev->mes.ucode_fw_ptr[pipe]);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mes fw bo\n", r);
		return r;
	}

	memcpy(adev->mes.ucode_fw_ptr[pipe], fw_data, fw_size);

	amdgpu_bo_kunmap(adev->mes.ucode_fw_obj[pipe]);
	amdgpu_bo_unreserve(adev->mes.ucode_fw_obj[pipe]);

	return 0;
}

static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
						enum admgpu_mes_pipe pipe)
{
	int r;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	const __le32 *fw_data;
	unsigned fw_size;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;

	fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
		   le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
	fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

	if (fw_size > GFX_MES_DRAM_SIZE) {
		dev_err(adev->dev, "PIPE%d ucode data fw size (%d) is greater than dram size (%d)\n",
			pipe, fw_size, GFX_MES_DRAM_SIZE);
		return -EINVAL;
	}

	r = amdgpu_bo_create_reserved(adev, GFX_MES_DRAM_SIZE,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->mes.data_fw_obj[pipe],
				      &adev->mes.data_fw_gpu_addr[pipe],
				      (void **)&adev->mes.data_fw_ptr[pipe]);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mes data fw bo\n", r);
		return r;
	}

	memcpy(adev->mes.data_fw_ptr[pipe], fw_data, fw_size);

	amdgpu_bo_kunmap(adev->mes.data_fw_obj[pipe]);
	amdgpu_bo_unreserve(adev->mes.data_fw_obj[pipe]);

	return 0;
}

static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
					 enum admgpu_mes_pipe pipe)
{
	amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
			      &adev->mes.data_fw_gpu_addr[pipe],
			      (void **)&adev->mes.data_fw_ptr[pipe]);

	amdgpu_bo_free_kernel(&adev->mes.ucode_fw_obj[pipe],
			      &adev->mes.ucode_fw_gpu_addr[pipe],
			      (void **)&adev->mes.ucode_fw_ptr[pipe]);
}

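/*
 * The firmware reports its version through the per-pipe CP_MES_GP3_LO
 * register; read it for the scheduler pipe and, when MES KIQ is enabled,
 * the KIQ pipe, under srbm_mutex.
 */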
static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
{
	int pipe;

	/* get MES scheduler/KIQ versions */
	mutex_lock(&adev->srbm_mutex);

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		soc21_grbm_select(adev, 3, pipe, 0, 0);

		if (pipe == AMDGPU_MES_SCHED_PIPE)
			adev->mes.sched_version =
				RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
		else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
			adev->mes.kiq_version =
				RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
	}

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
{
	uint64_t ucode_addr;
	uint32_t pipe, data = 0;

	if (enable) {
		if (amdgpu_mes_log_enable) {
			WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO,
				     lower_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE));
			WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI,
				     upper_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE));
			dev_info(adev->dev, "Setup CP MES MSCRATCH address : 0x%x. 0x%x\n",
				 RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI),
				 RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO));
		}

		data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL,
				     MES_PIPE1_RESET, adev->enable_mes_kiq ? 1 : 0);
		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);

		mutex_lock(&adev->srbm_mutex);
		for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
			if (!adev->enable_mes_kiq &&
			    pipe == AMDGPU_MES_KIQ_PIPE)
				continue;

			soc21_grbm_select(adev, 3, pipe, 0, 0);

			ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
			WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
				     lower_32_bits(ucode_addr));
			WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
				     upper_32_bits(ucode_addr));
		}
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		/* unhalt MES and activate pipe0 */
		data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE,
				     adev->enable_mes_kiq ? 1 : 0);
		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);

		if (amdgpu_emu_mode)
			msleep(100);
		else
			udelay(500);
	} else {
		data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_ACTIVE, 0);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 0);
		data = REG_SET_FIELD(data, CP_MES_CNTL,
				     MES_INVALIDATE_ICACHE, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
				     adev->enable_mes_kiq ? 1 : 0);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
	}
}

/* This function is for backdoor MES firmware */
static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
				    enum admgpu_mes_pipe pipe, bool prime_icache)
{
	int r;
	uint32_t data;
	uint64_t ucode_addr;

	mes_v11_0_enable(adev, false);

	if (!adev->mes.fw[pipe])
		return -EINVAL;

	r = mes_v11_0_allocate_ucode_buffer(adev, pipe);
	if (r)
		return r;

	r = mes_v11_0_allocate_ucode_data_buffer(adev, pipe);
	if (r) {
		mes_v11_0_free_ucode_buffers(adev, pipe);
		return r;
	}

	mutex_lock(&adev->srbm_mutex);
	/* me=3, pipe=0, queue=0 */
	soc21_grbm_select(adev, 3, pipe, 0, 0);

	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_CNTL, 0);

	/* set ucode start address */
	ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
	WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
		     lower_32_bits(ucode_addr));
	WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
		     upper_32_bits(ucode_addr));

	/* set ucode firmware address */
	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_LO,
		     lower_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));
	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_HI,
		     upper_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));

	/* set ucode instruction cache boundary to 2M-1 */
	WREG32_SOC15(GC, 0, regCP_MES_MIBOUND_LO, 0x1FFFFF);

	/* set ucode data firmware address */
	WREG32_SOC15(GC, 0, regCP_MES_MDBASE_LO,
		     lower_32_bits(adev->mes.data_fw_gpu_addr[pipe]));
	WREG32_SOC15(GC, 0, regCP_MES_MDBASE_HI,
		     upper_32_bits(adev->mes.data_fw_gpu_addr[pipe]));

	/* Set 0x7FFFF (512K-1) to CP_MES_MDBOUND_LO */
	WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x7FFFF);

	if (prime_icache) {
		/* invalidate ICACHE */
		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);

		/* prime the ICACHE. */
		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
	}

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	return 0;
}

static int mes_v11_0_allocate_eop_buf(struct amdgpu_device *adev,
				      enum admgpu_mes_pipe pipe)
{
	int r;
	u32 *eop;

	r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->mes.eop_gpu_obj[pipe],
				      &adev->mes.eop_gpu_addr[pipe],
				      (void **)&eop);
	if (r) {
		dev_warn(adev->dev, "(%d) create EOP bo failed\n", r);
		return r;
	}

	memset(eop, 0,
	       adev->mes.eop_gpu_obj[pipe]->tbo.base.size);

	amdgpu_bo_kunmap(adev->mes.eop_gpu_obj[pipe]);
	amdgpu_bo_unreserve(adev->mes.eop_gpu_obj[pipe]);

	return 0;
}

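/*
 * Fill the MES ring's MQD with the same layout a compute HQD expects (EOP
 * buffer, MQD/ring base, rptr/wptr writeback addresses, queue size and
 * doorbell control); the HQD registers are later programmed from this
 * struct, either via KIQ or by mes_v11_0_queue_init_register().
 */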
static int mes_v11_0_mqd_init(struct amdgpu_ring *ring)
{
	struct v11_compute_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	memset(mqd, 0, sizeof(*mqd));

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000007;

	eop_base_addr = ring->eop_gpu_addr >> 8;

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			    (order_base_2(MES_EOP_SIZE / 4) - 1));

	mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_base_addr);
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
	mqd->cp_hqd_eop_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = regCP_MQD_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = lower_32_bits(hqd_gpu_addr);
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = ring->rptr_gpu_addr;
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = ring->wptr_gpu_addr;
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			    ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, NO_UPDATE_RPTR, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* enable doorbell */
	tmp = 0;
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	mqd->cp_hqd_pq_doorbell_control = tmp;

	mqd->cp_hqd_vmid = 0;
	/* activate the queue */
	mqd->cp_hqd_active = 1;

	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE,
			    PRELOAD_SIZE, 0x55);
	mqd->cp_hqd_persistent_state = tmp;

	mqd->cp_hqd_ib_control = regCP_HQD_IB_CONTROL_DEFAULT;
	mqd->cp_hqd_iq_timer = regCP_HQD_IQ_TIMER_DEFAULT;
	mqd->cp_hqd_quantum = regCP_HQD_QUANTUM_DEFAULT;

	amdgpu_device_flush_hdp(ring->adev, NULL);

	return 0;
}

static void mes_v11_0_queue_init_register(struct amdgpu_ring *ring)
{
	struct v11_compute_mqd *mqd = ring->mqd_ptr;
	struct amdgpu_device *adev = ring->adev;
	uint32_t data = 0;

	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 3, ring->pipe, 0, 0);

	/* set CP_HQD_VMID.VMID = 0. */
	data = RREG32_SOC15(GC, 0, regCP_HQD_VMID);
	data = REG_SET_FIELD(data, CP_HQD_VMID, VMID, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_VMID, data);

	/* set CP_HQD_PQ_DOORBELL_CONTROL.DOORBELL_EN=0 */
	data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
			     DOORBELL_EN, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* set CP_MQD_BASE_ADDR/HI with the MQD base address */
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);

	/* set CP_MQD_CONTROL.VMID=0 */
	data = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
	data = REG_SET_FIELD(data, CP_MQD_CONTROL, VMID, 0);
	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 0);

	/* set CP_HQD_PQ_BASE/HI with the ring buffer base address */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);

	/* set CP_HQD_PQ_RPTR_REPORT_ADDR/HI */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
		     mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		     mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* set CP_HQD_PQ_CONTROL */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control);

	/* set CP_HQD_PQ_WPTR_POLL_ADDR/HI */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
		     mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		     mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* set CP_HQD_PQ_DOORBELL_CONTROL */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* set CP_HQD_PERSISTENT_STATE.PRELOAD_SIZE=0x53 */
	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state);

	/* set CP_HQD_ACTIVE.ACTIVE=1 */
	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, mqd->cp_hqd_active);

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

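/*
 * Ask KIQ to map the MES scheduler ring: emit the KIQ MAP_QUEUES packet and
 * use the KIQ ring test to confirm it executed.
 */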
static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
	int r;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
		return -EINVAL;

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]);

	return amdgpu_ring_test_helper(kiq_ring);
}

static int mes_v11_0_queue_init(struct amdgpu_device *adev,
				enum admgpu_mes_pipe pipe)
{
	struct amdgpu_ring *ring;
	int r;

	if (pipe == AMDGPU_MES_KIQ_PIPE)
		ring = &adev->gfx.kiq[0].ring;
	else if (pipe == AMDGPU_MES_SCHED_PIPE)
		ring = &adev->mes.ring[0];
	else
		BUG();

	if ((pipe == AMDGPU_MES_SCHED_PIPE) &&
	    (amdgpu_in_reset(adev) || adev->in_suspend)) {
		*(ring->wptr_cpu_addr) = 0;
		*(ring->rptr_cpu_addr) = 0;
		amdgpu_ring_clear_ring(ring);
	}

	r = mes_v11_0_mqd_init(ring);
	if (r)
		return r;

	if (pipe == AMDGPU_MES_SCHED_PIPE) {
		r = mes_v11_0_kiq_enable_queue(adev);
		if (r)
			return r;
	} else {
		mes_v11_0_queue_init_register(ring);
	}

	return 0;
}

static int mes_v11_0_ring_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;

	ring = &adev->mes.ring[0];

	ring->funcs = &mes_v11_0_ring_funcs;

	ring->me = 3;
	ring->pipe = 0;
	ring->queue = 0;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
	ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_SCHED_PIPE];
	ring->no_scheduler = true;
	sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
				AMDGPU_RING_PRIO_DEFAULT, NULL);
}

static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;

	spin_lock_init(&adev->gfx.kiq[0].ring_lock);

	ring = &adev->gfx.kiq[0].ring;

	ring->me = 3;
	ring->pipe = 1;
	ring->queue = 0;

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1;
	ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_KIQ_PIPE];
	ring->no_scheduler = true;
	sprintf(ring->name, "mes_kiq_%d.%d.%d",
		ring->me, ring->pipe, ring->queue);

	return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
				AMDGPU_RING_PRIO_DEFAULT, NULL);
}

static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
				 enum admgpu_mes_pipe pipe)
{
	int r, mqd_size = sizeof(struct v11_compute_mqd);
	struct amdgpu_ring *ring;

	if (pipe == AMDGPU_MES_KIQ_PIPE)
		ring = &adev->gfx.kiq[0].ring;
	else if (pipe == AMDGPU_MES_SCHED_PIPE)
		ring = &adev->mes.ring[0];
	else
		BUG();

	if (ring->mqd_obj)
		return 0;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
				    &ring->mqd_gpu_addr, &ring->mqd_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
		return r;
	}

	memset(ring->mqd_ptr, 0, mqd_size);

	/* prepare MQD backup */
	adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
	if (!adev->mes.mqd_backup[pipe]) {
		dev_warn(adev->dev,
			 "no memory to create MQD backup for ring %s\n",
			 ring->name);
		return -ENOMEM;
	}

	return 0;
}

static int mes_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int pipe, r;

	adev->mes.funcs = &mes_v11_0_funcs;
	adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
	adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;

	adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE;

	r = amdgpu_mes_init(adev);
	if (r)
		return r;

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
			continue;

		r = mes_v11_0_allocate_eop_buf(adev, pipe);
		if (r)
			return r;

		r = mes_v11_0_mqd_sw_init(adev, pipe);
		if (r)
			return r;
	}

	if (adev->enable_mes_kiq) {
		r = mes_v11_0_kiq_ring_init(adev);
		if (r)
			return r;
	}

	r = mes_v11_0_ring_init(adev);
	if (r)
		return r;

	return 0;
}

static int mes_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int pipe;

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		kfree(adev->mes.mqd_backup[pipe]);

		amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
				      &adev->mes.eop_gpu_addr[pipe],
				      NULL);
		amdgpu_ucode_release(&adev->mes.fw[pipe]);
	}

	amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
			      &adev->gfx.kiq[0].ring.mqd_gpu_addr,
			      &adev->gfx.kiq[0].ring.mqd_ptr);

	amdgpu_bo_free_kernel(&adev->mes.ring[0].mqd_obj,
			      &adev->mes.ring[0].mqd_gpu_addr,
			      &adev->mes.ring[0].mqd_ptr);

	amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
	amdgpu_ring_fini(&adev->mes.ring[0]);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
		mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_SCHED_PIPE);
	}

	amdgpu_mes_fini(adev);
	return 0;
}

static void mes_v11_0_kiq_dequeue(struct amdgpu_ring *ring)
{
	uint32_t data;
	int i;
	struct amdgpu_device *adev = ring->adev;

	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 3, ring->pipe, 0, 0);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
	}
	data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
			     DOORBELL_EN, 0);
	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
			     DOORBELL_HIT, 1);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);

	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0);

	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0);

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}

static void mes_v11_0_kiq_clear(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* tell RLC which is KIQ dequeue */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= ~RLC_CP_SCHEDULERS__scheduler0_MASK;
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}

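/*
 * Bring up MES through the KIQ path: optionally backdoor-load the firmware,
 * unhalt both pipes, read the firmware versions, then initialize the KIQ
 * queue with direct register writes so it can map the scheduler queue.
 */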
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
{
	int r = 0;
	struct amdgpu_ip_block *ip_block;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {

		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false);
		if (r) {
			DRM_ERROR("failed to load MES fw, r=%d\n", r);
			return r;
		}

		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true);
		if (r) {
			DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
			return r;
		}
	}

	mes_v11_0_enable(adev, true);

	mes_v11_0_get_fw_version(adev);

	mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
	if (unlikely(!ip_block)) {
		dev_err(adev->dev, "Failed to get MES handle\n");
		return -EINVAL;
	}

	r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
	if (r)
		goto failure;

	if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x47)
		adev->mes.enable_legacy_queue_map = true;
	else
		adev->mes.enable_legacy_queue_map = false;

	if (adev->mes.enable_legacy_queue_map) {
		r = mes_v11_0_hw_init(ip_block);
		if (r)
			goto failure;
	}

	return r;

failure:
	mes_v11_0_hw_fini(ip_block);
	return r;
}

static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
{
	if (adev->mes.ring[0].sched.ready) {
		mes_v11_0_kiq_dequeue(&adev->mes.ring[0]);
		adev->mes.ring[0].sched.ready = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		mes_v11_0_kiq_dequeue(&adev->gfx.kiq[0].ring);
		mes_v11_0_kiq_clear(adev);
	}

	mes_v11_0_enable(adev, false);

	return 0;
}

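/*
 * Bring up the scheduler pipe: initialize the scheduler queue, hand the
 * hardware resources to the firmware, and verify MES answers a status query
 * before marking the ring ready (KIQ becomes MES-owned at that point).
 */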
static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->mes.ring[0].sched.ready)
		goto out;

	if (!adev->enable_mes_kiq) {
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			r = mes_v11_0_load_microcode(adev,
					     AMDGPU_MES_SCHED_PIPE, true);
			if (r) {
				DRM_ERROR("failed to load MES fw, r=%d\n", r);
				return r;
			}
		}

		mes_v11_0_enable(adev, true);
	}

	r = mes_v11_0_queue_init(adev, AMDGPU_MES_SCHED_PIPE);
	if (r)
		goto failure;

	r = mes_v11_0_set_hw_resources(&adev->mes);
	if (r)
		goto failure;

	if (amdgpu_sriov_is_mes_info_enable(adev)) {
		r = mes_v11_0_set_hw_resources_1(&adev->mes);
		if (r) {
			DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
			goto failure;
		}
	}

	r = mes_v11_0_query_sched_status(&adev->mes);
	if (r) {
		DRM_ERROR("MES is busy\n");
		goto failure;
	}

out:
	/*
	 * Disable KIQ ring usage from the driver once MES is enabled.
	 * MES uses KIQ ring exclusively so driver cannot access KIQ ring
	 * with MES enabled.
	 */
	adev->gfx.kiq[0].ring.sched.ready = false;
	adev->mes.ring[0].sched.ready = true;

	return 0;

failure:
	mes_v11_0_hw_fini(ip_block);
	return r;
}

static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_is_mes_info_enable(adev)) {
		amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr,
				      &adev->mes.resource_1_addr);
	}
	return 0;
}

static int mes_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_mes_suspend(ip_block->adev);
	if (r)
		return r;

	return mes_v11_0_hw_fini(ip_block);
}

static int mes_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = mes_v11_0_hw_init(ip_block);
	if (r)
		return r;

	return amdgpu_mes_resume(ip_block->adev);
}

static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int pipe, r;

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
			continue;
		r = amdgpu_mes_init_microcode(adev, pipe);
		if (r)
			return r;
	}

	return 0;
}

static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* it's only intended for use in mes_self_test case, not for s0ix and reset */
	if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3)))
		amdgpu_mes_self_test(adev);

	return 0;
}

static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
	.name = "mes_v11_0",
	.early_init = mes_v11_0_early_init,
	.late_init = mes_v11_0_late_init,
	.sw_init = mes_v11_0_sw_init,
	.sw_fini = mes_v11_0_sw_fini,
	.hw_init = mes_v11_0_hw_init,
	.hw_fini = mes_v11_0_hw_fini,
	.suspend = mes_v11_0_suspend,
	.resume = mes_v11_0_resume,
};

const struct amdgpu_ip_block_version mes_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_MES,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &mes_v11_0_ip_funcs,
};