/* drivers/gpu/drm/amd/amdgpu/mes_v10_1.c */
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25 #include <linux/module.h>
26 #include "amdgpu.h"
27 #include "soc15_common.h"
28 #include "nv.h"
29 #include "gc/gc_10_1_0_offset.h"
30 #include "gc/gc_10_1_0_sh_mask.h"
31 #include "gc/gc_10_1_0_default.h"
32 #include "v10_structs.h"
33 #include "mes_api_def.h"
34
35 #define mmCP_MES_IC_OP_CNTL_Sienna_Cichlid               0x2820
36 #define mmCP_MES_IC_OP_CNTL_Sienna_Cichlid_BASE_IDX      1
37 #define mmRLC_CP_SCHEDULERS_Sienna_Cichlid              0x4ca1
38 #define mmRLC_CP_SCHEDULERS_Sienna_Cichlid_BASE_IDX     1
39
40 MODULE_FIRMWARE("amdgpu/navi10_mes.bin");
41 MODULE_FIRMWARE("amdgpu/sienna_cichlid_mes.bin");
42 MODULE_FIRMWARE("amdgpu/sienna_cichlid_mes1.bin");
43
44 static int mes_v10_1_hw_fini(void *handle);
45 static int mes_v10_1_kiq_hw_init(struct amdgpu_device *adev);
46
47 #define MES_EOP_SIZE   2048
48
49 static void mes_v10_1_ring_set_wptr(struct amdgpu_ring *ring)
50 {
51         struct amdgpu_device *adev = ring->adev;
52
53         if (ring->use_doorbell) {
54                 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
55                              ring->wptr);
56                 WDOORBELL64(ring->doorbell_index, ring->wptr);
57         } else {
58                 BUG();
59         }
60 }
61
62 static u64 mes_v10_1_ring_get_rptr(struct amdgpu_ring *ring)
63 {
64         return *ring->rptr_cpu_addr;
65 }
66
67 static u64 mes_v10_1_ring_get_wptr(struct amdgpu_ring *ring)
68 {
69         u64 wptr;
70
71         if (ring->use_doorbell)
72                 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
73         else
74                 BUG();
75         return wptr;
76 }
77
/* Ring callbacks for the MES submission ring.  rptr/wptr are kept in
 * CPU-visible memory and submission goes through the doorbell (see the
 * set_wptr/get_wptr helpers above). */
static const struct amdgpu_ring_funcs mes_v10_1_ring_funcs = {
        .type = AMDGPU_RING_TYPE_MES,
        .align_mask = 1,
        .nop = 0,
        .support_64bit_ptrs = true,
        .get_rptr = mes_v10_1_ring_get_rptr,
        .get_wptr = mes_v10_1_ring_get_wptr,
        .set_wptr = mes_v10_1_ring_set_wptr,
        .insert_nop = amdgpu_ring_insert_nop,
};
88
/*
 * mes_v10_1_submit_pkt_and_poll_completion - emit one MES API packet and
 * busy-poll until the firmware signals completion.
 * @mes: MES instance owning the submission ring
 * @pkt: fully-built API packet; @size must be a multiple of 4 bytes
 * @size: packet size in bytes
 * @api_status_off: byte offset of the MES_API_STATUS field inside @pkt
 *
 * The completion fence address/value are patched into the packet under the
 * ring lock, so the firmware can write the driver's fence when it has
 * processed the request.
 *
 * Return: 0 on success, -ENOMEM if ring space could not be allocated,
 * -ETIMEDOUT if the firmware did not respond within adev->usec_timeout.
 */
static int mes_v10_1_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
                                                    void *pkt, int size,
                                                    int api_status_off)
{
        int ndw = size / 4;
        signed long r;
        union MESAPI__ADD_QUEUE *x_pkt = pkt;   /* only for header.opcode in logs */
        struct MES_API_STATUS *api_status;
        struct amdgpu_device *adev = mes->adev;
        struct amdgpu_ring *ring = &mes->ring;
        unsigned long flags;

        BUG_ON(size % 4 != 0);

        spin_lock_irqsave(&mes->ring_lock, flags);
        if (amdgpu_ring_alloc(ring, ndw)) {
                spin_unlock_irqrestore(&mes->ring_lock, flags);
                return -ENOMEM;
        }

        /* Patch the completion fence into the packet while holding the ring
         * lock so the sync_seq increment stays ordered with the ring write. */
        api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
        api_status->api_completion_fence_addr = mes->ring.fence_drv.gpu_addr;
        api_status->api_completion_fence_value = ++mes->ring.fence_drv.sync_seq;

        amdgpu_ring_write_multiple(ring, pkt, ndw);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&mes->ring_lock, flags);

        DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode);

        /* Poll (no interrupt) for the firmware to write the fence value. */
        r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq,
                                      adev->usec_timeout);
        if (r < 1) {
                DRM_ERROR("MES failed to response msg=%d\n",
                          x_pkt->header.opcode);
                return -ETIMEDOUT;
        }

        return 0;
}
129
130 static int convert_to_mes_queue_type(int queue_type)
131 {
132         if (queue_type == AMDGPU_RING_TYPE_GFX)
133                 return MES_QUEUE_TYPE_GFX;
134         else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
135                 return MES_QUEUE_TYPE_COMPUTE;
136         else if (queue_type == AMDGPU_RING_TYPE_SDMA)
137                 return MES_QUEUE_TYPE_SDMA;
138         else
139                 BUG();
140         return -1;
141 }
142
/*
 * mes_v10_1_add_hw_queue - ask the MES firmware to add a hardware queue.
 * @mes: MES instance
 * @input: queue description from the common MES layer
 *
 * Builds a MESAPI__ADD_QUEUE packet from @input (process/gang context,
 * doorbell, MQD and wptr addresses, priorities, GWS allocation, trap
 * handler) and submits it, polling for completion.
 *
 * Return: 0 on success or a negative errno from packet submission.
 */
static int mes_v10_1_add_hw_queue(struct amdgpu_mes *mes,
                                  struct mes_add_queue_input *input)
{
        struct amdgpu_device *adev = mes->adev;
        union MESAPI__ADD_QUEUE mes_add_queue_pkt;
        /* VM context control is taken from the GFX hub configuration. */
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
        uint32_t vm_cntx_cntl = hub->vm_cntx_cntl;

        memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));

        mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
        mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE;
        mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

        mes_add_queue_pkt.process_id = input->process_id;
        mes_add_queue_pkt.page_table_base_addr = input->page_table_base_addr;
        mes_add_queue_pkt.process_va_start = input->process_va_start;
        mes_add_queue_pkt.process_va_end = input->process_va_end;
        mes_add_queue_pkt.process_quantum = input->process_quantum;
        mes_add_queue_pkt.process_context_addr = input->process_context_addr;
        mes_add_queue_pkt.gang_quantum = input->gang_quantum;
        mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
        mes_add_queue_pkt.inprocess_gang_priority =
                input->inprocess_gang_priority;
        mes_add_queue_pkt.gang_global_priority_level =
                input->gang_global_priority_level;
        mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
        mes_add_queue_pkt.mqd_addr = input->mqd_addr;
        mes_add_queue_pkt.wptr_addr = input->wptr_addr;
        mes_add_queue_pkt.queue_type =
                convert_to_mes_queue_type(input->queue_type);
        mes_add_queue_pkt.paging = input->paging;
        mes_add_queue_pkt.vm_context_cntl = vm_cntx_cntl;
        mes_add_queue_pkt.gws_base = input->gws_base;
        mes_add_queue_pkt.gws_size = input->gws_size;
        mes_add_queue_pkt.trap_handler_addr = input->tba_addr;

        return mes_v10_1_submit_pkt_and_poll_completion(mes,
                        &mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
                        offsetof(union MESAPI__ADD_QUEUE, api_status));
}
184
/*
 * mes_v10_1_remove_hw_queue - ask the MES firmware to remove a hardware
 * queue, identified by its doorbell offset and gang context address.
 *
 * Return: 0 on success or a negative errno from packet submission.
 */
static int mes_v10_1_remove_hw_queue(struct amdgpu_mes *mes,
                                     struct mes_remove_queue_input *input)
{
        union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;

        memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));

        mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
        mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
        mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

        mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
        mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;

        return mes_v10_1_submit_pkt_and_poll_completion(mes,
                        &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
                        offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
203
/*
 * mes_v10_1_unmap_legacy_queue - unmap (or preempt) a queue that was mapped
 * by the driver rather than by MES ("legacy" queue).
 *
 * Reuses the REMOVE_QUEUE packet: gang_context_addr is zeroed to mark it as
 * a legacy-queue operation, and pipe/queue ids identify the HQD directly.
 * PREEMPT_QUEUES_NO_UNMAP preempts the queue and has the firmware write
 * @trail_fence_data to @trail_fence_addr; otherwise the queue is fully
 * unmapped (GFX queues via unmap_legacy_gfx_queue, anything else via
 * unmap_kiq_utility_queue).
 *
 * Return: 0 on success or a negative errno from packet submission.
 */
static int mes_v10_1_unmap_legacy_queue(struct amdgpu_mes *mes,
                                 struct mes_unmap_legacy_queue_input *input)
{
        union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;

        memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));

        mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
        mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
        mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

        mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
        mes_remove_queue_pkt.gang_context_addr = 0;

        mes_remove_queue_pkt.pipe_id = input->pipe_id;
        mes_remove_queue_pkt.queue_id = input->queue_id;

        if (input->action == PREEMPT_QUEUES_NO_UNMAP) {
                mes_remove_queue_pkt.preempt_legacy_gfx_queue = 1;
                mes_remove_queue_pkt.tf_addr = input->trail_fence_addr;
                mes_remove_queue_pkt.tf_data =
                        lower_32_bits(input->trail_fence_data);
        } else {
                if (input->queue_type == AMDGPU_RING_TYPE_GFX)
                        mes_remove_queue_pkt.unmap_legacy_gfx_queue = 1;
                else
                        mes_remove_queue_pkt.unmap_kiq_utility_queue = 1;
        }

        return mes_v10_1_submit_pkt_and_poll_completion(mes,
                        &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
                        offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
237
/* Gang suspend is not implemented on MES v10.1; report success so the
 * common MES layer can proceed. */
static int mes_v10_1_suspend_gang(struct amdgpu_mes *mes,
                                  struct mes_suspend_gang_input *input)
{
        return 0;
}
243
/* Gang resume is not implemented on MES v10.1; report success so the
 * common MES layer can proceed. */
static int mes_v10_1_resume_gang(struct amdgpu_mes *mes,
                                 struct mes_resume_gang_input *input)
{
        return 0;
}
249
/*
 * mes_v10_1_query_sched_status - ping the MES scheduler.
 *
 * Sends a QUERY_SCHEDULER_STATUS packet; a successful round trip (the
 * firmware signals the completion fence) is the useful result here.
 *
 * Return: 0 if the scheduler responded, negative errno otherwise.
 */
static int mes_v10_1_query_sched_status(struct amdgpu_mes *mes)
{
        union MESAPI__QUERY_MES_STATUS mes_status_pkt;

        memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));

        mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
        mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
        mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

        return mes_v10_1_submit_pkt_and_poll_completion(mes,
                        &mes_status_pkt, sizeof(mes_status_pkt),
                        offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
}
264
/*
 * mes_v10_1_set_hw_resources - tell the MES firmware which hardware
 * resources it owns.
 *
 * Hands over VMID masks, GDS size, HQD masks per compute/GFX/SDMA pipe,
 * aggregated doorbells per priority level, scheduler context and status
 * fence addresses, and register aperture bases for the GC/MMHUB/OSSSYS
 * IP blocks.
 *
 * Return: 0 on success or a negative errno from packet submission.
 */
static int mes_v10_1_set_hw_resources(struct amdgpu_mes *mes)
{
        int i;
        struct amdgpu_device *adev = mes->adev;
        union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt;

        memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));

        mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
        mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC;
        mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

        mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
        mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
        mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
        mes_set_hw_res_pkt.paging_vmid = 0;
        mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
        mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
                mes->query_status_fence_gpu_addr;

        for (i = 0; i < MAX_COMPUTE_PIPES; i++)
                mes_set_hw_res_pkt.compute_hqd_mask[i] =
                        mes->compute_hqd_mask[i];

        for (i = 0; i < MAX_GFX_PIPES; i++)
                mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];

        for (i = 0; i < MAX_SDMA_PIPES; i++)
                mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];

        for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
                mes_set_hw_res_pkt.aggregated_doorbells[i] =
                        mes->aggregated_doorbells[i];

        /* NOTE(review): 5 register-aperture segments per IP block are
         * copied here; presumably matches the gc_base[]/mmhub_base[]/
         * osssys_base[] array sizes in the API struct — confirm. */
        for (i = 0; i < 5; i++) {
                mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
                mes_set_hw_res_pkt.mmhub_base[i] =
                        adev->reg_offset[MMHUB_HWIP][0][i];
                mes_set_hw_res_pkt.osssys_base[i] =
                        adev->reg_offset[OSSSYS_HWIP][0][i];
        }

        mes_set_hw_res_pkt.disable_reset = 1;
        mes_set_hw_res_pkt.disable_mes_log = 1;
        mes_set_hw_res_pkt.use_different_vmid_compute = 1;

        return mes_v10_1_submit_pkt_and_poll_completion(mes,
                        &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
                        offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}
315
/*
 * mes_v10_1_init_aggregated_doorbell - program the aggregated doorbell
 * registers, one per priority level.
 *
 * CP_MES_DOORBELL_CONTROL1..5 are programmed with the doorbell offsets for
 * the LOW, NORMAL, MEDIUM, HIGH and REALTIME levels respectively: clear
 * the offset/enable/hit fields, then write the new offset and set the
 * enable bit.  Finally CP_HQD_GFX_CONTROL enables doorbell-updated
 * messages.
 */
static void mes_v10_1_init_aggregated_doorbell(struct amdgpu_mes *mes)
{
        struct amdgpu_device *adev = mes->adev;
        uint32_t data;

        /* level LOW -> CONTROL1 */
        data = RREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL1);
        data &= ~(CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET_MASK |
                  CP_MES_DOORBELL_CONTROL1__DOORBELL_EN_MASK |
                  CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT_MASK);
        data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_LOW] <<
                CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET__SHIFT;
        data |= 1 << CP_MES_DOORBELL_CONTROL1__DOORBELL_EN__SHIFT;
        WREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL1, data);

        /* level NORMAL -> CONTROL2 */
        data = RREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL2);
        data &= ~(CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET_MASK |
                  CP_MES_DOORBELL_CONTROL2__DOORBELL_EN_MASK |
                  CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT_MASK);
        data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_NORMAL] <<
                CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET__SHIFT;
        data |= 1 << CP_MES_DOORBELL_CONTROL2__DOORBELL_EN__SHIFT;
        WREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL2, data);

        /* level MEDIUM -> CONTROL3 */
        data = RREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL3);
        data &= ~(CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET_MASK |
                  CP_MES_DOORBELL_CONTROL3__DOORBELL_EN_MASK |
                  CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT_MASK);
        data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_MEDIUM] <<
                CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET__SHIFT;
        data |= 1 << CP_MES_DOORBELL_CONTROL3__DOORBELL_EN__SHIFT;
        WREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL3, data);

        /* level HIGH -> CONTROL4 */
        data = RREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL4);
        data &= ~(CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET_MASK |
                  CP_MES_DOORBELL_CONTROL4__DOORBELL_EN_MASK |
                  CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT_MASK);
        data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_HIGH] <<
                CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET__SHIFT;
        data |= 1 << CP_MES_DOORBELL_CONTROL4__DOORBELL_EN__SHIFT;
        WREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL4, data);

        /* level REALTIME -> CONTROL5 */
        data = RREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL5);
        data &= ~(CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET_MASK |
                  CP_MES_DOORBELL_CONTROL5__DOORBELL_EN_MASK |
                  CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT_MASK);
        data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_REALTIME] <<
                CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET__SHIFT;
        data |= 1 << CP_MES_DOORBELL_CONTROL5__DOORBELL_EN__SHIFT;
        WREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL5, data);

        /* Enable doorbell-updated messages for the GFX HQD. */
        data = 1 << CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT;
        WREG32_SOC15(GC, 0, mmCP_HQD_GFX_CONTROL, data);
}
369
/* Dispatch table wired into the common amdgpu_mes layer.  suspend_gang
 * and resume_gang are stubs on this generation (see above). */
static const struct amdgpu_mes_funcs mes_v10_1_funcs = {
        .add_hw_queue = mes_v10_1_add_hw_queue,
        .remove_hw_queue = mes_v10_1_remove_hw_queue,
        .unmap_legacy_queue = mes_v10_1_unmap_legacy_queue,
        .suspend_gang = mes_v10_1_suspend_gang,
        .resume_gang = mes_v10_1_resume_gang,
};
377
378 static int mes_v10_1_init_microcode(struct amdgpu_device *adev,
379                                     enum admgpu_mes_pipe pipe)
380 {
381         const char *chip_name;
382         char fw_name[30];
383         int err;
384         const struct mes_firmware_header_v1_0 *mes_hdr;
385         struct amdgpu_firmware_info *info;
386
387         switch (adev->ip_versions[GC_HWIP][0]) {
388         case IP_VERSION(10, 1, 10):
389                 chip_name = "navi10";
390                 break;
391         case IP_VERSION(10, 3, 0):
392                 chip_name = "sienna_cichlid";
393                 break;
394         default:
395                 BUG();
396         }
397
398         if (pipe == AMDGPU_MES_SCHED_PIPE)
399                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
400                          chip_name);
401         else
402                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
403                          chip_name);
404
405         err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
406         if (err)
407                 return err;
408
409         err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
410         if (err) {
411                 release_firmware(adev->mes.fw[pipe]);
412                 adev->mes.fw[pipe] = NULL;
413                 return err;
414         }
415
416         mes_hdr = (const struct mes_firmware_header_v1_0 *)
417                 adev->mes.fw[pipe]->data;
418         adev->mes.ucode_fw_version[pipe] =
419                 le32_to_cpu(mes_hdr->mes_ucode_version);
420         adev->mes.ucode_fw_version[pipe] =
421                 le32_to_cpu(mes_hdr->mes_ucode_data_version);
422         adev->mes.uc_start_addr[pipe] =
423                 le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
424                 ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
425         adev->mes.data_start_addr[pipe] =
426                 le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
427                 ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
428
429         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
430                 int ucode, ucode_data;
431
432                 if (pipe == AMDGPU_MES_SCHED_PIPE) {
433                         ucode = AMDGPU_UCODE_ID_CP_MES;
434                         ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
435                 } else {
436                         ucode = AMDGPU_UCODE_ID_CP_MES1;
437                         ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
438                 }
439
440                 info = &adev->firmware.ucode[ucode];
441                 info->ucode_id = ucode;
442                 info->fw = adev->mes.fw[pipe];
443                 adev->firmware.fw_size +=
444                         ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
445                               PAGE_SIZE);
446
447                 info = &adev->firmware.ucode[ucode_data];
448                 info->ucode_id = ucode_data;
449                 info->fw = adev->mes.fw[pipe];
450                 adev->firmware.fw_size +=
451                         ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
452                               PAGE_SIZE);
453         }
454
455         return 0;
456 }
457
/* Drop the reference to the firmware requested for @pipe and clear the
 * pointer so a later hw_fini/init cannot double-release it. */
static void mes_v10_1_free_microcode(struct amdgpu_device *adev,
                                     enum admgpu_mes_pipe pipe)
{
        release_firmware(adev->mes.fw[pipe]);
        adev->mes.fw[pipe] = NULL;
}
464
/*
 * mes_v10_1_allocate_ucode_buffer - copy the MES instruction ucode into a
 * GPU-visible buffer for backdoor (non-PSP) loading.
 *
 * Creates a page-aligned GTT BO sized to the ucode section, copies the
 * section from the firmware image, then unmaps and unreserves the BO.
 *
 * Return: 0 on success or the BO-creation errno.
 */
static int mes_v10_1_allocate_ucode_buffer(struct amdgpu_device *adev,
                                           enum admgpu_mes_pipe pipe)
{
        int r;
        const struct mes_firmware_header_v1_0 *mes_hdr;
        const __le32 *fw_data;
        unsigned fw_size;

        mes_hdr = (const struct mes_firmware_header_v1_0 *)
                adev->mes.fw[pipe]->data;

        /* Locate the ucode section inside the firmware image. */
        fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
                   le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
        fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

        r = amdgpu_bo_create_reserved(adev, fw_size,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                      &adev->mes.ucode_fw_obj[pipe],
                                      &adev->mes.ucode_fw_gpu_addr[pipe],
                                      (void **)&adev->mes.ucode_fw_ptr[pipe]);
        if (r) {
                dev_err(adev->dev, "(%d) failed to create mes fw bo\n", r);
                return r;
        }

        memcpy(adev->mes.ucode_fw_ptr[pipe], fw_data, fw_size);

        amdgpu_bo_kunmap(adev->mes.ucode_fw_obj[pipe]);
        amdgpu_bo_unreserve(adev->mes.ucode_fw_obj[pipe]);

        return 0;
}
497
/*
 * mes_v10_1_allocate_ucode_data_buffer - copy the MES ucode *data* section
 * into a GPU-visible buffer for backdoor (non-PSP) loading.
 *
 * Same pattern as the instruction buffer, but 64KB-aligned as the data
 * aperture requires.
 *
 * Return: 0 on success or the BO-creation errno.
 */
static int mes_v10_1_allocate_ucode_data_buffer(struct amdgpu_device *adev,
                                                enum admgpu_mes_pipe pipe)
{
        int r;
        const struct mes_firmware_header_v1_0 *mes_hdr;
        const __le32 *fw_data;
        unsigned fw_size;

        mes_hdr = (const struct mes_firmware_header_v1_0 *)
                adev->mes.fw[pipe]->data;

        /* Locate the ucode-data section inside the firmware image. */
        fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
                   le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
        fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

        r = amdgpu_bo_create_reserved(adev, fw_size,
                                      64 * 1024, AMDGPU_GEM_DOMAIN_GTT,
                                      &adev->mes.data_fw_obj[pipe],
                                      &adev->mes.data_fw_gpu_addr[pipe],
                                      (void **)&adev->mes.data_fw_ptr[pipe]);
        if (r) {
                dev_err(adev->dev, "(%d) failed to create mes data fw bo\n", r);
                return r;
        }

        memcpy(adev->mes.data_fw_ptr[pipe], fw_data, fw_size);

        amdgpu_bo_kunmap(adev->mes.data_fw_obj[pipe]);
        amdgpu_bo_unreserve(adev->mes.data_fw_obj[pipe]);

        return 0;
}
530
/* Free both backdoor-load buffers (ucode data and instructions) for @pipe;
 * amdgpu_bo_free_kernel() NULLs the pointers for us. */
static void mes_v10_1_free_ucode_buffers(struct amdgpu_device *adev,
                                         enum admgpu_mes_pipe pipe)
{
        amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
                              &adev->mes.data_fw_gpu_addr[pipe],
                              (void **)&adev->mes.data_fw_ptr[pipe]);

        amdgpu_bo_free_kernel(&adev->mes.ucode_fw_obj[pipe],
                              &adev->mes.ucode_fw_gpu_addr[pipe],
                              (void **)&adev->mes.ucode_fw_ptr[pipe]);
}
542
/*
 * mes_v10_1_enable - start or stop the MES micro-engines.
 * @adev: amdgpu device
 * @enable: true to reset+activate the pipes, false to halt them
 *
 * On enable: reset pipe0 (and pipe1 if KIQ-on-MES is enabled), program each
 * pipe's ucode start address under the SRBM mutex, clear BYPASS_UNCACHED,
 * then activate the pipes and give the engine 100us to come up.
 * On disable: deactivate both pipes, invalidate the icache, assert the
 * pipe resets and halt the engine.
 */
static void mes_v10_1_enable(struct amdgpu_device *adev, bool enable)
{
        uint32_t pipe, data = 0;

        if (enable) {
                /* Hold the pipes in reset while reprogramming them. */
                data = RREG32_SOC15(GC, 0, mmCP_MES_CNTL);
                data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
                data = REG_SET_FIELD(data, CP_MES_CNTL,
                             MES_PIPE1_RESET, adev->enable_mes_kiq ? 1 : 0);
                WREG32_SOC15(GC, 0, mmCP_MES_CNTL, data);

                mutex_lock(&adev->srbm_mutex);
                for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
                        if (!adev->enable_mes_kiq &&
                            pipe == AMDGPU_MES_KIQ_PIPE)
                                continue;

                        /* me=3 selects the MES engine for this pipe. */
                        nv_grbm_select(adev, 3, pipe, 0, 0);
                        WREG32_SOC15(GC, 0, mmCP_MES_PRGRM_CNTR_START,
                             (uint32_t)(adev->mes.uc_start_addr[pipe]) >> 2);
                }
                nv_grbm_select(adev, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);

                /* clear BYPASS_UNCACHED to avoid hangs after interrupt. */
                data = RREG32_SOC15(GC, 0, mmCP_MES_DC_OP_CNTL);
                data = REG_SET_FIELD(data, CP_MES_DC_OP_CNTL,
                                     BYPASS_UNCACHED, 0);
                WREG32_SOC15(GC, 0, mmCP_MES_DC_OP_CNTL, data);

                /* unhalt MES and activate pipe0 */
                data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
                data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE,
                                     adev->enable_mes_kiq ? 1 : 0);
                WREG32_SOC15(GC, 0, mmCP_MES_CNTL, data);
                udelay(100);
        } else {
                data = RREG32_SOC15(GC, 0, mmCP_MES_CNTL);
                data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_ACTIVE, 0);
                data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 0);
                data = REG_SET_FIELD(data, CP_MES_CNTL,
                                     MES_INVALIDATE_ICACHE, 1);
                data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
                data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
                                     adev->enable_mes_kiq ? 1 : 0);
                data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
                WREG32_SOC15(GC, 0, mmCP_MES_CNTL, data);
        }
}
592
/* This function is for backdoor MES firmware loading (driver writes the
 * ucode/data buffers and programs the caches directly, bypassing PSP).
 *
 * Sequence: halt the engine, allocate+fill the instruction and data
 * buffers, program the per-pipe start address and cache base/bound
 * registers under the SRBM mutex, then invalidate and prime the
 * instruction cache.  Sienna Cichlid uses a relocated IC_OP_CNTL register,
 * hence the repeated GC-version switches.
 *
 * Return: 0 on success, -EINVAL if the firmware was never requested, or a
 * buffer-allocation errno (instruction buffer is freed on data-buffer
 * failure).
 */
static int mes_v10_1_load_microcode(struct amdgpu_device *adev,
                                    enum admgpu_mes_pipe pipe)
{
        int r;
        uint32_t data;

        mes_v10_1_enable(adev, false);

        if (!adev->mes.fw[pipe])
                return -EINVAL;

        r = mes_v10_1_allocate_ucode_buffer(adev, pipe);
        if (r)
                return r;

        r = mes_v10_1_allocate_ucode_data_buffer(adev, pipe);
        if (r) {
                mes_v10_1_free_ucode_buffers(adev, pipe);
                return r;
        }

        WREG32_SOC15(GC, 0, mmCP_MES_IC_BASE_CNTL, 0);

        mutex_lock(&adev->srbm_mutex);
        /* me=3, pipe=0, queue=0 */
        nv_grbm_select(adev, 3, pipe, 0, 0);

        /* set ucode start address */
        WREG32_SOC15(GC, 0, mmCP_MES_PRGRM_CNTR_START,
                     (uint32_t)(adev->mes.uc_start_addr[pipe]) >> 2);

        /* set ucode firmware address */
        WREG32_SOC15(GC, 0, mmCP_MES_IC_BASE_LO,
                     lower_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));
        WREG32_SOC15(GC, 0, mmCP_MES_IC_BASE_HI,
                     upper_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));

        /* set ucode instruction cache boundary to 2M-1 */
        WREG32_SOC15(GC, 0, mmCP_MES_MIBOUND_LO, 0x1FFFFF);

        /* set ucode data firmware address */
        WREG32_SOC15(GC, 0, mmCP_MES_MDBASE_LO,
                     lower_32_bits(adev->mes.data_fw_gpu_addr[pipe]));
        WREG32_SOC15(GC, 0, mmCP_MES_MDBASE_HI,
                     upper_32_bits(adev->mes.data_fw_gpu_addr[pipe]));

        /* Set 0x3FFFF (256K-1) to CP_MES_MDBOUND_LO */
        WREG32_SOC15(GC, 0, mmCP_MES_MDBOUND_LO, 0x3FFFF);

        /* invalidate ICACHE */
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(10, 3, 0):
                data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid);
                break;
        default:
                data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL);
                break;
        }
        data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
        data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(10, 3, 0):
                WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid, data);
                break;
        default:
                WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL, data);
                break;
        }

        /* prime the ICACHE. */
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(10, 3, 0):
                data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid);
                break;
        default:
                data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL);
                break;
        }
        data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(10, 3, 0):
                WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid, data);
                break;
        default:
                WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL, data);
                break;
        }

        nv_grbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);

        return 0;
}
687
/*
 * mes_v10_1_allocate_eop_buf - allocate and zero the EOP (end-of-pipe)
 * buffer for @pipe.
 *
 * Creates a page-aligned MES_EOP_SIZE GTT BO, zeroes the whole mapped BO,
 * then unmaps/unreserves it.  The GPU address is kept in
 * adev->mes.eop_gpu_addr[pipe] for MQD setup.
 *
 * Return: 0 on success or the BO-creation errno.
 */
static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev,
                                      enum admgpu_mes_pipe pipe)
{
        int r;
        u32 *eop;

        r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE,
                              AMDGPU_GEM_DOMAIN_GTT,
                              &adev->mes.eop_gpu_obj[pipe],
                              &adev->mes.eop_gpu_addr[pipe],
                              (void **)&eop);
        if (r) {
                dev_warn(adev->dev, "(%d) create EOP bo failed\n", r);
                return r;
        }

        /* Zero the actual BO size (may exceed MES_EOP_SIZE after align). */
        memset(eop, 0, adev->mes.eop_gpu_obj[pipe]->tbo.base.size);

        amdgpu_bo_kunmap(adev->mes.eop_gpu_obj[pipe]);
        amdgpu_bo_unreserve(adev->mes.eop_gpu_obj[pipe]);

        return 0;
}
711
/*
 * Initialize the compute MQD (Memory Queue Descriptor) for a MES ring.
 *
 * The MQD is the in-memory image of the hardware queue state; the CP/KIQ
 * later loads it into the HQD registers when the queue is mapped.  This
 * function only writes CPU-visible memory (ring->mqd_ptr) — no registers
 * are touched here.
 *
 * Always returns 0.
 */
static int mes_v10_1_mqd_init(struct amdgpu_ring *ring)
{
        struct v10_compute_mqd *mqd = ring->mqd_ptr;
        uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
        uint32_t tmp;

        /* Fixed MQD header/identification values for gfx10 compute queues. */
        mqd->header = 0xC0310800;
        mqd->compute_pipelinestat_enable = 0x00000001;
        mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
        mqd->compute_misc_reserved = 0x00000003;

        /* EOP base address is programmed in units of 256 bytes. */
        eop_base_addr = ring->eop_gpu_addr >> 8;

        /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
        tmp = mmCP_HQD_EOP_CONTROL_DEFAULT;
        tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
                        (order_base_2(MES_EOP_SIZE / 4) - 1));

        mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_base_addr);
        mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
        mqd->cp_hqd_eop_control = tmp;

        /* disable the queue if it's active */
        ring->wptr = 0;
        mqd->cp_hqd_pq_rptr = 0;
        mqd->cp_hqd_pq_wptr_lo = 0;
        mqd->cp_hqd_pq_wptr_hi = 0;

        /* set the pointer to the MQD */
        mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
        mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

        /* set MQD vmid to 0 */
        tmp = mmCP_MQD_CONTROL_DEFAULT;
        tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
        mqd->cp_mqd_control = tmp;

        /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
        hqd_gpu_addr = ring->gpu_addr >> 8;
        mqd->cp_hqd_pq_base_lo = lower_32_bits(hqd_gpu_addr);
        mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

        /* set the wb address whether it's enabled or not */
        wb_gpu_addr = ring->rptr_gpu_addr;
        mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
        mqd->cp_hqd_pq_rptr_report_addr_hi =
                upper_32_bits(wb_gpu_addr) & 0xffff;

        /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
        wb_gpu_addr = ring->wptr_gpu_addr;
        mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8;
        mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

        /* set up the HQD, this is similar to CP_RB0_CNTL */
        tmp = mmCP_HQD_PQ_CONTROL_DEFAULT;
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
                            (order_base_2(ring->ring_size / 4) - 1));
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
                            ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
        /* MES queue: unordered dispatch, privileged, kernel-owned, no tunnel. */
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, NO_UPDATE_RPTR, 1);
        mqd->cp_hqd_pq_control = tmp;

        /* enable doorbell? */
        tmp = 0;
        if (ring->use_doorbell) {
                tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
                                    DOORBELL_OFFSET, ring->doorbell_index);
                tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
                                    DOORBELL_EN, 1);
                tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
                                    DOORBELL_SOURCE, 0);
                tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
                                    DOORBELL_HIT, 0);
        }
        else
                tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
                                    DOORBELL_EN, 0);
        mqd->cp_hqd_pq_doorbell_control = tmp;

        mqd->cp_hqd_vmid = 0;
        /* activate the queue */
        mqd->cp_hqd_active = 1;
        mqd->cp_hqd_persistent_state = mmCP_HQD_PERSISTENT_STATE_DEFAULT;
        mqd->cp_hqd_ib_control = mmCP_HQD_IB_CONTROL_DEFAULT;
        mqd->cp_hqd_iq_timer = mmCP_HQD_IQ_TIMER_DEFAULT;
        mqd->cp_hqd_quantum = mmCP_HQD_QUANTUM_DEFAULT;

        tmp = mmCP_HQD_GFX_CONTROL_DEFAULT;
        tmp = REG_SET_FIELD(tmp, CP_HQD_GFX_CONTROL, DB_UPDATED_MSG_EN, 1);
        /* offset: 184 - this is used for CP_HQD_GFX_CONTROL */
        mqd->cp_hqd_suspend_cntl_stack_offset = tmp;

        return 0;
}
816
817 #if 0
818 static void mes_v10_1_queue_init_register(struct amdgpu_ring *ring)
819 {
820         struct v10_compute_mqd *mqd = ring->mqd_ptr;
821         struct amdgpu_device *adev = ring->adev;
822         uint32_t data = 0;
823
824         mutex_lock(&adev->srbm_mutex);
825         nv_grbm_select(adev, 3, ring->pipe, 0, 0);
826
827         /* set CP_HQD_VMID.VMID = 0. */
828         data = RREG32_SOC15(GC, 0, mmCP_HQD_VMID);
829         data = REG_SET_FIELD(data, CP_HQD_VMID, VMID, 0);
830         WREG32_SOC15(GC, 0, mmCP_HQD_VMID, data);
831
832         /* set CP_HQD_PQ_DOORBELL_CONTROL.DOORBELL_EN=0 */
833         data = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
834         data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
835                              DOORBELL_EN, 0);
836         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data);
837
838         /* set CP_MQD_BASE_ADDR/HI with the MQD base address */
839         WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
840         WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
841
842         /* set CP_MQD_CONTROL.VMID=0 */
843         data = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
844         data = REG_SET_FIELD(data, CP_MQD_CONTROL, VMID, 0);
845         WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL, 0);
846
847         /* set CP_HQD_PQ_BASE/HI with the ring buffer base address */
848         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
849         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);
850
851         /* set CP_HQD_PQ_RPTR_REPORT_ADDR/HI */
852         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
853                      mqd->cp_hqd_pq_rptr_report_addr_lo);
854         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
855                      mqd->cp_hqd_pq_rptr_report_addr_hi);
856
857         /* set CP_HQD_PQ_CONTROL */
858         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control);
859
860         /* set CP_HQD_PQ_WPTR_POLL_ADDR/HI */
861         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
862                      mqd->cp_hqd_pq_wptr_poll_addr_lo);
863         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
864                      mqd->cp_hqd_pq_wptr_poll_addr_hi);
865
866         /* set CP_HQD_PQ_DOORBELL_CONTROL */
867         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
868                      mqd->cp_hqd_pq_doorbell_control);
869
870         /* set CP_HQD_PERSISTENT_STATE.PRELOAD_SIZE=0x53 */
871         WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state);
872
873         /* set CP_HQD_ACTIVE.ACTIVE=1 */
874         WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
875
876         nv_grbm_select(adev, 0, 0, 0, 0);
877         mutex_unlock(&adev->srbm_mutex);
878 }
879 #endif
880
881 static int mes_v10_1_kiq_enable_queue(struct amdgpu_device *adev)
882 {
883         struct amdgpu_kiq *kiq = &adev->gfx.kiq;
884         struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
885         int r;
886
887         if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
888                 return -EINVAL;
889
890         r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
891         if (r) {
892                 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
893                 return r;
894         }
895
896         kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
897
898         r = amdgpu_ring_test_ring(kiq_ring);
899         if (r) {
900                 DRM_ERROR("kfq enable failed\n");
901                 kiq_ring->sched.ready = false;
902         }
903
904         return r;
905 }
906
907 static int mes_v10_1_queue_init(struct amdgpu_device *adev)
908 {
909         int r;
910
911         r = mes_v10_1_mqd_init(&adev->mes.ring);
912         if (r)
913                 return r;
914
915         r = mes_v10_1_kiq_enable_queue(adev);
916         if (r)
917                 return r;
918
919         return 0;
920 }
921
922 static int mes_v10_1_ring_init(struct amdgpu_device *adev)
923 {
924         struct amdgpu_ring *ring;
925
926         ring = &adev->mes.ring;
927
928         ring->funcs = &mes_v10_1_ring_funcs;
929
930         ring->me = 3;
931         ring->pipe = 0;
932         ring->queue = 0;
933
934         ring->ring_obj = NULL;
935         ring->use_doorbell = true;
936         ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
937         ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_SCHED_PIPE];
938         ring->no_scheduler = true;
939         sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
940
941         return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
942                                 AMDGPU_RING_PRIO_DEFAULT, NULL);
943 }
944
945 static int mes_v10_1_kiq_ring_init(struct amdgpu_device *adev)
946 {
947         struct amdgpu_ring *ring;
948
949         spin_lock_init(&adev->gfx.kiq.ring_lock);
950
951         ring = &adev->gfx.kiq.ring;
952
953         ring->me = 3;
954         ring->pipe = 1;
955         ring->queue = 0;
956
957         ring->adev = NULL;
958         ring->ring_obj = NULL;
959         ring->use_doorbell = true;
960         ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1;
961         ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_KIQ_PIPE];
962         ring->no_scheduler = true;
963         sprintf(ring->name, "mes_kiq_%d.%d.%d",
964                 ring->me, ring->pipe, ring->queue);
965
966         return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
967                                 AMDGPU_RING_PRIO_DEFAULT, NULL);
968 }
969
970 static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev,
971                                  enum admgpu_mes_pipe pipe)
972 {
973         int r, mqd_size = sizeof(struct v10_compute_mqd);
974         struct amdgpu_ring *ring;
975
976         if (pipe == AMDGPU_MES_KIQ_PIPE)
977                 ring = &adev->gfx.kiq.ring;
978         else if (pipe == AMDGPU_MES_SCHED_PIPE)
979                 ring = &adev->mes.ring;
980         else
981                 BUG();
982
983         if (ring->mqd_obj)
984                 return 0;
985
986         r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
987                                     AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
988                                     &ring->mqd_gpu_addr, &ring->mqd_ptr);
989         if (r) {
990                 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
991                 return r;
992         }
993         memset(ring->mqd_ptr, 0, mqd_size);
994
995         /* prepare MQD backup */
996         adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
997         if (!adev->mes.mqd_backup[pipe])
998                 dev_warn(adev->dev,
999                          "no memory to create MQD backup for ring %s\n",
1000                          ring->name);
1001
1002         return 0;
1003 }
1004
1005 static int mes_v10_1_sw_init(void *handle)
1006 {
1007         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1008         int pipe, r;
1009
1010         adev->mes.adev = adev;
1011         adev->mes.funcs = &mes_v10_1_funcs;
1012         adev->mes.kiq_hw_init = &mes_v10_1_kiq_hw_init;
1013
1014         r = amdgpu_mes_init(adev);
1015         if (r)
1016                 return r;
1017
1018         for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
1019                 if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
1020                         continue;
1021
1022                 r = mes_v10_1_init_microcode(adev, pipe);
1023                 if (r)
1024                         return r;
1025
1026                 r = mes_v10_1_allocate_eop_buf(adev, pipe);
1027                 if (r)
1028                         return r;
1029
1030                 r = mes_v10_1_mqd_sw_init(adev, pipe);
1031                 if (r)
1032                         return r;
1033         }
1034
1035         if (adev->enable_mes_kiq) {
1036                 r = mes_v10_1_kiq_ring_init(adev);
1037                 if (r)
1038                         return r;
1039         }
1040
1041         r = mes_v10_1_ring_init(adev);
1042         if (r)
1043                 return r;
1044
1045         return 0;
1046 }
1047
/*
 * IP-block sw_fini: release everything sw_init created, in reverse
 * dependency order — writeback slots, per-pipe backups/EOP BOs/firmware,
 * the MQD BOs, then the rings and the core MES state.
 *
 * Always returns 0.
 */
static int mes_v10_1_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int pipe;

        /* return the writeback slots reserved by amdgpu_mes_init() */
        amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
        amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);

        for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
                /* kfree(NULL) is a no-op, so unallocated pipes are fine */
                kfree(adev->mes.mqd_backup[pipe]);

                amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
                                      &adev->mes.eop_gpu_addr[pipe],
                                      NULL);

                mes_v10_1_free_microcode(adev, pipe);
        }

        amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
                              &adev->gfx.kiq.ring.mqd_gpu_addr,
                              &adev->gfx.kiq.ring.mqd_ptr);

        amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
                              &adev->mes.ring.mqd_gpu_addr,
                              &adev->mes.ring.mqd_ptr);

        amdgpu_ring_fini(&adev->gfx.kiq.ring);
        amdgpu_ring_fini(&adev->mes.ring);

        amdgpu_mes_fini(adev);
        return 0;
}
1080
/*
 * Tell the RLC which hardware queue acts as the KIQ.
 *
 * Writes the queue identity into the low byte of RLC_CP_SCHEDULERS
 * (bits: me << 5 | pipe << 3 | queue), then sets bit 7 in a second
 * write — presumably a "valid/active" latch; confirm against the RLC
 * register spec.  Sienna Cichlid-family parts use a relocated register.
 */
static void mes_v10_1_kiq_setting(struct amdgpu_ring *ring)
{
        uint32_t tmp;
        struct amdgpu_device *adev = ring->adev;

        /* tell RLC which is KIQ queue */
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(10, 3, 0):
        case IP_VERSION(10, 3, 2):
        case IP_VERSION(10, 3, 1):
        case IP_VERSION(10, 3, 4):
                tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
                tmp &= 0xffffff00;
                tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
                WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
                tmp |= 0x80;
                WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
                break;
        default:
                tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
                tmp &= 0xffffff00;
                tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
                WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
                tmp |= 0x80;
                WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
                break;
        }
}
1109
1110 static int mes_v10_1_kiq_hw_init(struct amdgpu_device *adev)
1111 {
1112         int r = 0;
1113
1114         if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1115                 r = mes_v10_1_load_microcode(adev, AMDGPU_MES_KIQ_PIPE);
1116                 if (r) {
1117                         DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
1118                         return r;
1119                 }
1120
1121                 r = mes_v10_1_load_microcode(adev, AMDGPU_MES_SCHED_PIPE);
1122                 if (r) {
1123                         DRM_ERROR("failed to load MES fw, r=%d\n", r);
1124                         return r;
1125                 }
1126         }
1127
1128         mes_v10_1_enable(adev, true);
1129
1130         mes_v10_1_kiq_setting(&adev->gfx.kiq.ring);
1131
1132         r = mes_v10_1_queue_init(adev);
1133         if (r)
1134                 goto failure;
1135
1136         return r;
1137
1138 failure:
1139         mes_v10_1_hw_fini(adev);
1140         return r;
1141 }
1142
1143 static int mes_v10_1_hw_init(void *handle)
1144 {
1145         int r;
1146         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1147
1148         if (!adev->enable_mes_kiq) {
1149                 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1150                         r = mes_v10_1_load_microcode(adev,
1151                                              AMDGPU_MES_SCHED_PIPE);
1152                         if (r) {
1153                                 DRM_ERROR("failed to MES fw, r=%d\n", r);
1154                                 return r;
1155                         }
1156                 }
1157
1158                 mes_v10_1_enable(adev, true);
1159         }
1160
1161         r = mes_v10_1_queue_init(adev);
1162         if (r)
1163                 goto failure;
1164
1165         r = mes_v10_1_set_hw_resources(&adev->mes);
1166         if (r)
1167                 goto failure;
1168
1169         mes_v10_1_init_aggregated_doorbell(&adev->mes);
1170
1171         r = mes_v10_1_query_sched_status(&adev->mes);
1172         if (r) {
1173                 DRM_ERROR("MES is busy\n");
1174                 goto failure;
1175         }
1176
1177         /*
1178          * Disable KIQ ring usage from the driver once MES is enabled.
1179          * MES uses KIQ ring exclusively so driver cannot access KIQ ring
1180          * with MES enabled.
1181          */
1182         adev->gfx.kiq.ring.sched.ready = false;
1183         adev->mes.ring.sched.ready = true;
1184
1185         return 0;
1186
1187 failure:
1188         mes_v10_1_hw_fini(adev);
1189         return r;
1190 }
1191
/*
 * IP-block hw_fini: stop the MES scheduler.
 *
 * Marks the MES ring unusable, halts the MES pipes, and (direct-load
 * path only) releases the ucode BOs that load_microcode created.
 *
 * Always returns 0.
 */
static int mes_v10_1_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* stop new submissions before halting the engine */
        adev->mes.ring.sched.ready = false;

        mes_v10_1_enable(adev, false);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
                mes_v10_1_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
                mes_v10_1_free_ucode_buffers(adev, AMDGPU_MES_SCHED_PIPE);
        }

        return 0;
}
1207
/*
 * IP-block suspend: persist MES scheduler state, then stop the hardware.
 */
static int mes_v10_1_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r = amdgpu_mes_suspend(adev);

        if (r)
                return r;

        return mes_v10_1_hw_fini(adev);
}
1219
/*
 * IP-block resume: bring the hardware back up, then restore MES state.
 */
static int mes_v10_1_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r = mes_v10_1_hw_init(adev);

        if (r)
                return r;

        return amdgpu_mes_resume(adev);
}
1231
/*
 * IP-block late_init: run the MES self test on a normal boot only —
 * skipped while a GPU reset is in progress.
 */
static int mes_v10_0_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_in_reset(adev))
                return 0;

        amdgpu_mes_self_test(adev);

        return 0;
}
1241
/* IP-block callback table for MES v10.1; hooks not listed (early_init,
 * is_idle, etc.) are intentionally left NULL. */
static const struct amd_ip_funcs mes_v10_1_ip_funcs = {
        .name = "mes_v10_1",
        .late_init = mes_v10_0_late_init,
        .sw_init = mes_v10_1_sw_init,
        .sw_fini = mes_v10_1_sw_fini,
        .hw_init = mes_v10_1_hw_init,
        .hw_fini = mes_v10_1_hw_fini,
        .suspend = mes_v10_1_suspend,
        .resume = mes_v10_1_resume,
};
1252
/* Exported IP-block descriptor registered by the SoC init code. */
const struct amdgpu_ip_block_version mes_v10_1_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_MES,
        .major = 10,
        .minor = 1,
        .rev = 0,
        .funcs = &mes_v10_1_ip_funcs,
};
This page took 0.112702 seconds and 4 git commands to generate.