drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}
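
/*
 * Worked example for the slice computation above: with 8-byte doorbells
 * and 1024 queues per process this is roundup(8 * 1024, PAGE_SIZE), i.e.
 * 8 KiB (two pages) on a 4 KiB-page system.
 */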

static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}
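
/*
 * Illustrative pairing of the two helpers above (a sketch, not driver
 * code): a caller allocates a doorbell slot for a queue, programs the
 * returned absolute dword offset, and releases the slot when the queue
 * goes away:
 *
 *	uint64_t db_off;
 *
 *	if (!amdgpu_mes_kernel_doorbell_get(adev, AMDGPU_RING_TYPE_COMPUTE,
 *					    &db_off)) {
 *		... program the MQD / MES queue with db_off ...
 *		amdgpu_mes_kernel_doorbell_free(adev, db_off);
 *	}
 */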

static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)\n", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
		spin_lock_init(&adev->mes.ring_lock[i]);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= adev->gfx.mec.num_pipe_per_mec)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) sch_ctx_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.sch_ctx_gpu_addr[i] =
			adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
		adev->mes.sch_ctx_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];

		r = amdgpu_device_wb_get(adev,
				 &adev->mes.query_status_fence_offs[i]);
		if (r) {
			dev_err(adev->dev,
			      "(%d) query_status_fence_offs wb alloc failed\n",
			      r);
			goto error;
		}
		adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
			(adev->mes.query_status_fence_offs[i] * 4);
		adev->mes.query_status_fence_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}
	if (adev->mes.read_val_ptr)
		amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}
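
/*
 * The writeback (wb) slots requested in amdgpu_mes_init() follow a
 * common pattern; as a standalone sketch (everything here comes from
 * the code above): each slot is a dword index into the shared wb
 * buffer, from which a GPU address and a CPU pointer are derived:
 *
 *	u32 offs;
 *
 *	if (!amdgpu_device_wb_get(adev, &offs)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (offs * 4);
 *		u32 *cpu_ptr = &adev->wb.wb[offs];
 *		...
 *		amdgpu_device_wb_free(adev, offs);
 *	}
 */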

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	int i;

	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}
	if (adev->mes.read_val_ptr)
		amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr entry for pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}
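
/*
 * The object hierarchy built on this API is process -> gang -> queue,
 * created top-down and torn down bottom-up. A condensed sketch (error
 * handling omitted; gprops/qprops are filled in by the caller, and
 * amdgpu_mes_add_gang() and friends are defined below):
 *
 *	int gang_id, queue_id;
 *
 *	amdgpu_mes_create_process(adev, pasid, vm);
 *	amdgpu_mes_add_gang(adev, pasid, &gprops, &gang_id);
 *	amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
 *	...
 *	amdgpu_mes_remove_hw_queue(adev, queue_id);
 *	amdgpu_mes_remove_gang(adev, gang_id);
 *	amdgpu_mes_destroy_process(adev, pasid);
 */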

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct mes_suspend_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
	input.suspend_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to suspend all gangs\n");

	return r;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct mes_resume_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
	input.resume_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to resume all gangs\n");

	return r;
}
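
/*
 * Suspend/resume are expected to bracket work that must quiesce all MES
 * gangs, e.g. around a disruptive operation such as a reset. A hedged
 * sketch of the calling pattern (not taken from a specific caller):
 *
 *	r = amdgpu_mes_suspend(adev);
 *	if (!r) {
 *		... disruptive work while all gangs are suspended ...
 *		r = amdgpu_mes_resume(adev);
 *	}
 */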

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)\n", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}
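
/*
 * A minimal sketch of the queue properties a caller fills in before
 * amdgpu_mes_add_hw_queue(); the values here mirror what
 * amdgpu_mes_ring_to_queue_props() below derives from a ring:
 *
 *	struct amdgpu_mes_queue_properties qprops = {0};
 *
 *	qprops.queue_type = AMDGPU_RING_TYPE_COMPUTE;
 *	qprops.hqd_base_gpu_addr = ring->gpu_addr;
 *	qprops.rptr_gpu_addr = ring->rptr_gpu_addr;
 *	qprops.wptr_gpu_addr = ring->wptr_gpu_addr;
 *	qprops.queue_size = ring->ring_size;
 *	qprops.eop_gpu_addr = ring->eop_gpu_addr;
 *	qprops.hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
 *	qprops.hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
 *
 * The doorbell offset is allocated inside amdgpu_mes_add_hw_queue()
 * itself and returned in qprops.doorbell_off.
 */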

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_reset_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* look the mes queue up in the idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
			  queue_id);

	amdgpu_mes_unlock(&adev->mes);

	return 0;
}

int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
				   int me_id, int pipe_id, int queue_id, int vmid)
{
	struct mes_reset_queue_input queue_input;
	int r;

	queue_input.queue_type = queue_type;
	queue_input.use_mmio = true;
	queue_input.me_id = me_id;
	queue_input.pipe_id = pipe_id;
	queue_input.queue_id = queue_id;
	queue_input.vmid = vmid;
	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
			  queue_id);
	return r;
}

int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	struct mes_map_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;

	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to map legacy queue\n");

	return r;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  unsigned int vmid,
				  bool use_mmio)
{
	struct mes_reset_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.me_id = ring->me;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;
	queue_input.vmid = vmid;
	queue_input.use_mmio = use_mmio;

	r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset legacy queue\n");

	return r;
}

uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}
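
/*
 * With these accessors, privileged registers can be read and written
 * through the MES firmware instead of direct MMIO. An illustrative
 * read-modify-write (SOME_BIT is a placeholder, not a real define):
 *
 *	uint32_t v = amdgpu_mes_rreg(adev, reg);
 *	amdgpu_mes_wreg(adev, reg, v | SOME_BIT);
 */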

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}
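
/*
 * Semantically, amdgpu_mes_reg_wait(adev, reg, val, mask) asks the MES
 * firmware to perform roughly this poll on the caller's behalf (a
 * pseudo-equivalent sketch only, not how the firmware implements it):
 *
 *	while ((RREG32(reg) & mask) != val)
 *		;
 *
 * and amdgpu_mes_reg_write_reg_wait() additionally writes ref to reg0
 * before polling reg1 the same way.
 */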

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to flush_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}
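
/*
 * Sketch of the intended split between the two debugger helpers above:
 * runtime settings go through amdgpu_mes_set_shader_debugger(), while a
 * process-context flush on teardown must use
 * amdgpu_mes_flush_shader_debugger() (passing the flush flag to the
 * setter is rejected). The arguments here are placeholders:
 *
 *	r = amdgpu_mes_set_shader_debugger(adev, proc_ctx_gpu_addr,
 *					   vmid_cntl, watch_cntl,
 *					   flags, true);
 *	...
 *	r = amdgpu_mes_flush_shader_debugger(adev, proc_ctx_gpu_addr);
 */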

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)
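
/*
 * For example, DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx) with
 * id_offs < AMDGPU_MES_CTX_MAX_OFFS reduces to:
 *
 *	return offsetof(struct amdgpu_mes_ctx_meta_data,
 *			gfx[ring->idx].slots[id_offs]);
 *
 * i.e. the macro maps a (ring, slot id) pair to a byte offset inside
 * the per-context meta data buffer.
 */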

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before amdgpu_mes_add_hw_queue() */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
}
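
/*
 * add/remove pairing as exercised by the self test below (sketch only;
 * error handling trimmed):
 *
 *	struct amdgpu_ring *ring;
 *
 *	r = amdgpu_mes_add_ring(adev, gang_id, AMDGPU_RING_TYPE_COMPUTE, 0,
 *				&ctx_data, &ring);
 *	if (!r) {
 *		r = amdgpu_ring_test_helper(ring);
 *		amdgpu_mes_remove_ring(adev, ring);
 *	}
 */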

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						   enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}
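
/*
 * Meta-data buffer lifecycle, in the order amdgpu_mes_self_test() below
 * uses it (illustrative ordering, error handling omitted):
 *
 *	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
 *	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
 *	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
 *	...
 *	r = amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
 *	amdgpu_mes_ctx_free_meta_data(&ctx_data);
 */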
1419
1420 static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
1421                                           int pasid, int *gang_id,
1422                                           int queue_type, int num_queue,
1423                                           struct amdgpu_ring **added_rings,
1424                                           struct amdgpu_mes_ctx_data *ctx_data)
1425 {
1426         struct amdgpu_ring *ring;
1427         struct amdgpu_mes_gang_properties gprops = {0};
1428         int r, j;
1429
1430         /* create a gang for the process */
1431         gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1432         gprops.gang_quantum = adev->mes.default_gang_quantum;
1433         gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1434         gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1435         gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1436
1437         r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
1438         if (r) {
1439                 DRM_ERROR("failed to add gang\n");
1440                 return r;
1441         }
1442
1443         /* create queues for the gang */
1444         for (j = 0; j < num_queue; j++) {
1445                 r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
1446                                         ctx_data, &ring);
1447                 if (r) {
1448                         DRM_ERROR("failed to add ring\n");
1449                         break;
1450                 }
1451
1452                 DRM_INFO("ring %s was added\n", ring->name);
1453                 added_rings[j] = ring;
1454         }
1455
1456         return 0;
1457 }
1458
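/*
 * Run the ring test and an IB test on every ring the self test added;
 * NULL slots in @added_rings are skipped.
 */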
1459 static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
1460 {
1461         struct amdgpu_ring *ring;
1462         int i, r;
1463
1464         for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
1465                 ring = added_rings[i];
1466                 if (!ring)
1467                         continue;
1468
1469                 r = amdgpu_ring_test_helper(ring);
1470                 if (r)
1471                         return r;
1472
1473                 r = amdgpu_ring_test_ib(ring, 1000 * 10);
1474                 if (r) {
1475                         DRM_DEV_ERROR(ring->adev->dev,
1476                                       "ring %s ib test failed (%d)\n",
1477                                       ring->name, r);
1478                         return r;
1479                 }
1480                 DRM_INFO("ring %s ib test passed\n", ring->name);
1481         }
1482
1483         return 0;
1484 }
1485
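/*
 * amdgpu_mes_self_test - exercise the MES scheduler end to end
 *
 * Allocates a PASID and a GPU VM, maps the context meta data, creates a MES
 * process plus one gang per queue type (GFX, compute, SDMA), and runs ring
 * and IB tests on every queue before tearing everything down again.  Always
 * returns 0; failures are only reported through the kernel log.
 */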
1486 int amdgpu_mes_self_test(struct amdgpu_device *adev)
1487 {
1488         struct amdgpu_vm *vm = NULL;
1489         struct amdgpu_mes_ctx_data ctx_data = {0};
1490         struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
1491         int gang_ids[3] = {0};
1492         int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
1493                                  { AMDGPU_RING_TYPE_COMPUTE, 1 },
1494                                  { AMDGPU_RING_TYPE_SDMA, 1} };
1495         int i, r, pasid, k = 0;
1496
1497         pasid = amdgpu_pasid_alloc(16);
1498         if (pasid < 0) {
1499                 dev_warn(adev->dev, "No more PASIDs available!\n");
1500                 pasid = 0;
1501         }
1502
1503         vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1504         if (!vm) {
1505                 r = -ENOMEM;
1506                 goto error_pasid;
1507         }
1508
1509         r = amdgpu_vm_init(adev, vm, -1);
1510         if (r) {
1511                 DRM_ERROR("failed to initialize vm\n");
1512                 goto error_pasid;
1513         }
1514
1515         r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
1516         if (r) {
1517                 DRM_ERROR("failed to alloc ctx meta data\n");
1518                 goto error_fini;
1519         }
1520
1521         ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
1522         r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
1523         if (r) {
1524                 DRM_ERROR("failed to map ctx meta data\n");
1525                 goto error_vm;
1526         }
1527
1528         r = amdgpu_mes_create_process(adev, pasid, vm);
1529         if (r) {
1530                 DRM_ERROR("failed to create MES process\n");
1531                 goto error_vm;
1532         }
1533
1534         for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
1535                 /* On GFX v10.3, the MES firmware doesn't support mapping SDMA queues yet. */
1536                 if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
1537                             IP_VERSION(10, 3, 0) &&
1538                     amdgpu_ip_version(adev, GC_HWIP, 0) <
1539                             IP_VERSION(11, 0, 0) &&
1540                     queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
1541                         continue;
1542
1543                 r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
1544                                                            &gang_ids[i],
1545                                                            queue_types[i][0],
1546                                                            queue_types[i][1],
1547                                                            &added_rings[k],
1548                                                            &ctx_data);
1549                 if (r)
1550                         goto error_queues;
1551
1552                 k += queue_types[i][1];
1553         }
1554
1555         /* start ring test and ib test for MES queues */
1556         amdgpu_mes_test_queues(added_rings);
1557
1558 error_queues:
1559         /* remove all queues */
1560         for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
1561                 if (!added_rings[i])
1562                         continue;
1563                 amdgpu_mes_remove_ring(adev, added_rings[i]);
1564         }
1565
1566         for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
1567                 if (!gang_ids[i])
1568                         continue;
1569                 amdgpu_mes_remove_gang(adev, gang_ids[i]);
1570         }
1571
1572         amdgpu_mes_destroy_process(adev, pasid);
1573
1574 error_vm:
1575         amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
1576
1577 error_fini:
1578         amdgpu_vm_fini(adev, vm);
1579
1580 error_pasid:
1581         if (pasid)
1582                 amdgpu_pasid_free(pasid);
1583
1584         amdgpu_mes_ctx_free_meta_data(&ctx_data);
1585         kfree(vm);
1586         return 0;
1587 }
1588
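/*
 * amdgpu_mes_init_microcode - request the MES firmware for one pipe
 *
 * Builds the firmware file name from the GC IP version (plus the pipe and
 * whether unified MES is enabled), requests it, caches the ucode and data
 * start addresses from the firmware header, and, for PSP-managed loading,
 * registers the instruction and data images in the ucode table.
 */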
1589 int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
1590 {
1591         const struct mes_firmware_header_v1_0 *mes_hdr;
1592         struct amdgpu_firmware_info *info;
1593         char ucode_prefix[30];
1594         char fw_name[50];
1595         bool need_retry = false;
1596         int r;
1597
1598         amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
1599                                        sizeof(ucode_prefix));
1600         if (adev->enable_uni_mes) {
1601                 snprintf(fw_name, sizeof(fw_name),
1602                          "amdgpu/%s_uni_mes.bin", ucode_prefix);
1603         } else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
1604             amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
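                /*
                 * GC 11.x naming is asymmetric: the scheduler pipe loads
                 * <prefix>_mes_2.bin while the KIQ pipe loads
                 * <prefix>_mes1.bin; the older <prefix>_mes.bin name is
                 * retried below if the _2 variant is missing.
                 */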
1605                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
1606                          ucode_prefix,
1607                          pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
1608                 need_retry = true;
1609         } else {
1610                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
1611                          ucode_prefix,
1612                          pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
1613         }
1614
1615         r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
1616         if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
1617                 dev_info(adev->dev, "falling back to %s_mes.bin\n", ucode_prefix);
1618                 r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
1619                                          "amdgpu/%s_mes.bin", ucode_prefix);
1620         }
1621
1622         if (r)
1623                 goto out;
1624
1625         mes_hdr = (const struct mes_firmware_header_v1_0 *)
1626                 adev->mes.fw[pipe]->data;
1627         adev->mes.uc_start_addr[pipe] =
1628                 le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
1629                 ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
1630         adev->mes.data_start_addr[pipe] =
1631                 le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
1632                 ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
1633
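        /*
         * With PSP (front door) firmware loading, the MES instruction and
         * data images must be registered in the ucode table so that PSP
         * loads them.
         */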
1634         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1635                 int ucode, ucode_data;
1636
1637                 if (pipe == AMDGPU_MES_SCHED_PIPE) {
1638                         ucode = AMDGPU_UCODE_ID_CP_MES;
1639                         ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
1640                 } else {
1641                         ucode = AMDGPU_UCODE_ID_CP_MES1;
1642                         ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
1643                 }
1644
1645                 info = &adev->firmware.ucode[ucode];
1646                 info->ucode_id = ucode;
1647                 info->fw = adev->mes.fw[pipe];
1648                 adev->firmware.fw_size +=
1649                         ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
1650                               PAGE_SIZE);
1651
1652                 info = &adev->firmware.ucode[ucode_data];
1653                 info->ucode_id = ucode_data;
1654                 info->fw = adev->mes.fw[pipe];
1655                 adev->firmware.fw_size +=
1656                         ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
1657                               PAGE_SIZE);
1658         }
1659
1660         return 0;
1661 out:
1662         amdgpu_ucode_release(&adev->mes.fw[pipe]);
1663         return r;
1664 }
1665
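/*
 * Report whether the loaded MES scheduler firmware is new enough (GC 11.x,
 * revision 0x63 or later) to support suspending and resuming all gangs.
 */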
1666 bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
1667 {
1668         uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
1669         bool is_supported = false;
1670
1671         if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
1672             amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
1673             mes_rev >= 0x63)
1674                 is_supported = true;
1675
1676         return is_supported;
1677 }
1678
1679 #if defined(CONFIG_DEBUG_FS)
1680
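/* Hex-dump the MES event log buffer (32-bit words) to the debugfs file. */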
1681 static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
1682 {
1683         struct amdgpu_device *adev = m->private;
1684         uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
1685
1686         seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
1687                      mem, adev->mes.event_log_size, false);
1688
1689         return 0;
1690 }
1691
1692 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
1693
1694 #endif
1695
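/*
 * Expose the MES event log through a read-only debugfs file when both MES
 * and MES event logging are enabled; a no-op without CONFIG_DEBUG_FS.
 */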
1696 void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
1697 {
1698 #if defined(CONFIG_DEBUG_FS)
1699         struct drm_minor *minor = adev_to_drm(adev)->primary;
1700         struct dentry *root = minor->debugfs_root;
1701
1702         if (adev->enable_mes && amdgpu_mes_log_enable)
1703                 debugfs_create_file("amdgpu_mes_event_log", 0444, root,
1704                                     adev, &amdgpu_debugfs_mes_event_log_fops);
1705
1706 #endif
1707 }