[linux.git] drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25
26 #include "amdgpu_mes.h"
27 #include "amdgpu.h"
28 #include "soc15_common.h"
29 #include "amdgpu_mes_ctx.h"
30
31 #define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
32 #define AMDGPU_ONE_DOORBELL_SIZE 8
33
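/**
 * amdgpu_mes_doorbell_process_slice - size of one process' doorbell slice
 * @adev: amdgpu device pointer
 *
 * Returns the doorbell space reserved per MES process: one 64-bit doorbell
 * per possible queue, rounded up to a whole page.
 */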
34 int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
35 {
36         return roundup(AMDGPU_ONE_DOORBELL_SIZE *
37                        AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
38                        PAGE_SIZE);
39 }
40
41 int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
42                                       unsigned int *doorbell_index)
43 {
44         int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
45                                adev->mes.max_doorbell_slices,
46                                GFP_KERNEL);
47         if (r > 0)
48                 *doorbell_index = r;
49
50         return r;
51 }
52
53 void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
54                                       unsigned int doorbell_index)
55 {
56         if (doorbell_index)
57                 ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
58 }
59
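/**
 * amdgpu_mes_get_doorbell_dw_offset_in_bar - doorbell dword offset in the BAR
 * @adev: amdgpu device pointer
 * @doorbell_index: index of the process' doorbell slice
 * @doorbell_id: doorbell id within the process slice
 *
 * Converts a per-process doorbell id into a dword offset from the start of
 * the doorbell BAR; each queue doorbell is 64 bits wide, hence the factor
 * of two.
 */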
60 unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
61                                         struct amdgpu_device *adev,
62                                         uint32_t doorbell_index,
63                                         unsigned int doorbell_id)
64 {
65         return ((doorbell_index *
66                 amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
67                 doorbell_id * 2);
68 }
69
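/**
 * amdgpu_mes_queue_doorbell_get - allocate a doorbell for a queue
 * @adev: amdgpu device pointer
 * @process: MES process the queue belongs to
 * @ip_type: ring type of the queue (GFX, compute, SDMA, ...)
 * @doorbell_index: returned dword offset of the doorbell in the BAR
 *
 * Finds a free bit in the process' doorbell bitmap (starting at the SDMA
 * engine offset for SDMA queues), marks it as used and returns the
 * corresponding dword offset.  Returns -ENOSPC if no doorbell is free.
 */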
70 static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
71                                          struct amdgpu_mes_process *process,
72                                          int ip_type, uint64_t *doorbell_index)
73 {
74         unsigned int offset, found;
75
76         if (ip_type == AMDGPU_RING_TYPE_SDMA) {
77                 offset = adev->doorbell_index.sdma_engine[0];
78                 found = find_next_zero_bit(process->doorbell_bitmap,
79                                            AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
80                                            offset);
81         } else {
82                 found = find_first_zero_bit(process->doorbell_bitmap,
83                                             AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
84         }
85
86         if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
87                 DRM_WARN("No doorbell available\n");
88                 return -ENOSPC;
89         }
90
91         set_bit(found, process->doorbell_bitmap);
92
93         *doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
94                                 process->doorbell_index, found);
95
96         return 0;
97 }
98
99 static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
100                                            struct amdgpu_mes_process *process,
101                                            uint32_t doorbell_index)
102 {
103         unsigned int old, doorbell_id;
104
105         doorbell_id = doorbell_index -
106                 (process->doorbell_index *
107                  amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
108         doorbell_id /= 2;
109
110         old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
111         WARN_ON(!old);
112 }
113
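/**
 * amdgpu_mes_doorbell_init - carve up the doorbell aperture for MES use
 * @adev: amdgpu device pointer
 *
 * Places the aggregated doorbells right after the doorbells already assigned
 * by the driver, reserves a page for them, and uses the remainder of the
 * doorbell aperture for per-process doorbell slices.  Returns -ENOSPC if the
 * aperture is too small.
 */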
114 static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
115 {
116         size_t doorbell_start_offset;
117         size_t doorbell_aperture_size;
118         size_t doorbell_process_limit;
119         size_t aggregated_doorbell_start;
120         int i;
121
122         aggregated_doorbell_start = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
123         aggregated_doorbell_start =
124                 roundup(aggregated_doorbell_start, PAGE_SIZE);
125
126         doorbell_start_offset = aggregated_doorbell_start + PAGE_SIZE;
127         doorbell_start_offset =
128                 roundup(doorbell_start_offset,
129                         amdgpu_mes_doorbell_process_slice(adev));
130
131         doorbell_aperture_size = adev->doorbell.size;
132         doorbell_aperture_size =
133                         rounddown(doorbell_aperture_size,
134                                   amdgpu_mes_doorbell_process_slice(adev));
135
136         if (doorbell_aperture_size > doorbell_start_offset)
137                 doorbell_process_limit =
138                         (doorbell_aperture_size - doorbell_start_offset) /
139                         amdgpu_mes_doorbell_process_slice(adev);
140         else
141                 return -ENOSPC;
142
143         adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
144         adev->mes.max_doorbell_slices = doorbell_process_limit;
145
146         /* allocate Qword range for aggregated doorbell */
147         for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
148                 adev->mes.aggregated_doorbells[i] =
149                         aggregated_doorbell_start / sizeof(u32) + i * 2;
150
151         DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
152         return 0;
153 }
154
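/**
 * amdgpu_mes_init - initialize the MES (Micro Engine Scheduler) state
 * @adev: amdgpu device pointer
 *
 * Sets up the IDR/IDA allocators and locks, the VMID and HQD masks handed to
 * the MES firmware, the writeback slots used for the scheduler context, the
 * query-status fence and register reads, and the doorbell layout.
 */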
155 int amdgpu_mes_init(struct amdgpu_device *adev)
156 {
157         int i, r;
158
159         adev->mes.adev = adev;
160
161         idr_init(&adev->mes.pasid_idr);
162         idr_init(&adev->mes.gang_id_idr);
163         idr_init(&adev->mes.queue_id_idr);
164         ida_init(&adev->mes.doorbell_ida);
165         spin_lock_init(&adev->mes.queue_id_lock);
166         spin_lock_init(&adev->mes.ring_lock);
167         mutex_init(&adev->mes.mutex_hidden);
168
169         adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
170         adev->mes.vmid_mask_mmhub = 0xffffff00;
171         adev->mes.vmid_mask_gfxhub = 0xffffff00;
172
173         for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
174                 /* use only 1st MEC pipes */
175                 if (i >= 4)
176                         continue;
177                 adev->mes.compute_hqd_mask[i] = 0xc;
178         }
179
180         for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
181                 adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;
182
183         for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
184                 if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
185                         adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
186                 /* zero sdma_hqd_mask for non-existent engine */
187                 else if (adev->sdma.num_instances == 1)
188                         adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
189                 else
190                         adev->mes.sdma_hqd_mask[i] = 0xfc;
191         }
192
193         r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
194         if (r) {
195                 dev_err(adev->dev,
196                         "(%d) sch_ctx_offs wb alloc failed\n", r);
197                 goto error_ids;
198         }
199         adev->mes.sch_ctx_gpu_addr =
200                 adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
201         adev->mes.sch_ctx_ptr =
202                 (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];
203
204         r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
205         if (r) {
206                 amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
207                 dev_err(adev->dev,
208                         "(%d) query_status_fence_offs wb alloc failed\n", r);
209                 goto error_ids;
210         }
211         adev->mes.query_status_fence_gpu_addr =
212                 adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
213         adev->mes.query_status_fence_ptr =
214                 (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];
215
216         r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
217         if (r) {
218                 amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
219                 amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
220                 dev_err(adev->dev,
221                         "(%d) read_val_offs alloc failed\n", r);
222                 goto error_ids;
223         }
224         adev->mes.read_val_gpu_addr =
225                 adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
226         adev->mes.read_val_ptr =
227                 (uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];
228
229         r = amdgpu_mes_doorbell_init(adev);
230         if (r)
231                 goto error;
232
233         return 0;
234
235 error:
236         amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
237         amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
238         amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
239 error_ids:
240         idr_destroy(&adev->mes.pasid_idr);
241         idr_destroy(&adev->mes.gang_id_idr);
242         idr_destroy(&adev->mes.queue_id_idr);
243         ida_destroy(&adev->mes.doorbell_ida);
244         mutex_destroy(&adev->mes.mutex_hidden);
245         return r;
246 }
247
248 void amdgpu_mes_fini(struct amdgpu_device *adev)
249 {
250         amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
251         amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
252         amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
253
254         idr_destroy(&adev->mes.pasid_idr);
255         idr_destroy(&adev->mes.gang_id_idr);
256         idr_destroy(&adev->mes.queue_id_idr);
257         ida_destroy(&adev->mes.doorbell_ida);
258         mutex_destroy(&adev->mes.mutex_hidden);
259 }
260
261 static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
262 {
263         amdgpu_bo_free_kernel(&q->mqd_obj,
264                               &q->mqd_gpu_addr,
265                               &q->mqd_cpu_ptr);
266 }
267
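/**
 * amdgpu_mes_create_process - create an MES process for a PASID
 * @adev: amdgpu device pointer
 * @pasid: PASID of the process
 * @vm: VM the process runs in
 *
 * Allocates the process context buffer, the doorbell bitmap and the starting
 * doorbell slice, and registers the process in the PASID IDR.
 */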
268 int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
269                               struct amdgpu_vm *vm)
270 {
271         struct amdgpu_mes_process *process;
272         int r;
273
274         /* allocate the mes process buffer */
275         process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
276         if (!process) {
277                 DRM_ERROR("no more memory to create mes process\n");
278                 return -ENOMEM;
279         }
280
281         process->doorbell_bitmap =
282                 kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
283                                      BITS_PER_BYTE), GFP_KERNEL);
284         if (!process->doorbell_bitmap) {
285                 DRM_ERROR("failed to allocate doorbell bitmap\n");
286                 kfree(process);
287                 return -ENOMEM;
288         }
289
290         /* allocate the process context bo and map it */
291         r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
292                                     AMDGPU_GEM_DOMAIN_GTT,
293                                     &process->proc_ctx_bo,
294                                     &process->proc_ctx_gpu_addr,
295                                     &process->proc_ctx_cpu_ptr);
296         if (r) {
297                 DRM_ERROR("failed to allocate process context bo\n");
298                 goto clean_up_memory;
299         }
300         memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
301
302         /*
303          * Avoid taking any other locks under MES lock to avoid circular
304          * lock dependencies.
305          */
306         amdgpu_mes_lock(&adev->mes);
307
308         /* add the mes process to idr list */
309         r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
310                       GFP_KERNEL);
311         if (r < 0) {
312                 DRM_ERROR("failed to allocate idr entry for pasid=%d\n", pasid);
313                 goto clean_up_ctx;
314         }
315
316         /* allocate the starting doorbell index of the process */
317         r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
318         if (r < 0) {
319                 DRM_ERROR("failed to allocate doorbell for process\n");
320                 goto clean_up_pasid;
321         }
322
323         DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);
324
325         INIT_LIST_HEAD(&process->gang_list);
326         process->vm = vm;
327         process->pasid = pasid;
328         process->process_quantum = adev->mes.default_process_quantum;
329         process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);
330
331         amdgpu_mes_unlock(&adev->mes);
332         return 0;
333
334 clean_up_pasid:
335         idr_remove(&adev->mes.pasid_idr, pasid);
336         amdgpu_mes_unlock(&adev->mes);
337 clean_up_ctx:
338         amdgpu_bo_free_kernel(&process->proc_ctx_bo,
339                               &process->proc_ctx_gpu_addr,
340                               &process->proc_ctx_cpu_ptr);
341 clean_up_memory:
342         kfree(process->doorbell_bitmap);
343         kfree(process);
344         return r;
345 }
346
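/**
 * amdgpu_mes_destroy_process - tear down an MES process
 * @adev: amdgpu device pointer
 * @pasid: PASID of the process to destroy
 *
 * Removes all of the process' hardware queues from the MES scheduler, drops
 * the ID and doorbell allocations, then frees every queue, gang and context
 * buffer that belonged to the process.
 */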
347 void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
348 {
349         struct amdgpu_mes_process *process;
350         struct amdgpu_mes_gang *gang, *tmp1;
351         struct amdgpu_mes_queue *queue, *tmp2;
352         struct mes_remove_queue_input queue_input;
353         unsigned long flags;
354         int r;
355
356         /*
357          * Avoid taking any other locks under MES lock to avoid circular
358          * lock dependencies.
359          */
360         amdgpu_mes_lock(&adev->mes);
361
362         process = idr_find(&adev->mes.pasid_idr, pasid);
363         if (!process) {
364                 DRM_WARN("pasid %d doesn't exist\n", pasid);
365                 amdgpu_mes_unlock(&adev->mes);
366                 return;
367         }
368
369         /* Remove all queues from hardware */
370         list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
371                 list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
372                         spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
373                         idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
374                         spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
375
376                         queue_input.doorbell_offset = queue->doorbell_off;
377                         queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
378
379                         r = adev->mes.funcs->remove_hw_queue(&adev->mes,
380                                                              &queue_input);
381                         if (r)
382                                 DRM_WARN("failed to remove hardware queue\n");
383                 }
384
385                 idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
386         }
387
388         amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
389         idr_remove(&adev->mes.pasid_idr, pasid);
390         amdgpu_mes_unlock(&adev->mes);
391
392         /* free all memory allocated by the process */
393         list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
394                 /* free all queues in the gang */
395                 list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
396                         amdgpu_mes_queue_free_mqd(queue);
397                         list_del(&queue->list);
398                         kfree(queue);
399                 }
400                 amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
401                                       &gang->gang_ctx_gpu_addr,
402                                       &gang->gang_ctx_cpu_ptr);
403                 list_del(&gang->list);
404                 kfree(gang);
405
406         }
407         amdgpu_bo_free_kernel(&process->proc_ctx_bo,
408                               &process->proc_ctx_gpu_addr,
409                               &process->proc_ctx_cpu_ptr);
410         kfree(process->doorbell_bitmap);
411         kfree(process);
412 }
413
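/**
 * amdgpu_mes_add_gang - add a gang to an MES process
 * @adev: amdgpu device pointer
 * @pasid: PASID of the owning process
 * @gprops: gang properties (priority, quantum, ...)
 * @gang_id: returned gang id
 *
 * Allocates and maps the gang context buffer, assigns a gang id and links
 * the gang into the process' gang list.
 */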
414 int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
415                         struct amdgpu_mes_gang_properties *gprops,
416                         int *gang_id)
417 {
418         struct amdgpu_mes_process *process;
419         struct amdgpu_mes_gang *gang;
420         int r;
421
422         /* allocate the mes gang buffer */
423         gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
424         if (!gang) {
425                 return -ENOMEM;
426         }
427
428         /* allocate the gang context bo and map it to cpu space */
429         r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
430                                     AMDGPU_GEM_DOMAIN_GTT,
431                                     &gang->gang_ctx_bo,
432                                     &gang->gang_ctx_gpu_addr,
433                                     &gang->gang_ctx_cpu_ptr);
434         if (r) {
435                 DRM_ERROR("failed to allocate gang context bo\n");
436                 goto clean_up_mem;
437         }
438         memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
439
440         /*
441          * Avoid taking any other locks under MES lock to avoid circular
442          * lock dependencies.
443          */
444         amdgpu_mes_lock(&adev->mes);
445
446         process = idr_find(&adev->mes.pasid_idr, pasid);
447         if (!process) {
448                 DRM_ERROR("pasid %d doesn't exist\n", pasid);
449                 r = -EINVAL;
450                 goto clean_up_ctx;
451         }
452
453         /* add the mes gang to idr list */
454         r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
455                       GFP_KERNEL);
456         if (r < 0) {
457                 DRM_ERROR("failed to allocate idr for gang\n");
458                 goto clean_up_ctx;
459         }
460
461         gang->gang_id = r;
462         *gang_id = r;
463
464         INIT_LIST_HEAD(&gang->queue_list);
465         gang->process = process;
466         gang->priority = gprops->priority;
467         gang->gang_quantum = gprops->gang_quantum ?
468                 gprops->gang_quantum : adev->mes.default_gang_quantum;
469         gang->global_priority_level = gprops->global_priority_level;
470         gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
471         list_add_tail(&gang->list, &process->gang_list);
472
473         amdgpu_mes_unlock(&adev->mes);
474         return 0;
475
476 clean_up_ctx:
477         amdgpu_mes_unlock(&adev->mes);
478         amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
479                               &gang->gang_ctx_gpu_addr,
480                               &gang->gang_ctx_cpu_ptr);
481 clean_up_mem:
482         kfree(gang);
483         return r;
484 }
485
486 int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
487 {
488         struct amdgpu_mes_gang *gang;
489
490         /*
491          * Avoid taking any other locks under MES lock to avoid circular
492          * lock dependencies.
493          */
494         amdgpu_mes_lock(&adev->mes);
495
496         gang = idr_find(&adev->mes.gang_id_idr, gang_id);
497         if (!gang) {
498                 DRM_ERROR("gang id %d doesn't exist\n", gang_id);
499                 amdgpu_mes_unlock(&adev->mes);
500                 return -EINVAL;
501         }
502
503         if (!list_empty(&gang->queue_list)) {
504                 DRM_ERROR("queue list is not empty\n");
505                 amdgpu_mes_unlock(&adev->mes);
506                 return -EBUSY;
507         }
508
509         idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
510         list_del(&gang->list);
511         amdgpu_mes_unlock(&adev->mes);
512
513         amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
514                               &gang->gang_ctx_gpu_addr,
515                               &gang->gang_ctx_cpu_ptr);
516
517         kfree(gang);
518
519         return 0;
520 }
521
522 int amdgpu_mes_suspend(struct amdgpu_device *adev)
523 {
524         struct idr *idp;
525         struct amdgpu_mes_process *process;
526         struct amdgpu_mes_gang *gang;
527         struct mes_suspend_gang_input input;
528         int r, pasid;
529
530         /*
531          * Avoid taking any other locks under MES lock to avoid circular
532          * lock dependencies.
533          */
534         amdgpu_mes_lock(&adev->mes);
535
536         idp = &adev->mes.pasid_idr;
537
538         idr_for_each_entry(idp, process, pasid) {
539                 list_for_each_entry(gang, &process->gang_list, list) {
540                         r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
541                         if (r)
542                                 DRM_ERROR("failed to suspend pasid %d gangid %d\n",
543                                          pasid, gang->gang_id);
544                 }
545         }
546
547         amdgpu_mes_unlock(&adev->mes);
548         return 0;
549 }
550
551 int amdgpu_mes_resume(struct amdgpu_device *adev)
552 {
553         struct idr *idp;
554         struct amdgpu_mes_process *process;
555         struct amdgpu_mes_gang *gang;
556         struct mes_resume_gang_input input;
557         int r, pasid;
558
559         /*
560          * Avoid taking any other locks under MES lock to avoid circular
561          * lock dependencies.
562          */
563         amdgpu_mes_lock(&adev->mes);
564
565         idp = &adev->mes.pasid_idr;
566
567         idr_for_each_entry(idp, process, pasid) {
568                 list_for_each_entry(gang, &process->gang_list, list) {
569                         r = adev->mes.funcs->resume_gang(&adev->mes, &input);
570                         if (r)
571                                 DRM_ERROR("failed to resume pasid %d gangid %d\n",
572                                          pasid, gang->gang_id);
573                 }
574         }
575
576         amdgpu_mes_unlock(&adev->mes);
577         return 0;
578 }
579
580 static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
581                                      struct amdgpu_mes_queue *q,
582                                      struct amdgpu_mes_queue_properties *p)
583 {
584         struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
585         u32 mqd_size = mqd_mgr->mqd_size;
586         int r;
587
588         r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
589                                     AMDGPU_GEM_DOMAIN_GTT,
590                                     &q->mqd_obj,
591                                     &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
592         if (r) {
593                 dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
594                 return r;
595         }
596         memset(q->mqd_cpu_ptr, 0, mqd_size);
597
598         r = amdgpu_bo_reserve(q->mqd_obj, false);
599         if (unlikely(r != 0))
600                 goto clean_up;
601
602         return 0;
603
604 clean_up:
605         amdgpu_bo_free_kernel(&q->mqd_obj,
606                               &q->mqd_gpu_addr,
607                               &q->mqd_cpu_ptr);
608         return r;
609 }
610
611 static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
612                                      struct amdgpu_mes_queue *q,
613                                      struct amdgpu_mes_queue_properties *p)
614 {
615         struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
616         struct amdgpu_mqd_prop mqd_prop = {0};
617
618         mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
619         mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
620         mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
621         mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
622         mqd_prop.queue_size = p->queue_size;
623         mqd_prop.use_doorbell = true;
624         mqd_prop.doorbell_index = p->doorbell_off;
625         mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
626         mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
627         mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
628         mqd_prop.hqd_active = false;
629
630         mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
631
632         amdgpu_bo_unreserve(q->mqd_obj);
633 }
634
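/**
 * amdgpu_mes_add_hw_queue - add a hardware queue to the MES scheduler
 * @adev: amdgpu device pointer
 * @gang_id: id of the gang the queue belongs to
 * @qprops: queue properties (type, ring buffer, rptr/wptr addresses, ...)
 * @queue_id: returned queue id
 *
 * Allocates and initializes the MQD for the queue, assigns a queue id and a
 * doorbell, then asks the MES firmware to schedule the queue via the
 * add_hw_queue callback.
 */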
635 int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
636                             struct amdgpu_mes_queue_properties *qprops,
637                             int *queue_id)
638 {
639         struct amdgpu_mes_queue *queue;
640         struct amdgpu_mes_gang *gang;
641         struct mes_add_queue_input queue_input;
642         unsigned long flags;
643         int r;
644
645         memset(&queue_input, 0, sizeof(struct mes_add_queue_input));
646
647         /* allocate the mes queue buffer */
648         queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
649         if (!queue) {
650                 DRM_ERROR("Failed to allocate memory for queue\n");
651                 return -ENOMEM;
652         }
653
654         /* Allocate the queue mqd */
655         r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
656         if (r)
657                 goto clean_up_memory;
658
659         /*
660          * Avoid taking any other locks under MES lock to avoid circular
661          * lock dependencies.
662          */
663         amdgpu_mes_lock(&adev->mes);
664
665         gang = idr_find(&adev->mes.gang_id_idr, gang_id);
666         if (!gang) {
667                 DRM_ERROR("gang id %d doesn't exist\n", gang_id);
668                 r = -EINVAL;
669                 goto clean_up_mqd;
670         }
671
672         /* add the mes queue to idr list */
673         spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
674         r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
675                       GFP_ATOMIC);
676         if (r < 0) {
677                 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
678                 goto clean_up_mqd;
679         }
680         spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
681         *queue_id = queue->queue_id = r;
682
683         /* allocate a doorbell index for the queue */
684         r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
685                                           qprops->queue_type,
686                                           &qprops->doorbell_off);
687         if (r)
688                 goto clean_up_queue_id;
689
690         /* initialize the queue mqd */
691         amdgpu_mes_queue_init_mqd(adev, queue, qprops);
692
693         /* add hw queue to mes */
694         queue_input.process_id = gang->process->pasid;
695
696         queue_input.page_table_base_addr =
697                 adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
698                 adev->gmc.vram_start;
699
700         queue_input.process_va_start = 0;
701         queue_input.process_va_end =
702                 (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
703         queue_input.process_quantum = gang->process->process_quantum;
704         queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
705         queue_input.gang_quantum = gang->gang_quantum;
706         queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
707         queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
708         queue_input.gang_global_priority_level = gang->global_priority_level;
709         queue_input.doorbell_offset = qprops->doorbell_off;
710         queue_input.mqd_addr = queue->mqd_gpu_addr;
711         queue_input.wptr_addr = qprops->wptr_gpu_addr;
712         queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
713         queue_input.queue_type = qprops->queue_type;
714         queue_input.paging = qprops->paging;
715         queue_input.is_kfd_process = 0;
716
717         r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
718         if (r) {
719                 DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
720                           qprops->doorbell_off);
721                 goto clean_up_doorbell;
722         }
723
724         DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
725                   "queue type=%d, doorbell=0x%llx\n",
726                   gang->process->pasid, gang_id, qprops->queue_type,
727                   qprops->doorbell_off);
728
729         queue->ring = qprops->ring;
730         queue->doorbell_off = qprops->doorbell_off;
731         queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
732         queue->queue_type = qprops->queue_type;
733         queue->paging = qprops->paging;
734         queue->gang = gang;
735         queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
736         list_add_tail(&queue->list, &gang->queue_list);
737
738         amdgpu_mes_unlock(&adev->mes);
739         return 0;
740
741 clean_up_doorbell:
742         amdgpu_mes_queue_doorbell_free(adev, gang->process,
743                                        qprops->doorbell_off);
744 clean_up_queue_id:
745         spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
746         idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
747         spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
748 clean_up_mqd:
749         amdgpu_mes_unlock(&adev->mes);
750         amdgpu_mes_queue_free_mqd(queue);
751 clean_up_memory:
752         kfree(queue);
753         return r;
754 }
755
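/**
 * amdgpu_mes_remove_hw_queue - remove a hardware queue from the MES scheduler
 * @adev: amdgpu device pointer
 * @queue_id: id of the queue to remove
 *
 * Looks the queue up by id, asks the MES firmware to unschedule it, and
 * releases its doorbell, MQD and memory.
 */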
756 int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
757 {
758         unsigned long flags;
759         struct amdgpu_mes_queue *queue;
760         struct amdgpu_mes_gang *gang;
761         struct mes_remove_queue_input queue_input;
762         int r;
763
764         /*
765          * Avoid taking any other locks under MES lock to avoid circular
766          * lock dependencies.
767          */
768         amdgpu_mes_lock(&adev->mes);
769
770         /* remove the mes queue from idr list */
771         spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
772
773         queue = idr_find(&adev->mes.queue_id_idr, queue_id);
774         if (!queue) {
775                 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
776                 amdgpu_mes_unlock(&adev->mes);
777                 DRM_ERROR("queue id %d doesn't exist\n", queue_id);
778                 return -EINVAL;
779         }
780
781         idr_remove(&adev->mes.queue_id_idr, queue_id);
782         spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
783
784         DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
785                   queue->doorbell_off);
786
787         gang = queue->gang;
788         queue_input.doorbell_offset = queue->doorbell_off;
789         queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
790
791         r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
792         if (r)
793                 DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
794                           queue_id);
795
796         list_del(&queue->list);
797         amdgpu_mes_queue_doorbell_free(adev, gang->process,
798                                        queue->doorbell_off);
799         amdgpu_mes_unlock(&adev->mes);
800
801         amdgpu_mes_queue_free_mqd(queue);
802         kfree(queue);
803         return 0;
804 }
805
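/**
 * amdgpu_mes_unmap_legacy_queue - unmap a kernel (non-MES) queue via MES
 * @adev: amdgpu device pointer
 * @ring: ring backing the legacy queue
 * @action: unmap action to perform
 * @gpu_addr: GPU address of the trailing fence
 * @seq: value to write to the trailing fence
 */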
806 int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
807                                   struct amdgpu_ring *ring,
808                                   enum amdgpu_unmap_queues_action action,
809                                   u64 gpu_addr, u64 seq)
810 {
811         struct mes_unmap_legacy_queue_input queue_input;
812         int r;
813
814         queue_input.action = action;
815         queue_input.queue_type = ring->funcs->type;
816         queue_input.doorbell_offset = ring->doorbell_index;
817         queue_input.pipe_id = ring->pipe;
818         queue_input.queue_id = ring->queue;
819         queue_input.trail_fence_addr = gpu_addr;
820         queue_input.trail_fence_data = seq;
821
822         r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
823         if (r)
824                 DRM_ERROR("failed to unmap legacy queue\n");
825
826         return r;
827 }
828
829 uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
830 {
831         struct mes_misc_op_input op_input;
832         int r, val = 0;
833
834         op_input.op = MES_MISC_OP_READ_REG;
835         op_input.read_reg.reg_offset = reg;
836         op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;
837
838         if (!adev->mes.funcs->misc_op) {
839                 DRM_ERROR("mes rreg is not supported!\n");
840                 goto error;
841         }
842
843         r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
844         if (r)
845                 DRM_ERROR("failed to read reg (0x%x)\n", reg);
846         else
847                 val = *(adev->mes.read_val_ptr);
848
849 error:
850         return val;
851 }
852
853 int amdgpu_mes_wreg(struct amdgpu_device *adev,
854                     uint32_t reg, uint32_t val)
855 {
856         struct mes_misc_op_input op_input;
857         int r;
858
859         op_input.op = MES_MISC_OP_WRITE_REG;
860         op_input.write_reg.reg_offset = reg;
861         op_input.write_reg.reg_value = val;
862
863         if (!adev->mes.funcs->misc_op) {
864                 DRM_ERROR("mes wreg is not supported!\n");
865                 r = -EINVAL;
866                 goto error;
867         }
868
869         r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
870         if (r)
871                 DRM_ERROR("failed to write reg (0x%x)\n", reg);
872
873 error:
874         return r;
875 }
876
877 int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
878                                   uint32_t reg0, uint32_t reg1,
879                                   uint32_t ref, uint32_t mask)
880 {
881         struct mes_misc_op_input op_input;
882         int r;
883
884         op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
885         op_input.wrm_reg.reg0 = reg0;
886         op_input.wrm_reg.reg1 = reg1;
887         op_input.wrm_reg.ref = ref;
888         op_input.wrm_reg.mask = mask;
889
890         if (!adev->mes.funcs->misc_op) {
891                 DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
892                 r = -EINVAL;
893                 goto error;
894         }
895
896         r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
897         if (r)
898                 DRM_ERROR("failed to reg_write_reg_wait\n");
899
900 error:
901         return r;
902 }
903
904 int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
905                         uint32_t val, uint32_t mask)
906 {
907         struct mes_misc_op_input op_input;
908         int r;
909
910         op_input.op = MES_MISC_OP_WRM_REG_WAIT;
911         op_input.wrm_reg.reg0 = reg;
912         op_input.wrm_reg.ref = val;
913         op_input.wrm_reg.mask = mask;
914
915         if (!adev->mes.funcs->misc_op) {
916                 DRM_ERROR("mes reg wait is not supported!\n");
917                 r = -EINVAL;
918                 goto error;
919         }
920
921         r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
922         if (r)
923                 DRM_ERROR("failed to wait on reg (0x%x)\n", reg);
924
925 error:
926         return r;
927 }
928
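/**
 * amdgpu_mes_set_shader_debugger - program shader debugger state through MES
 * @adev: amdgpu device pointer
 * @process_context_addr: GPU address of the target process context
 * @spi_gdbg_per_vmid_cntl: per-VMID SPI debug control value
 * @tcp_watch_cntl: TCP watchpoint control values
 * @flags: shader debugger flags
 * @trap_en: enable traps (only honoured on MES API version >= 14)
 */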
929 int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
930                                 uint64_t process_context_addr,
931                                 uint32_t spi_gdbg_per_vmid_cntl,
932                                 const uint32_t *tcp_watch_cntl,
933                                 uint32_t flags,
934                                 bool trap_en)
935 {
936         struct mes_misc_op_input op_input = {0};
937         int r;
938
939         if (!adev->mes.funcs->misc_op) {
940                 DRM_ERROR("mes set shader debugger is not supported!\n");
941                 return -EINVAL;
942         }
943
944         op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
945         op_input.set_shader_debugger.process_context_addr = process_context_addr;
946         op_input.set_shader_debugger.flags.u32all = flags;
947         op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
948         memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
949                         sizeof(op_input.set_shader_debugger.tcp_watch_cntl));
950
951         if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
952                         AMDGPU_MES_API_VERSION_SHIFT) >= 14)
953                 op_input.set_shader_debugger.trap_en = trap_en;
954
955         amdgpu_mes_lock(&adev->mes);
956
957         r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
958         if (r)
959                 DRM_ERROR("failed to set_shader_debugger\n");
960
961         amdgpu_mes_unlock(&adev->mes);
962
963         return r;
964 }
965
966 static void
967 amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
968                                struct amdgpu_ring *ring,
969                                struct amdgpu_mes_queue_properties *props)
970 {
971         props->queue_type = ring->funcs->type;
972         props->hqd_base_gpu_addr = ring->gpu_addr;
973         props->rptr_gpu_addr = ring->rptr_gpu_addr;
974         props->wptr_gpu_addr = ring->wptr_gpu_addr;
975         props->wptr_mc_addr =
976                 ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
977         props->queue_size = ring->ring_size;
978         props->eop_gpu_addr = ring->eop_gpu_addr;
979         props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
980         props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
981         props->paging = false;
982         props->ring = ring;
983 }
984
985 #define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)                        \
986 do {                                                                    \
987        if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)                           \
988                 return offsetof(struct amdgpu_mes_ctx_meta_data,        \
989                                 _eng[ring->idx].slots[id_offs]);        \
990        else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)                    \
991                 return offsetof(struct amdgpu_mes_ctx_meta_data,        \
992                                 _eng[ring->idx].ring);                  \
993        else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)                      \
994                 return offsetof(struct amdgpu_mes_ctx_meta_data,        \
995                                 _eng[ring->idx].ib);                    \
996        else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)                 \
997                 return offsetof(struct amdgpu_mes_ctx_meta_data,        \
998                                 _eng[ring->idx].padding);               \
999 } while(0)
1000
1001 int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
1002 {
1003         switch (ring->funcs->type) {
1004         case AMDGPU_RING_TYPE_GFX:
1005                 DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
1006                 break;
1007         case AMDGPU_RING_TYPE_COMPUTE:
1008                 DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
1009                 break;
1010         case AMDGPU_RING_TYPE_SDMA:
1011                 DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
1012                 break;
1013         default:
1014                 break;
1015         }
1016
1017         WARN_ON(1);
1018         return -EINVAL;
1019 }
1020
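/**
 * amdgpu_mes_add_ring - create an amdgpu ring backed by an MES hardware queue
 * @adev: amdgpu device pointer
 * @gang_id: gang the new queue is added to
 * @queue_type: ring type (GFX, compute or SDMA)
 * @idx: index of the ring within the MES context meta data
 * @ctx_data: MES context meta data providing the ring storage
 * @out: returned ring
 *
 * Initializes a ring that reuses the funcs of the corresponding first kernel
 * ring of that type, then adds it to the MES scheduler as a hardware queue.
 */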
1021 int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
1022                         int queue_type, int idx,
1023                         struct amdgpu_mes_ctx_data *ctx_data,
1024                         struct amdgpu_ring **out)
1025 {
1026         struct amdgpu_ring *ring;
1027         struct amdgpu_mes_gang *gang;
1028         struct amdgpu_mes_queue_properties qprops = {0};
1029         int r, queue_id, pasid;
1030
1031         /*
1032          * Avoid taking any other locks under MES lock to avoid circular
1033          * lock dependencies.
1034          */
1035         amdgpu_mes_lock(&adev->mes);
1036         gang = idr_find(&adev->mes.gang_id_idr, gang_id);
1037         if (!gang) {
1038                 DRM_ERROR("gang id %d doesn't exist\n", gang_id);
1039                 amdgpu_mes_unlock(&adev->mes);
1040                 return -EINVAL;
1041         }
1042         pasid = gang->process->pasid;
1043
1044         ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
1045         if (!ring) {
1046                 amdgpu_mes_unlock(&adev->mes);
1047                 return -ENOMEM;
1048         }
1049
1050         ring->ring_obj = NULL;
1051         ring->use_doorbell = true;
1052         ring->is_mes_queue = true;
1053         ring->mes_ctx = ctx_data;
1054         ring->idx = idx;
1055         ring->no_scheduler = true;
1056
1057         if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
1058                 int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
1059                                       compute[ring->idx].mec_hpd);
1060                 ring->eop_gpu_addr =
1061                         amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
1062         }
1063
1064         switch (queue_type) {
1065         case AMDGPU_RING_TYPE_GFX:
1066                 ring->funcs = adev->gfx.gfx_ring[0].funcs;
1067                 break;
1068         case AMDGPU_RING_TYPE_COMPUTE:
1069                 ring->funcs = adev->gfx.compute_ring[0].funcs;
1070                 break;
1071         case AMDGPU_RING_TYPE_SDMA:
1072                 ring->funcs = adev->sdma.instance[0].ring.funcs;
1073                 break;
1074         default:
1075                 BUG();
1076         }
1077
1078         r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
1079                              AMDGPU_RING_PRIO_DEFAULT, NULL);
1080         if (r)
1081                 goto clean_up_memory;
1082
1083         amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
1084
1085         dma_fence_wait(gang->process->vm->last_update, false);
1086         dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
1087         amdgpu_mes_unlock(&adev->mes);
1088
1089         r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
1090         if (r)
1091                 goto clean_up_ring;
1092
1093         ring->hw_queue_id = queue_id;
1094         ring->doorbell_index = qprops.doorbell_off;
1095
1096         if (queue_type == AMDGPU_RING_TYPE_GFX)
1097                 sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
1098         else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
1099                 sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
1100                         queue_id);
1101         else if (queue_type == AMDGPU_RING_TYPE_SDMA)
1102                 sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
1103                         queue_id);
1104         else
1105                 BUG();
1106
1107         *out = ring;
1108         return 0;
1109
1110 clean_up_ring:
1111         amdgpu_ring_fini(ring);
1112 clean_up_memory:
1113         kfree(ring);
1114         amdgpu_mes_unlock(&adev->mes);
1115         return r;
1116 }
1117
1118 void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
1119                             struct amdgpu_ring *ring)
1120 {
1121         if (!ring)
1122                 return;
1123
1124         amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
1125         amdgpu_ring_fini(ring);
1126         kfree(ring);
1127 }
1128
1129 uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
1130                                                    enum amdgpu_mes_priority_level prio)
1131 {
1132         return adev->mes.aggregated_doorbells[prio];
1133 }
1134
1135 int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
1136                                    struct amdgpu_mes_ctx_data *ctx_data)
1137 {
1138         int r;
1139
1140         r = amdgpu_bo_create_kernel(adev,
1141                             sizeof(struct amdgpu_mes_ctx_meta_data),
1142                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1143                             &ctx_data->meta_data_obj,
1144                             &ctx_data->meta_data_mc_addr,
1145                             &ctx_data->meta_data_ptr);
1146         if (r) {
1147                 dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
1148                 return r;
1149         }
1150
1151         if (!ctx_data->meta_data_obj)
1152                 return -ENOMEM;
1153
1154         memset(ctx_data->meta_data_ptr, 0,
1155                sizeof(struct amdgpu_mes_ctx_meta_data));
1156
1157         return 0;
1158 }
1159
1160 void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
1161 {
1162         if (ctx_data->meta_data_obj)
1163                 amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
1164                                       &ctx_data->meta_data_mc_addr,
1165                                       &ctx_data->meta_data_ptr);
1166 }
1167
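/**
 * amdgpu_mes_ctx_map_meta_data - map the context meta data into a VM
 * @adev: amdgpu device pointer
 * @vm: VM to map the meta data BO into
 * @ctx_data: context meta data to map
 *
 * Reserves the meta data BO together with the page directory, creates a
 * bo_va mapping at ctx_data->meta_data_gpu_addr and waits for the page
 * table updates to complete.
 */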
1168 int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
1169                                  struct amdgpu_vm *vm,
1170                                  struct amdgpu_mes_ctx_data *ctx_data)
1171 {
1172         struct amdgpu_bo_va *bo_va;
1173         struct ww_acquire_ctx ticket;
1174         struct list_head list;
1175         struct amdgpu_bo_list_entry pd;
1176         struct ttm_validate_buffer csa_tv;
1177         struct amdgpu_sync sync;
1178         int r;
1179
1180         amdgpu_sync_create(&sync);
1181         INIT_LIST_HEAD(&list);
1182         INIT_LIST_HEAD(&csa_tv.head);
1183
1184         csa_tv.bo = &ctx_data->meta_data_obj->tbo;
1185         csa_tv.num_shared = 1;
1186
1187         list_add(&csa_tv.head, &list);
1188         amdgpu_vm_get_pd_bo(vm, &list, &pd);
1189
1190         r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
1191         if (r) {
1192                 DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
1193                 return r;
1194         }
1195
1196         bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
1197         if (!bo_va) {
1198                 ttm_eu_backoff_reservation(&ticket, &list);
1199                 DRM_ERROR("failed to create bo_va for meta data BO\n");
1200                 return -ENOMEM;
1201         }
1202
1203         r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
1204                              sizeof(struct amdgpu_mes_ctx_meta_data),
1205                              AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
1206                              AMDGPU_PTE_EXECUTABLE);
1207
1208         if (r) {
1209                 DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
1210                 goto error;
1211         }
1212
1213         r = amdgpu_vm_bo_update(adev, bo_va, false);
1214         if (r) {
1215                 DRM_ERROR("failed to do vm_bo_update on meta data\n");
1216                 goto error;
1217         }
1218         amdgpu_sync_fence(&sync, bo_va->last_pt_update);
1219
1220         r = amdgpu_vm_update_pdes(adev, vm, false);
1221         if (r) {
1222                 DRM_ERROR("failed to update pdes on meta data\n");
1223                 goto error;
1224         }
1225         amdgpu_sync_fence(&sync, vm->last_update);
1226
1227         amdgpu_sync_wait(&sync, false);
1228         ttm_eu_backoff_reservation(&ticket, &list);
1229
1230         amdgpu_sync_free(&sync);
1231         ctx_data->meta_data_va = bo_va;
1232         return 0;
1233
1234 error:
1235         amdgpu_vm_bo_del(adev, bo_va);
1236         ttm_eu_backoff_reservation(&ticket, &list);
1237         amdgpu_sync_free(&sync);
1238         return r;
1239 }
1240
1241 int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
1242                                    struct amdgpu_mes_ctx_data *ctx_data)
1243 {
1244         struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
1245         struct amdgpu_bo *bo = ctx_data->meta_data_obj;
1246         struct amdgpu_vm *vm = bo_va->base.vm;
1247         struct amdgpu_bo_list_entry vm_pd;
1248         struct list_head list, duplicates;
1249         struct dma_fence *fence = NULL;
1250         struct ttm_validate_buffer tv;
1251         struct ww_acquire_ctx ticket;
1252         long r = 0;
1253
1254         INIT_LIST_HEAD(&list);
1255         INIT_LIST_HEAD(&duplicates);
1256
1257         tv.bo = &bo->tbo;
1258         tv.num_shared = 2;
1259         list_add(&tv.head, &list);
1260
1261         amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
1262
1263         r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
1264         if (r) {
1265                 dev_err(adev->dev,
1266                         "leaking bo va because we failed to reserve the bo (%ld)\n", r);
1267                 return r;
1268         }
1269
1270         amdgpu_vm_bo_del(adev, bo_va);
1271         if (!amdgpu_vm_ready(vm))
1272                 goto out_unlock;
1273
1274         r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
1275         if (r)
1276                 goto out_unlock;
1277         if (fence) {
1278                 amdgpu_bo_fence(bo, fence, true);
1279                 fence = NULL;
1280         }
1281
1282         r = amdgpu_vm_clear_freed(adev, vm, &fence);
1283         if (r || !fence)
1284                 goto out_unlock;
1285
1286         dma_fence_wait(fence, false);
1287         amdgpu_bo_fence(bo, fence, true);
1288         dma_fence_put(fence);
1289
1290 out_unlock:
1291         if (unlikely(r < 0))
1292                 dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
1293         ttm_eu_backoff_reservation(&ticket, &list);
1294
1295         return r;
1296 }
1297
1298 static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
1299                                           int pasid, int *gang_id,
1300                                           int queue_type, int num_queue,
1301                                           struct amdgpu_ring **added_rings,
1302                                           struct amdgpu_mes_ctx_data *ctx_data)
1303 {
1304         struct amdgpu_ring *ring;
1305         struct amdgpu_mes_gang_properties gprops = {0};
1306         int r, j;
1307
1308         /* create a gang for the process */
1309         gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1310         gprops.gang_quantum = adev->mes.default_gang_quantum;
1311         gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1312         gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1313         gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
1314
1315         r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
1316         if (r) {
1317                 DRM_ERROR("failed to add gang\n");
1318                 return r;
1319         }
1320
1321         /* create queues for the gang */
1322         for (j = 0; j < num_queue; j++) {
1323                 r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
1324                                         ctx_data, &ring);
1325                 if (r) {
1326                         DRM_ERROR("failed to add ring\n");
1327                         break;
1328                 }
1329
1330                 DRM_INFO("ring %s was added\n", ring->name);
1331                 added_rings[j] = ring;
1332         }
1333
1334         return 0;
1335 }
1336
1337 static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
1338 {
1339         struct amdgpu_ring *ring;
1340         int i, r;
1341
1342         for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
1343                 ring = added_rings[i];
1344                 if (!ring)
1345                         continue;
1346
1347                 r = amdgpu_ring_test_helper(ring);
1348                 if (r)
1349                         return r;
1350
1351                 r = amdgpu_ring_test_ib(ring, 1000 * 10);
1352                 if (r) {
1353                         DRM_DEV_ERROR(ring->adev->dev,
1354                                       "ring %s ib test failed (%d)\n",
1355                                       ring->name, r);
1356                         return r;
1357                 } else
1358                         DRM_INFO("ring %s ib test pass\n", ring->name);
1359         }
1360
1361         return 0;
1362 }
1363
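/**
 * amdgpu_mes_self_test - sanity test of the MES scheduler path
 * @adev: amdgpu device pointer
 *
 * Creates a temporary VM and process, adds one gang with a GFX, a compute
 * and an SDMA queue (where supported), runs ring and IB tests on them, and
 * tears everything down again.
 */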
1364 int amdgpu_mes_self_test(struct amdgpu_device *adev)
1365 {
1366         struct amdgpu_vm *vm = NULL;
1367         struct amdgpu_mes_ctx_data ctx_data = {0};
1368         struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
1369         int gang_ids[3] = {0};
1370         int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
1371                                  { AMDGPU_RING_TYPE_COMPUTE, 1 },
1372                                  { AMDGPU_RING_TYPE_SDMA, 1} };
1373         int i, r, pasid, k = 0;
1374
1375         pasid = amdgpu_pasid_alloc(16);
1376         if (pasid < 0) {
1377                 dev_warn(adev->dev, "No more PASIDs available!");
1378                 pasid = 0;
1379         }
1380
1381         vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1382         if (!vm) {
1383                 r = -ENOMEM;
1384                 goto error_pasid;
1385         }
1386
1387         r = amdgpu_vm_init(adev, vm, -1);
1388         if (r) {
1389                 DRM_ERROR("failed to initialize vm\n");
1390                 goto error_pasid;
1391         }
1392
1393         r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
1394         if (r) {
1395                 DRM_ERROR("failed to alloc ctx meta data\n");
1396                 goto error_fini;
1397         }
1398
1399         ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
1400         r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
1401         if (r) {
1402                 DRM_ERROR("failed to map ctx meta data\n");
1403                 goto error_vm;
1404         }
1405
1406         r = amdgpu_mes_create_process(adev, pasid, vm);
1407         if (r) {
1408                 DRM_ERROR("failed to create MES process\n");
1409                 goto error_vm;
1410         }
1411
1412         for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
1413                 /* On GFX v10.3, the firmware does not yet support mapping SDMA queues. */
1414                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
1415                     adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
1416                     queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
1417                         continue;
1418
1419                 r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
1420                                                            &gang_ids[i],
1421                                                            queue_types[i][0],
1422                                                            queue_types[i][1],
1423                                                            &added_rings[k],
1424                                                            &ctx_data);
1425                 if (r)
1426                         goto error_queues;
1427
1428                 k += queue_types[i][1];
1429         }
1430
1431         /* start ring test and ib test for MES queues */
1432         amdgpu_mes_test_queues(added_rings);
1433
1434 error_queues:
1435         /* remove all queues */
1436         for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
1437                 if (!added_rings[i])
1438                         continue;
1439                 amdgpu_mes_remove_ring(adev, added_rings[i]);
1440         }
1441
1442         for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
1443                 if (!gang_ids[i])
1444                         continue;
1445                 amdgpu_mes_remove_gang(adev, gang_ids[i]);
1446         }
1447
1448         amdgpu_mes_destroy_process(adev, pasid);
1449
1450 error_vm:
1451         amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
1452
1453 error_fini:
1454         amdgpu_vm_fini(adev, vm);
1455
1456 error_pasid:
1457         if (pasid)
1458                 amdgpu_pasid_free(pasid);
1459
1460         amdgpu_mes_ctx_free_meta_data(&ctx_data);
1461         kfree(vm);
1462         return 0;
1463 }
1464
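/**
 * amdgpu_mes_init_microcode - request and parse the MES firmware for a pipe
 * @adev: amdgpu device pointer
 * @pipe: MES pipe (scheduler or KIQ)
 *
 * Picks the firmware file name based on the GC IP version, falls back to the
 * legacy name if needed, records the ucode/data start addresses from the
 * firmware header and registers the images for PSP front-door loading.
 */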
1465 int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
1466 {
1467         const struct mes_firmware_header_v1_0 *mes_hdr;
1468         struct amdgpu_firmware_info *info;
1469         char ucode_prefix[30];
1470         char fw_name[40];
1471         bool need_retry = false;
1472         int r;
1473
1474         amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
1475                                        sizeof(ucode_prefix));
1476         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
1477                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
1478                          ucode_prefix,
1479                          pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
1480                 need_retry = true;
1481         } else {
1482                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
1483                          ucode_prefix,
1484                          pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
1485         }
1486
1487         r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
1488         if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
1489                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
1490                          ucode_prefix);
1491                 DRM_INFO("falling back to %s\n", fw_name);
1492                 r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
1493                                          fw_name);
1494         }
1495
1496         if (r)
1497                 goto out;
1498
1499         mes_hdr = (const struct mes_firmware_header_v1_0 *)
1500                 adev->mes.fw[pipe]->data;
1501         adev->mes.uc_start_addr[pipe] =
1502                 le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
1503                 ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
1504         adev->mes.data_start_addr[pipe] =
1505                 le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
1506                 ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
1507
1508         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1509                 int ucode, ucode_data;
1510
1511                 if (pipe == AMDGPU_MES_SCHED_PIPE) {
1512                         ucode = AMDGPU_UCODE_ID_CP_MES;
1513                         ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
1514                 } else {
1515                         ucode = AMDGPU_UCODE_ID_CP_MES1;
1516                         ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
1517                 }
1518
1519                 info = &adev->firmware.ucode[ucode];
1520                 info->ucode_id = ucode;
1521                 info->fw = adev->mes.fw[pipe];
1522                 adev->firmware.fw_size +=
1523                         ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
1524                               PAGE_SIZE);
1525
1526                 info = &adev->firmware.ucode[ucode_data];
1527                 info->ucode_id = ucode_data;
1528                 info->fw = adev->mes.fw[pipe];
1529                 adev->firmware.fw_size +=
1530                         ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
1531                               PAGE_SIZE);
1532         }
1533
1534         return 0;
1535 out:
1536         amdgpu_ucode_release(&adev->mes.fw[pipe]);
1537         return r;
1538 }