drivers/gpu/drm/amd/amdkfd/kfd_queue.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_topology.h"
#include "kfd_svm.h"

void print_queue_properties(struct queue_properties *q)
{
	if (!q)
		return;

	pr_debug("Printing queue properties:\n");
	pr_debug("Queue Type: %u\n", q->type);
	pr_debug("Queue Size: %llu\n", q->queue_size);
	pr_debug("Queue percent: %u\n", q->queue_percent);
	pr_debug("Queue Address: 0x%llX\n", q->queue_address);
	pr_debug("Queue Id: %u\n", q->queue_id);
	pr_debug("Queue Process Vmid: %u\n", q->vmid);
	pr_debug("Queue Read Pointer: 0x%px\n", q->read_ptr);
	pr_debug("Queue Write Pointer: 0x%px\n", q->write_ptr);
	pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr);
	pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off);
}

void print_queue(struct queue *q)
{
	if (!q)
		return;
	pr_debug("Printing queue:\n");
	pr_debug("Queue Type: %u\n", q->properties.type);
	pr_debug("Queue Size: %llu\n", q->properties.queue_size);
	pr_debug("Queue percent: %u\n", q->properties.queue_percent);
	pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address);
	pr_debug("Queue Id: %u\n", q->properties.queue_id);
	pr_debug("Queue Process Vmid: %u\n", q->properties.vmid);
	pr_debug("Queue Read Pointer: 0x%px\n", q->properties.read_ptr);
	pr_debug("Queue Write Pointer: 0x%px\n", q->properties.write_ptr);
	pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr);
	pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off);
	pr_debug("Queue MQD Address: 0x%p\n", q->mqd);
	pr_debug("Queue MQD Gart: 0x%llX\n", q->gart_mqd_addr);
	pr_debug("Queue Process Address: 0x%p\n", q->process);
	pr_debug("Queue Device Address: 0x%p\n", q->device);
}

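/* Allocate a queue structure and initialize it from @properties. */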
int init_queue(struct queue **q, const struct queue_properties *properties)
{
	struct queue *tmp_q;

	tmp_q = kzalloc(sizeof(*tmp_q), GFP_KERNEL);
	if (!tmp_q)
		return -ENOMEM;

	memcpy(&tmp_q->properties, properties, sizeof(*properties));

	*q = tmp_q;
	return 0;
}

void uninit_queue(struct queue *q)
{
	kfree(q);
}

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)

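/*
 * Verify that the SVM ranges backing [addr, addr + size) are registered,
 * mapped to this GPU and flagged always-mapped, and take a queue refcount
 * on each of them. Returns -EINVAL if any part of the range fails a check.
 */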
static int kfd_queue_buffer_svm_get(struct kfd_process_device *pdd, u64 addr, u64 size)
{
	struct kfd_process *p = pdd->process;
	struct list_head update_list;
	struct svm_range *prange;
	int ret = -EINVAL;

	INIT_LIST_HEAD(&update_list);
	addr >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;

	mutex_lock(&p->svms.lock);

	/*
	 * The range may be split into multiple svm pranges aligned to the
	 * granularity boundary.
	 */
	while (size) {
		uint32_t gpuid, gpuidx;
		int r;

		prange = svm_range_from_addr(&p->svms, addr, NULL);
		if (!prange)
			break;

		if (!prange->mapped_to_gpu)
			break;

		r = kfd_process_gpuid_from_node(p, pdd->dev, &gpuid, &gpuidx);
		if (r < 0)
			break;
		if (!test_bit(gpuidx, prange->bitmap_access) &&
		    !test_bit(gpuidx, prange->bitmap_aip))
			break;

		if (!(prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED))
			break;

		list_add(&prange->update_list, &update_list);

		if (prange->last - prange->start + 1 >= size) {
			size = 0;
			break;
		}

		size -= prange->last - prange->start + 1;
		addr += prange->last - prange->start + 1;
	}
	if (size) {
		pr_debug("[0x%llx 0x%llx] not registered\n", addr, addr + size - 1);
		goto out_unlock;
	}

	list_for_each_entry(prange, &update_list, update_list)
		atomic_inc(&prange->queue_refcount);
	ret = 0;

out_unlock:
	mutex_unlock(&p->svms.lock);
	return ret;
}

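/*
 * Drop the queue refcounts taken by kfd_queue_buffer_svm_get() on every
 * SVM range (and its child ranges) overlapping [addr, addr + size).
 */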
static void kfd_queue_buffer_svm_put(struct kfd_process_device *pdd, u64 addr, u64 size)
{
	struct kfd_process *p = pdd->process;
	struct svm_range *prange, *pchild;
	struct interval_tree_node *node;
	unsigned long last;

	addr >>= PAGE_SHIFT;
	last = addr + (size >> PAGE_SHIFT) - 1;

	mutex_lock(&p->svms.lock);

	node = interval_tree_iter_first(&p->svms.objects, addr, last);
	while (node) {
		struct interval_tree_node *next_node;
		unsigned long next_start;

		prange = container_of(node, struct svm_range, it_node);
		next_node = interval_tree_iter_next(node, addr, last);
		next_start = min(node->last, last) + 1;

		if (atomic_add_unless(&prange->queue_refcount, -1, 0)) {
			list_for_each_entry(pchild, &prange->child_list, child_list)
				atomic_add_unless(&pchild->queue_refcount, -1, 0);
		}

		node = next_node;
		addr = next_start;
	}

	mutex_unlock(&p->svms.lock);
}
#else

static int kfd_queue_buffer_svm_get(struct kfd_process_device *pdd, u64 addr, u64 size)
{
	return -EINVAL;
}

static void kfd_queue_buffer_svm_put(struct kfd_process_device *pdd, u64 addr, u64 size)
{
}

#endif

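/*
 * Look up the GPUVM mapping backing a user queue buffer, check that it starts
 * at @addr and, if @expected_size is non-zero, spans exactly @expected_size
 * bytes, then take a reference on the BO and bump the mapping's
 * queue_refcount. Callers reserve vm->root.bo around the lookup.
 */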
int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user *addr, struct amdgpu_bo **pbo,
			 u64 expected_size)
{
	struct amdgpu_bo_va_mapping *mapping;
	u64 user_addr;
	u64 size;

	user_addr = (u64)addr >> AMDGPU_GPU_PAGE_SHIFT;
	size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
	if (!mapping)
		goto out_err;

	if (user_addr != mapping->start ||
	    (size != 0 && user_addr + size - 1 != mapping->last)) {
		pr_debug("expected size 0x%llx not equal to mapping addr 0x%llx size 0x%llx\n",
			expected_size, mapping->start << AMDGPU_GPU_PAGE_SHIFT,
			(mapping->last - mapping->start + 1) << AMDGPU_GPU_PAGE_SHIFT);
		goto out_err;
	}

	*pbo = amdgpu_bo_ref(mapping->bo_va->base.bo);
	mapping->bo_va->queue_refcount++;
	return 0;

out_err:
	*pbo = NULL;
	return -EINVAL;
}

/* FIXME: remove this function, just call amdgpu_bo_unref directly */
void kfd_queue_buffer_put(struct amdgpu_bo **bo)
{
	amdgpu_bo_unref(bo);
}

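/*
 * Take references on every user buffer a queue depends on: read/write
 * pointers, the ring buffer and, for compute queues, the optional EOP buffer
 * and the CWSR save area. The CWSR area is looked up as a GPUVM mapping
 * first and falls back to SVM when no mapping exists.
 */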
int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
{
	struct kfd_topology_device *topo_dev;
	struct amdgpu_vm *vm;
	u32 total_cwsr_size;
	int err;

	topo_dev = kfd_topology_device_by_id(pdd->dev->id);
	if (!topo_dev)
		return -EINVAL;

	vm = drm_priv_to_vm(pdd->drm_priv);
	err = amdgpu_bo_reserve(vm->root.bo, false);
	if (err)
		return err;

	err = kfd_queue_buffer_get(vm, properties->write_ptr, &properties->wptr_bo, PAGE_SIZE);
	if (err)
		goto out_err_unreserve;

	err = kfd_queue_buffer_get(vm, properties->read_ptr, &properties->rptr_bo, PAGE_SIZE);
	if (err)
		goto out_err_unreserve;

	err = kfd_queue_buffer_get(vm, (void *)properties->queue_address,
				   &properties->ring_bo, properties->queue_size);
	if (err)
		goto out_err_unreserve;

	/* only compute queues require an EOP buffer and CWSR area */
	if (properties->type != KFD_QUEUE_TYPE_COMPUTE)
		goto out_unreserve;

	/* EOP buffer is not required for all ASICs */
	if (properties->eop_ring_buffer_address) {
		if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
			/* eop_buf_bo has not been acquired yet, report the requested size */
			pr_debug("queue eop bo size 0x%x not equal to node eop buf size 0x%x\n",
				properties->eop_ring_buffer_size,
				topo_dev->node_props.eop_buffer_size);
			err = -EINVAL;
			goto out_err_unreserve;
		}
		err = kfd_queue_buffer_get(vm, (void *)properties->eop_ring_buffer_address,
					   &properties->eop_buf_bo,
					   properties->eop_ring_buffer_size);
		if (err)
			goto out_err_unreserve;
	}

	if (properties->ctl_stack_size != topo_dev->node_props.ctl_stack_size) {
		pr_debug("queue ctl stack size 0x%x not equal to node ctl stack size 0x%x\n",
			properties->ctl_stack_size,
			topo_dev->node_props.ctl_stack_size);
		err = -EINVAL;
		goto out_err_unreserve;
	}

	if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) {
		pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n",
			properties->ctx_save_restore_area_size,
			topo_dev->node_props.cwsr_size);
		err = -EINVAL;
		goto out_err_unreserve;
	}

	total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
			  * NUM_XCC(pdd->dev->xcc_mask);
	total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);

	err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
				   &properties->cwsr_bo, total_cwsr_size);
	if (!err)
		goto out_unreserve;

	amdgpu_bo_unreserve(vm->root.bo);

	err = kfd_queue_buffer_svm_get(pdd, properties->ctx_save_restore_area_address,
				       total_cwsr_size);
	if (err)
		goto out_err_release;

	return 0;

out_unreserve:
	amdgpu_bo_unreserve(vm->root.bo);
	return 0;

out_err_unreserve:
	amdgpu_bo_unreserve(vm->root.bo);
out_err_release:
	/* FIXME: make a _locked version of this that can be called before
	 * dropping the VM reservation.
	 */
	kfd_queue_unref_bo_vas(pdd, properties);
	kfd_queue_release_buffers(pdd, properties);
	return err;
}

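/*
 * Release the buffer references taken by kfd_queue_acquire_buffers(),
 * including the SVM queue refcounts on the CWSR area.
 */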
int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
{
	struct kfd_topology_device *topo_dev;
	u32 total_cwsr_size;

	kfd_queue_buffer_put(&properties->wptr_bo);
	kfd_queue_buffer_put(&properties->rptr_bo);
	kfd_queue_buffer_put(&properties->ring_bo);
	kfd_queue_buffer_put(&properties->eop_buf_bo);
	kfd_queue_buffer_put(&properties->cwsr_bo);

	topo_dev = kfd_topology_device_by_id(pdd->dev->id);
	if (!topo_dev)
		return -EINVAL;
	total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
			  * NUM_XCC(pdd->dev->xcc_mask);
	total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);

	kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size);
	return 0;
}

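/* Drop the queue_refcount of the bo_va mapping @bo in @vm, if one exists. */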
void kfd_queue_unref_bo_va(struct amdgpu_vm *vm, struct amdgpu_bo **bo)
{
	if (*bo) {
		struct amdgpu_bo_va *bo_va;

		bo_va = amdgpu_vm_bo_find(vm, *bo);
		if (bo_va && bo_va->queue_refcount)
			bo_va->queue_refcount--;
	}
}

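/*
 * Undo the bo_va queue_refcount increments made by kfd_queue_buffer_get()
 * for all buffers of a queue. Reserves the VM root BO for the lookups.
 */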
int kfd_queue_unref_bo_vas(struct kfd_process_device *pdd,
			   struct queue_properties *properties)
{
	struct amdgpu_vm *vm;
	int err;

	vm = drm_priv_to_vm(pdd->drm_priv);
	err = amdgpu_bo_reserve(vm->root.bo, false);
	if (err)
		return err;

	kfd_queue_unref_bo_va(vm, &properties->wptr_bo);
	kfd_queue_unref_bo_va(vm, &properties->rptr_bo);
	kfd_queue_unref_bo_va(vm, &properties->ring_bo);
	kfd_queue_unref_bo_va(vm, &properties->eop_buf_bo);
	kfd_queue_unref_bo_va(vm, &properties->cwsr_bo);

	amdgpu_bo_unreserve(vm->root.bo);
	return 0;
}

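/* Per-CU register/LDS save sizes and per-wave debugger sizes used below. */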
#define SGPR_SIZE_PER_CU	0x4000
#define LDS_SIZE_PER_CU		0x10000
#define HWREG_SIZE_PER_CU	0x1000
#define DEBUGGER_BYTES_ALIGN	64
#define DEBUGGER_BYTES_PER_WAVE	32

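/* VGPR save size per CU; this varies with the gfx target version. */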
static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
{
	u32 vgpr_size = 0x40000;

	if ((gfxv / 100 * 100) == 90400 ||	/* GFX_VERSION_AQUA_VANJARAM */
	    gfxv == 90010 ||			/* GFX_VERSION_ALDEBARAN */
	    gfxv == 90008 ||			/* GFX_VERSION_ARCTURUS */
	    gfxv == 90500)
		vgpr_size = 0x80000;
	else if (gfxv == 110000 ||		/* GFX_VERSION_PLUM_BONITO */
		 gfxv == 110001 ||		/* GFX_VERSION_WHEAT_NAS */
		 gfxv == 120000 ||		/* GFX_VERSION_GFX1200 */
		 gfxv == 120001)		/* GFX_VERSION_GFX1201 */
		vgpr_size = 0x60000;

	return vgpr_size;
}

#define WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props)	\
	(kfd_get_vgpr_size_per_cu(gfxv) + SGPR_SIZE_PER_CU +\
	 (((gfxv) == 90500) ? (props->lds_size_in_kb << 10) : LDS_SIZE_PER_CU) +\
	 HWREG_SIZE_PER_CU)

#define CNTL_STACK_BYTES_PER_WAVE(gfxv)	\
	((gfxv) >= 100100 ? 12 : 8)	/* GFX_VERSION_NAVI10 */

#define SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER 40

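/*
 * Derive the control stack, CWSR save area, debugger scratch and EOP buffer
 * sizes for a node from its gfx target version and CU topology, and store
 * them in the node properties for queue buffer validation.
 */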
void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
{
	struct kfd_node_properties *props = &dev->node_props;
	u32 gfxv = props->gfx_target_version;
	u32 ctl_stack_size;
	u32 wg_data_size;
	u32 wave_num;
	u32 cu_num;

	if (gfxv < 80001)	/* GFX_VERSION_CARRIZO */
		return;

	cu_num = props->simd_count / props->simd_per_cu / NUM_XCC(dev->gpu->xcc_mask);
	wave_num = (gfxv < 100100) ?	/* GFX_VERSION_NAVI10 */
		    min(cu_num * 40, props->array_count / props->simd_arrays_per_engine * 512)
		    : cu_num * 32;

	wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props), PAGE_SIZE);
	ctl_stack_size = wave_num * CNTL_STACK_BYTES_PER_WAVE(gfxv) + 8;
	ctl_stack_size = ALIGN(SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER + ctl_stack_size,
			       PAGE_SIZE);

	if ((gfxv / 10000 * 10000) == 100000) {
		/* HW design limits control stack size to 0x7000.
		 * This is insufficient for theoretical PM4 cases
		 * but sufficient for AQL, limited by SPI events.
		 */
		ctl_stack_size = min(ctl_stack_size, 0x7000);
	}

	props->ctl_stack_size = ctl_stack_size;
	props->debug_memory_size = ALIGN(wave_num * DEBUGGER_BYTES_PER_WAVE, DEBUGGER_BYTES_ALIGN);
	props->cwsr_size = ctl_stack_size + wg_data_size;

	if (gfxv == 80002)	/* GFX_VERSION_TONGA */
		props->eop_buffer_size = 0x8000;
	else if ((gfxv / 100 * 100) == 90400)	/* GFX_VERSION_AQUA_VANJARAM */
		props->eop_buffer_size = 4096;
	else if (gfxv >= 80000)
		props->eop_buffer_size = 4096;
}