// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

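/**
 * inc_wptr() - Advance the runlist write pointer past a packet just written.
 * @wptr: current write offset into the runlist IB, in dwords
 * @increment_bytes: size of the packet that was written, in bytes
 * @buffer_size_bytes: total size of the runlist IB, in bytes
 *
 * Warns if the advanced pointer would point past the end of the IB,
 * i.e. the runlist IB has overflowed.
 */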
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

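/**
 * pm_calc_rlib_size() - Compute the allocation size of the runlist IB.
 * @pm: packet manager whose device queue manager supplies the counts
 * @rlib_size: output, required runlist IB size in bytes
 * @over_subscription: output, true if the runlist will be oversubscribed
 *
 * The runlist is oversubscribed when there are more processes than can be
 * scheduled in one quantum, more active compute queues than hardware CP
 * queues, or more than one queue using GWS. An oversubscribed runlist is
 * chained, so space for one extra runlist packet is added.
 */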
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->active_queue_count;
	compute_queue_count = pm->dqm->active_cp_queue_count;
	gws_queue_count = pm->dqm->gws_queue_count;

	/* Check if there is over subscription.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	*over_subscription = false;

	if (node->max_proc_per_quantum > 1)
		max_proc_per_quantum = node->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_cp_queues_num(pm->dqm) ||
	    gws_queue_count > 1) {
		*over_subscription = true;
		dev_dbg(dev, "Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* Calculate run list IB allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	dev_dbg(dev, "runlist ib size %d\n", *rlib_size);
}

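/**
 * pm_allocate_runlist_ib() - Allocate GTT memory for the runlist IB.
 * @pm: packet manager; the buffer object is stored in pm->ib_buffer_obj
 * @rl_buffer: output, CPU address of the allocated IB
 * @rl_gpu_buffer: output, GPU address of the allocated IB
 * @rl_buffer_size: output, size of the allocation in bytes
 * @is_over_subscription: output, true if the runlist is oversubscribed
 *
 * The allocation is zero-initialized. Only one runlist IB may exist at a
 * time; pm->allocated tracks ownership and the IB is freed again by
 * pm_release_ib().
 */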
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(node, *rl_buffer_size, &pm->ib_buffer_obj);

	if (retval) {
		dev_err(dev, "Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

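/**
 * pm_create_runlist_ib() - Build the runlist IB from the DQM's queue lists.
 * @pm: packet manager that builds and owns the IB
 * @queues: list of processes (device_process_node) whose queues are mapped
 * @rl_gpu_addr: output, GPU address of the finished runlist IB
 * @rl_size_bytes: output, size of the runlist IB in bytes
 *
 * For each process this writes one map_process packet followed by one
 * map_queues packet per active kernel and user queue. If the runlist is
 * oversubscribed, a chained runlist packet pointing back at the IB itself
 * is appended, so the hardware scheduler cycles through it repeatedly.
 */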
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	dev_dbg(dev, "Building runlist ib: process count %d, queue count %d\n",
		pm->dqm->processes_count, pm->dqm->active_queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			dev_dbg(dev, "Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			dev_dbg(dev,
				"static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			dev_dbg(dev,
				"static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}
	}

	dev_dbg(dev, "Finished map process and queues to runlist\n");

	if (is_over_subscription) {
		if (!pm->is_over_subscription)
			dev_warn(dev,
				 "Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);
	}
	pm->is_over_subscription = is_over_subscription;

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

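/**
 * pm_init() - Initialize a packet manager for a device queue manager.
 * @pm: packet manager to initialize
 * @dqm: device queue manager the packet manager operates on
 *
 * Selects the ASIC-specific packet manager functions (PM4 packet formats)
 * and creates the HIQ kernel queue through which runlist, set_resources,
 * query_status and unmap packets are submitted to the hardware scheduler.
 *
 * Return: 0 on success, -EINVAL for an unknown ASIC, -ENOMEM if the HIQ
 * kernel queue cannot be created.
 */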
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->adev->asic_type) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	default:
		if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2) ||
		    KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3) ||
		    KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 4))
			pm->pmf = &kfd_aldebaran_pm_funcs;
		else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1))
			pm->pmf = &kfd_v9_pm_funcs;
		else {
			WARN(1, "Unexpected ASIC family %u",
			     dqm->dev->adev->asic_type);
			return -EINVAL;
		}
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

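/**
 * pm_uninit() - Tear down a packet manager initialized by pm_init().
 * @pm: packet manager to tear down
 */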
void pm_uninit(struct packet_manager *pm)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
	pm->priv_queue = NULL;
}

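/**
 * pm_send_set_resources() - Submit a set_resources packet to the HWS.
 * @pm: packet manager whose HIQ is used for submission
 * @res: scheduling resources to hand over to the HWS
 *
 * Acquires space on the HIQ, builds the ASIC-specific set_resources
 * packet and submits it, rolling the queue back if packet building fails.
 */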
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		retval = kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

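/**
 * pm_send_runlist() - Build a runlist IB and submit it to the HWS.
 * @pm: packet manager used to build and submit the runlist
 * @dqm_queues: list of processes whose queues make up the runlist
 *
 * Creates the runlist IB with pm_create_runlist_ib() and submits a
 * runlist packet referencing it on the HIQ. On failure the partially
 * built state is unwound and the IB is released.
 */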
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = kq_acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	retval = kq_submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

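/**
 * pm_send_query_status() - Ask the HWS to write a fence value to memory.
 * @pm: packet manager whose HIQ is used for submission
 * @fence_address: GPU address the fence value is written to
 * @fence_value: value the HWS writes once the packet is processed
 *
 * The caller typically polls @fence_address for @fence_value to confirm
 * that the hardware scheduler has caught up with the submitted packets.
 */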
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint64_t fence_value)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		retval = kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

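/**
 * pm_update_grace_period() - Set the preemption grace period in the HWS.
 * @pm: packet manager whose HIQ is used for submission
 * @grace_period: new grace period value
 *
 * A no-op on ASICs whose packet manager does not implement the
 * set_grace_period packet (set_grace_period_size is zero).
 */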
int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	int retval = 0;
	uint32_t *buffer, size;

	size = pm->pmf->set_grace_period_size;

	mutex_lock(&pm->lock);

	if (size) {
		kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t),
			(unsigned int **)&buffer);

		if (!buffer) {
			dev_err(dev,
				"Failed to allocate buffer on kernel queue\n");
			retval = -ENOMEM;
			goto out;
		}

		retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
		if (!retval)
			retval = kq_submit_packet(pm->priv_queue);
		else
			kq_rollback_packet(pm->priv_queue);
	}

out:
	mutex_unlock(&pm->lock);
	return retval;
}

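/**
 * pm_send_unmap_queue() - Submit an unmap_queues packet to the HWS.
 * @pm: packet manager whose HIQ is used for submission
 * @filter: which queues to unmap (e.g. all queues, or those of one PASID)
 * @filter_param: parameter for @filter, e.g. the PASID
 * @reset: true to reset the queues rather than preempt them
 */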
int pm_send_unmap_queue(struct packet_manager *pm,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset);
	if (!retval)
		retval = kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

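/**
 * pm_release_ib() - Free the runlist IB, if one is allocated.
 * @pm: packet manager owning the IB
 */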
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

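/**
 * pm_debugfs_runlist() - Dump the current runlist IB to a debugfs file.
 * @m: seq_file to print into
 * @data: the packet_manager whose runlist IB is dumped
 */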
int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, "  No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

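/**
 * pm_debugfs_hang_hws() - Deliberately hang the HWS for debugging.
 * @pm: packet manager whose HIQ is used for submission
 *
 * Submits a packet of 0x55 filler bytes that the HWS cannot parse,
 * which is expected to hang it, e.g. to exercise reset handling.
 */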
int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	uint32_t *buffer, size;
	int r = 0;

	if (!pm->priv_queue)
		return -EAGAIN;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}
	memset(buffer, 0x55, size);
	kq_submit_packet(pm->priv_queue);

	dev_info(dev, "Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
		 buffer[0], buffer[1], buffer[2], buffer[3], buffer[4],
		 buffer[5], buffer[6]);
out:
	mutex_unlock(&pm->lock);
	return r;
}

#endif