// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"

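/* Advance the runlist IB write pointer, which is kept in dwords, by an
 * increment given in bytes, warning if the result would run past the end
 * of the IB.
 */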
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

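/* Compute the allocation size of the runlist IB for the current set of
 * processes and queues, and report whether the runlist is over subscribed,
 * i.e. whether it holds more work than the HWS can schedule concurrently
 * and therefore needs to be chained.
 */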
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_node *dev = pm->dqm->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->active_queue_count;
	compute_queue_count = pm->dqm->active_cp_queue_count;
	gws_queue_count = pm->dqm->gws_queue_count;

	/* check if there is over subscription
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	*over_subscription = false;

	if (dev->max_proc_per_quantum > 1)
		max_proc_per_quantum = dev->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_cp_queues_num(pm->dqm) ||
	    gws_queue_count > 1) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* calculate run list ib allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	pr_debug("runlist ib size %d\n", *rlib_size);
}

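/* Allocate the runlist IB from the device's GTT sub-allocator and return
 * its CPU and GPU addresses; the buffer is zeroed before use.
 */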
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);
	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);
	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;
out:
	mutex_unlock(&pm->lock);
	return retval;
}

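/* Build the runlist IB: one map_process packet per process, followed by
 * map_queues packets for each of its active kernel and user queues. If the
 * runlist is over subscribed, a chained runlist packet pointing back to the
 * start of the IB is appended.
 */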
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->active_queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm, &rl_buffer[rl_wptr],
						kq->queue, qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr, pm->pmf->map_queues_size,
					alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm, &rl_buffer[rl_wptr],
						q, qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr, pm->pmf->map_queues_size,
					alloc_size_bytes);
		}
	}

	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription) {
		if (!pm->is_over_subscription)
			pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);
	}
	pm->is_over_subscription = is_over_subscription;

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

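/* Select the per-ASIC packet writer functions and create the HIQ kernel
 * queue used to submit packets to the HWS.
 */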
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->adev->asic_type) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	default:
		if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2) ||
		    KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))
			pm->pmf = &kfd_aldebaran_pm_funcs;
		else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1))
			pm->pmf = &kfd_v9_pm_funcs;
		else {
			WARN(1, "Unexpected ASIC family %u",
			     dqm->dev->adev->asic_type);
			return -EINVAL;
		}
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

void pm_uninit(struct packet_manager *pm, bool hanging)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue, hanging);
	pm->priv_queue = NULL;
}

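/* Hand the scheduling resources described by *res to the HWS with a
 * set_resources packet on the HIQ.
 */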
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);
out:
	mutex_unlock(&pm->lock);
	return retval;
}

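/* Build a runlist IB covering all queues known to the DQM and submit a
 * runlist packet on the HIQ telling the HWS to execute it.
 */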
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = kq_acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	kq_submit_packet(pm->priv_queue);
	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

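/* Submit a query_status packet on the HIQ asking the HWS to write
 * fence_value to fence_address once the packet is processed.
 */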
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint64_t fence_value)
{
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);
out:
	mutex_unlock(&pm->lock);
	return retval;
}

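/* Program the scheduler's queue preemption grace period. Packet writers
 * that do not implement the packet report a zero size, in which case this
 * is a no-op.
 */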
int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
{
	int retval = 0;
	uint32_t *buffer, size;

	size = pm->pmf->set_grace_period_size;

	mutex_lock(&pm->lock);

	if (size) {
		kq_acquire_packet_buffer(pm->priv_queue,
				size / sizeof(uint32_t),
				(unsigned int **)&buffer);
		if (!buffer) {
			pr_err("Failed to allocate buffer on kernel queue\n");
			retval = -ENOMEM;
			goto out;
		}

		retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
		if (!retval)
			kq_submit_packet(pm->priv_queue);
		else
			kq_rollback_packet(pm->priv_queue);
	}

out:
	mutex_unlock(&pm->lock);
	return retval;
}

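/* Ask the HWS to unmap (preempt) the queues selected by filter and
 * filter_param, optionally resetting them.
 */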
int pm_send_unmap_queue(struct packet_manager *pm,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);
out:
	mutex_unlock(&pm->lock);
	return retval;
}

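/* Free the runlist IB, if one is currently allocated. */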
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, " No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
out:
	mutex_unlock(&pm->lock);
	return 0;
}

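/* debugfs helper: intentionally hang the HWS by submitting a buffer filled
 * with a 0x55 pattern on the HIQ.
 */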
int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	uint32_t *buffer, size;
	int r = 0;

	if (!pm->priv_queue)
		return -EAGAIN;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}
	memset(buffer, 0x55, size);
	kq_submit_packet(pm->priv_queue);

	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
		buffer[0], buffer[1], buffer[2], buffer[3],
		buffer[4], buffer[5], buffer[6]);
out:
	mutex_unlock(&pm->lock);
	return r;
}

#endif