/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"

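/*
 * Advance the runlist-IB write pointer (counted in dwords) by
 * increment_bytes, warning if the write would run past the buffer end.
 */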
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
	union PM4_MES_TYPE_3_HEADER header;

	header.u32All = 0;
	header.opcode = opcode;
	header.count = packet_size / sizeof(uint32_t) - 2;
	header.type = PM4_TYPE_3;

	return header.u32All;
}

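/*
 * Size the runlist IB from the current process and queue counts. The runlist
 * is over subscribed when more than one process is present or there are more
 * queues than the hardware scheduler can hold concurrently.
 */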
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count;
	unsigned int map_queue_size;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;

	/* check if there is over subscription */
	*over_subscription = false;
	if ((process_count > 1) || queue_count > get_queues_num(pm->dqm)) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = sizeof(struct pm4_mes_map_queues);
	/* calculate run list ib allocation size */
	*rlib_size = process_count * sizeof(struct pm4_mes_map_process) +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += sizeof(struct pm4_mes_runlist);

	pr_debug("runlist ib size %d\n", *rlib_size);
}

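/* Allocate and zero the runlist IB from the device GTT sub-allocator. */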
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);
	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		return retval;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

	return retval;
}

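/*
 * Build a RUN_LIST packet pointing the hardware scheduler at the runlist IB;
 * chain marks the packet as part of a chained (over subscribed) runlist.
 */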
static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_mes_runlist *packet;

	if (WARN_ON(!ib))
		return -EFAULT;

	packet = (struct pm4_mes_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
	packet->header.u32All = build_pm4_header(IT_RUN_LIST,
					sizeof(struct pm4_mes_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.valid = 1;
	packet->ordinal2 = lower_32_bits(ib);
	packet->bitfields3.ib_base_hi = upper_32_bits(ib);

	return 0;
}

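/* Build a MAP_PROCESS packet describing one process' memory and HSA state. */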
static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
				struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process *packet;

	packet = (struct pm4_mes_map_process *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_map_process));

	packet->header.u32All = build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_mes_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 1;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields3.page_table_base = qpd->page_table_base;
	packet->bitfields10.gds_size = qpd->gds_size;
	packet->bitfields10.num_gws = qpd->num_gws;
	packet->bitfields10.num_oac = qpd->num_oac;
	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

	/* TODO: scratch support */
	packet->sh_hidden_private_base_vmid = 0;

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	return 0;
}

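/*
 * Build a MAP_QUEUES packet for a single queue, selecting the engine and
 * queue type from the queue's properties. Only compute queues can be static.
 */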
static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;
	bool use_static = is_static;

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

	packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
					sizeof(struct pm4_mes_map_queues));
	packet->bitfields2.alloc_format =
		alloc_format__mes_map_queues__one_per_pipe_vi;
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
		engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (use_static)
			packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
				engine_sel__mes_map_queues__sdma0_vi;
		use_static = false; /* no static queues under SDMA */
		break;
	default:
		WARN(1, "queue type %d", q->properties.type);
		return -EINVAL;
	}

	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}

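/*
 * Build the runlist IB: a MAP_PROCESS packet per process, each followed by
 * MAP_QUEUES packets for its active kernel and user queues, and a chained
 * RUN_LIST packet at the end when the runlist is over subscribed.
 */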
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}
		retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;
		processes_mapped++;
		inc_wptr(&rl_wptr, sizeof(struct pm4_mes_map_process),
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;
			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);
			retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
						kq->queue, qpd->is_debug);
			if (retval)
				return retval;
			inc_wptr(&rl_wptr,
				sizeof(struct pm4_mes_map_queues),
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;
			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);
			retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
						q, qpd->is_debug);
			if (retval)
				return retval;
			inc_wptr(&rl_wptr,
				sizeof(struct pm4_mes_map_queues),
				alloc_size_bytes);
		}
	}

	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription)
		retval = pm_create_runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

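/* Initialize the packet manager and its HIQ-backed kernel queue. */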
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;
	return 0;
}

void pm_uninit(struct packet_manager *pm)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}

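/*
 * Send a SET_RESOURCES packet on the HIQ, handing VMIDs, queue slots, GDS
 * and OAC resources over to the hardware scheduler.
 */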
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	struct pm4_mes_set_resources *packet;
	int retval = 0;

	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					sizeof(*packet) / sizeof(uint32_t),
					(unsigned int **)&packet);
	if (!packet) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	memset(packet, 0, sizeof(struct pm4_mes_set_resources));
	packet->header.u32All = build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_mes_set_resources));

	packet->bitfields2.queue_type =
		queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

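/* Build a runlist IB for dqm_queues and submit a RUN_LIST packet for it. */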
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = sizeof(struct pm4_mes_runlist) / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	pm->priv_queue->ops.submit_packet(pm->priv_queue);
	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

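/*
 * Send a QUERY_STATUS packet that has the hardware scheduler write
 * fence_value to fence_address once preceding work has completed.
 */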
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	int retval;
	struct pm4_mes_query_status *packet;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->ops.acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_mes_query_status) / sizeof(uint32_t),
			(unsigned int **)&packet);
	if (retval)
		goto fail_acquire_packet_buffer;

	packet->header.u32All = build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_mes_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}

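/*
 * Send an UNMAP_QUEUES packet that preempts, or resets when reset is true,
 * the queues selected by filter and filter_param.
 */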
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	int retval;
	uint32_t *buffer;
	struct pm4_mes_unmap_queues *packet;

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->ops.acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_mes_unmap_queues) / sizeof(uint32_t),
			&buffer);
	if (retval)
		goto err_acquire_packet_buffer;

	packet = (struct pm4_mes_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
	pr_debug("static_queue: unmapping queues: filter is %d , reset is %d , type is %d\n",
		filter, reset, type);
	packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_mes_unmap_queues));
	switch (type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
		break;
	default:
		WARN(1, "queue type %d", type);
		retval = -EINVAL;
		goto err_invalid;
	}

	if (reset)
		packet->bitfields2.action =
				action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
				action__mes_unmap_queues__preempt_queues;

	switch (filter) {
	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
		packet->bitfields2.num_queues = 1;
		packet->bitfields3b.doorbell_offset0 = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_queues;
		break;
	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
		break;
	default:
		WARN(1, "filter %d", filter);
		retval = -EINVAL;
		goto err_invalid;
	}

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);
	return 0;

err_invalid:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
err_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}

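/* Free the runlist IB, if one is currently allocated. */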
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}