// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/list.h>
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
static inline struct process_queue_node *get_queue_by_qid(
			struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
			return pqn;
	}

	return NULL;
}
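
/*
 * Reserve a specific queue slot. This is used on the CRIU restore path,
 * where a queue must be recreated with the same ID it had when it was
 * checkpointed.
 */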
static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
				    unsigned int qid)
{
	if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
		return -EINVAL;

	if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
		pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
		return -ENOSPC;
	}

	return 0;
}

static int find_available_queue_slot(struct process_queue_manager *pqm,
					unsigned int *qid)
{
	unsigned long found;

	found = find_first_zero_bit(pqm->queue_slot_bitmap,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	pr_debug("The new slot id %lu\n", found);

	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		pr_info("Cannot open more queues for process with pasid 0x%x\n",
				pqm->process->pasid);
		return -ENOMEM;
	}

	set_bit(found, pqm->queue_slot_bitmap);
	*qid = found;

	return 0;
}

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
	struct kfd_node *dev = pdd->dev;

	if (pdd->already_dequeued)
		return;

	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
	pdd->already_dequeued = true;
}
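
/*
 * Attach (gws != NULL) or detach (gws == NULL) the device's global wave
 * sync resource to the queue identified by qid. Only one queue per
 * process may have GWS assigned at any time.
 */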
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws)
{
	struct kfd_node *dev = NULL;
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct kgd_mem *mem = NULL;
	int ret;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/* Only allow one queue per process to have GWS assigned */
	if (gws && pdd->qpd.num_gws)
		return -EBUSY;

	if (!gws && pdd->qpd.num_gws == 0)
		return -EINVAL;

	if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) && !dev->kfd->shared_resources.enable_mes) {
		if (gws)
			ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
				gws, &mem);
		else
			ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
				pqn->q->gws);
		if (unlikely(ret))
			return ret;
		pqn->q->gws = mem;
	} else {
		/*
		 * Intentionally set GWS to a non-NULL value
		 * for devices that do not use GWS for global wave
		 * synchronization but require the formality
		 * of setting GWS for cooperative groups.
		 */
		pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
	}

	pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;

	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, NULL);
}

void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_dequeue_from_device(p->pdds[i]);
}

int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
	INIT_LIST_HEAD(&pqm->queues);
	pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					       GFP_KERNEL);
	if (!pqm->queue_slot_bitmap)
		return -ENOMEM;
	pqm->process = p;

	return 0;
}

void pqm_uninit(struct process_queue_manager *pqm)
{
	struct process_queue_node *pqn, *next;

	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
		if (pqn->q && pqn->q->gws &&
		    KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
		    !pqn->q->device->kfd->shared_resources.enable_mes)
			amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
				pqn->q->gws);
		kfd_procfs_del_queue(pqn->q);
		uninit_queue(pqn->q);
		list_del(&pqn->process_queue_list);
		kfree(pqn);
	}

	bitmap_free(pqm->queue_slot_bitmap);
	pqm->queue_slot_bitmap = NULL;
}
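
/*
 * Common initialization for user-mode compute and SDMA queues. Doorbells
 * are mapped in user space and the VMID is assigned later by the device
 * queue manager. With the MES scheduler enabled, each queue additionally
 * gets a gang context buffer in GTT memory.
 */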
static int init_user_queue(struct process_queue_manager *pqm,
				struct kfd_node *dev, struct queue **q,
				struct queue_properties *q_properties,
				struct file *f, struct amdgpu_bo *wptr_bo,
				unsigned int qid)
{
	int retval;

	/* Doorbell initialized in user space */
	q_properties->doorbell_ptr = NULL;
	q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);

	/* let DQM handle it */
	q_properties->vmid = 0;
	q_properties->queue_id = qid;

	retval = init_queue(q, q_properties);
	if (retval != 0)
		return retval;

	(*q)->device = dev;
	(*q)->process = pqm->process;

	if (dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						AMDGPU_MES_GANG_CTX_SIZE,
						&(*q)->gang_ctx_bo,
						&(*q)->gang_ctx_gpu_addr,
						&(*q)->gang_ctx_cpu_ptr,
						false);
		if (retval) {
			pr_err("failed to allocate gang context bo\n");
			goto cleanup;
		}
		memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
		(*q)->wptr_bo = wptr_bo;
	}

	pr_debug("PQM After init queue");
	return 0;

cleanup:
	uninit_queue(*q);
	*q = NULL;
	return retval;
}
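
/*
 * Create a queue for this process on the given device. On success *qid
 * holds the new queue ID. The CRIU arguments (q_data, restore_mqd,
 * restore_ctl_stack) are non-NULL only on the restore path, where the
 * checkpointed queue ID and MQD contents are reused.
 */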
int pqm_create_queue(struct process_queue_manager *pqm,
			    struct kfd_node *dev,
			    struct file *f,
			    struct queue_properties *properties,
			    unsigned int *qid,
			    struct amdgpu_bo *wptr_bo,
			    const struct kfd_criu_queue_priv_data *q_data,
			    const void *restore_mqd,
			    const void *restore_ctl_stack,
			    uint32_t *p_doorbell_offset_in_process)
{
	int retval;
	struct kfd_process_device *pdd;
	struct queue *q;
	struct process_queue_node *pqn;
	struct kernel_queue *kq;
	enum kfd_queue_type type = properties->type;
	unsigned int max_queues = 127; /* HWS limit */

	/*
	 * On GFX 9.4.3, increase the number of queues that
	 * can be created to 255. No HWS limit on GFX 9.4.3.
	 */
	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3))
		max_queues = 255;

	q = NULL;
	kq = NULL;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	/*
	 * For a debug process, verify that it is within the static queues
	 * limit; currently the limit is set to half of the total available
	 * HQD slots. If we are just about to create the DIQ, the is_debug
	 * flag is not set yet, hence we also check the type.
	 */
	if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
		max_queues = dev->kfd->device_info.max_no_of_hqd/2;

	if (pdd->qpd.queue_count >= max_queues)
		return -ENOSPC;

	if (q_data) {
		retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
		*qid = q_data->q_id;
	} else
		retval = find_available_queue_slot(pqm, qid);

	if (retval != 0)
		return retval;

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);

	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
	if (!pqn) {
		retval = -ENOMEM;
		goto err_allocate_pqn;
	}

	switch (type) {
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		/* SDMA queues are always allocated statically no matter
		 * which scheduler mode is used. We also do not need to
		 * check whether a SDMA queue can be allocated here, because
		 * allocate_sdma_queue() in create_queue() has the
		 * corresponding check logic.
		 */
		retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;

	case KFD_QUEUE_TYPE_COMPUTE:
		/* check if there is over-subscription */
		if ((dev->dqm->sched_policy ==
		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
		(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
			retval = -EPERM;
			goto err_create_queue;
		}

		retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;
	case KFD_QUEUE_TYPE_DIQ:
		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
		if (!kq) {
			retval = -ENOMEM;
			goto err_create_queue;
		}
		kq->queue->properties.queue_id = *qid;
		pqn->kq = kq;
		pqn->q = NULL;
		retval = kfd_process_drain_interrupts(pdd);
		if (retval)
			break;

		retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
							   kq, &pdd->qpd);
		break;
	default:
		WARN(1, "Invalid queue type %d", type);
		retval = -EINVAL;
	}

	if (retval != 0) {
		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
			pqm->process->pasid, type, retval);
		goto err_create_queue;
	}

	if (q && p_doorbell_offset_in_process) {
		/* Return the doorbell offset within the doorbell page
		 * to the caller so it can be passed up to user mode
		 * (in bytes).
		 * relative doorbell index = Absolute doorbell index -
		 * absolute index of first doorbell in the page.
		 */
		uint32_t first_db_index = amdgpu_doorbell_index_on_bar(pdd->dev->adev,
								       pdd->qpd.proc_doorbells,
								       0,
								       pdd->dev->kfd->device_info.doorbell_size);

		*p_doorbell_offset_in_process = (q->properties.doorbell_off
						- first_db_index) * sizeof(uint32_t);
	}

	pr_debug("PQM After DQM create queue\n");

	list_add(&pqn->process_queue_list, &pqm->queues);

	if (q) {
		pr_debug("PQM done creating queue\n");
		kfd_procfs_add_queue(q);
		print_queue_properties(&q->properties);
	}

	return retval;

err_create_queue:
	uninit_queue(q);
	if (kq)
		kernel_queue_uninit(kq, false);
	kfree(pqn);
err_allocate_pqn:
	/* if the queues list is empty, unregister the process from the device */
	clear_bit(*qid, pqm->queue_slot_bitmap);
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
	return retval;
}
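
/*
 * Destroy the queue identified by qid and free its slot. Note that a
 * -ETIME result from the DQM (preemption timeout) is deliberately not
 * treated as fatal, so the remaining teardown still runs.
 */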
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct device_queue_manager *dqm;
	struct kfd_node *dev;
	int retval;

	dqm = NULL;
	retval = 0;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	dev = NULL;
	if (pqn->kq)
		dev = pqn->kq->dev;
	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	if (pqn->kq) {
		/* destroy kernel queue (DIQ) */
		dqm = pqn->kq->dev->dqm;
		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
		kernel_queue_uninit(pqn->kq, false);
	}

	if (pqn->q) {
		kfd_procfs_del_queue(pqn->q);
		dqm = pqn->q->device->dqm;
		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
		if (retval) {
			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
				pqm->process->pasid,
				pqn->q->properties.queue_id, retval);
			if (retval != -ETIME)
				goto err_destroy_queue;
		}

		if (pqn->q->gws) {
			if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
			    !dev->kfd->shared_resources.enable_mes)
				amdgpu_amdkfd_remove_gws_from_process(
						pqm->process->kgd_process_info,
						pqn->q->gws);
			pdd->qpd.num_gws = 0;
		}

		if (dev->kfd->shared_resources.enable_mes) {
			amdgpu_amdkfd_free_gtt_mem(dev->adev,
						   pqn->q->gang_ctx_bo);
			if (pqn->q->wptr_bo)
				amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
		}
		uninit_queue(pqn->q);
	}

	list_del(&pqn->process_queue_list);
	kfree(pqn);
	clear_bit(qid, pqm->queue_slot_bitmap);

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dqm->ops.unregister_process(dqm, &pdd->qpd);

err_destroy_queue:
	return retval;
}

int pqm_update_queue_properties(struct process_queue_manager *pqm,
				unsigned int qid, struct queue_properties *p)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	pqn->q->properties.queue_address = p->queue_address;
	pqn->q->properties.queue_size = p->queue_size;
	pqn->q->properties.queue_percent = p->queue_percent;
	pqn->q->properties.priority = p->priority;
	pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, NULL);
	if (retval != 0)
		return retval;

	return 0;
}
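
/*
 * Update the queue's MQD, primarily for CU masking. The mask is rejected
 * while the debugger CU-masking workaround is active, and on WGP-based
 * ASICs (GFX10 and later) CUs must be enabled in adjacent pairs.
 */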
int pqm_update_mqd(struct process_queue_manager *pqm,
				unsigned int qid, struct mqd_update_info *minfo)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/* CUs are masked for debugger requirements, so deny the user mask */
	if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
		return -EBUSY;

	/* ASICs that have WGPs must enforce pairwise enabled mask checks. */
	if (minfo && minfo->cu_mask.ptr &&
			KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
		int i;

		for (i = 0; i < minfo->cu_mask.count; i += 2) {
			uint32_t cu_pair = (minfo->cu_mask.ptr[i / 32] >> (i % 32)) & 0x3;

			if (cu_pair && cu_pair != 0x3) {
				pr_debug("CUs must be adjacent pairwise enabled.\n");
				return -EINVAL;
			}
		}
	}

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, minfo);
	if (retval != 0)
		return retval;

	if (minfo && minfo->cu_mask.ptr)
		pqn->q->properties.is_user_cu_masked = true;

	return 0;
}

struct kernel_queue *pqm_get_kernel_queue(
					struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (pqn && pqn->kq)
		return pqn->kq;

	return NULL;
}

struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	return pqn ? pqn->q : NULL;
}

int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n",
			 qid);
		return -EFAULT;
	}

	return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
						       pqn->q,
						       ctl_stack,
						       ctl_stack_used_size,
						       save_area_used_size);
}
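
/*
 * Copy a snapshot entry for each user queue of this process into the
 * caller's buffer, up to the capacity passed in *num_qss_entries. On
 * return *num_qss_entries holds the total queue count, so a caller can
 * size its buffer by passing a capacity of zero first.
 */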
int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
			   uint64_t exception_clear_mask,
			   void __user *buf,
			   int *num_qss_entries,
			   uint32_t *entry_size)
{
	struct process_queue_node *pqn;
	struct kfd_queue_snapshot_entry src;
	uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
	int r = 0;

	*num_qss_entries = 0;
	if (!(*entry_size))
		return -EINVAL;

	*entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
	mutex_lock(&pqm->process->event_mutex);

	memset(&src, 0, sizeof(src));

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		if (*num_qss_entries < tmp_qss_entries) {
			set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);

			if (copy_to_user(buf, &src, *entry_size)) {
				r = -EFAULT;
				break;
			}
			buf += tmp_entry_size;
		}
		*num_qss_entries += 1;
	}

	mutex_unlock(&pqm->process->event_mutex);
	return r;
}

static int get_queue_data_sizes(struct kfd_process_device *pdd,
				struct queue *q,
				uint32_t *mqd_size,
				uint32_t *ctl_stack_size)
{
	int ret;

	ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
					    q->properties.queue_id,
					    mqd_size,
					    ctl_stack_size);
	if (ret)
		pr_err("Failed to get queue dump info (%d)\n", ret);

	return ret;
}
int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes)
{
	uint32_t extra_data_sizes = 0;
	struct queue *q;
	int i;
	int ret;

	*num_queues = 0;

	/* Run over all PDDs of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		list_for_each_entry(q, &pdd->qpd.queues_list, list) {
			if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
				uint32_t mqd_size, ctl_stack_size;

				*num_queues = *num_queues + 1;

				ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
				if (ret)
					return ret;

				extra_data_sizes += mqd_size + ctl_stack_size;
			} else {
				pr_err("Unsupported queue type (%d)\n", q->properties.type);
				return -EOPNOTSUPP;
			}
		}
	}
	*priv_data_sizes = extra_data_sizes +
				(*num_queues * sizeof(struct kfd_criu_queue_priv_data));

	return 0;
}

static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
			      unsigned int qid,
			      void *mqd,
			      void *ctl_stack)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
						       pqn->q, mqd, ctl_stack);
}
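
/*
 * Serialize one queue into its CRIU private-data record. The record is
 * laid out as the priv_data header immediately followed by the MQD and
 * then the control stack, which is the layout the restore path expects.
 */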
static int criu_checkpoint_queue(struct kfd_process_device *pdd,
			   struct queue *q,
			   struct kfd_criu_queue_priv_data *q_data)
{
	uint8_t *mqd, *ctl_stack;
	int ret;

	mqd = (void *)(q_data + 1);
	ctl_stack = mqd + q_data->mqd_size;

	q_data->gpu_id = pdd->user_gpu_id;
	q_data->type = q->properties.type;
	q_data->format = q->properties.format;
	q_data->q_id = q->properties.queue_id;
	q_data->q_address = q->properties.queue_address;
	q_data->q_size = q->properties.queue_size;
	q_data->priority = q->properties.priority;
	q_data->q_percent = q->properties.queue_percent;
	q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
	q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
	q_data->doorbell_id = q->doorbell_id;

	q_data->sdma_id = q->sdma_id;

	q_data->eop_ring_buffer_address =
		q->properties.eop_ring_buffer_address;

	q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;

	q_data->ctx_save_restore_area_address =
		q->properties.ctx_save_restore_area_address;

	q_data->ctx_save_restore_area_size =
		q->properties.ctx_save_restore_area_size;

	q_data->gws = !!q->gws;

	ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
	if (ret) {
		pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
		return ret;
	}

	pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
	return ret;
}

static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
				   uint8_t __user *user_priv,
				   unsigned int *q_index,
				   uint64_t *queues_priv_data_offset)
{
	unsigned int q_private_data_size = 0;
	uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
	struct queue *q;
	int ret = 0;

	list_for_each_entry(q, &pdd->qpd.queues_list, list) {
		struct kfd_criu_queue_priv_data *q_data;
		uint64_t q_data_size;
		uint32_t mqd_size;
		uint32_t ctl_stack_size;

		if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {

			pr_err("Unsupported queue type (%d)\n", q->properties.type);
			ret = -EOPNOTSUPP;
			break;
		}

		ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
		if (ret)
			break;

		q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;

		/* Increase local buffer space if needed */
		if (q_private_data_size < q_data_size) {
			kfree(q_private_data);

			q_private_data = kzalloc(q_data_size, GFP_KERNEL);
			if (!q_private_data) {
				ret = -ENOMEM;
				break;
			}
			q_private_data_size = q_data_size;
		}

		q_data = (struct kfd_criu_queue_priv_data *)q_private_data;

		/* data stored in this order: priv_data, mqd, ctl_stack */
		q_data->mqd_size = mqd_size;
		q_data->ctl_stack_size = ctl_stack_size;

		ret = criu_checkpoint_queue(pdd, q, q_data);
		if (ret)
			break;

		q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;

		ret = copy_to_user(user_priv + *queues_priv_data_offset,
				q_data, q_data_size);
		if (ret) {
			ret = -EFAULT;
			break;
		}
		*queues_priv_data_offset += q_data_size;
		*q_index = *q_index + 1;
	}

	kfree(q_private_data);

	return ret;
}

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			 uint8_t __user *user_priv_data,
			 uint64_t *priv_data_offset)
{
	int ret = 0, pdd_index, q_index = 0;

	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
		struct kfd_process_device *pdd = p->pdds[pdd_index];

		/*
		 * criu_checkpoint_queues_device will copy data to user and update q_index and
		 * queues_priv_data_offset
		 */
		ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
						    priv_data_offset);

		if (ret)
			break;
	}

	return ret;
}

static void set_queue_properties_from_criu(struct queue_properties *qp,
					  struct kfd_criu_queue_priv_data *q_data)
{
	qp->is_interop = false;
	qp->queue_percent = q_data->q_percent;
	qp->priority = q_data->priority;
	qp->queue_address = q_data->q_address;
	qp->queue_size = q_data->q_size;
	qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
	qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
	qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
	qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
	qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
	qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
	qp->ctl_stack_size = q_data->ctl_stack_size;
	qp->type = q_data->type;
	qp->format = q_data->format;
}
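
/*
 * CRIU restore: read one queue's private-data record from user space,
 * validate it against the remaining buffer size, and recreate the queue
 * through pqm_create_queue() with the checkpointed MQD and control stack.
 * GWS ownership is reattached afterwards if the queue had it.
 */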
int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_ptr,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size)
{
	uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
	struct kfd_criu_queue_priv_data *q_data;
	struct kfd_process_device *pdd;
	uint64_t q_extra_data_size;
	struct queue_properties qp;
	unsigned int queue_id;
	int ret = 0;

	if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
		return -EINVAL;

	q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
	if (!q_data)
		return -ENOMEM;

	ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += sizeof(*q_data);
	q_extra_data_size = (uint64_t)q_data->ctl_stack_size + q_data->mqd_size;

	if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
	if (!q_extra_data) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += q_extra_data_size;

	pdd = kfd_process_device_data_by_id(p, q_data->gpu_id);
	if (!pdd) {
		pr_err("Failed to get pdd\n");
		ret = -EINVAL;
		goto exit;
	}

	/* data stored in this order: mqd, ctl_stack */
	mqd = q_extra_data;
	ctl_stack = mqd + q_data->mqd_size;

	memset(&qp, 0, sizeof(qp));
	set_queue_properties_from_criu(&qp, q_data);

	print_queue_properties(&qp);

	ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, NULL, q_data, mqd, ctl_stack,
				NULL);
	if (ret) {
		pr_err("Failed to create new queue err:%d\n", ret);
		goto exit;
	}

	if (q_data->gws)
		ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);

exit:
	if (ret)
		pr_err("Failed to restore queue (%d)\n", ret);
	else
		pr_debug("Queue id %d was restored successfully\n", queue_id);

	kfree(q_data);
	kfree(q_extra_data);

	return ret;
}

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  uint32_t *mqd_size,
				  uint32_t *ctl_stack_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
						       pqn->q, mqd_size, ctl_stack_size);
	return 0;
}
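
/*
 * Debugfs support: dump the MQD of every queue in the process. On
 * multi-XCC parts a compute queue has one MQD per XCC, strided by
 * mqd_stride().
 */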
#if defined(CONFIG_DEBUG_FS)

int pqm_debugfs_mqds(struct seq_file *m, void *data)
{
	struct process_queue_manager *pqm = data;
	struct process_queue_node *pqn;
	struct queue *q;
	enum KFD_MQD_TYPE mqd_type;
	struct mqd_manager *mqd_mgr;
	int r = 0, xcc, num_xccs = 1;
	void *mqd;
	uint64_t size = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			q = pqn->q;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_SDMA:
			case KFD_QUEUE_TYPE_SDMA_XGMI:
				seq_printf(m, "  SDMA queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_SDMA;
				break;
			case KFD_QUEUE_TYPE_COMPUTE:
				seq_printf(m, "  Compute queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_CP;
				num_xccs = NUM_XCC(q->device->xcc_mask);
				break;
			default:
				seq_printf(m,
				"  Bad user queue type %d on device %x\n",
					   q->properties.type, q->device->id);
				continue;
			}
			mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
			size = mqd_mgr->mqd_stride(mqd_mgr,
							&q->properties);
		} else if (pqn->kq) {
			q = pqn->kq->queue;
			mqd_mgr = pqn->kq->mqd_mgr;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_DIQ:
				seq_printf(m, "  DIQ on device %x\n",
					   pqn->kq->dev->id);
				break;
			default:
				seq_printf(m,
				"  Bad kernel queue type %d on device %x\n",
					   q->properties.type,
					   pqn->kq->dev->id);
				continue;
			}
		} else {
			seq_printf(m,
			"  Weird: Queue node with neither kernel nor user queue\n");
			continue;
		}

		for (xcc = 0; xcc < num_xccs; xcc++) {
			mqd = q->mqd + size * xcc;
			r = mqd_mgr->debugfs_show_mqd(m, mqd);
			if (r != 0)
				break;
		}
	}

	return r;
}

#endif