/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "kfd_debug.h"
#include "kfd_device_queue_manager.h"
#include "kfd_topology.h"
#include <linux/file.h>
#include <uapi/linux/kfd_ioctl.h>
#define MAX_WATCH_ADDRESSES	4
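/* poll for a pending debug event on behalf of the debugger: report the first
 * queue, device or process exception that is both raised and subscribed via
 * the exception enable mask, then clear the bits requested in
 * exception_clear_mask.
 */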
int kfd_dbg_ev_query_debug_event(struct kfd_process *process,
		      unsigned int *queue_id,
		      unsigned int *gpu_id,
		      uint64_t exception_clear_mask,
		      uint64_t *event_status)
{
	struct process_queue_manager *pqm;
	struct process_queue_node *pqn;
	int i;

	if (!(process && process->debug_trap_enabled))
		return -ENODATA;

	mutex_lock(&process->event_mutex);
	*event_status = 0;
	*queue_id = 0;
	*gpu_id = 0;

	/* find and report queue events */
	pqm = &process->pqm;
	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		uint64_t tmp = process->exception_enable_mask;

		if (!pqn->q)
			continue;

		tmp &= pqn->q->properties.exception_status;
		if (!tmp)
			continue;

		*event_status = pqn->q->properties.exception_status;
		*queue_id = pqn->q->properties.queue_id;
		*gpu_id = pqn->q->device->id;
		pqn->q->properties.exception_status &= ~exception_clear_mask;
		goto out;
	}

	/* find and report device events */
	for (i = 0; i < process->n_pdds; i++) {
		struct kfd_process_device *pdd = process->pdds[i];
		uint64_t tmp = process->exception_enable_mask
						& pdd->exception_status;

		if (!tmp)
			continue;

		*event_status = pdd->exception_status;
		*gpu_id = pdd->dev->id;
		pdd->exception_status &= ~exception_clear_mask;
		goto out;
	}

	/* report process events */
	if (process->exception_enable_mask & process->exception_status) {
		*event_status = process->exception_status;
		process->exception_status &= ~exception_clear_mask;
	}

out:
	mutex_unlock(&process->event_mutex);
	return *event_status ? 0 : -EAGAIN;
}

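/* worker that pokes the debugger's polled event file descriptor with a single
 * byte; scheduled by kfd_dbg_ev_raise() when use_worker is set so the write
 * does not happen in the raising context.
 */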
void debug_event_write_work_handler(struct work_struct *work)
{
	struct kfd_process *process;
	loff_t pos = 0;
	static const char write_data = '.';

	process = container_of(work,
			struct kfd_process,
			debug_event_workarea);

	kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
}

/* update process/device/queue exception status, write to descriptor
 * only if exception_status is enabled.
 */
bool kfd_dbg_ev_raise(uint64_t event_mask,
			struct kfd_process *process, struct kfd_node *dev,
			unsigned int source_id, bool use_worker,
			void *exception_data, size_t exception_data_size)
{
	struct process_queue_manager *pqm;
	struct process_queue_node *pqn;
	int i;
	static const char write_data = '.';
	loff_t pos = 0;
	bool is_subscribed = true;

	if (!(process && process->debug_trap_enabled))
		return false;

	mutex_lock(&process->event_mutex);

	if (event_mask & KFD_EC_MASK_DEVICE) {
		for (i = 0; i < process->n_pdds; i++) {
			struct kfd_process_device *pdd = process->pdds[i];

			if (pdd->dev != dev)
				continue;

			pdd->exception_status |= event_mask & KFD_EC_MASK_DEVICE;

			if (event_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
				if (!pdd->vm_fault_exc_data) {
					pdd->vm_fault_exc_data = kmemdup(
							exception_data,
							exception_data_size,
							GFP_KERNEL);
					if (!pdd->vm_fault_exc_data)
						pr_debug("Failed to allocate exception data memory");
				} else {
					pr_debug("Debugger exception data not saved\n");
					print_hex_dump_bytes("exception data: ",
							DUMP_PREFIX_OFFSET,
							exception_data,
							exception_data_size);
				}
			}
			break;
		}
	} else if (event_mask & KFD_EC_MASK_PROCESS) {
		process->exception_status |= event_mask & KFD_EC_MASK_PROCESS;
	} else {
		pqm = &process->pqm;
		list_for_each_entry(pqn, &pqm->queues,
				process_queue_list) {
			int target_id;

			if (!pqn->q)
				continue;

			target_id = event_mask & KFD_EC_MASK(EC_QUEUE_NEW) ?
					pqn->q->properties.queue_id :
					pqn->q->doorbell_id;

			if (pqn->q->device != dev || target_id != source_id)
				continue;

			pqn->q->properties.exception_status |= event_mask;
			break;
		}
	}

	if (process->exception_enable_mask & event_mask) {
		if (use_worker)
			schedule_work(&process->debug_event_workarea);
		else
			kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
	} else {
		is_subscribed = false;
	}

	mutex_unlock(&process->event_mutex);
	return is_subscribed;
}

/* set pending event queue entry from ring entry */
bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
				   unsigned int pasid,
				   uint32_t doorbell_id,
				   uint64_t trap_mask,
				   void *exception_data,
				   size_t exception_data_size)
{
	struct kfd_process *p;
	bool signaled_to_debugger_or_runtime = false;

	p = kfd_lookup_process_by_pasid(pasid);
	if (!p)
		return false;

	if (!kfd_dbg_ev_raise(trap_mask, p, dev, doorbell_id, true,
			      exception_data, exception_data_size)) {
		struct process_queue_manager *pqm;
		struct process_queue_node *pqn;

		if (!!(trap_mask & KFD_EC_MASK_QUEUE) &&
		    p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {
			mutex_lock(&p->mutex);

			pqm = &p->pqm;
			list_for_each_entry(pqn, &pqm->queues,
					process_queue_list) {
				if (!(pqn->q && pqn->q->device == dev &&
				      pqn->q->doorbell_id == doorbell_id))
					continue;

				kfd_send_exception_to_runtime(p, pqn->q->properties.queue_id,
							      trap_mask);

				signaled_to_debugger_or_runtime = true;
				break;
			}

			mutex_unlock(&p->mutex);
		} else if (trap_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
			kfd_dqm_evict_pasid(dev->dqm, p->pasid);
			kfd_signal_vm_fault_event(dev, p->pasid, NULL,
						  exception_data);

			signaled_to_debugger_or_runtime = true;
		}
	} else {
		signaled_to_debugger_or_runtime = true;
	}

	kfd_unref_process(p);
	return signaled_to_debugger_or_runtime;
}

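/* forward an exception to the HSA runtime: translate a device memory
 * violation into a VM fault event signal, release the runtime enable
 * semaphore for EC_PROCESS_RUNTIME, and hand any remaining reason to
 * kfd_send_exception_to_runtime().
 */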
int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
					unsigned int dev_id,
					unsigned int queue_id,
					uint64_t error_reason)
{
	if (error_reason & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
		struct kfd_process_device *pdd = NULL;
		struct kfd_hsa_memory_exception_data *data;
		int i;

		for (i = 0; i < p->n_pdds; i++) {
			if (p->pdds[i]->dev->id == dev_id) {
				pdd = p->pdds[i];
				break;
			}
		}
		if (!pdd)
			return -ENODEV;

		data = (struct kfd_hsa_memory_exception_data *)
						pdd->vm_fault_exc_data;

		kfd_dqm_evict_pasid(pdd->dev->dqm, p->pasid);
		kfd_signal_vm_fault_event(pdd->dev, p->pasid, NULL, data);
		error_reason &= ~KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION);
	}

	if (error_reason & (KFD_EC_MASK(EC_PROCESS_RUNTIME))) {
		/*
		 * block should only happen after the debugger receives runtime
		 * enable notice.
		 */
		up(&p->runtime_enable_sema);
		error_reason &= ~KFD_EC_MASK(EC_PROCESS_RUNTIME);
		return 0;
	}

	if (error_reason)
		return kfd_send_exception_to_runtime(p, queue_id, error_reason);

	return 0;
}

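/* enable or disable the CWSR debug workaround on a single queue by updating
 * its MQD; refused while a user CU mask is active on the queue.
 */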
static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable)
{
	struct mqd_update_info minfo = {0};
	int err;

	if (!q || !kfd_dbg_has_cwsr_workaround(q->device))
		return 0;

	if (enable && q->properties.is_user_cu_masked)
		return -EBUSY;

	minfo.update_flag = enable ? UPDATE_FLAG_DBG_WA_ENABLE : UPDATE_FLAG_DBG_WA_DISABLE;

	q->properties.is_dbg_wa = enable;
	err = q->device->dqm->ops.update_queue(q->device->dqm, q, &minfo);
	if (err)
		q->properties.is_dbg_wa = false;

	return err;
}

static int kfd_dbg_set_workaround(struct kfd_process *target, bool enable)
{
	struct process_queue_manager *pqm = &target->pqm;
	struct process_queue_node *pqn;
	int r = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		r = kfd_dbg_set_queue_workaround(pqn->q, enable);
		if (enable && r)
			goto unwind;
	}

	return 0;

unwind:
	list_for_each_entry(pqn, &pqm->queues, process_queue_list)
		kfd_dbg_set_queue_workaround(pqn->q, false);

	if (enable)
		target->runtime_info.runtime_state = r == -EBUSY ?
				DEBUG_RUNTIME_STATE_ENABLED_BUSY :
				DEBUG_RUNTIME_STATE_ENABLED_ERROR;

	return r;
}

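/* push the per-VMID debug state (SPI debug override and launch mode, watch
 * points and debug flags) to the MES scheduler for this process device.
 */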
int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
{
	uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
	uint32_t flags = pdd->process->dbg_flags;

	if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
		return 0;

	return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
					      pdd->watch_points, flags, sq_trap_en);
}

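/* per-device watch point IDs are allocated from a shared bitmask under
 * watch_points_lock; a process may only clear watch points it allocated.
 */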
#define KFD_DEBUGGER_INVALID_WATCH_POINT_ID -1
static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_id)
{
	int i;

	*watch_id = KFD_DEBUGGER_INVALID_WATCH_POINT_ID;

	spin_lock(&pdd->dev->kfd->watch_points_lock);

	for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
		/* device watchpoint in use so skip */
		if ((pdd->dev->kfd->alloc_watch_ids >> i) & 0x1)
			continue;

		pdd->alloc_watch_ids |= 0x1 << i;
		pdd->dev->kfd->alloc_watch_ids |= 0x1 << i;
		*watch_id = i;
		spin_unlock(&pdd->dev->kfd->watch_points_lock);
		return 0;
	}

	spin_unlock(&pdd->dev->kfd->watch_points_lock);
	return -ENOMEM;
}

static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
{
	spin_lock(&pdd->dev->kfd->watch_points_lock);

	/* process owns device watch point so safe to clear */
	if ((pdd->alloc_watch_ids >> watch_id) & 0x1) {
		pdd->alloc_watch_ids &= ~(0x1 << watch_id);
		pdd->dev->kfd->alloc_watch_ids &= ~(0x1 << watch_id);
	}

	spin_unlock(&pdd->dev->kfd->watch_points_lock);
}

static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
{
	bool owns_watch_id = false;

	spin_lock(&pdd->dev->kfd->watch_points_lock);
	owns_watch_id = watch_id < MAX_WATCH_ADDRESSES &&
			((pdd->alloc_watch_ids >> watch_id) & 0x1);
	spin_unlock(&pdd->dev->kfd->watch_points_lock);

	return owns_watch_id;
}

int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd,
					 uint32_t watch_id)
{
	int r;

	if (!kfd_dbg_owns_dev_watch_id(pdd, watch_id))
		return -EINVAL;

	if (!pdd->dev->kfd->shared_resources.enable_mes) {
		r = debug_lock_and_unmap(pdd->dev->dqm);
		if (r)
			return r;
	}

	amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
	pdd->watch_points[watch_id] = pdd->dev->kfd2kgd->clear_address_watch(
							pdd->dev->adev, watch_id);
	amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

	if (!pdd->dev->kfd->shared_resources.enable_mes)
		r = debug_map_and_unlock(pdd->dev->dqm);
	else
		r = kfd_dbg_set_mes_debug_mode(pdd, true);

	kfd_dbg_clear_dev_watch_id(pdd, watch_id);
	return r;
}

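/* allocate a watch point ID and program the address watch registers on every
 * XCC instance, unmapping queues (non-MES) or refreshing MES debug state
 * around the register writes.
 */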
int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
					uint64_t watch_address,
					uint32_t watch_address_mask,
					uint32_t *watch_id,
					uint32_t watch_mode)
{
	int xcc_id, r = kfd_dbg_get_dev_watch_id(pdd, watch_id);
	uint32_t xcc_mask = pdd->dev->xcc_mask;

	if (r)
		return r;

	if (!pdd->dev->kfd->shared_resources.enable_mes) {
		r = debug_lock_and_unmap(pdd->dev->dqm);
		if (r) {
			kfd_dbg_clear_dev_watch_id(pdd, *watch_id);
			return r;
		}
	}

	amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
	for_each_inst(xcc_id, xcc_mask)
		pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch(
				pdd->dev->adev, watch_address, watch_address_mask,
				*watch_id, watch_mode,
				pdd->dev->vm_info.last_vmid_kfd, xcc_id);
	amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

	if (!pdd->dev->kfd->shared_resources.enable_mes)
		r = debug_map_and_unlock(pdd->dev->dqm);
	else
		r = kfd_dbg_set_mes_debug_mode(pdd, true);

	/* HWS is broken so no point in HW rollback but release the watchpoint anyways */
	if (r)
		kfd_dbg_clear_dev_watch_id(pdd, *watch_id);

	return 0;
}

static void kfd_dbg_clear_process_address_watch(struct kfd_process *target)
{
	int i, j;

	for (i = 0; i < target->n_pdds; i++)
		for (j = 0; j < MAX_WATCH_ADDRESSES; j++)
			kfd_dbg_trap_clear_dev_address_watch(target->pdds[i], j);
}

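/* update the process debug flags (currently only the single memory op
 * setting) and refresh the runlist or MES debug state on every per-VMID
 * capable device, rewinding the devices already updated on failure.
 */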
int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags)
{
	uint32_t prev_flags = target->dbg_flags;
	int i, r = 0, rewind_count = 0;

	for (i = 0; i < target->n_pdds; i++) {
		if (!kfd_dbg_is_per_vmid_supported(target->pdds[i]->dev) &&
			(*flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP)) {
			*flags = prev_flags;
			return -EACCES;
		}
	}

	target->dbg_flags = *flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP;
	*flags = prev_flags;
	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
			continue;

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			r = debug_refresh_runlist(pdd->dev->dqm);
		else
			r = kfd_dbg_set_mes_debug_mode(pdd, true);

		if (r) {
			target->dbg_flags = prev_flags;
			break;
		}

		rewind_count++;
	}

	/* Rewind flags */
	if (r) {
		target->dbg_flags = prev_flags;

		for (i = 0; i < rewind_count; i++) {
			struct kfd_process_device *pdd = target->pdds[i];

			if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
				continue;

			if (!pdd->dev->kfd->shared_resources.enable_mes)
				debug_refresh_runlist(pdd->dev->dqm);
			else
				kfd_dbg_set_mes_debug_mode(pdd, true);
		}
	}

	return r;
}

/* kfd_dbg_trap_deactivate:
 *	target: target process
 *	unwind: If this is unwinding a failed kfd_dbg_trap_enable()
 *	unwind_count:
 *		If unwind == true, how far down the pdd list we need
 *		to unwind
 *		else: ignored
 */
void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count)
{
	int i;

	if (!unwind) {
		uint32_t flags = 0;
		int resume_count = resume_queues(target, 0, NULL);

		if (resume_count)
			pr_debug("Resumed %d queues\n", resume_count);

		cancel_work_sync(&target->debug_event_workarea);
		kfd_dbg_clear_process_address_watch(target);
		kfd_dbg_trap_set_wave_launch_mode(target, 0);

		kfd_dbg_trap_set_flags(target, &flags);
	}

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		/* If this is an unwind, and we have unwound the required
		 * enable calls on the pdd list, we need to stop now
		 * otherwise we may mess up another debugger session.
		 */
		if (unwind && i == unwind_count)
			break;

		kfd_process_set_trap_debug_flag(&pdd->qpd, false);

		/* GFX off is already disabled by debug activate if not RLC restore supported. */
		if (kfd_dbg_is_rlc_restore_supported(pdd->dev))
			amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
		pdd->spi_dbg_override =
				pdd->dev->kfd2kgd->disable_debug_trap(
				pdd->dev->adev,
				target->runtime_info.ttmp_setup,
				pdd->dev->vm_info.last_vmid_kfd);
		amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		if (!kfd_dbg_is_per_vmid_supported(pdd->dev) &&
				release_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd))
			pr_err("Failed to release debug vmid on [%i]\n", pdd->dev->id);

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			debug_refresh_runlist(pdd->dev->dqm);
		else
			kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
	}

	kfd_dbg_set_workaround(target, false);
}

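/* drain pending interrupts and clear all queue, device and process exception
 * status bits for the target.
 */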
static void kfd_dbg_clean_exception_status(struct kfd_process *target)
{
	struct process_queue_manager *pqm;
	struct process_queue_node *pqn;
	int i;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		kfd_process_drain_interrupts(pdd);

		pdd->exception_status = 0;
	}

	pqm = &target->pqm;
	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		pqn->q->properties.exception_status = 0;
	}

	target->exception_status = 0;
}

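/* detach the debugger from the target: deactivate now if the runtime is
 * enabled, otherwise defer deactivation to runtime disable, then release the
 * event file and the extra process reference taken at enable time.
 */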
int kfd_dbg_trap_disable(struct kfd_process *target)
{
	if (!target->debug_trap_enabled)
		return 0;

	/*
	 * Defer deactivation to runtime if runtime not enabled otherwise reset
	 * attached running target runtime state to enable for re-attach.
	 */
	if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED)
		kfd_dbg_trap_deactivate(target, false, 0);
	else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
		target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;

	fput(target->dbg_ev_file);
	target->dbg_ev_file = NULL;

	if (target->debugger_process) {
		atomic_dec(&target->debugger_process->debugged_process_count);
		target->debugger_process = NULL;
	}

	target->debug_trap_enabled = false;
	kfd_dbg_clean_exception_status(target);
	kfd_unref_process(target);

	return 0;
}

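/* activate debug trap handling on every device of the target process; on any
 * failure the devices enabled so far are unwound so activation is all or
 * nothing.
 */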
int kfd_dbg_trap_activate(struct kfd_process *target)
{
	int i, r = 0;

	r = kfd_dbg_set_workaround(target, true);
	if (r)
		return r;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) {
			r = reserve_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd);

			if (r) {
				target->runtime_info.runtime_state = (r == -EBUSY) ?
							DEBUG_RUNTIME_STATE_ENABLED_BUSY :
							DEBUG_RUNTIME_STATE_ENABLED_ERROR;

				goto unwind_err;
			}
		}

		/* Disable GFX OFF to prevent garbage read/writes to debug registers.
		 * If RLC restore of debug registers is not supported and runtime enable
		 * hasn't done so already on ttmp setup request, restore the trap config registers.
		 *
		 * If RLC restore of debug registers is not supported, keep gfx off disabled for
		 * the debug session.
		 */
		amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
		if (!(kfd_dbg_is_rlc_restore_supported(pdd->dev) ||
						target->runtime_info.ttmp_setup))
			pdd->dev->kfd2kgd->enable_debug_trap(pdd->dev->adev, true,
								pdd->dev->vm_info.last_vmid_kfd);

		pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
					pdd->dev->adev,
					false,
					pdd->dev->vm_info.last_vmid_kfd);

		if (kfd_dbg_is_rlc_restore_supported(pdd->dev))
			amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		/*
		 * Setting the debug flag in the trap handler requires that the TMA has been
		 * allocated, which occurs during CWSR initialization.
		 * In the event that CWSR has not been initialized at this point, setting the
		 * flag will be called again during CWSR initialization if the target process
		 * is still debug enabled.
		 */
		kfd_process_set_trap_debug_flag(&pdd->qpd, true);

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			r = debug_refresh_runlist(pdd->dev->dqm);
		else
			r = kfd_dbg_set_mes_debug_mode(pdd, true);

		if (r) {
			target->runtime_info.runtime_state =
					DEBUG_RUNTIME_STATE_ENABLED_ERROR;
			goto unwind_err;
		}
	}

	return 0;

unwind_err:
	/* Enabling debug failed, we need to disable on
	 * all GPUs so the enable is all or nothing.
	 */
	kfd_dbg_trap_deactivate(target, true, i);
	return r;
}

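/* attach the debugger: validate device support, remember the polled event
 * file descriptor, activate immediately if the runtime is already enabled and
 * copy the runtime info snapshot back to the caller.
 */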
int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
			void __user *runtime_info, uint32_t *runtime_size)
{
	struct file *f;
	uint32_t copy_size;
	int i, r = 0;

	if (target->debug_trap_enabled)
		return -EALREADY;

	/* Enable pre-checks */
	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		if (!KFD_IS_SOC15(pdd->dev))
			return -ENODEV;

		if (pdd->qpd.num_gws && (!kfd_dbg_has_gws_support(pdd->dev) ||
					 kfd_dbg_has_cwsr_workaround(pdd->dev)))
			return -EBUSY;
	}

	copy_size = min((size_t)(*runtime_size), sizeof(target->runtime_info));

	f = fget(fd);
	if (!f) {
		pr_err("Failed to get file for (%i)\n", fd);
		return -EBADF;
	}

	target->dbg_ev_file = f;

	/* defer activation to runtime if not runtime enabled */
	if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED)
		kfd_dbg_trap_activate(target);

	/* We already hold the process reference but hold another one for the
	 * debug session.
	 */
	kref_get(&target->ref);
	target->debug_trap_enabled = true;

	if (target->debugger_process)
		atomic_inc(&target->debugger_process->debugged_process_count);

	if (copy_to_user(runtime_info, (void *)&target->runtime_info, copy_size)) {
		kfd_dbg_trap_deactivate(target, false, 0);
		r = -EFAULT;
	}

	*runtime_size = sizeof(target->runtime_info);

	return r;
}

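/* wave launch trap override: validate the requested override and mask against
 * every device first, then apply it and refresh the runlist or MES debug
 * state per device.
 */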
static int kfd_dbg_validate_trap_override_request(struct kfd_process *p,
						uint32_t trap_override,
						uint32_t trap_mask_request,
						uint32_t *trap_mask_supported)
{
	int i = 0;

	*trap_mask_supported = 0xffffffff;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];
		int err = pdd->dev->kfd2kgd->validate_trap_override_request(
				pdd->dev->adev, trap_override, trap_mask_supported);

		if (err)
			return err;
	}

	if (trap_mask_request & ~*trap_mask_supported)
		return -EACCES;

	return 0;
}

int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target,
					uint32_t trap_override,
					uint32_t trap_mask_bits,
					uint32_t trap_mask_request,
					uint32_t *trap_mask_prev,
					uint32_t *trap_mask_supported)
{
	int r = 0, i;

	r = kfd_dbg_validate_trap_override_request(target,
						trap_override,
						trap_mask_request,
						trap_mask_supported);
	if (r)
		return r;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
		pdd->spi_dbg_override = pdd->dev->kfd2kgd->set_wave_launch_trap_override(
				pdd->dev->adev,
				pdd->dev->vm_info.last_vmid_kfd,
				trap_override,
				trap_mask_bits,
				trap_mask_request,
				trap_mask_prev,
				pdd->spi_dbg_override);
		amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			r = debug_refresh_runlist(pdd->dev->dqm);
		else
			r = kfd_dbg_set_mes_debug_mode(pdd, true);

		if (r)
			break;
	}

	return r;
}

int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target,
					uint8_t wave_launch_mode)
{
	int r = 0, i;

	if (wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL &&
			wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT &&
			wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG)
		return -EINVAL;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
		pdd->spi_dbg_launch_mode = pdd->dev->kfd2kgd->set_wave_launch_mode(
				pdd->dev->adev,
				wave_launch_mode,
				pdd->dev->vm_info.last_vmid_kfd);
		amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			r = debug_refresh_runlist(pdd->dev->dqm);
		else
			r = kfd_dbg_set_mes_debug_mode(pdd, true);

		if (r)
			break;
	}

	return r;
}

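/* copy exception information (queue, device or process scoped) back to the
 * debugger and optionally clear the queried exception status bit.
 */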
int kfd_dbg_trap_query_exception_info(struct kfd_process *target,
		uint32_t source_id,
		uint32_t exception_code,
		bool clear_exception,
		void __user *info,
		uint32_t *info_size)
{
	bool found = false;
	int r = 0;
	uint32_t copy_size, actual_info_size = 0;
	uint64_t *exception_status_ptr = NULL;

	if (!target)
		return -ENODATA;

	if (!info || !info_size)
		return -EINVAL;

	mutex_lock(&target->event_mutex);

	if (KFD_DBG_EC_TYPE_IS_QUEUE(exception_code)) {
		/* Per queue exceptions */
		struct queue *queue = NULL;
		int i;

		for (i = 0; i < target->n_pdds; i++) {
			struct kfd_process_device *pdd = target->pdds[i];
			struct qcm_process_device *qpd = &pdd->qpd;

			list_for_each_entry(queue, &qpd->queues_list, list) {
				if (!found && queue->properties.queue_id == source_id) {
					found = true;
					break;
				}
			}
			if (found)
				break;
		}

		if (!found) {
			r = -EINVAL;
			goto out;
		}

		if (!(queue->properties.exception_status & KFD_EC_MASK(exception_code))) {
			r = -ENODATA;
			goto out;
		}
		exception_status_ptr = &queue->properties.exception_status;
	} else if (KFD_DBG_EC_TYPE_IS_DEVICE(exception_code)) {
		/* Per device exceptions */
		struct kfd_process_device *pdd = NULL;
		int i;

		for (i = 0; i < target->n_pdds; i++) {
			pdd = target->pdds[i];
			if (pdd->dev->id == source_id) {
				found = true;
				break;
			}
		}

		if (!found) {
			r = -EINVAL;
			goto out;
		}

		if (!(pdd->exception_status & KFD_EC_MASK(exception_code))) {
			r = -ENODATA;
			goto out;
		}

		if (exception_code == EC_DEVICE_MEMORY_VIOLATION) {
			copy_size = min((size_t)(*info_size), pdd->vm_fault_exc_data_size);

			if (copy_to_user(info, pdd->vm_fault_exc_data, copy_size)) {
				r = -EFAULT;
				goto out;
			}
			actual_info_size = pdd->vm_fault_exc_data_size;
			if (clear_exception) {
				kfree(pdd->vm_fault_exc_data);
				pdd->vm_fault_exc_data = NULL;
				pdd->vm_fault_exc_data_size = 0;
			}
		}
		exception_status_ptr = &pdd->exception_status;
	} else if (KFD_DBG_EC_TYPE_IS_PROCESS(exception_code)) {
		/* Per process exceptions */
		if (!(target->exception_status & KFD_EC_MASK(exception_code))) {
			r = -ENODATA;
			goto out;
		}

		if (exception_code == EC_PROCESS_RUNTIME) {
			copy_size = min((size_t)(*info_size), sizeof(target->runtime_info));

			if (copy_to_user(info, (void *)&target->runtime_info, copy_size)) {
				r = -EFAULT;
				goto out;
			}

			actual_info_size = sizeof(target->runtime_info);
		}

		exception_status_ptr = &target->exception_status;
	} else {
		pr_debug("Bad exception type [%i]\n", exception_code);
		r = -EINVAL;
		goto out;
	}

	*info_size = actual_info_size;
	if (clear_exception)
		*exception_status_ptr &= ~KFD_EC_MASK(exception_code);
out:
	mutex_unlock(&target->event_mutex);
	return r;
}

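/* fill a kfd_dbg_device_info_entry per device for the debugger, truncating to
 * the caller-supplied entry size and device count, and optionally clearing
 * device exception status bits.
 */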
int kfd_dbg_trap_device_snapshot(struct kfd_process *target,
		uint64_t exception_clear_mask,
		void __user *user_info,
		uint32_t *number_of_device_infos,
		uint32_t *entry_size)
{
	struct kfd_dbg_device_info_entry device_info;
	uint32_t tmp_entry_size = *entry_size, tmp_num_devices;
	int i, r = 0;

	if (!(target && user_info && number_of_device_infos && entry_size))
		return -EINVAL;

	tmp_num_devices = min_t(size_t, *number_of_device_infos, target->n_pdds);
	*number_of_device_infos = target->n_pdds;
	*entry_size = min_t(size_t, *entry_size, sizeof(device_info));

	if (!tmp_num_devices)
		return 0;

	memset(&device_info, 0, sizeof(device_info));

	mutex_lock(&target->event_mutex);

	/* Run over all pdd of the process */
	for (i = 0; i < tmp_num_devices; i++) {
		struct kfd_process_device *pdd = target->pdds[i];
		struct kfd_topology_device *topo_dev = kfd_topology_device_by_id(pdd->dev->id);

		device_info.gpu_id = pdd->dev->id;
		device_info.exception_status = pdd->exception_status;
		device_info.lds_base = pdd->lds_base;
		device_info.lds_limit = pdd->lds_limit;
		device_info.scratch_base = pdd->scratch_base;
		device_info.scratch_limit = pdd->scratch_limit;
		device_info.gpuvm_base = pdd->gpuvm_base;
		device_info.gpuvm_limit = pdd->gpuvm_limit;
		device_info.location_id = topo_dev->node_props.location_id;
		device_info.vendor_id = topo_dev->node_props.vendor_id;
		device_info.device_id = topo_dev->node_props.device_id;
		device_info.revision_id = pdd->dev->adev->pdev->revision;
		device_info.subsystem_vendor_id = pdd->dev->adev->pdev->subsystem_vendor;
		device_info.subsystem_device_id = pdd->dev->adev->pdev->subsystem_device;
		device_info.fw_version = pdd->dev->kfd->mec_fw_version;
		device_info.gfx_target_version =
			topo_dev->node_props.gfx_target_version;
		device_info.simd_count = topo_dev->node_props.simd_count;
		device_info.max_waves_per_simd =
			topo_dev->node_props.max_waves_per_simd;
		device_info.array_count = topo_dev->node_props.array_count;
		device_info.simd_arrays_per_engine =
			topo_dev->node_props.simd_arrays_per_engine;
		device_info.num_xcc = NUM_XCC(pdd->dev->xcc_mask);
		device_info.capability = topo_dev->node_props.capability;
		device_info.debug_prop = topo_dev->node_props.debug_prop;

		if (exception_clear_mask)
			pdd->exception_status &= ~exception_clear_mask;

		if (copy_to_user(user_info, &device_info, *entry_size)) {
			r = -EFAULT;
			break;
		}

		user_info += tmp_entry_size;
	}

	mutex_unlock(&target->event_mutex);

	return r;
}

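/* update the subscribed exception mask and poke the debugger's event file
 * descriptor if any already-raised exception matches the new mask.
 */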
void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target,
					uint64_t exception_set_mask)
{
	uint64_t found_mask = 0;
	struct process_queue_manager *pqm;
	struct process_queue_node *pqn;
	static const char write_data = '.';
	loff_t pos = 0;
	int i;

	mutex_lock(&target->event_mutex);

	found_mask |= target->exception_status;

	pqm = &target->pqm;
	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		found_mask |= pqn->q->properties.exception_status;
	}

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		found_mask |= pdd->exception_status;
	}

	if (exception_set_mask & found_mask)
		kernel_write(target->dbg_ev_file, &write_data, 1, &pos);

	target->exception_enable_mask = exception_set_mask;

	mutex_unlock(&target->event_mutex);
}