/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

enum {
	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
	MAX_WATCH_ADDRESSES = 4
};
static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}
static void unlock_srbm(struct amdgpu_device *adev)
{
	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}
static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
			uint32_t queue_id)
{
	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(adev, mec, pipe, queue_id, 0);
}
static void release_queue(struct amdgpu_device *adev)
{
	unlock_srbm(adev);
}
static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	lock_srbm(adev, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(adev);
}
static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
					unsigned int vmid)
{
	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished and the
	 * SW cleared it. So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
					ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}
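/*
 * Worked example (illustrative only, derived from the function above):
 * mapping PASID 0x42 to VMID 8 writes
 * (0x42 | ATC_VMID0_PASID_MAPPING__VALID_MASK) to
 * mmATC_VMID0_PASID_MAPPING + 8, spins until bit 8 of
 * ATC_VMID_PASID_MAPPING_UPDATE_STATUS is set by the hardware, then
 * acknowledges the update by writing (1U << 8) back to that register.
 */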
static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
{
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(adev, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(adev);

	return 0;
}
static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
			m->sdma_engine_id, m->sdma_queue_id, retval);

	return retval;
}
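/*
 * Example (illustrative only): for sdma_engine_id == 1 and sdma_queue_id == 2
 * the result is SDMA1_REGISTER_OFFSET + 2 * KFD_CIK_SDMA_QUEUE_OFFSET, so
 * adding mmSDMA0_RLC0_RB_CNTL to it addresses engine 1's RLC2 ring-buffer
 * control register.
 */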
static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}
static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}
static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t __user *wptr, uint32_t wptr_shift,
			uint32_t wptr_mask, struct mm_struct *mm)
{
	struct cik_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(adev, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_lock.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mmap_lock->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(adev);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(adev, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(adev);

	return 0;
}
static int kgd_hqd_dump(struct amdgpu_device *adev,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(adev, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		DUMP_REG(reg);

	release_queue(adev);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
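/*
 * Illustrative consumer of kgd_hqd_dump() (a sketch, not part of the upstream
 * file): the dump buffer holds pairs of {register byte offset, value} and
 * must be freed by the caller. The function name is hypothetical.
 */
static void __maybe_unused example_print_hqd_regs(struct amdgpu_device *adev,
						  uint32_t pipe_id,
						  uint32_t queue_id)
{
	uint32_t (*dump)[2];
	uint32_t n_regs, i;

	if (kgd_hqd_dump(adev, pipe_id, queue_id, &dump, &n_regs))
		return;

	for (i = 0; i < n_regs; i++)
		/* dump[i][0] is the register byte offset, dump[i][1] its value */
		pr_info("0x%08x: 0x%08x\n", dump[i][0], dump[i][1]);

	kfree(dump);
}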
static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
			uint32_t __user *wptr, struct mm_struct *mm)
{
	struct cik_sdma_rlc_registers *m;
	unsigned long end_jiffies;
	uint32_t sdma_rlc_reg_offset;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdma_rlc_rb_rptr);

	if (read_user_wptr(mm, wptr, data))
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
	else
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdma_rlc_rb_rptr);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
				m->sdma_rlc_virtual_addr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdma_rlc_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdma_rlc_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdma_rlc_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}
static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
				uint64_t queue_address, uint32_t pipe_id,
				uint32_t queue_id)
{
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(adev, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(adev);
	return retval;
}
static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;

	if (amdgpu_in_reset(adev))
		return -EIO;

	acquire_queue(adev, pipe_id, queue_id);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out\n");
			release_queue(adev);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(adev);
	return 0;
}
static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
				unsigned int utimeout)
{
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);

	return 0;
}
static int kgd_wave_control_execute(struct amdgpu_device *adev,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	uint32_t data;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	/* Restore the GRBM_GFX_INDEX register */

	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

	WREG32(mmGRBM_GFX_INDEX, data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static void set_scratch_backing_va(struct amdgpu_device *adev,
					uint64_t va, uint32_t vmid)
{
	lock_srbm(adev, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(adev);
}
static void set_vm_context_page_table_base(struct amdgpu_device *adev,
			uint32_t vmid, uint64_t page_table_base)
{
	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID\n");
		return;
	}
	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
		lower_32_bits(page_table_base));
}
/**
 * read_vmid_from_vmfault_reg - read vmid from register
 *
 * @adev: amdgpu device pointer
 *
 * Read the VMID field from the VM protection fault status register (CIK).
 */
static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
{
	uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);

	return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}
const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.wave_control_execute = kgd_wave_control_execute,
	.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
	.set_scratch_backing_va = set_scratch_backing_va,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};
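/*
 * Minimal usage sketch (illustrative only, not part of the upstream file):
 * KFD reaches these hooks through a struct kfd2kgd_calls pointer rather than
 * calling the static functions directly. The variable name "f" and the
 * surrounding context (queue_addr, mqd, pipe_id, queue_id, timeout_ms) are
 * hypothetical.
 *
 *	const struct kfd2kgd_calls *f = &gfx_v7_kfd2kgd;
 *
 *	if (f->hqd_is_occupied(adev, queue_addr, pipe_id, queue_id))
 *		f->hqd_destroy(adev, mqd, KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
 *			       timeout_ms, pipe_id, queue_id);
 */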