/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mmu_context.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"

#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"

enum hqd_dequeue_request_type {
        NO_ACTION = 0,
        DRAIN_PIPE,
        RESET_WAVES
};

/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
                struct tile_config *config)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        config->gb_addr_config = adev->gfx.config.gb_addr_config;
        config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
                                MC_ARB_RAMCFG, NOOFBANK);
        config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
                                MC_ARB_RAMCFG, NOOFRANKS);

        config->tile_config_ptr = adev->gfx.config.tile_mode_array;
        config->num_tile_configs =
                        ARRAY_SIZE(adev->gfx.config.tile_mode_array);
        config->macro_tile_config_ptr =
                        adev->gfx.config.macrotile_mode_array;
        config->num_macro_tile_configs =
                        ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

        return 0;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
        return (struct amdgpu_device *)kgd;
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
                        uint32_t queue, uint32_t vmid)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

        mutex_lock(&adev->srbm_mutex);
        WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        WREG32(mmSRBM_GFX_CNTL, 0);
        mutex_unlock(&adev->srbm_mutex);
}

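/* acquire_queue()/release_queue() bracket access to the per-queue CP_HQD_*
 * registers: SRBM_GFX_CNTL banks the register window onto the selected
 * MEC/pipe/queue while srbm_mutex is held.
 */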
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
                        uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
        unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
                                        uint32_t sh_mem_config,
                                        uint32_t sh_mem_ape1_base,
                                        uint32_t sh_mem_ape1_limit,
                                        uint32_t sh_mem_bases)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        lock_srbm(kgd, 0, 0, 0, vmid);

        WREG32(mmSH_MEM_CONFIG, sh_mem_config);
        WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
        WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
        WREG32(mmSH_MEM_BASES, sh_mem_bases);

        unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
                                        unsigned int vmid)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        /*
         * We have to assume that there is no outstanding mapping.
         * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
         * a mapping is in progress or because a mapping finished
         * and the SW cleared it.
         * So the protocol is to always wait & clear.
         */
        uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
                                        ATC_VMID0_PASID_MAPPING__VALID_MASK;

        WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

        while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
                cpu_relax();
        WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

        /* Mapping vmid to pasid also for IH block */
        WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

        return 0;
}

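/* Enable the CPC interrupt sources (timestamps and opcode errors) for the
 * compute pipe that will service KFD queues on this pipe_id.
 */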
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t mec;
        uint32_t pipe;

        mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(kgd, mec, pipe, 0, 0);

        WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
                        CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

        unlock_srbm(kgd);

        return 0;
}

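/* SDMA RLC queue registers are laid out at a fixed stride: one block per
 * engine (SDMA1_REGISTER_OFFSET) and one per queue within an engine
 * (KFD_VI_SDMA_QUEUE_OFFSET).
 */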
static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
{
        uint32_t retval;

        retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
                m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;

        pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
                m->sdma_engine_id, m->sdma_queue_id, retval);

        return retval;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
        return (struct vi_mqd *)mqd;
}

static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
        return (struct vi_sdma_mqd *)mqd;
}

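/* Load a compute queue: copy the MQD image into the banked HQD registers,
 * enable the doorbell, restore the user write pointer and mark the queue
 * active.
 */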
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr,
                        uint32_t wptr_shift, uint32_t wptr_mask,
                        struct mm_struct *mm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct vi_mqd *m;
        uint32_t *mqd_hqd;
        uint32_t reg, wptr_val, data;
        bool valid_wptr = false;

        m = get_mqd(mqd);

        acquire_queue(kgd, pipe_id, queue_id);

        /* HIQ is set during driver init period with vmid set to 0*/
        if (m->cp_hqd_vmid == 0) {
                uint32_t value, mec, pipe;

                mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
                pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

                pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
                        mec, pipe, queue_id);
                value = RREG32(mmRLC_CP_SCHEDULERS);
                value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
                        ((mec << 5) | (pipe << 3) | queue_id | 0x80));
                WREG32(mmRLC_CP_SCHEDULERS, value);
        }

        /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
        mqd_hqd = &m->cp_mqd_base_addr_lo;

        for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
                WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

        /* Tonga errata: EOP RPTR/WPTR should be left unmodified.
         * This is safe since EOP RPTR==WPTR for any inactive HQD
         * on ASICs that do not support context-save.
         * EOP writes/reads can start anywhere in the ring.
         */
        if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
                WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
                WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
                WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
        }

        for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
                WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

        /* Copy userspace write pointer value to register.
         * Activate doorbell logic to monitor subsequent changes.
         */
        data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
                             CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

        /* read_user_ptr may take the mm->mmap_sem.
         * release srbm_mutex to avoid circular dependency between
         * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
         */
        release_queue(kgd);
        valid_wptr = read_user_wptr(mm, wptr, wptr_val);
        acquire_queue(kgd, pipe_id, queue_id);
        if (valid_wptr)
                WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

        data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
        WREG32(mmCP_HQD_ACTIVE, data);

        release_queue(kgd);

        return 0;
}

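/* Dump the static thread-mgmt and HQD registers as (byte offset, value)
 * pairs into a kmalloc'ed array; the caller is expected to free it.
 */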
static int kgd_hqd_dump(struct kgd_dev *kgd,
                        uint32_t pipe_id, uint32_t queue_id,
                        uint32_t (**dump)[2], uint32_t *n_regs)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do {                             \
                if (WARN_ON_ONCE(i >= HQD_N_REGS))      \
                        break;                          \
                (*dump)[i][0] = (addr) << 2;            \
                (*dump)[i++][1] = RREG32(addr);         \
        } while (0)

        *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
        if (*dump == NULL)
                return -ENOMEM;

        acquire_queue(kgd, pipe_id, queue_id);

        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

        for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
                DUMP_REG(reg);

        release_queue(kgd);

        WARN_ON_ONCE(i != HQD_N_REGS);
        *n_regs = i;

        return 0;
}

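/* Load an SDMA RLC queue: disable the ring buffer, wait for the context to
 * go idle, then restore the doorbell, pointers and base addresses from the
 * MQD before re-enabling the ring buffer.
 */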
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
                             uint32_t __user *wptr, struct mm_struct *mm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct vi_sdma_mqd *m;
        unsigned long end_jiffies;
        uint32_t sdma_rlc_reg_offset;
        uint32_t data;

        m = get_sdma_mqd(mqd);
        sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
                m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

        end_jiffies = msecs_to_jiffies(2000) + jiffies;
        while (true) {
                data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("SDMA RLC not idle in %s\n", __func__);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
                             ENABLE, 1);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
                        m->sdmax_rlcx_rb_rptr);

        if (read_user_wptr(mm, wptr, data))
                WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
        else
                WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
                       m->sdmax_rlcx_rb_rptr);

        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
                        m->sdmax_rlcx_virtual_addr);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
                        m->sdmax_rlcx_rb_base_hi);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
                        m->sdmax_rlcx_rb_rptr_addr_lo);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
                        m->sdmax_rlcx_rb_rptr_addr_hi);

        data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
                             RB_ENABLE, 1);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

        return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
                             uint32_t engine_id, uint32_t queue_id,
                             uint32_t (**dump)[2], uint32_t *n_regs)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
                queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
        uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)

        *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
        if (*dump == NULL)
                return -ENOMEM;

        for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
             reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
             reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
             reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
             reg++)
                DUMP_REG(sdma_offset + reg);

        WARN_ON_ONCE(i != HQD_N_REGS);
        *n_regs = i;

        return 0;
}

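/* A queue slot counts as occupied if the HQD is active and its programmed
 * PQ base matches the given queue address (in 256-byte units).
 */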
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t act;
        bool retval = false;
        uint32_t low, high;

        acquire_queue(kgd, pipe_id, queue_id);
        act = RREG32(mmCP_HQD_ACTIVE);
        if (act) {
                low = lower_32_bits(queue_address >> 8);
                high = upper_32_bits(queue_address >> 8);

                if (low == RREG32(mmCP_HQD_PQ_BASE) &&
                                high == RREG32(mmCP_HQD_PQ_BASE_HI))
                        retval = true;
        }
        release_queue(kgd);
        return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct vi_sdma_mqd *m;
        uint32_t sdma_rlc_reg_offset;
        uint32_t sdma_rlc_rb_cntl;

        m = get_sdma_mqd(mqd);
        sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

        sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

        if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
                return true;

        return false;
}

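/* Preempt or reset a compute queue: issue a dequeue request of the chosen
 * type, then poll CP_HQD_ACTIVE until the queue leaves the hardware or the
 * timeout expires.
 */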
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
                                enum kfd_preempt_type reset_type,
                                unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t temp;
        enum hqd_dequeue_request_type type;
        unsigned long flags, end_jiffies;
        int retry;
        struct vi_mqd *m = get_mqd(mqd);

        if (adev->in_gpu_reset)
                return -EIO;

        acquire_queue(kgd, pipe_id, queue_id);

        if (m->cp_hqd_vmid == 0)
                WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

        switch (reset_type) {
        case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
                type = DRAIN_PIPE;
                break;
        case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
                type = RESET_WAVES;
                break;
        default:
                type = DRAIN_PIPE;
                break;
        }

        /* Workaround: If IQ timer is active and the wait time is close to or
         * equal to 0, dequeueing is not safe. Wait until either the wait time
         * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
         * cleared before continuing. Also, ensure wait times are set to at
         * least 0x3.
         */
        local_irq_save(flags);
        preempt_disable();
        retry = 5000; /* wait for 500 usecs at maximum */
        while (true) {
                temp = RREG32(mmCP_HQD_IQ_TIMER);
                if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
                        pr_debug("HW is processing IQ\n");
                        goto loop;
                }
                if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
                        if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
                                        == 3) /* SEM-rearm is safe */
                                break;
                        /* Wait time 3 is safe for CP, but our MMIO read/write
                         * time is close to 1 microsecond, so check for 10 to
                         * leave more buffer room
                         */
                        if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
                                        >= 10)
                                break;
                        pr_debug("IQ timer is active\n");
                } else
                        break;
loop:
                if (!retry) {
                        pr_err("CP HQD IQ timer status time out\n");
                        break;
                }
                ndelay(100);
                --retry;
        }
        retry = 1000;
        while (true) {
                temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
                if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
                        break;
                pr_debug("Dequeue request is pending\n");

                if (!retry) {
                        pr_err("CP HQD dequeue request time out\n");
                        break;
                }
                ndelay(100);
                --retry;
        }
        local_irq_restore(flags);
        preempt_enable();

        WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

        end_jiffies = (utimeout * HZ / 1000) + jiffies;
        while (true) {
                temp = RREG32(mmCP_HQD_ACTIVE);
                if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("cp queue preemption time out.\n");
                        release_queue(kgd);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        release_queue(kgd);
        return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                                unsigned int utimeout)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct vi_sdma_mqd *m;
        uint32_t sdma_rlc_reg_offset;
        uint32_t temp;
        unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

        m = get_sdma_mqd(mqd);
        sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

        temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
        temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

        while (true) {
                temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("SDMA RLC not idle in %s\n", __func__);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
                RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
                SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

        m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);

        return 0;
}

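/* Read back the ATC VMID<->PASID mapping programmed by
 * kgd_set_pasid_vmid_mapping(); returns true if the mapping is valid.
 */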
static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
                                        uint8_t vmid, uint16_t *p_pasid)
{
        uint32_t value;
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
        *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

        return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
        return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        uint32_t cntl_val,
                                        uint32_t addr_hi,
                                        uint32_t addr_lo)
{
        return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t data = 0;

        mutex_lock(&adev->grbm_idx_mutex);

        WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
        WREG32(mmSQ_CMD, sq_cmd);

        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                INSTANCE_BROADCAST_WRITES, 1);
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                SH_BROADCAST_WRITES, 1);
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                SE_BROADCAST_WRITES, 1);

        WREG32(mmGRBM_GFX_INDEX, data);
        mutex_unlock(&adev->grbm_idx_mutex);

        return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        unsigned int reg_offset)
{
        return 0;
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
                                        uint64_t va, uint32_t vmid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        lock_srbm(kgd, 0, 0, 0, vmid);
        WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
        unlock_srbm(kgd);
}

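/* The KFD-owned VMIDs (8..15 here) use the VM_CONTEXT8..15 page table base
 * registers; on GFXv8 only the lower 32 bits of the base are programmed.
 */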
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
                uint64_t page_table_base)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
                pr_err("trying to set page table base for wrong VMID\n");
                return;
        }
        WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
                        lower_32_bits(page_table_base));
}

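/* Flush the GPU TLB for the KFD VMID currently mapped to the given PASID;
 * VM_INVALIDATE_RESPONSE is read back after posting the request.
 */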
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
        int vmid;
        unsigned int tmp;

        if (adev->in_gpu_reset)
                return -EIO;

        for (vmid = 0; vmid < 16; vmid++) {
                if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
                        continue;

                tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
                if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
                        (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
                        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
                        RREG32(mmVM_INVALIDATE_RESPONSE);
                        break;
                }
        }

        return 0;
}

static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
                pr_err("non kfd vmid %d\n", vmid);
                return -EINVAL;
        }

        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
        RREG32(mmVM_INVALIDATE_RESPONSE);
        return 0;
}

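/* Interface exported to the KFD driver: amdgpu_amdkfd dispatches through
 * this table on GFXv8 (VI) parts.
 */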
const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
        .init_interrupts = kgd_init_interrupts,
        .hqd_load = kgd_hqd_load,
        .hqd_sdma_load = kgd_hqd_sdma_load,
        .hqd_dump = kgd_hqd_dump,
        .hqd_sdma_dump = kgd_hqd_sdma_dump,
        .hqd_is_occupied = kgd_hqd_is_occupied,
        .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
        .address_watch_disable = kgd_address_watch_disable,
        .address_watch_execute = kgd_address_watch_execute,
        .wave_control_execute = kgd_wave_control_execute,
        .address_watch_get_offset = kgd_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_info =
                        get_atc_vmid_pasid_mapping_info,
        .set_scratch_backing_va = set_scratch_backing_va,
        .get_tile_config = get_tile_config,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
        .invalidate_tlbs = invalidate_tlbs,
        .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
};