/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"

#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"

#include "soc15_common.h"
#include "navi10_ih.h"

static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * navi10_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (NAVI10).
 */
static void navi10_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

/**
 * navi10_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer (NAVI10).
 */
static void navi10_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
	int rb_bufsz = order_base_2(ih->ring_size / 4);
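
	/* MC_SPACE: 1 = ring addressed by bus/physical address (use_bus_addr),
	 * 4 = ring addressed by GPU virtual address
	 */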
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   MC_SPACE, ih->use_bus_addr ? 1 : 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
	 * value is written to memory
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	return ih_rb_cntl;
}

/**
 * navi10_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts, program the interrupt controller ring buffer
 * registers and doorbell, then re-enable the IH ring buffer (NAVI10).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken;
	u32 tmp;

	/* disable irqs */
	navi10_ih_disable_interrupts(adev);

	adev->nbio_funcs->ih_control(adev);

	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);

	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
	ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
				   !!adev->irq.msi_enabled);
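
	/* With direct (non-PSP) firmware loading the PSP bootloader has not
	 * programmed IH_CHICKEN, so enable GPA addressing here when the ring
	 * lives at a bus address (see the comment in navi10_ih_sw_init).
	 */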
	if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
		if (ih->use_bus_addr) {
			ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
			ih_chicken = REG_SET_FIELD(ih_chicken,
					IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
			WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
		}
	}

	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);

	/* set the writeback address whether it's enabled or not */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
		     lower_32_bits(ih->wptr_addr));
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
		     upper_32_bits(ih->wptr_addr) & 0xFFFF);

	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);

	ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
	if (ih->use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR, OFFSET,
						 ih->doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR, ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR, ENABLE, 0);
	}
	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);

	adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell,
					    ih->doorbell_index);

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
			    CLIENT18_IS_STORM_CLIENT, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
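
	/* bus mastering must be enabled for the IH to write the wptr back to
	 * system memory and to deliver MSIs
	 */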
	pci_set_master(adev->pdev);

	/* enable interrupts */
	navi10_ih_enable_interrupts(adev);

	return 0;
}

/**
 * navi10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (NAVI10).
 */
static void navi10_ih_irq_disable(struct amdgpu_device *adev)
{
	navi10_ih_disable_interrupts(adev);

	/* Wait and acknowledge irq */
}

/**
 * navi10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (NAVI10). Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	u32 wptr, reg, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	/* double check that the overflow wasn't already cleared */
	reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
	wptr = RREG32_NO_KIQ(reg);
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;
	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

	/* When a ring buffer overflow happens, start parsing interrupts
	 * from the last not overwritten vector (wptr + 32). Hopefully
	 * this should allow us to catch up.
	 */
	tmp = (wptr + 32) & ih->ptr_mask;
	dev_warn(adev->dev, "IH ring buffer overflow "
		 "(0x%08X, 0x%08X, 0x%08X)\n",
		 wptr, ih->rptr, tmp);
	ih->rptr = tmp;

	reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
	tmp = RREG32_NO_KIQ(reg);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32_NO_KIQ(reg, tmp);
out:
	return (wptr & ih->ptr_mask);
}

/**
 * navi10_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 *
 * Decodes the interrupt vector at the current rptr
 * position and also advances the position.
 */
static void navi10_ih_decode_iv(struct amdgpu_device *adev,
				struct amdgpu_ih_ring *ih,
				struct amdgpu_iv_entry *entry)
{
	/* wptr/rptr are in bytes! */
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[8];

	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
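
	/* each IV ring entry is 8 dwords (32 bytes); unpack its fields below */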
	entry->client_id = dw[0] & 0xff;
	entry->src_id = (dw[0] >> 8) & 0xff;
	entry->ring_id = (dw[0] >> 16) & 0xff;
	entry->vmid = (dw[0] >> 24) & 0xf;
	entry->vmid_src = (dw[0] >> 31);
	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
	entry->timestamp_src = dw[2] >> 31;
	entry->pasid = dw[3] & 0xffff;
	entry->pasid_src = dw[3] >> 31;
	entry->src_data[0] = dw[4];
	entry->src_data[1] = dw[5];
	entry->src_data[2] = dw[6];
	entry->src_data[3] = dw[7];

	/* wptr/rptr are in bytes! */
	ih->rptr += 32;
}

/**
 * navi10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 *
 * Set the IH ring buffer rptr.
 */
static void navi10_ih_set_rptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
		WDOORBELL32(ih->doorbell_index, ih->rptr);
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
	}
}

static int navi10_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	navi10_ih_set_interrupt_funcs(adev);
	return 0;
}

static int navi10_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool use_bus_addr;

	/* use the gpu virtual address for the ih ring until ih_chicken is
	 * programmed (by the psp bootloader) to allow a bus address to be
	 * used for the ih ring
	 */
	use_bus_addr =
		(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
	if (r)
		return r;
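
	/* convert the doorbell index from 64-bit doorbell slots to the
	 * dword-granular offset the IH expects
	 */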
	adev->irq.ih.use_doorbell = true;
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

	r = amdgpu_irq_init(adev);

	return r;
}

static int navi10_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);

	return 0;
}

static int navi10_ih_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = navi10_ih_irq_init(adev);
	if (r)
		return r;

	return 0;
}

static int navi10_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	navi10_ih_irq_disable(adev);

	return 0;
}

static int navi10_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return navi10_ih_hw_fini(adev);
}

static int navi10_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return navi10_ih_hw_init(adev);
}

static bool navi10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}

static int navi10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}

static int navi10_ih_soft_reset(void *handle)
{
	/* todo */
	return 0;
}

static void navi10_ih_update_clockgating_state(struct amdgpu_device *adev,
					       bool enable)
{
	uint32_t data, def, field_val;

	if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
		def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
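		/* clear the clock soft overrides to let the IH clocks gate
		 * when CG is enabled; set them to keep the clocks running
		 * otherwise
		 */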
		field_val = enable ? 0 : 1;
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DYN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     REG_CLK_SOFT_OVERRIDE, field_val);
		if (def != data)
			WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
	}
}

static int navi10_ih_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	navi10_ih_update_clockgating_state(adev,
				state == AMD_CG_STATE_GATE ? true : false);
	return 0;
}

static int navi10_ih_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void navi10_ih_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
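
	/* a zero IH_CLK_CTRL means no clock soft overrides are set, i.e. IH
	 * clock gating is in effect
	 */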
	if (!RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL))
		*flags |= AMD_CG_SUPPORT_IH_CG;
}

static const struct amd_ip_funcs navi10_ih_ip_funcs = {
	.name = "navi10_ih",
	.early_init = navi10_ih_early_init,
	.late_init = NULL,
	.sw_init = navi10_ih_sw_init,
	.sw_fini = navi10_ih_sw_fini,
	.hw_init = navi10_ih_hw_init,
	.hw_fini = navi10_ih_hw_fini,
	.suspend = navi10_ih_suspend,
	.resume = navi10_ih_resume,
	.is_idle = navi10_ih_is_idle,
	.wait_for_idle = navi10_ih_wait_for_idle,
	.soft_reset = navi10_ih_soft_reset,
	.set_clockgating_state = navi10_ih_set_clockgating_state,
	.set_powergating_state = navi10_ih_set_powergating_state,
	.get_clockgating_state = navi10_ih_get_clockgating_state,
};

static const struct amdgpu_ih_funcs navi10_ih_funcs = {
	.get_wptr = navi10_ih_get_wptr,
	.decode_iv = navi10_ih_decode_iv,
	.set_rptr = navi10_ih_set_rptr
};

static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	if (adev->irq.ih_funcs == NULL)
		adev->irq.ih_funcs = &navi10_ih_funcs;
}

const struct amdgpu_ip_block_version navi10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &navi10_ih_ip_funcs,
};