linux.git: drivers/gpu/drm/amd/amdgpu/vega20_ih.c
1 /*
2  * Copyright 2020 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/pci.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_ih.h"
28 #include "soc15.h"
29
30 #include "oss/osssys_4_2_0_offset.h"
31 #include "oss/osssys_4_2_0_sh_mask.h"
32
33 #include "soc15_common.h"
34 #include "vega20_ih.h"
35
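/* Upper bound on doorbell re-writes in vega20_ih_irq_rearm() when a
 * doorbell write appears to have been dropped (e.g. under SR-IOV).
 */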
36 #define MAX_REARM_RETRY 10
37
38 static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev);
39
40 /**
41  * vega20_ih_init_register_offset - Initialize register offset for ih rings
42  *
43  * @adev: amdgpu_device pointer
44  *
45  * Initialize the register offsets for the ih rings (VEGA20).
46  */
47 static void vega20_ih_init_register_offset(struct amdgpu_device *adev)
48 {
49         struct amdgpu_ih_regs *ih_regs;
50
51         if (adev->irq.ih.ring_size) {
52                 ih_regs = &adev->irq.ih.ih_regs;
53                 ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
54                 ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
55                 ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
56                 ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
57                 ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
58                 ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
59                 ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
60                 ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
61                 ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
62         }
63
64         if (adev->irq.ih1.ring_size) {
65                 ih_regs = &adev->irq.ih1.ih_regs;
66                 ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
67                 ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
68                 ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
69                 ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
70                 ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
71                 ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
72                 ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
73         }
74
75         if (adev->irq.ih2.ring_size) {
76                 ih_regs = &adev->irq.ih2.ih_regs;
77                 ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
78                 ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
79                 ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
80                 ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
81                 ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
82                 ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
83                 ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
84         }
85 }
86
87 /**
88  * vega20_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
89  *
90  * @adev: amdgpu_device pointer
91  * @ih: amdgpu_ih_ring pointer
92  * @enable: true - enable the interrupts, false - disable the interrupts
93  *
94  * Toggle the interrupt ring buffer (VEGA20)
95  */
96 static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
97                                             struct amdgpu_ih_ring *ih,
98                                             bool enable)
99 {
100         struct amdgpu_ih_regs *ih_regs;
101         uint32_t tmp;
102
103         ih_regs = &ih->ih_regs;
104
105         tmp = RREG32(ih_regs->ih_rb_cntl);
106         tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
107         /* enable_intr field is only valid in ring0 */
108         if (ih == &adev->irq.ih)
109                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
110         if (amdgpu_sriov_vf(adev)) {
111                 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
112                         dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
113                         return -ETIMEDOUT;
114                 }
115         } else {
116                 WREG32(ih_regs->ih_rb_cntl, tmp);
117         }
118
119         if (enable) {
120                 ih->enabled = true;
121         } else {
122                 /* set rptr, wptr to 0 */
123                 WREG32(ih_regs->ih_rb_rptr, 0);
124                 WREG32(ih_regs->ih_rb_wptr, 0);
125                 ih->enabled = false;
126                 ih->rptr = 0;
127         }
128
129         return 0;
130 }
131
132 /**
133  * vega20_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
134  *
135  * @adev: amdgpu_device pointer
136  * @enable: enable or disable interrupt ring buffers
137  *
138  * Toggle all the available interrupt ring buffers (VEGA20).
139  */
140 static int vega20_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
141 {
142         struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
143         int i;
144         int r;
145
146         for (i = 0; i < ARRAY_SIZE(ih); i++) {
147                 if (ih[i]->ring_size) {
148                         r = vega20_ih_toggle_ring_interrupts(adev, ih[i], enable);
149                         if (r)
150                                 return r;
151                 }
152         }
153
154         return 0;
155 }
156
157 static uint32_t vega20_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
158 {
159         int rb_bufsz = order_base_2(ih->ring_size / 4);
160
161         ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
162                                    MC_SPACE, ih->use_bus_addr ? 1 : 4);
163         ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
164                                    WPTR_OVERFLOW_CLEAR, 1);
165         ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
166                                    WPTR_OVERFLOW_ENABLE, 1);
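        /* RB_SIZE takes the log2 of the ring size in dwords; ring_size is
         * in bytes, hence the divide by four in rb_bufsz above.
         */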
167         ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
168         /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
169          * value is written to memory
170          */
171         ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
172                                    WPTR_WRITEBACK_ENABLE, 1);
173         ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
174         ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
175         ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
176
177         return ih_rb_cntl;
178 }
179
180 static uint32_t vega20_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
181 {
182         u32 ih_doorbell_rtpr = 0;
183
184         if (ih->use_doorbell) {
185                 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
186                                                  IH_DOORBELL_RPTR, OFFSET,
187                                                  ih->doorbell_index);
188                 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
189                                                  IH_DOORBELL_RPTR,
190                                                  ENABLE, 1);
191         } else {
192                 ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
193                                                  IH_DOORBELL_RPTR,
194                                                  ENABLE, 0);
195         }
196         return ih_doorbell_rtpr;
197 }
198
199 /**
200  * vega20_ih_enable_ring - enable an ih ring buffer
201  *
202  * @adev: amdgpu_device pointer
203  * @ih: amdgpu_ih_ring pointer
204  *
205  * Enable an ih ring buffer (VEGA20)
206  */
207 static int vega20_ih_enable_ring(struct amdgpu_device *adev,
208                                  struct amdgpu_ih_ring *ih)
209 {
210         struct amdgpu_ih_regs *ih_regs;
211         uint32_t tmp;
212
213         ih_regs = &ih->ih_regs;
214
215         /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
216         WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
217         WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
218
219         tmp = RREG32(ih_regs->ih_rb_cntl);
220         tmp = vega20_ih_rb_cntl(ih, tmp);
221         if (ih == &adev->irq.ih)
222                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
223         if (ih == &adev->irq.ih1) {
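                /* Ring 1 is set to drain when full rather than wrap, so
                 * overflow reporting is disabled for it.
                 */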
224                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
225                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
226         }
227         if (amdgpu_sriov_vf(adev)) {
228                 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
229                         dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
230                         return -ETIMEDOUT;
231                 }
232         } else {
233                 WREG32(ih_regs->ih_rb_cntl, tmp);
234         }
235
236         if (ih == &adev->irq.ih) {
237                 /* set the ih ring 0 writeback address whether it's enabled or not */
238                 WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
239                 WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
240         }
241
242         /* set rptr, wptr to 0 */
243         WREG32(ih_regs->ih_rb_wptr, 0);
244         WREG32(ih_regs->ih_rb_rptr, 0);
245
246         WREG32(ih_regs->ih_doorbell_rptr, vega20_ih_doorbell_rptr(ih));
247
248         return 0;
249 }
250
251 /**
252  * vega20_ih_reroute_ih - reroute VMC/UTCL2 ih to an ih ring
253  *
254  * @adev: amdgpu_device pointer
255  *
256  * Reroute VMC and UTCL2 interrupts on the primary ih ring to
257  * ih ring 1 so they won't be lost when bursts of page fault
258  * interrupts overwhelm the interrupt handler (VEGA20).
259  */
260 static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
261 {
262         uint32_t tmp;
263
264         /* vega20 ih rerouting goes through the psp, so
265          * this function is only needed for arcturus
266          */
267         if (adev->asic_type == CHIP_ARCTURUS) {
268                 /* Reroute to IH ring 1 for VMC */
269                 WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
270                 tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
271                 tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
272                 tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
273                 WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
274
275                 /* Reroute to IH ring 1 for UTCL2 */
276                 WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
277                 tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
278                 tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
279                 WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
280         }
281 }
282
283 /**
284  * vega20_ih_irq_init - init and enable the interrupt ring
285  *
286  * @adev: amdgpu_device pointer
287  *
288  * Disable interrupts, program and enable each available
289  * IH ring buffer, and set up interrupt storm/flood
290  * control (VEGA20).
291  * Called at device load and resume.
292  * Returns 0 for success, errors for failure.
293  */
294 static int vega20_ih_irq_init(struct amdgpu_device *adev)
295 {
296         struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
297         u32 ih_chicken;
298         int ret;
299         int i;
300         u32 tmp;
301
302         /* disable irqs */
303         ret = vega20_ih_toggle_interrupts(adev, false);
304         if (ret)
305                 return ret;
306
307         adev->nbio.funcs->ih_control(adev);
308
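        /* With direct firmware loading on Arcturus, tell the IH to treat a
         * system-memory ring base as a guest physical address.
         */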
309         if (adev->asic_type == CHIP_ARCTURUS &&
310             adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
311                 ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
312                 if (adev->irq.ih.use_bus_addr) {
313                         ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
314                                                    MC_SPACE_GPA_ENABLE, 1);
315                 }
316                 WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
317         }
318
319         for (i = 0; i < ARRAY_SIZE(ih); i++) {
320                 if (ih[i]->ring_size) {
321                         if (i == 1)
322                                 vega20_ih_reroute_ih(adev);
323                         ret = vega20_ih_enable_ring(adev, ih[i]);
324                         if (ret)
325                                 return ret;
326                 }
327         }
328
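        /* Treat IH client 18 (0x12, VMC) as a storm client and enable
         * interrupt flood control so page-fault interrupt storms can be
         * throttled.
         */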
329         tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
330         tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
331                             CLIENT18_IS_STORM_CLIENT, 1);
332         WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
333
334         tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
335         tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
336         WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
337
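        /* Make sure bus mastering is on so the IH can DMA ring entries and
         * deliver MSIs to the host.
         */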
338         pci_set_master(adev->pdev);
339
340         /* enable interrupts */
341         ret = vega20_ih_toggle_interrupts(adev, true);
342         if (ret)
343                 return ret;
344
345         if (adev->irq.ih_soft.ring_size)
346                 adev->irq.ih_soft.enabled = true;
347
348         return 0;
349 }
350
351 /**
352  * vega20_ih_irq_disable - disable interrupts
353  *
354  * @adev: amdgpu_device pointer
355  *
356  * Disable interrupts on the hw (VEGA20).
357  */
358 static void vega20_ih_irq_disable(struct amdgpu_device *adev)
359 {
360         vega20_ih_toggle_interrupts(adev, false);
361
362         /* Wait and acknowledge irq */
363         mdelay(1);
364 }
365
366 /**
367  * vega20_ih_get_wptr - get the IH ring buffer wptr
368  *
369  * @adev: amdgpu_device pointer
370  * @ih: amdgpu_ih_ring pointer
 *
371  * Get the IH ring buffer wptr from either the register
372  * or the writeback memory buffer (VEGA20).  Also check for
373  * ring buffer overflow and deal with it.
374  * Returns the value of the wptr.
375  */
376 static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
377                               struct amdgpu_ih_ring *ih)
378 {
379         u32 wptr, tmp;
380         struct amdgpu_ih_regs *ih_regs;
381
382         wptr = le32_to_cpu(*ih->wptr_cpu);
383         ih_regs = &ih->ih_regs;
384
385         if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
386                 goto out;
387
388         /* Double check that the overflow wasn't already cleared. */
389         wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
390         if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
391                 goto out;
392
393         wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
394
395         /* When a ring buffer overflow happens, start parsing interrupts
396          * from the last vector that was not overwritten (wptr + 32).
397          * Hopefully this should allow us to catch up.
398          */
399         tmp = (wptr + 32) & ih->ptr_mask;
400         dev_warn(adev->dev, "IH ring buffer overflow "
401                  "(0x%08X, 0x%08X, 0x%08X)\n",
402                  wptr, ih->rptr, tmp);
403         ih->rptr = tmp;
404
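        /* Ack the overflow in hardware: writing WPTR_OVERFLOW_CLEAR resets
         * the RB_OVERFLOW bit in IH_RB_WPTR.
         */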
405         tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
406         tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
407         WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
408
409 out:
410         return (wptr & ih->ptr_mask);
411 }
412
413 /**
414  * vega20_ih_irq_rearm - rearm IRQ if lost
415  *
416  * @adev: amdgpu_device pointer
417  * @ih: amdgpu_ih_ring pointer
 *
 * Re-write the rptr doorbell if the previous write was dropped.
418  */
419 static void vega20_ih_irq_rearm(struct amdgpu_device *adev,
420                                struct amdgpu_ih_ring *ih)
421 {
422         uint32_t v = 0;
423         uint32_t i = 0;
424         struct amdgpu_ih_regs *ih_regs;
425
426         ih_regs = &ih->ih_regs;
427
428         /* Rearm the IRQ / re-write the doorbell if the doorbell write was lost */
429         for (i = 0; i < MAX_REARM_RETRY; i++) {
430                 v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
431                 if ((v < ih->ring_size) && (v != ih->rptr))
432                         WDOORBELL32(ih->doorbell_index, ih->rptr);
433                 else
434                         break;
435         }
436 }
437
438 /**
439  * vega20_ih_set_rptr - set the IH ring buffer rptr
440  *
441  * @adev: amdgpu_device pointer
442  * @ih: amdgpu_ih_ring pointer
 *
443  * Set the IH ring buffer rptr.
444  */
445 static void vega20_ih_set_rptr(struct amdgpu_device *adev,
446                                struct amdgpu_ih_ring *ih)
447 {
448         struct amdgpu_ih_regs *ih_regs;
449
450         if (ih->use_doorbell) {
451                 /* XXX check if swapping is necessary on BE */
452                 *ih->rptr_cpu = ih->rptr;
453                 WDOORBELL32(ih->doorbell_index, ih->rptr);
454
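                /* Under SR-IOV a doorbell write can be dropped while the VF
                 * is switched out, so verify it landed and rearm if needed.
                 */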
455                 if (amdgpu_sriov_vf(adev))
456                         vega20_ih_irq_rearm(adev, ih);
457         } else {
458                 ih_regs = &ih->ih_regs;
459                 WREG32(ih_regs->ih_rb_rptr, ih->rptr);
460         }
461 }
462
463 /**
464  * vega20_ih_self_irq - dispatch work for ring 1 and 2
465  *
466  * @adev: amdgpu_device pointer
467  * @source: irq source
468  * @entry: IV with WPTR update
469  *
470  * Update the WPTR from the IV and schedule work to handle the entries.
471  */
472 static int vega20_ih_self_irq(struct amdgpu_device *adev,
473                               struct amdgpu_irq_src *source,
474                               struct amdgpu_iv_entry *entry)
475 {
476         uint32_t wptr = cpu_to_le32(entry->src_data[0]);
477
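        /* The self-interrupt IV carries the new wptr for ring 1/2; stash it
         * and let the per-ring work handlers process the entries.
         */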
478         switch (entry->ring_id) {
479         case 1:
480                 *adev->irq.ih1.wptr_cpu = wptr;
481                 schedule_work(&adev->irq.ih1_work);
482                 break;
483         case 2:
484                 *adev->irq.ih2.wptr_cpu = wptr;
485                 schedule_work(&adev->irq.ih2_work);
486                 break;
487         default: break;
488         }
489         return 0;
490 }
491
492 static const struct amdgpu_irq_src_funcs vega20_ih_self_irq_funcs = {
493         .process = vega20_ih_self_irq,
494 };
495
496 static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev)
497 {
498         adev->irq.self_irq.num_types = 0;
499         adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs;
500 }
501
502 static int vega20_ih_early_init(void *handle)
503 {
504         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
505
506         vega20_ih_set_interrupt_funcs(adev);
507         vega20_ih_set_self_irq_funcs(adev);
508         return 0;
509 }
510
511 static int vega20_ih_sw_init(void *handle)
512 {
513         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
514         int r;
515
516         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
517                               &adev->irq.self_irq);
518         if (r)
519                 return r;
520
521         r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
522         if (r)
523                 return r;
524
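        /* Doorbell indices are allocated in 64-bit units, while WDOORBELL32
         * addresses 32-bit slots, hence the shift left by one below.
         */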
525         adev->irq.ih.use_doorbell = true;
526         adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
527
528         r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
529         if (r)
530                 return r;
531
532         adev->irq.ih1.use_doorbell = true;
533         adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
534
535         r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
536         if (r)
537                 return r;
538
539         adev->irq.ih2.use_doorbell = true;
540         adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
541
542         /* initialize ih control register offsets */
543         vega20_ih_init_register_offset(adev);
544
545         r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
546         if (r)
547                 return r;
548
549         r = amdgpu_irq_init(adev);
550
551         return r;
552 }
553
554 static int vega20_ih_sw_fini(void *handle)
555 {
556         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
557
558         amdgpu_irq_fini(adev);
559         amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
560         amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
561         amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
562         amdgpu_ih_ring_fini(adev, &adev->irq.ih);
563
564         return 0;
565 }
566
567 static int vega20_ih_hw_init(void *handle)
568 {
569         int r;
570         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
571
572         r = vega20_ih_irq_init(adev);
573         if (r)
574                 return r;
575
576         return 0;
577 }
578
579 static int vega20_ih_hw_fini(void *handle)
580 {
581         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
582
583         vega20_ih_irq_disable(adev);
584
585         return 0;
586 }
587
588 static int vega20_ih_suspend(void *handle)
589 {
590         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
591
592         return vega20_ih_hw_fini(adev);
593 }
594
595 static int vega20_ih_resume(void *handle)
596 {
597         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
598
599         return vega20_ih_hw_init(adev);
600 }
601
602 static bool vega20_ih_is_idle(void *handle)
603 {
604         /* todo */
605         return true;
606 }
607
608 static int vega20_ih_wait_for_idle(void *handle)
609 {
610         /* todo */
611         return -ETIMEDOUT;
612 }
613
614 static int vega20_ih_soft_reset(void *handle)
615 {
616         /* todo */
617
618         return 0;
619 }
620
621 static void vega20_ih_update_clockgating_state(struct amdgpu_device *adev,
622                                                bool enable)
623 {
624         uint32_t data, def, field_val;
625
626         if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
627                 def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
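                /* The soft-override bits force the corresponding clocks on;
                 * clearing them (0) allows gating when CG is enabled.
                 */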
628                 field_val = enable ? 0 : 1;
629                 data = REG_SET_FIELD(data, IH_CLK_CTRL,
630                                      IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
631                 data = REG_SET_FIELD(data, IH_CLK_CTRL,
632                                      IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
633                 data = REG_SET_FIELD(data, IH_CLK_CTRL,
634                                      DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
635                 data = REG_SET_FIELD(data, IH_CLK_CTRL,
636                                      OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
637                 data = REG_SET_FIELD(data, IH_CLK_CTRL,
638                                      LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
639                 data = REG_SET_FIELD(data, IH_CLK_CTRL,
640                                      DYN_CLK_SOFT_OVERRIDE, field_val);
641                 data = REG_SET_FIELD(data, IH_CLK_CTRL,
642                                      REG_CLK_SOFT_OVERRIDE, field_val);
643                 if (def != data)
644                         WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
645         }
646 }
647
648 static int vega20_ih_set_clockgating_state(void *handle,
649                                           enum amd_clockgating_state state)
650 {
651         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
652
653         vega20_ih_update_clockgating_state(adev,
654                                 state == AMD_CG_STATE_GATE);
655         return 0;
656
657 }
658
659 static int vega20_ih_set_powergating_state(void *handle,
660                                           enum amd_powergating_state state)
661 {
662         return 0;
663 }
664
665 const struct amd_ip_funcs vega20_ih_ip_funcs = {
666         .name = "vega20_ih",
667         .early_init = vega20_ih_early_init,
668         .late_init = NULL,
669         .sw_init = vega20_ih_sw_init,
670         .sw_fini = vega20_ih_sw_fini,
671         .hw_init = vega20_ih_hw_init,
672         .hw_fini = vega20_ih_hw_fini,
673         .suspend = vega20_ih_suspend,
674         .resume = vega20_ih_resume,
675         .is_idle = vega20_ih_is_idle,
676         .wait_for_idle = vega20_ih_wait_for_idle,
677         .soft_reset = vega20_ih_soft_reset,
678         .set_clockgating_state = vega20_ih_set_clockgating_state,
679         .set_powergating_state = vega20_ih_set_powergating_state,
680 };
681
682 static const struct amdgpu_ih_funcs vega20_ih_funcs = {
683         .get_wptr = vega20_ih_get_wptr,
684         .decode_iv = amdgpu_ih_decode_iv_helper,
685         .set_rptr = vega20_ih_set_rptr
686 };
687
688 static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev)
689 {
690         adev->irq.ih_funcs = &vega20_ih_funcs;
691 }
692
693 const struct amdgpu_ip_block_version vega20_ih_ip_block =
694 {
695         .type = AMD_IP_BLOCK_TYPE_IH,
696         .major = 4,
697         .minor = 2,
698         .rev = 0,
699         .funcs = &vega20_ih_ip_funcs,
700 };
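
/*
 * Illustrative sketch (not part of the original file): the IP block above is
 * consumed by the SoC setup code. Registration looks roughly like the
 * following, assuming the usual soc15.c call site for Vega20/Arcturus:
 *
 *	if (adev->asic_type == CHIP_VEGA20 ||
 *	    adev->asic_type == CHIP_ARCTURUS)
 *		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
 */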