drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
#include "hdp/hdp_6_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15_common.h"
#include "soc15.h"
#include "sdma_v6_0_0_pkt_open.h"
#include "nbio_v4_3.h"
#include "sdma_common.h"
#include "sdma_v6_0.h"
#include "v11_structs.h"

MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin");

#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20

static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int sdma_v6_0_start(struct amdgpu_device *adev);

static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
        u32 base;

        if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
            internal_offset <= SDMA0_HYP_DEC_REG_END) {
                base = adev->reg_offset[GC_HWIP][0][1];
                if (instance != 0)
                        internal_offset += SDMA1_HYP_DEC_REG_OFFSET * instance;
        } else {
                base = adev->reg_offset[GC_HWIP][0][0];
                if (instance == 1)
                        internal_offset += SDMA1_REG_OFFSET;
        }

        return base + internal_offset;
}
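
/*
 * Worked example (illustrative only, per the constants above): SDMA
 * instance 1 addresses its non-HYP registers at a fixed 0x600-dword
 * stride from instance 0, while HYP_DEC registers use a 0x20 stride:
 *
 *   non-HYP, instance 1: base0 + internal_offset + SDMA1_REG_OFFSET
 *   HYP_DEC, instance 1: base1 + internal_offset + 1 * SDMA1_HYP_DEC_REG_OFFSET
 *
 * The actual base values come from adev->reg_offset and vary per ASIC.
 */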

/**
 * sdma_v6_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
{
        char fw_name[30];
        char ucode_prefix[30];

        DRM_DEBUG("\n");

        amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);

        return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
}

static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring)
{
        unsigned ret;

        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COND_EXE));
        amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
        amdgpu_ring_write(ring, 1);
        ret = ring->wptr & ring->buf_mask; /* this is the offset we need to patch later */
        amdgpu_ring_write(ring, 0x55aa55aa); /* insert a dummy value here and patch it later */

        return ret;
}

static void sdma_v6_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
                                           unsigned offset)
{
        unsigned cur;

        BUG_ON(offset > ring->buf_mask);
        BUG_ON(ring->ring[offset] != 0x55aa55aa);

        cur = (ring->wptr - 1) & ring->buf_mask;
        if (cur > offset)
                ring->ring[offset] = cur - offset;
        else
                ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}
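
/*
 * Sketch of the patch arithmetic above (illustrative): the COND_EXE
 * packet's dummy dword is overwritten with the number of dwords to
 * skip. If the ring has wrapped (cur <= offset), the distance is
 * computed modulo the ring size; e.g. with buf_mask = 0x3ff,
 * offset = 0x3fe and cur = 0x004 the skip count is
 * (0x3ff + 1) - 0x3fe + 0x004 = 6 dwords.
 */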

/**
 * sdma_v6_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware.
 */
static uint64_t sdma_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        u64 *rptr;

        /* XXX check if swapping is necessary on BE */
        rptr = (u64 *)ring->rptr_cpu_addr;

        DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
        return ((*rptr) >> 2);
}

/**
 * sdma_v6_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware.
 */
static uint64_t sdma_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        u64 wptr = 0;

        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
                DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
        }

        return wptr >> 2;
}

/**
 * sdma_v6_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware.
 */
static void sdma_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t *wptr_saved;
        uint32_t *is_queue_unmap;
        uint64_t aggregated_db_index;
        uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;

        DRM_DEBUG("Setting write pointer\n");

        if (ring->is_mes_queue) {
                wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
                is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
                                              sizeof(uint32_t));
                aggregated_db_index =
                        amdgpu_mes_get_aggregated_doorbell_index(adev,
                                                         ring->hw_prio);

                atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
                             ring->wptr << 2);
                *wptr_saved = ring->wptr << 2;
                /* ring the aggregated doorbell first if the queue is unmapped;
                 * the per-queue doorbell is written in either case
                 */
                if (*is_queue_unmap)
                        WDOORBELL64(aggregated_db_index, ring->wptr << 2);
                DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
                                ring->doorbell_index, ring->wptr << 2);
                WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
                if (ring->use_doorbell) {
                        DRM_DEBUG("Using doorbell -- "
                                  "wptr_offs == 0x%08x "
                                  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
                                  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
                                  ring->wptr_offs,
                                  lower_32_bits(ring->wptr << 2),
                                  upper_32_bits(ring->wptr << 2));
                        /* XXX check if swapping is necessary on BE */
                        atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
                                     ring->wptr << 2);
                        DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
                                  ring->doorbell_index, ring->wptr << 2);
                        WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
                } else {
                        DRM_DEBUG("Not using doorbell -- "
                                  "regSDMA%i_GFX_RB_WPTR == 0x%08x "
                                  "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
                                  ring->me,
                                  lower_32_bits(ring->wptr << 2),
                                  ring->me,
                                  upper_32_bits(ring->wptr << 2));
                        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
                                        ring->me, regSDMA0_QUEUE0_RB_WPTR),
                                        lower_32_bits(ring->wptr << 2));
                        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
                                        ring->me, regSDMA0_QUEUE0_RB_WPTR_HI),
                                        upper_32_bits(ring->wptr << 2));
                }
        }
}

static void sdma_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        int i;

        for (i = 0; i < count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        amdgpu_ring_write(ring, ring->funcs->nop |
                                SDMA_PKT_NOP_HEADER_COUNT(count - 1));
                else
                        amdgpu_ring_write(ring, ring->funcs->nop);
}
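
/*
 * Note on the burst NOP above (illustrative): when the engine supports
 * burst NOPs, only the first NOP packet carries a COUNT field of
 * count - 1, telling the engine how many of the following dwords belong
 * to the same NOP burst; the remaining iterations still fill the ring
 * with plain NOP headers so the ring contents stay well-formed.
 */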

/**
 * sdma_v6_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring.
 */
static void sdma_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
                                   uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);

        /* An IB packet must end on an 8 DW boundary--the next dword
         * must be on an 8-dword boundary. Our IB packet below is 6
         * dwords long, so add x NOPs such that, in modular arithmetic,
         * wptr + 6 + x = 8k, k >= 0, which in C is
         * (wptr + 6 + x) % 8 = 0.
         * The expression below is a solution of x.
         */
        sdma_v6_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_INDIRECT) |
                          SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
        /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
        amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
        amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}
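
/*
 * Worked example of the padding formula above (illustrative): with
 * wptr = 5, x = (2 - 5) & 7 = 5, and indeed (5 + 6 + 5) % 8 = 0, so
 * the 6-dword INDIRECT packet ends exactly on an 8-dword boundary.
 */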

/**
 * sdma_v6_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
 *
 * @ring: amdgpu ring pointer
 *
 * flush the IB by graphics cache rinse.
 */
static void sdma_v6_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
{
        uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
                            SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
                            SDMA_GCR_GLI_INV(1);

        /* flush the entire L0/L1/L2 cache; this can be tuned to match performance requirements */
        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_GCR_REQ));
        amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
        amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
                          SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
        amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
                          SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
        amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
                          SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
}

/**
 * sdma_v6_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask = 0;
        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

        ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;

        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
        amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
        amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
        amdgpu_ring_write(ring, ref_and_mask); /* reference */
        amdgpu_ring_write(ring, ref_and_mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

/**
 * sdma_v6_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address where the fence sequence number is written
 * @seq: fence sequence number
 * @flags: fence flags (AMDGPU_FENCE_FLAG_*)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed.
 */
static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                      unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        /* write the fence */
        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) |
                          SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
        /* zero in first two bits */
        BUG_ON(addr & 0x3);
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));

        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) |
                                  SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
                /* zero in first two bits */
                BUG_ON(addr & 0x3);
                amdgpu_ring_write(ring, lower_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }

        if (flags & AMDGPU_FENCE_FLAG_INT) {
                uint32_t ctx = ring->is_mes_queue ?
                        (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
                /* generate an interrupt */
                amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
                amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
        }
}

/**
 * sdma_v6_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers.
 */
static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
{
        u32 rb_cntl, ib_cntl;
        int i;

        amdgpu_sdma_unset_buffer_funcs_helper(adev);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
                ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
        }
}

/**
 * sdma_v6_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues.
 */
static void sdma_v6_0_rlc_stop(struct amdgpu_device *adev)
{
        /* XXX todo */
}

/**
 * sdma_v6_0_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch.
 */
static void sdma_v6_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
}

/**
 * sdma_v6_0_enable - enable/disable the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines.
 */
static void sdma_v6_0_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl;
        int i;

        if (!enable) {
                sdma_v6_0_gfx_stop(adev);
                sdma_v6_0_rlc_stop(adev);
        }

        if (amdgpu_sriov_vf(adev))
                return;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), f32_cntl);
        }
}

/**
 * sdma_v6_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 doorbell;
        u32 doorbell_offset;
        u32 temp;
        u64 wptr_gpu_addr;
        int i, r;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;

                if (!amdgpu_sriov_vf(adev))
                        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
                                        RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0);

                /* setup the wptr shadow polling */
                wptr_gpu_addr = ring->wptr_gpu_addr;
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
                       lower_32_bits(wptr_gpu_addr));
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
                       upper_32_bits(wptr_gpu_addr));

                /* set the wb address whether it's enabled or not */
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
                       upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
                       lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);

                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
                if (amdgpu_sriov_vf(adev))
                        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
                else
                        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);

                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);

                ring->wptr = 0;

                /* before programming wptr to a smaller value, set minor_ptr_update first */
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);

                if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for the wptr */
                        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
                        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
                }

                doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
                doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));

                if (ring->use_doorbell) {
                        doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
                        doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
                                        OFFSET, ring->doorbell_index);
                } else {
                        doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
                }
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);

                if (i == 0)
                        adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
                                                      ring->doorbell_index,
                                                      adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);

                if (amdgpu_sriov_vf(adev))
                        sdma_v6_0_ring_set_wptr(ring);

                /* set minor_ptr_update to 0 after the wptr is programmed */
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);

                /* Set up RESP_MODE to non-copy addresses */
                temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
                temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
                temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);

                /* program default cache read and write policy */
                temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
                /* clean read policy and write policy bits */
                temp &= 0xFF0FFF;
                temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
                         (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
                         SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);

                if (!amdgpu_sriov_vf(adev)) {
                        /* unhalt engine */
                        temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
                        temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
                        temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0);
                        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp);
                }

                /* enable DMA RB */
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);

                ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
                /* enable DMA IBs */
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);

                ring->sched.ready = true;

                if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
                        sdma_v6_0_ctx_switch_enable(adev, true);
                        sdma_v6_0_enable(adev, true);
                }

                r = amdgpu_ring_test_helper(ring);
                if (r) {
                        ring->sched.ready = false;
                        return r;
                }

                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }

        return 0;
}

/**
 * sdma_v6_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v6_0_rlc_resume(struct amdgpu_device *adev)
{
        return 0;
}

/**
 * sdma_v6_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v6_0_load_microcode(struct amdgpu_device *adev)
{
        const struct sdma_firmware_header_v2_0 *hdr;
        const __le32 *fw_data;
        u32 fw_size;
        int i, j;
        bool use_broadcast;

        /* halt the MEs */
        sdma_v6_0_enable(adev, false);

        if (!adev->sdma.instance[0].fw)
                return -EINVAL;

        /* use broadcast mode to load SDMA microcode by default */
        use_broadcast = true;

        if (use_broadcast) {
                dev_info(adev->dev, "Use broadcast method to load SDMA firmware\n");
                /* load Control Thread microcode */
                hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->ctx_jt_offset + hdr->ctx_jt_size) / 4;

                fw_data = (const __le32 *)
                        (adev->sdma.instance[0].fw->data +
                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));

                WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_ADDR), 0);

                for (j = 0; j < fw_size; j++) {
                        if (amdgpu_emu_mode == 1 && j % 500 == 0)
                                msleep(1);
                        WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_DATA), le32_to_cpup(fw_data++));
                }

                /* load Context Switch microcode */
                fw_size = le32_to_cpu(hdr->ctl_jt_offset + hdr->ctl_jt_size) / 4;

                fw_data = (const __le32 *)
                        (adev->sdma.instance[0].fw->data +
                                le32_to_cpu(hdr->ctl_ucode_offset));

                WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_ADDR), 0x8000);

                for (j = 0; j < fw_size; j++) {
                        if (amdgpu_emu_mode == 1 && j % 500 == 0)
                                msleep(1);
                        WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_DATA), le32_to_cpup(fw_data++));
                }
        } else {
                dev_info(adev->dev, "Use legacy method to load SDMA firmware\n");
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        /* load Control Thread microcode */
                        hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
                        amdgpu_ucode_print_sdma_hdr(&hdr->header);
                        fw_size = le32_to_cpu(hdr->ctx_jt_offset + hdr->ctx_jt_size) / 4;

                        fw_data = (const __le32 *)
                                (adev->sdma.instance[0].fw->data +
                                        le32_to_cpu(hdr->header.ucode_array_offset_bytes));

                        WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0);

                        for (j = 0; j < fw_size; j++) {
                                if (amdgpu_emu_mode == 1 && j % 500 == 0)
                                        msleep(1);
                                WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
                        }

                        WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version);

                        /* load Context Switch microcode */
                        fw_size = le32_to_cpu(hdr->ctl_jt_offset + hdr->ctl_jt_size) / 4;

                        fw_data = (const __le32 *)
                                (adev->sdma.instance[0].fw->data +
                                        le32_to_cpu(hdr->ctl_ucode_offset));

                        WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0x8000);

                        for (j = 0; j < fw_size; j++) {
                                if (amdgpu_emu_mode == 1 && j % 500 == 0)
                                        msleep(1);
                                WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
                        }

                        WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version);
                }
        }

        return 0;
}

static int sdma_v6_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp;
        int i;

        sdma_v6_0_gfx_stop(adev);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                tmp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_FREEZE));
                tmp |= SDMA0_FREEZE__FREEZE_MASK;
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_FREEZE), tmp);
                tmp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
                tmp |= SDMA0_F32_CNTL__HALT_MASK;
                tmp |= SDMA0_F32_CNTL__TH1_RESET_MASK;
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), tmp);

                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_PREEMPT), 0);

                udelay(100);

                tmp = GRBM_SOFT_RESET__SOFT_RESET_SDMA0_MASK << i;
                WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp);
                tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);

                udelay(100);

                WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, 0);
                tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);

                udelay(100);
        }

        return sdma_v6_0_start(adev);
}

static bool sdma_v6_0_check_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring;
        int i, r;
        long tmo = msecs_to_jiffies(1000);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ib(ring, tmo);
                if (r)
                        return true;
        }

        return false;
}

/**
 * sdma_v6_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v6_0_start(struct amdgpu_device *adev)
{
        int r = 0;

        if (amdgpu_sriov_vf(adev)) {
                sdma_v6_0_ctx_switch_enable(adev, false);
                sdma_v6_0_enable(adev, false);

                /* set RB registers */
                r = sdma_v6_0_gfx_resume(adev);
                return r;
        }

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
                r = sdma_v6_0_load_microcode(adev);
                if (r)
                        return r;

                /* The value of regSDMA_F32_CNTL is invalid the moment after loading fw */
                if (amdgpu_emu_mode == 1)
                        msleep(1000);
        }

        /* unhalt the MEs */
        sdma_v6_0_enable(adev, true);
        /* enable sdma ring preemption */
        sdma_v6_0_ctx_switch_enable(adev, true);

        /* start the gfx rings and rlc compute queues */
        r = sdma_v6_0_gfx_resume(adev);
        if (r)
                return r;
        r = sdma_v6_0_rlc_resume(adev);

        return r;
}

static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
                              struct amdgpu_mqd_prop *prop)
{
        struct v11_sdma_mqd *m = mqd;
        uint64_t wb_gpu_addr;

        m->sdmax_rlcx_rb_cntl =
                order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
                1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
                4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
                1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;

        m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
        m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);

        wb_gpu_addr = prop->wptr_gpu_addr;
        m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
        m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);

        wb_gpu_addr = prop->rptr_gpu_addr;
        m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
        m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);

        m->sdmax_rlcx_ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, 0,
                                                        regSDMA0_QUEUE0_IB_CNTL));

        m->sdmax_rlcx_doorbell_offset =
                prop->doorbell_index << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;

        m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);

        m->sdmax_rlcx_skip_cntl = 0;
        m->sdmax_rlcx_context_status = 0;
        m->sdmax_rlcx_doorbell_log = 0;

        m->sdmax_rlcx_rb_aql_cntl = regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
        m->sdmax_rlcx_dummy_reg = regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;

        return 0;
}

static void sdma_v6_0_set_mqd_funcs(struct amdgpu_device *adev)
{
        adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v11_sdma_mqd);
        adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v6_0_mqd_init;
}

/**
 * sdma_v6_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory.
 * Returns 0 for success, error for failure.
 */
static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp;
        u64 gpu_addr;
        volatile uint32_t *cpu_ptr = NULL;

        tmp = 0xCAFEDEAD;

        if (ring->is_mes_queue) {
                uint32_t offset = 0;
                offset = amdgpu_mes_ctx_get_offs(ring,
                                         AMDGPU_MES_CTX_PADDING_OFFS);
                gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
                cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
                *cpu_ptr = tmp;
        } else {
                r = amdgpu_device_wb_get(adev, &index);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
                        return r;
                }

                gpu_addr = adev->wb.gpu_addr + (index * 4);
                adev->wb.wb[index] = cpu_to_le32(tmp);
        }

        r = amdgpu_ring_alloc(ring, 5);
        if (r) {
                DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
                if (!ring->is_mes_queue)
                        amdgpu_device_wb_free(adev, index);
                return r;
        }

        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
                          SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (ring->is_mes_queue)
                        tmp = le32_to_cpu(*cpu_ptr);
                else
                        tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
                if (amdgpu_emu_mode == 1)
                        msleep(1);
                else
                        udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        if (!ring->is_mes_queue)
                amdgpu_device_wb_free(adev, index);

        return r;
}

/**
 * sdma_v6_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring.
 * Returns 0 on success, error on failure.
 */
static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
        unsigned index;
        long r;
        u32 tmp = 0;
        u64 gpu_addr;
        volatile uint32_t *cpu_ptr = NULL;

        tmp = 0xCAFEDEAD;
        memset(&ib, 0, sizeof(ib));

        if (ring->is_mes_queue) {
                uint32_t offset = 0;
                offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
                ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
                ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

                offset = amdgpu_mes_ctx_get_offs(ring,
                                         AMDGPU_MES_CTX_PADDING_OFFS);
                gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
                cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
                *cpu_ptr = tmp;
        } else {
                r = amdgpu_device_wb_get(adev, &index);
                if (r) {
                        dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
                        return r;
                }

                gpu_addr = adev->wb.gpu_addr + (index * 4);
                adev->wb.wb[index] = cpu_to_le32(tmp);

                r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
                if (r) {
                        DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                        goto err0;
                }
        }

        ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
        ib.ptr[4] = 0xDEADBEEF;
        ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.length_dw = 8;

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err1;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out\n");
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err1;
        }

        if (ring->is_mes_queue)
                tmp = le32_to_cpu(*cpu_ptr);
        else
                tmp = le32_to_cpu(adev->wb.wb[index]);

        if (tmp == 0xDEADBEEF)
                r = 0;
        else
                r = -EINVAL;

err1:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err0:
        if (!ring->is_mes_queue)
                amdgpu_device_wb_free(adev, index);
        return r;
}

/**
 * sdma_v6_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA.
 */
static void sdma_v6_0_vm_copy_pte(struct amdgpu_ib *ib,
                                  uint64_t pe, uint64_t src,
                                  unsigned count)
{
        unsigned bytes = count * 8;

        ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
                SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
        ib->ptr[ib->length_dw++] = bytes - 1;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src);
        ib->ptr[ib->length_dw++] = upper_32_bits(src);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v6_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: value to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA.
 */
static void sdma_v6_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
                                   uint64_t value, unsigned count,
                                   uint32_t incr)
{
        unsigned ndw = count * 2;

        ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = ndw - 1;
        for (; ndw > 0; ndw -= 2) {
                ib->ptr[ib->length_dw++] = lower_32_bits(value);
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                value += incr;
        }
}
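
/*
 * Sketch of the packet layout above (illustrative): a WRITE_LINEAR
 * packet carrying N 64-bit PTEs occupies 4 + 2*N dwords -- header,
 * two address dwords, a dword count of (2*N - 1), then N low/high
 * value pairs. For count = 3 PTEs, ndw = 6 and the payload is 6 dwords.
 */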

/**
 * sdma_v6_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA.
 */
static void sdma_v6_0_vm_set_pte_pde(struct amdgpu_ib *ib,
                                     uint64_t pe,
                                     uint64_t addr, unsigned count,
                                     uint32_t incr, uint64_t flags)
{
        /* for physically contiguous pages (vram) */
        ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_PTEPDE);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
        ib->ptr[ib->length_dw++] = upper_32_bits(flags);
        ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = incr; /* increment size */
        ib->ptr[ib->length_dw++] = 0;
        ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}

/**
 * sdma_v6_0_ring_pad_ib - pad the IB
 *
 * @ring: amdgpu ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
static void sdma_v6_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        u32 pad_count;
        int i;

        pad_count = (-ib->length_dw) & 0x7;
        for (i = 0; i < pad_count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        ib->ptr[ib->length_dw++] =
                                SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP) |
                                SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
                else
                        ib->ptr[ib->length_dw++] =
                                SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP);
}
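
/*
 * Example of the padding computation above (illustrative): for
 * ib->length_dw = 13, pad_count = (-13) & 0x7 = 3, bringing the IB
 * to 16 dwords; an already aligned IB (length_dw = 16) gets
 * pad_count = 0 and no NOPs are emitted.
 */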

/**
 * sdma_v6_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed.
 */
static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        /* wait for idle */
        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
                          SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        amdgpu_ring_write(ring, seq); /* reference */
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v6_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA.
 */
static void sdma_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vmid, uint64_t pd_addr)
{
        amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static void sdma_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
                                     uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, val);
}

static void sdma_v6_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
                                         uint32_t val, uint32_t mask)
{
        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, val); /* reference */
        amdgpu_ring_write(ring, mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}

static void sdma_v6_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
                                                   uint32_t reg0, uint32_t reg1,
                                                   uint32_t ref, uint32_t mask)
{
        amdgpu_ring_emit_wreg(ring, reg0, ref);
        /* wait for a cycle to reset vm_inv_eng*_ack */
        amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
        amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

static int sdma_v6_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        sdma_v6_0_set_ring_funcs(adev);
        sdma_v6_0_set_buffer_funcs(adev);
        sdma_v6_0_set_vm_pte_funcs(adev);
        sdma_v6_0_set_irq_funcs(adev);
        sdma_v6_0_set_mqd_funcs(adev);

        return 0;
}
1249
1250 static int sdma_v6_0_sw_init(void *handle)
1251 {
1252         struct amdgpu_ring *ring;
1253         int r, i;
1254         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1255
1256         /* SDMA trap event */
1257         r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
1258                               GFX_11_0_0__SRCID__SDMA_TRAP,
1259                               &adev->sdma.trap_irq);
1260         if (r)
1261                 return r;
1262
1263         r = sdma_v6_0_init_microcode(adev);
1264         if (r) {
1265                 DRM_ERROR("Failed to load sdma firmware!\n");
1266                 return r;
1267         }
1268
1269         for (i = 0; i < adev->sdma.num_instances; i++) {
1270                 ring = &adev->sdma.instance[i].ring;
1271                 ring->ring_obj = NULL;
1272                 ring->use_doorbell = true;
1273                 ring->me = i;
1274
1275                 DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
1276                                 ring->use_doorbell?"true":"false");
1277
1278                 ring->doorbell_index =
1279                         (adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset
1280
1281                 sprintf(ring->name, "sdma%d", i);
1282                 r = amdgpu_ring_init(adev, ring, 1024,
1283                                      &adev->sdma.trap_irq,
1284                                      AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1285                                      AMDGPU_RING_PRIO_DEFAULT, NULL);
1286                 if (r)
1287                         return r;
1288         }
1289
1290         return r;
1291 }
1292
1293 static int sdma_v6_0_sw_fini(void *handle)
1294 {
1295         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1296         int i;
1297
1298         for (i = 0; i < adev->sdma.num_instances; i++)
1299                 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1300
1301         amdgpu_sdma_destroy_inst_ctx(adev, true);
1302
1303         return 0;
1304 }
1305
1306 static int sdma_v6_0_hw_init(void *handle)
1307 {
1308         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1309
1310         return sdma_v6_0_start(adev);
1311 }
1312
1313 static int sdma_v6_0_hw_fini(void *handle)
1314 {
1315         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1316
1317         if (amdgpu_sriov_vf(adev)) {
1318                 /* disable the scheduler for SDMA */
1319                 amdgpu_sdma_unset_buffer_funcs_helper(adev);
1320                 return 0;
1321         }
1322
1323         sdma_v6_0_ctx_switch_enable(adev, false);
1324         sdma_v6_0_enable(adev, false);
1325
1326         return 0;
1327 }
1328
1329 static int sdma_v6_0_suspend(void *handle)
1330 {
1331         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1332
1333         return sdma_v6_0_hw_fini(adev);
1334 }
1335
1336 static int sdma_v6_0_resume(void *handle)
1337 {
1338         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1339
1340         return sdma_v6_0_hw_init(adev);
1341 }
1342
1343 static bool sdma_v6_0_is_idle(void *handle)
1344 {
1345         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1346         u32 i;
1347
1348         for (i = 0; i < adev->sdma.num_instances; i++) {
1349                 u32 tmp = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_STATUS_REG));
1350
1351                 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1352                         return false;
1353         }
1354
1355         return true;
1356 }
1357
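/**
 * sdma_v6_0_wait_for_idle - wait for both SDMA instances to idle
 *
 * @handle: amdgpu_device pointer
 *
 * Poll the status registers in 1us steps for up to
 * adev->usec_timeout iterations; returns -ETIMEDOUT if the IDLE
 * bit never becomes set on both instances.
 */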
1358 static int sdma_v6_0_wait_for_idle(void *handle)
1359 {
1360         unsigned i;
1361         u32 sdma0, sdma1;
1362         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1363
1364         for (i = 0; i < adev->usec_timeout; i++) {
1365                 sdma0 = RREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG));
1366                 sdma1 = RREG32(sdma_v6_0_get_reg_offset(adev, 1, regSDMA0_STATUS_REG));
1367
1368                 if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
1369                         return 0;
1370                 udelay(1);
1371         }
1372         return -ETIMEDOUT;
1373 }
1374
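/**
 * sdma_v6_0_ring_preempt_ib - preempt the currently executing IB
 *
 * @ring: amdgpu_ring pointer
 *
 * Assert the preemption condition, emit a trailing fence, trigger
 * preemption through regSDMA0_QUEUE0_PREEMPT, then poll the trailing
 * fence to confirm the engine drained past it before deasserting
 * everything again.
 */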
1375 static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring)
1376 {
1377         int i, r = 0;
1378         struct amdgpu_device *adev = ring->adev;
1379         u32 index = 0;
1380         u64 sdma_gfx_preempt;
1381
1382         amdgpu_sdma_get_index_from_ring(ring, &index);
1383         sdma_gfx_preempt =
1384                 sdma_v6_0_get_reg_offset(adev, index, regSDMA0_QUEUE0_PREEMPT);
1385
1386         /* assert preemption condition */
1387         amdgpu_ring_set_preempt_cond_exec(ring, false);
1388
1389         /* emit the trailing fence */
1390         ring->trail_seq += 1;
1391         amdgpu_ring_alloc(ring, 10);
1392         sdma_v6_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
1393                                   ring->trail_seq, 0);
1394         amdgpu_ring_commit(ring);
1395
1396         /* assert IB preemption */
1397         WREG32(sdma_gfx_preempt, 1);
1398
1399         /* poll the trailing fence */
1400         for (i = 0; i < adev->usec_timeout; i++) {
1401                 if (ring->trail_seq ==
1402                     le32_to_cpu(*(ring->trail_fence_cpu_addr)))
1403                         break;
1404                 udelay(1);
1405         }
1406
1407         if (i >= adev->usec_timeout) {
1408                 r = -EINVAL;
1409                 DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
1410         }
1411
1412         /* deassert IB preemption */
1413         WREG32(sdma_gfx_preempt, 0);
1414
1415         /* deassert the preemption condition */
1416         amdgpu_ring_set_preempt_cond_exec(ring, true);
1417         return r;
1418 }
1419
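/**
 * sdma_v6_0_set_trap_irq_state - enable/disable the SDMA trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @type: SDMA instance the state change applies to
 * @state: requested interrupt state
 *
 * Toggle the TRAP_ENABLE bit in the per-instance SDMA0_CNTL register.
 */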
1420 static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
1421                                         struct amdgpu_irq_src *source,
1422                                         unsigned type,
1423                                         enum amdgpu_interrupt_state state)
1424 {
1425         u32 sdma_cntl;
1426         u32 reg_offset = sdma_v6_0_get_reg_offset(adev, type, regSDMA0_CNTL);
1427
1429         sdma_cntl = RREG32(reg_offset);
1430         sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
1431                        state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1432         WREG32(reg_offset, sdma_cntl);
1433
1434         return 0;
1435 }
1436
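/**
 * sdma_v6_0_process_trap_irq - process an SDMA trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: decoded interrupt vector entry
 *
 * With MES enabled, the ring is looked up via the MES queue id in
 * src_data[0]; otherwise the SDMA instance and queue are decoded
 * from ring_id and the fence on the matching ring is processed.
 */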
1437 static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev,
1438                                       struct amdgpu_irq_src *source,
1439                                       struct amdgpu_iv_entry *entry)
1440 {
1441         int instance, queue;
1442         uint32_t mes_queue_id = entry->src_data[0];
1443
1444         DRM_DEBUG("IH: SDMA trap\n");
1445
1446         if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
1447                 struct amdgpu_mes_queue *mes_queue;
1448
1449                 mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
1450
1451                 spin_lock(&adev->mes.queue_id_lock);
1452                 mes_queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
1453                 if (mes_queue) {
1454                         DRM_DEBUG("process sdma queue id = %d\n", mes_queue_id);
1455                         amdgpu_fence_process(mes_queue->ring);
1456                 }
1457                 spin_unlock(&adev->mes.queue_id_lock);
1458                 return 0;
1459         }
1460
1461         queue = entry->ring_id & 0xf;
1462         instance = (entry->ring_id & 0xf0) >> 4;
1463         if (instance > 1) {
1464                 DRM_ERROR("IH: wrong ring_id detected, invalid SDMA instance\n");
1465                 return -EINVAL;
1466         }
1467
1468         switch (entry->client_id) {
1469         case SOC21_IH_CLIENTID_GFX:
1470                 switch (queue) {
1471                 case 0:
1472                         amdgpu_fence_process(&adev->sdma.instance[instance].ring);
1473                         break;
1474                 default:
1475                         break;
1476                 }
1477                 break;
1478         }
1479         return 0;
1480 }
1481
1482 static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev,
1483                                               struct amdgpu_irq_src *source,
1484                                               struct amdgpu_iv_entry *entry)
1485 {
1486         return 0;
1487 }
1488
1489 static int sdma_v6_0_set_clockgating_state(void *handle,
1490                                            enum amd_clockgating_state state)
1491 {
1492         return 0;
1493 }
1494
1495 static int sdma_v6_0_set_powergating_state(void *handle,
1496                                           enum amd_powergating_state state)
1497 {
1498         return 0;
1499 }
1500
1501 static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags)
1502 {
1503 }
1504
1505 const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
1506         .name = "sdma_v6_0",
1507         .early_init = sdma_v6_0_early_init,
1508         .late_init = NULL,
1509         .sw_init = sdma_v6_0_sw_init,
1510         .sw_fini = sdma_v6_0_sw_fini,
1511         .hw_init = sdma_v6_0_hw_init,
1512         .hw_fini = sdma_v6_0_hw_fini,
1513         .suspend = sdma_v6_0_suspend,
1514         .resume = sdma_v6_0_resume,
1515         .is_idle = sdma_v6_0_is_idle,
1516         .wait_for_idle = sdma_v6_0_wait_for_idle,
1517         .soft_reset = sdma_v6_0_soft_reset,
1518         .check_soft_reset = sdma_v6_0_check_soft_reset,
1519         .set_clockgating_state = sdma_v6_0_set_clockgating_state,
1520         .set_powergating_state = sdma_v6_0_set_powergating_state,
1521         .get_clockgating_state = sdma_v6_0_get_clockgating_state,
1522 };
1523
1524 static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
1525         .type = AMDGPU_RING_TYPE_SDMA,
1526         .align_mask = 0xf,
1527         .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1528         .support_64bit_ptrs = true,
1529         .secure_submission_supported = true,
1530         .vmhub = AMDGPU_GFXHUB_0,
1531         .get_rptr = sdma_v6_0_ring_get_rptr,
1532         .get_wptr = sdma_v6_0_ring_get_wptr,
1533         .set_wptr = sdma_v6_0_ring_set_wptr,
1534         .emit_frame_size =
1535                 5 + /* sdma_v6_0_ring_init_cond_exec */
1536                 6 + /* sdma_v6_0_ring_emit_hdp_flush */
1537                 6 + /* sdma_v6_0_ring_emit_pipeline_sync */
1538                 /* sdma_v6_0_ring_emit_vm_flush */
1539                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1540                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
1541                 10 + 10 + 10, /* sdma_v6_0_ring_emit_fence x3 for user fence, vm fence */
1542         .emit_ib_size = 5 + 7 + 6, /* sdma_v6_0_ring_emit_ib */
1543         .emit_ib = sdma_v6_0_ring_emit_ib,
1544         .emit_mem_sync = sdma_v6_0_ring_emit_mem_sync,
1545         .emit_fence = sdma_v6_0_ring_emit_fence,
1546         .emit_pipeline_sync = sdma_v6_0_ring_emit_pipeline_sync,
1547         .emit_vm_flush = sdma_v6_0_ring_emit_vm_flush,
1548         .emit_hdp_flush = sdma_v6_0_ring_emit_hdp_flush,
1549         .test_ring = sdma_v6_0_ring_test_ring,
1550         .test_ib = sdma_v6_0_ring_test_ib,
1551         .insert_nop = sdma_v6_0_ring_insert_nop,
1552         .pad_ib = sdma_v6_0_ring_pad_ib,
1553         .emit_wreg = sdma_v6_0_ring_emit_wreg,
1554         .emit_reg_wait = sdma_v6_0_ring_emit_reg_wait,
1555         .emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait,
1556         .init_cond_exec = sdma_v6_0_ring_init_cond_exec,
1557         .patch_cond_exec = sdma_v6_0_ring_patch_cond_exec,
1558         .preempt_ib = sdma_v6_0_ring_preempt_ib,
1559 };
1560
1561 static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev)
1562 {
1563         int i;
1564
1565         for (i = 0; i < adev->sdma.num_instances; i++) {
1566                 adev->sdma.instance[i].ring.funcs = &sdma_v6_0_ring_funcs;
1567                 adev->sdma.instance[i].ring.me = i;
1568         }
1569 }
1570
1571 static const struct amdgpu_irq_src_funcs sdma_v6_0_trap_irq_funcs = {
1572         .set = sdma_v6_0_set_trap_irq_state,
1573         .process = sdma_v6_0_process_trap_irq,
1574 };
1575
1576 static const struct amdgpu_irq_src_funcs sdma_v6_0_illegal_inst_irq_funcs = {
1577         .process = sdma_v6_0_process_illegal_inst_irq,
1578 };
1579
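/**
 * sdma_v6_0_set_irq_funcs - set up the SDMA irq handlers
 *
 * @adev: amdgpu_device pointer
 *
 * Register one trap irq type per SDMA instance plus the
 * illegal-instruction handler.
 */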
1580 static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev)
1581 {
1582         adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
1583                                         adev->sdma.num_instances;
1584         adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs;
1585         adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs;
1586 }
1587
1588 /**
1589  * sdma_v6_0_emit_copy_buffer - copy buffer using the sDMA engine
1590  *
1591  * @ib: indirect buffer to fill with commands
1592  * @src_offset: src GPU address
1593  * @dst_offset: dst GPU address
1594  * @byte_count: number of bytes to xfer
1595  * @tmz: if a secure copy should be used
1596  *
1597  * Copy GPU buffers using the DMA engine.
1598  * Used by the amdgpu ttm implementation to move pages if
1599  * registered as the asic copy callback.
1600  */
1600 static void sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib,
1601                                        uint64_t src_offset,
1602                                        uint64_t dst_offset,
1603                                        uint32_t byte_count,
1604                                        bool tmz)
1605 {
1606         ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
1607                 SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
1608                 SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
1609         ib->ptr[ib->length_dw++] = byte_count - 1;
1610         ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1611         ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1612         ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1613         ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1614         ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1615 }
1616
1617 /**
1618  * sdma_v6_0_emit_fill_buffer - fill buffer using the sDMA engine
1619  *
1620  * @ib: indirect buffer to fill
1621  * @src_data: value to write to buffer
1622  * @dst_offset: dst GPU address
1623  * @byte_count: number of bytes to fill
1624  *
1625  * Fill GPU buffers using the DMA engine.
1626  */
1627 static void sdma_v6_0_emit_fill_buffer(struct amdgpu_ib *ib,
1628                                        uint32_t src_data,
1629                                        uint64_t dst_offset,
1630                                        uint32_t byte_count)
1631 {
1632         ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL);
1633         ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1634         ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1635         ib->ptr[ib->length_dw++] = src_data;
1636         ib->ptr[ib->length_dw++] = byte_count - 1;
1637 }
1638
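/*
 * A linear copy packet is 7 dwords and a constant-fill packet is 5,
 * matching what sdma_v6_0_emit_copy_buffer() and
 * sdma_v6_0_emit_fill_buffer() above emit; both operations are capped
 * at 0x400000 bytes per packet.
 */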
1639 static const struct amdgpu_buffer_funcs sdma_v6_0_buffer_funcs = {
1640         .copy_max_bytes = 0x400000,
1641         .copy_num_dw = 7,
1642         .emit_copy_buffer = sdma_v6_0_emit_copy_buffer,
1643
1644         .fill_max_bytes = 0x400000,
1645         .fill_num_dw = 5,
1646         .emit_fill_buffer = sdma_v6_0_emit_fill_buffer,
1647 };
1648
1649 static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev)
1650 {
1651         adev->mman.buffer_funcs = &sdma_v6_0_buffer_funcs;
1652         adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1653 }
1654
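/*
 * PTE copies reuse the linear copy packet, so copy_pte_num_dw matches
 * the 7-dword copy above.
 */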
1655 static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = {
1656         .copy_pte_num_dw = 7,
1657         .copy_pte = sdma_v6_0_vm_copy_pte,
1658         .write_pte = sdma_v6_0_vm_write_pte,
1659         .set_pte_pde = sdma_v6_0_vm_set_pte_pde,
1660 };
1661
1662 static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev)
1663 {
1664         unsigned i;
1665
1666         adev->vm_manager.vm_pte_funcs = &sdma_v6_0_vm_pte_funcs;
1667         for (i = 0; i < adev->sdma.num_instances; i++) {
1668                 adev->vm_manager.vm_pte_scheds[i] =
1669                         &adev->sdma.instance[i].ring.sched;
1670         }
1671         adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
1672 }
1673
1674 const struct amdgpu_ip_block_version sdma_v6_0_ip_block = {
1675         .type = AMD_IP_BLOCK_TYPE_SDMA,
1676         .major = 6,
1677         .minor = 0,
1678         .rev = 0,
1679         .funcs = &sdma_v6_0_ip_funcs,
1680 };