/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <[email protected]>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

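/**
 * uvd_v5_0_early_init - set up IP block callbacks
 *
 * @handle: amdgpu_device pointer
 *
 * UVD 5.0 has a single instance; hook up its ring and
 * interrupt handling callbacks.
 */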
static int uvd_v5_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->uvd.num_uvd_inst = 1;

        uvd_v5_0_set_ring_funcs(adev);
        uvd_v5_0_set_irq_funcs(adev);

        return 0;
}

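/**
 * uvd_v5_0_sw_init - software init for UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Register the UVD TRAP interrupt source, run the common UVD
 * software initialization (firmware load and BO allocation) and
 * set up the decode ring.
 */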
static int uvd_v5_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);

        return r;
}

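/**
 * uvd_v5_0_sw_fini - software teardown for UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend the UVD block and tear down the software state created
 * in uvd_v5_0_sw_init().
 */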
static int uvd_v5_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t tmp;
        int r;

        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
        uvd_v5_0_enable_mgcg(adev, true);

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }

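        /* program the semaphore wait and signal timeout lengths */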
        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;

        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v5_0_stop(adev);

        ring->ready = false;

        return 0;
}

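/**
 * uvd_v5_0_suspend - suspend the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the hardware, gate the UVD clocks and suspend the common
 * UVD state.
 */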
static int uvd_v5_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v5_0_hw_fini(adev);
        if (r)
                return r;
        uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

        return amdgpu_uvd_suspend(adev);
}

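/**
 * uvd_v5_0_resume - resume the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Restore the common UVD state and re-run hardware init.
 */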
static int uvd_v5_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.inst->gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        upper_32_bits(adev->uvd.inst->gpu_addr));

        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE +
               (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v5_0_mc_resume(adev);

        /* disable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

        /* stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
                             (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL,  1 << 9);

        /* enable UMC */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);

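        /*
         * Wait for the VCPU to come up; bit 1 of UVD_STATUS is set once
         * the firmware is running.  If it never sets, pulse the VCPU soft
         * reset and try again, up to 10 times in total.
         */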
        for (i = 0; i < 10; ++i) {
                uint32_t status;
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                                ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }
        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

        /* clear bit 2 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

        rb_bufsz = order_base_2(ring->ring_size);
        tmp = 0;
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

        WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

        return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

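        /* write the fence value and the address to store it at */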
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

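        /* emit the trap command to raise an interrupt */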
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vmid: vmid to execute the IB on (unused for UVD 5.0)
 * @ctx_switch: whether a context switch is needed (unused)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vmid, bool ctx_switch)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

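/**
 * uvd_v5_0_ring_insert_nop - insert padding NOPs
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords of padding to insert
 *
 * Pad the ring with UVD_NO_OP register writes; each write takes
 * two dwords, so @count (and the write pointer) must be even.
 */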
static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        int i;

        WARN_ON(ring->wptr % 2 || count % 2);

        for (i = 0; i < count / 2; i++) {
                amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
                amdgpu_ring_write(ring, 0);
        }
}

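/**
 * uvd_v5_0_is_idle - check UVD idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true if the UVD busy bit in SRBM_STATUS is clear.
 */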
static bool uvd_v5_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

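/**
 * uvd_v5_0_wait_for_idle - wait for UVD to become idle
 *
 * @handle: amdgpu_device pointer
 *
 * Poll SRBM_STATUS until the UVD busy bit clears; returns 0 on
 * success or -ETIMEDOUT if the block stays busy.
 */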
static int uvd_v5_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
                        return 0;
        }
        return -ETIMEDOUT;
}

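/**
 * uvd_v5_0_soft_reset - soft reset the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the block, pulse the UVD bit in SRBM_SOFT_RESET and
 * restart the block.
 */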
static int uvd_v5_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v5_0_stop(adev);

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
                        ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        return uvd_v5_0_start(adev);
}

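/**
 * uvd_v5_0_set_interrupt_state - toggle interrupt delivery
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Currently a stub; see the TODO below.
 */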
static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        /* TODO */
        return 0;
}

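/**
 * uvd_v5_0_process_interrupt - handle a UVD TRAP interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Process completed fences on the UVD ring.
 */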
static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.inst->ring);
        return 0;
}

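/**
 * uvd_v5_0_enable_clock_gating - coarse clock gating control
 *
 * @adev: amdgpu_device pointer
 * @enable: true to gate the UVD sub-block clocks, false to ungate
 *
 * Program UVD_CGC_GATE and UVD_SUVD_CGC_GATE.  The VCPU clock is
 * only gated when UVD powergating is also supported.
 */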
static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
        uint32_t data1, data3, suvd_flags;

        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        data3 = RREG32(mmUVD_CGC_GATE);

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                     UVD_SUVD_CGC_GATE__SIT_MASK |
                     UVD_SUVD_CGC_GATE__SMP_MASK |
                     UVD_SUVD_CGC_GATE__SCM_MASK |
                     UVD_SUVD_CGC_GATE__SDB_MASK;

        if (enable) {
                data3 |= (UVD_CGC_GATE__SYS_MASK     |
                        UVD_CGC_GATE__UDEC_MASK      |
                        UVD_CGC_GATE__MPEG2_MASK     |
                        UVD_CGC_GATE__RBC_MASK       |
                        UVD_CGC_GATE__LMI_MC_MASK    |
                        UVD_CGC_GATE__IDCT_MASK      |
                        UVD_CGC_GATE__MPRD_MASK      |
                        UVD_CGC_GATE__MPC_MASK       |
                        UVD_CGC_GATE__LBSI_MASK      |
                        UVD_CGC_GATE__LRBBM_MASK     |
                        UVD_CGC_GATE__UDEC_RE_MASK   |
                        UVD_CGC_GATE__UDEC_CM_MASK   |
                        UVD_CGC_GATE__UDEC_IT_MASK   |
                        UVD_CGC_GATE__UDEC_DB_MASK   |
                        UVD_CGC_GATE__UDEC_MP_MASK   |
                        UVD_CGC_GATE__WCB_MASK       |
                        UVD_CGC_GATE__JPEG_MASK      |
                        UVD_CGC_GATE__SCPU_MASK);
                /* the VCPU clock can only be gated when powergating is enabled */
                if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
                        data3 |= UVD_CGC_GATE__VCPU_MASK;
                data3 &= ~UVD_CGC_GATE__REGS_MASK;
                data1 |= suvd_flags;
        } else {
                data3 = 0;
                data1 = 0;
        }

        WREG32(mmUVD_SUVD_CGC_GATE, data1);
        WREG32(mmUVD_CGC_GATE, data3);
}

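/**
 * uvd_v5_0_set_sw_clock_gating - program dynamic (SW) clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Select the dynamic clock mode with its gate-delay and clock-off
 * timers, and clear the per-block mode overrides in UVD_CGC_CTRL
 * and UVD_SUVD_CGC_CTRL.
 */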
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data2;

        data = RREG32(mmUVD_CGC_CTRL);
        data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
                (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                        UVD_CGC_CTRL__SYS_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MODE_MASK |
                        UVD_CGC_CTRL__MPEG2_MODE_MASK |
                        UVD_CGC_CTRL__REGS_MODE_MASK |
                        UVD_CGC_CTRL__RBC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                        UVD_CGC_CTRL__IDCT_MODE_MASK |
                        UVD_CGC_CTRL__MPRD_MODE_MASK |
                        UVD_CGC_CTRL__MPC_MODE_MASK |
                        UVD_CGC_CTRL__LBSI_MODE_MASK |
                        UVD_CGC_CTRL__LRBBM_MODE_MASK |
                        UVD_CGC_CTRL__WCB_MODE_MASK |
                        UVD_CGC_CTRL__VCPU_MODE_MASK |
                        UVD_CGC_CTRL__JPEG_MODE_MASK |
                        UVD_CGC_CTRL__SCPU_MODE_MASK);
        data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

        WREG32(mmUVD_CGC_CTRL, data);
        WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, cgc_flags, suvd_flags;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);

        cgc_flags = UVD_CGC_GATE__SYS_MASK |
                                UVD_CGC_GATE__UDEC_MASK |
                                UVD_CGC_GATE__MPEG2_MASK |
                                UVD_CGC_GATE__RBC_MASK |
                                UVD_CGC_GATE__LMI_MC_MASK |
                                UVD_CGC_GATE__IDCT_MASK |
                                UVD_CGC_GATE__MPRD_MASK |
                                UVD_CGC_GATE__MPC_MASK |
                                UVD_CGC_GATE__LBSI_MASK |
                                UVD_CGC_GATE__LRBBM_MASK |
                                UVD_CGC_GATE__UDEC_RE_MASK |
                                UVD_CGC_GATE__UDEC_CM_MASK |
                                UVD_CGC_GATE__UDEC_IT_MASK |
                                UVD_CGC_GATE__UDEC_DB_MASK |
                                UVD_CGC_GATE__UDEC_MP_MASK |
                                UVD_CGC_GATE__WCB_MASK |
                                UVD_CGC_GATE__VCPU_MASK |
                                UVD_CGC_GATE__SCPU_MASK;

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= cgc_flags;
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

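/**
 * uvd_v5_0_enable_mgcg - medium grain clock gating control
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable MGCG when supported, false to disable
 *
 * Toggle the memory gates in UVD_CGC_MEM_CTRL and the dynamic
 * clock mode bit in UVD_CGC_CTRL.
 */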
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data |= 0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        } else {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data &= ~0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        }
}

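/**
 * uvd_v5_0_set_clockgating_state - set UVD clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state to program
 *
 * Gate or ungate the UVD clocks; gating requires the block to go
 * idle first.
 */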
static int uvd_v5_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE);

        if (enable) {
                /* wait for STATUS to clear */
                if (uvd_v5_0_wait_for_idle(handle))
                        return -EBUSY;
                uvd_v5_0_enable_clock_gating(adev, true);

                /* enable HW gates because UVD is idle */
/*              uvd_v5_0_set_hw_clock_gating(adev); */
        } else {
                uvd_v5_0_enable_clock_gating(adev, false);
        }

        uvd_v5_0_set_sw_clock_gating(adev);
        return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret = 0;

        if (state == AMD_PG_STATE_GATE)
                uvd_v5_0_stop(adev);
        else
                ret = uvd_v5_0_start(adev);

        return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int data;

        mutex_lock(&adev->pm.mutex);

        if (RREG32_SMC(ixCURRENT_PG_STATUS) &
                                CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
                DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
                goto out;
        }

        /* AMD_CG_SUPPORT_UVD_MGCG */
        data = RREG32(mmUVD_CGC_CTRL);
        if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
                *flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
        mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
        .name = "uvd_v5_0",
        .early_init = uvd_v5_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v5_0_sw_init,
        .sw_fini = uvd_v5_0_sw_fini,
        .hw_init = uvd_v5_0_hw_init,
        .hw_fini = uvd_v5_0_hw_fini,
        .suspend = uvd_v5_0_suspend,
        .resume = uvd_v5_0_resume,
        .is_idle = uvd_v5_0_is_idle,
        .wait_for_idle = uvd_v5_0_wait_for_idle,
        .soft_reset = uvd_v5_0_soft_reset,
        .set_clockgating_state = uvd_v5_0_set_clockgating_state,
        .set_powergating_state = uvd_v5_0_set_powergating_state,
        .get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
        .get_rptr = uvd_v5_0_ring_get_rptr,
        .get_wptr = uvd_v5_0_ring_get_wptr,
        .set_wptr = uvd_v5_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_frame_size =
                14, /* uvd_v5_0_ring_emit_fence  x1 no user fence */
        .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
        .emit_ib = uvd_v5_0_ring_emit_ib,
        .emit_fence = uvd_v5_0_ring_emit_fence,
        .test_ring = uvd_v5_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = uvd_v5_0_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
        .set = uvd_v5_0_set_interrupt_state,
        .process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.inst->irq.num_types = 1;
        adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 5,
        .minor = 0,
        .rev = 0,
        .funcs = &uvd_v5_0_ip_funcs,
};