drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_vpe.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "vpe_v6_1.h"

#define AMDGPU_CSA_VPE_SIZE     64
/* VPE CSA resides in the 4th page of CSA */
#define AMDGPU_CSA_VPE_OFFSET   (4096 * 3)

/* 1 second timeout */
#define VPE_IDLE_TIMEOUT        msecs_to_jiffies(1000)

#define VPE_MAX_DPM_LEVEL                       4
#define FIXED1_8_BITS_PER_FRACTIONAL_PART       8
#define GET_PRATIO_INTEGER_PART(x)              ((x) >> FIXED1_8_BITS_PER_FRACTIONAL_PART)

static void vpe_set_ring_funcs(struct amdgpu_device *adev);

static inline uint16_t div16_u16_rem(uint16_t dividend, uint16_t divisor, uint16_t *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

static inline uint16_t complete_integer_division_u16(
        uint16_t dividend,
        uint16_t divisor,
        uint16_t *remainder)
{
        return div16_u16_rem(dividend, divisor, remainder);
}

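/*
 * Build an unsigned fixed-point ratio with 8 fractional bits from
 * numerator/denominator. As a worked example, 1500/2000 = 0.75 yields an
 * integer part of 0 and a fractional part of 0.75 * 256 = 192 (0xC0), so
 * the function returns 0x0C0. A return of 0 signals an out-of-range result.
 */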
static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator)
{
        u16 arg1_value = numerator;
        u16 arg2_value = denominator;

        uint16_t remainder;

        /* determine integer part */
        uint16_t res_value = complete_integer_division_u16(
                arg1_value, arg2_value, &remainder);

        if (res_value > 127 /* CHAR_MAX */)
                return 0;

        /* determine fractional part */
        {
                unsigned int i = FIXED1_8_BITS_PER_FRACTIONAL_PART;

                do {
                        remainder <<= 1;

                        res_value <<= 1;

                        if (remainder >= arg2_value) {
                                res_value |= 1;
                                remainder -= arg2_value;
                        }
                } while (--i != 0);
        }

        /* round up LSB */
        {
                uint16_t summand = (remainder << 1) >= arg2_value;

                if ((res_value + summand) > 32767 /* SHRT_MAX */)
                        return 0;

                res_value += summand;
        }

        return res_value;
}

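/*
 * A pratio is the U1.8 ratio between two clock frequencies. Ratios with an
 * integer part above 1 (i.e. 2.0 or more) are treated as invalid and
 * collapsed to 0.
 */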
static uint16_t vpe_internal_get_pratio(uint16_t from_frequency, uint16_t to_frequency)
{
        uint16_t pratio = vpe_u1_8_from_fraction(from_frequency, to_frequency);

        if (GET_PRATIO_INTEGER_PART(pratio) > 1)
                pratio = 0;

        return pratio;
}

/*
 * VPE has 4 DPM levels, from level 0 (lowest) to 3 (highest). The VPE FW
 * dynamically decides which level to use according to the current load.
 *
 * Get the VPE and SOC clocks from PM, select the appropriate four clock
 * values, and calculate the ratios for adjusting from one clock to another.
 * The VPE FW can then request the appropriate frequency from the PMFW.
 */
int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe)
{
        struct amdgpu_device *adev = vpe->ring.adev;
        uint32_t dpm_ctl;

        if (adev->pm.dpm_enabled) {
                struct dpm_clocks clock_table = { 0 };
                struct dpm_clock *VPEClks;
                struct dpm_clock *SOCClks;
                uint32_t idx;
                uint32_t vpeclk_enabled_num = 0;
                uint32_t pratio_vmax_vnorm = 0, pratio_vnorm_vmid = 0, pratio_vmid_vmin = 0;
                uint16_t pratio_vmin_freq = 0, pratio_vmid_freq = 0, pratio_vnorm_freq = 0, pratio_vmax_freq = 0;

                dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
                dpm_ctl |= 1; /* DPM enablement */
                WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);

                /* Get VPECLK and SOCCLK */
                if (amdgpu_dpm_get_dpm_clock_table(adev, &clock_table)) {
                        dev_dbg(adev->dev, "%s: get clock failed!\n", __func__);
                        goto disable_dpm;
                }

                SOCClks = clock_table.SocClocks;
                VPEClks = clock_table.VPEClocks;

                /* Confirm the number of enabled VPE clocks.
                 * Enabled VPE clocks are ordered from low to high in VPEClks;
                 * the highest valid clock index + 1 is the number of VPEClks.
                 */
                for (idx = PP_SMU_NUM_VPECLK_DPM_LEVELS; idx && !vpeclk_enabled_num; idx--)
                        if (VPEClks[idx-1].Freq)
                                vpeclk_enabled_num = idx;

                /* vpe dpm only cares about 4 levels. */
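                /*
                 * Map the four VPE levels onto the SOC DPM table: level 0
                 * uses SOC DPM level 0, and levels 1..3 use SOC DPM levels
                 * 3, 5 and 7, clamped to the highest enabled VPE clock.
                 */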
                for (idx = 0; idx < VPE_MAX_DPM_LEVEL; idx++) {
                        uint32_t soc_dpm_level;
                        uint32_t min_freq;

                        if (idx == 0)
                                soc_dpm_level = 0;
                        else
                                soc_dpm_level = (idx * 2) + 1;

                        /* clamp the max level */
                        if (soc_dpm_level > vpeclk_enabled_num - 1)
                                soc_dpm_level = vpeclk_enabled_num - 1;

                        min_freq = (SOCClks[soc_dpm_level].Freq < VPEClks[soc_dpm_level].Freq) ?
                                   SOCClks[soc_dpm_level].Freq : VPEClks[soc_dpm_level].Freq;

                        switch (idx) {
                        case 0:
                                pratio_vmin_freq = min_freq;
                                break;
                        case 1:
                                pratio_vmid_freq = min_freq;
                                break;
                        case 2:
                                pratio_vnorm_freq = min_freq;
                                break;
                        case 3:
                                pratio_vmax_freq = min_freq;
                                break;
                        default:
                                break;
                        }
                }

                if (pratio_vmin_freq && pratio_vmid_freq && pratio_vnorm_freq && pratio_vmax_freq) {
                        uint32_t pratio_ctl;

                        pratio_vmax_vnorm = (uint32_t)vpe_internal_get_pratio(pratio_vmax_freq, pratio_vnorm_freq);
                        pratio_vnorm_vmid = (uint32_t)vpe_internal_get_pratio(pratio_vnorm_freq, pratio_vmid_freq);
                        pratio_vmid_vmin = (uint32_t)vpe_internal_get_pratio(pratio_vmid_freq, pratio_vmin_freq);

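                        /*
                         * Each pratio is a 9-bit U1.8 value (1 integer bit
                         * plus 8 fractional bits), so the three ratios are
                         * packed at bit offsets 0, 9 and 18.
                         */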
                        pratio_ctl = pratio_vmax_vnorm | (pratio_vnorm_vmid << 9) | (pratio_vmid_vmin << 18);
                        WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl);           /* PRatio */
                        WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000);      /* 1ms, unit=1/24MHz */
                        WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000);  /* 50ms */
                        WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */
                        WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */
                        dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__);
                } else {
                        dev_dbg(adev->dev, "%s: invalid pratio parameters!\n", __func__);
                        goto disable_dpm;
                }
        }
        return 0;

disable_dpm:
        dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
        dpm_ctl &= 0xfffffffe; /* Disable DPM */
        WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
        dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
        return -EINVAL;
}

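/*
 * Hand the VPE firmware load off to the PSP. The hard-coded ucode_size of
 * 8 presumably covers the small load command placed in the VPE command
 * buffer (cmdbuf_gpu_addr) rather than the firmware image itself.
 */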
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
{
        struct amdgpu_firmware_info ucode = {
                .ucode_id = AMDGPU_UCODE_ID_VPE,
                .mc_addr = adev->vpe.cmdbuf_gpu_addr,
                .ucode_size = 8,
        };

        return psp_execute_ip_fw_load(&adev->psp, &ucode);
}

int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe)
{
        struct amdgpu_device *adev = vpe->ring.adev;
        const struct vpe_firmware_header_v1_0 *vpe_hdr;
        char fw_prefix[32];
        int ret;

        amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix));
        ret = amdgpu_ucode_request(adev, &adev->vpe.fw, "amdgpu/%s.bin", fw_prefix);
        if (ret)
                goto out;

        vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
        adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version);
        adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                struct amdgpu_firmware_info *info;

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTX];
                info->ucode_id = AMDGPU_UCODE_ID_VPE_CTX;
                info->fw = adev->vpe.fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTL];
                info->ucode_id = AMDGPU_UCODE_ID_VPE_CTL;
                info->fw = adev->vpe.fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
        }

        return 0;
out:
        dev_err(adev->dev, "failed to initialize vpe microcode\n");
        release_firmware(adev->vpe.fw);
        adev->vpe.fw = NULL;
        return ret;
}

int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe)
{
        struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
        struct amdgpu_ring *ring = &vpe->ring;
        int ret;

        ring->ring_obj = NULL;
        ring->use_doorbell = true;
        ring->vm_hub = AMDGPU_MMHUB0(0);
        ring->doorbell_index = (adev->doorbell_index.vpe_ring << 1);
        snprintf(ring->name, 4, "vpe");

        ret = amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0,
                             AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (ret)
                return ret;

        return 0;
}

int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe)
{
        amdgpu_ring_fini(&vpe->ring);

        return 0;
}

static int vpe_early_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        struct amdgpu_vpe *vpe = &adev->vpe;

        switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
        case IP_VERSION(6, 1, 0):
        case IP_VERSION(6, 1, 3):
                vpe_v6_1_set_funcs(vpe);
                break;
        case IP_VERSION(6, 1, 1):
                vpe_v6_1_set_funcs(vpe);
                vpe->collaborate_mode = true;
                break;
        default:
                return -EINVAL;
        }

        vpe_set_ring_funcs(adev);
        vpe_set_regs(vpe);

        dev_info(adev->dev, "VPE: collaborate mode %s\n", vpe->collaborate_mode ? "true" : "false");

        return 0;
}

static void vpe_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vpe.idle_work.work);
        unsigned int fences = 0;

        fences += amdgpu_fence_count_emitted(&adev->vpe.ring);

        if (fences == 0)
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
        else
                schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}

static int vpe_common_init(struct amdgpu_vpe *vpe)
{
        struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
        int r;

        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_GTT,
                                    &adev->vpe.cmdbuf_obj,
                                    &adev->vpe.cmdbuf_gpu_addr,
                                    (void **)&adev->vpe.cmdbuf_cpu_addr);
        if (r) {
                dev_err(adev->dev, "VPE: failed to allocate cmdbuf bo %d\n", r);
                return r;
        }

        vpe->context_started = false;
        INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler);

        return 0;
}

static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        struct amdgpu_vpe *vpe = &adev->vpe;
        int ret;

        ret = vpe_common_init(vpe);
        if (ret)
                goto out;

        ret = vpe_irq_init(vpe);
        if (ret)
                goto out;

        ret = vpe_ring_init(vpe);
        if (ret)
                goto out;

        ret = vpe_init_microcode(vpe);
        if (ret)
                goto out;

        /* TODO: Add queue reset mask when FW fully supports it */
        adev->vpe.supported_reset =
                 amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
        ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
        if (ret)
                goto out;
out:
        return ret;
}

static int vpe_sw_fini(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        struct amdgpu_vpe *vpe = &adev->vpe;

        release_firmware(vpe->fw);
        vpe->fw = NULL;

        amdgpu_vpe_sysfs_reset_mask_fini(adev);
        vpe_ring_fini(vpe);

        amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj,
                              &adev->vpe.cmdbuf_gpu_addr,
                              (void **)&adev->vpe.cmdbuf_cpu_addr);

        return 0;
}

static int vpe_hw_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        struct amdgpu_vpe *vpe = &adev->vpe;
        int ret;

        /* Power on VPE */
        ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
                                                     AMD_PG_STATE_UNGATE);
        if (ret)
                return ret;

        ret = vpe_load_microcode(vpe);
        if (ret)
                return ret;

        ret = vpe_ring_start(vpe);
        if (ret)
                return ret;

        return 0;
}

static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        struct amdgpu_vpe *vpe = &adev->vpe;

        vpe_ring_stop(vpe);

        /* Power off VPE */
        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);

        return 0;
}

static int vpe_suspend(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        cancel_delayed_work_sync(&adev->vpe.idle_work);

        return vpe_hw_fini(ip_block);
}

static int vpe_resume(struct amdgpu_ip_block *ip_block)
{
        return vpe_hw_init(ip_block);
}

static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        int i;

        for (i = 0; i < count; i++)
                if (i == 0)
                        amdgpu_ring_write(ring, ring->funcs->nop |
                                VPE_CMD_NOP_HEADER_COUNT(count - 1));
                else
                        amdgpu_ring_write(ring, ring->funcs->nop);
}

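/*
 * Return the GPU address of the context save area used for
 * mid-command-buffer preemption, or 0 when no CSA applies (SR-IOV,
 * the kernel VMID 0, or MCBP disabled).
 */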
static uint64_t vpe_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t vmid)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t index = 0;
        uint64_t csa_mc_addr;

        if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
                return 0;

        csa_mc_addr = amdgpu_csa_vaddr(adev) + AMDGPU_CSA_VPE_OFFSET +
                      index * AMDGPU_CSA_VPE_SIZE;

        return csa_mc_addr;
}

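/*
 * In collaborate mode, prefix the following exec_count ring dwords with a
 * predicated-execution packet so that, presumably, only the VPE instance
 * selected by device_select executes them.
 */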
static void vpe_ring_emit_pred_exec(struct amdgpu_ring *ring,
                                    uint32_t device_select,
                                    uint32_t exec_count)
{
        if (!ring->adev->vpe.collaborate_mode)
                return;

        amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_PRED_EXE, 0) |
                                (device_select << 16));
        amdgpu_ring_write(ring, exec_count & 0x1fff);
}

static void vpe_ring_emit_ib(struct amdgpu_ring *ring,
                             struct amdgpu_job *job,
                             struct amdgpu_ib *ib,
                             uint32_t flags)
{
        uint32_t vmid = AMDGPU_JOB_GET_VMID(job);
        uint64_t csa_mc_addr = vpe_get_csa_mc_addr(ring, vmid);

        amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0) |
                                VPE_CMD_INDIRECT_HEADER_VMID(vmid & 0xf));

        /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
        amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
        amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}

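/*
 * Emit a fence packet. For 64-bit fences the loop runs twice, writing the
 * low dword of the sequence number at @addr and the high dword at @addr + 4.
 */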
static void vpe_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr,
                                uint64_t seq, unsigned int flags)
{
        int i = 0;

        do {
                /* write the fence */
                amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
                /* zero in first two bits */
                WARN_ON_ONCE(addr & 0x3);
                amdgpu_ring_write(ring, lower_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(addr));
                amdgpu_ring_write(ring, i == 0 ? lower_32_bits(seq) : upper_32_bits(seq));
                addr += 4;
        } while ((flags & AMDGPU_FENCE_FLAG_64BIT) && (i++ < 1));

        if (flags & AMDGPU_FENCE_FLAG_INT) {
                /* generate an interrupt */
                amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_TRAP, 0));
                amdgpu_ring_write(ring, 0);
        }
}

static void vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        vpe_ring_emit_pred_exec(ring, 0, 6);

        /* wait for idle */
        amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
                                VPE_POLL_REGMEM_SUBOP_REGMEM) |
                                VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
                                VPE_CMD_POLL_REGMEM_HEADER_MEM(1));
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, seq); /* reference */
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                                VPE_CMD_POLL_REGMEM_DW5_INTERVAL(4));
}

static void vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
        vpe_ring_emit_pred_exec(ring, 0, 3);

        amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_REG_WRITE, 0));
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, val);
}

static void vpe_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
                                   uint32_t val, uint32_t mask)
{
        vpe_ring_emit_pred_exec(ring, 0, 6);

        amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
                                VPE_POLL_REGMEM_SUBOP_REGMEM) |
                                VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
                                VPE_CMD_POLL_REGMEM_HEADER_MEM(0));
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, val); /* reference */
        amdgpu_ring_write(ring, mask); /* mask */
        amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                                VPE_CMD_POLL_REGMEM_DW5_INTERVAL(10));
}

static void vpe_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid,
                                   uint64_t pd_addr)
{
        amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

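/*
 * Emit a conditional-execution packet and return the ring offset of its
 * count dword, which the caller can patch later to enable or skip the
 * commands that follow.
 */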
static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring,
                                            uint64_t addr)
{
        unsigned int ret;

        amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, 1);
        ret = ring->wptr & ring->buf_mask;
        amdgpu_ring_write(ring, 0);

        return ret;
}

static int vpe_ring_preempt_ib(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vpe *vpe = &adev->vpe;
        uint32_t preempt_reg = vpe->regs.queue0_preempt;
        int i, r = 0;

        /* assert preemption condition */
        amdgpu_ring_set_preempt_cond_exec(ring, false);

        /* emit the trailing fence */
        ring->trail_seq += 1;
        amdgpu_ring_alloc(ring, 10);
        vpe_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, 0);
        amdgpu_ring_commit(ring);

        /* assert IB preemption */
        WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1);

        /* poll the trailing fence */
        for (i = 0; i < adev->usec_timeout; i++) {
                if (ring->trail_seq ==
                    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout) {
                r = -EINVAL;
                dev_err(adev->dev, "ring %d failed to be preempted\n", ring->idx);
        }

        /* deassert IB preemption */
        WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0);

        /* deassert the preemption condition */
        amdgpu_ring_set_preempt_cond_exec(ring, true);

        return r;
}

static int vpe_set_clockgating_state(void *handle,
                                     enum amd_clockgating_state state)
{
        return 0;
}

static int vpe_set_powergating_state(void *handle,
                                     enum amd_powergating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_vpe *vpe = &adev->vpe;

        if (!adev->pm.dpm_enabled)
                dev_err(adev->dev, "Cannot support powergating without PM\n");

        dev_dbg(adev->dev, "%s: %s!\n", __func__, (state == AMD_PG_STATE_GATE) ? "GATE" : "UNGATE");

        if (state == AMD_PG_STATE_GATE) {
                amdgpu_dpm_enable_vpe(adev, false);
                vpe->context_started = false;
        } else {
                amdgpu_dpm_enable_vpe(adev, true);
        }

        return 0;
}

static uint64_t vpe_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vpe *vpe = &adev->vpe;
        uint64_t rptr;

        if (ring->use_doorbell) {
                rptr = atomic64_read((atomic64_t *)ring->rptr_cpu_addr);
                dev_dbg(adev->dev, "rptr/doorbell before shift == 0x%016llx\n", rptr);
        } else {
                rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi));
                rptr = rptr << 32;
                rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo));
                dev_dbg(adev->dev, "rptr before shift [%i] == 0x%016llx\n", ring->me, rptr);
        }

        return (rptr >> 2);
}

static uint64_t vpe_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vpe *vpe = &adev->vpe;
        uint64_t wptr;

        if (ring->use_doorbell) {
                wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
                dev_dbg(adev->dev, "wptr/doorbell before shift == 0x%016llx\n", wptr);
        } else {
                wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi));
                wptr = wptr << 32;
                wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo));
                dev_dbg(adev->dev, "wptr before shift [%i] == 0x%016llx\n", ring->me, wptr);
        }

        return (wptr >> 2);
}

static void vpe_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vpe *vpe = &adev->vpe;

        if (ring->use_doorbell) {
                dev_dbg(adev->dev, "Using doorbell, \
                        wptr_offs == 0x%08x, \
                        lower_32_bits(ring->wptr) << 2 == 0x%08x, \
                        upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
                        ring->wptr_offs,
                        lower_32_bits(ring->wptr << 2),
                        upper_32_bits(ring->wptr << 2));
                atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2);
                WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
                if (vpe->collaborate_mode)
                        WDOORBELL64(ring->doorbell_index + 4, ring->wptr << 2);
        } else {
                int i;

                for (i = 0; i < vpe->num_instances; i++) {
                        dev_dbg(adev->dev, "Not using doorbell, \
                                regVPEC_QUEUE0_RB_WPTR == 0x%08x, \
                                regVPEC_QUEUE0_RB_WPTR_HI == 0x%08x\n",
                                lower_32_bits(ring->wptr << 2),
                                upper_32_bits(ring->wptr << 2));
                        WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo),
                               lower_32_bits(ring->wptr << 2));
                        WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi),
                               upper_32_bits(ring->wptr << 2));
                }
        }
}

static int vpe_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        const uint32_t test_pattern = 0xdeadbeef;
        uint32_t index, i;
        uint64_t wb_addr;
        int ret;

        ret = amdgpu_device_wb_get(adev, &index);
        if (ret) {
                dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret);
                return ret;
        }

        adev->wb.wb[index] = 0;
        wb_addr = adev->wb.gpu_addr + (index * 4);

        ret = amdgpu_ring_alloc(ring, 4);
        if (ret) {
                dev_err(adev->dev, "amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, ret);
                goto out;
        }

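        /* use a fence packet to write the test pattern into the writeback slot */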
        amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
        amdgpu_ring_write(ring, lower_32_bits(wb_addr));
        amdgpu_ring_write(ring, upper_32_bits(wb_addr));
        amdgpu_ring_write(ring, test_pattern);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (le32_to_cpu(adev->wb.wb[index]) == test_pattern)
                        goto out;
                udelay(1);
        }

        ret = -ETIMEDOUT;
out:
        amdgpu_device_wb_free(adev, index);

        return ret;
}

static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        const uint32_t test_pattern = 0xdeadbeef;
        struct amdgpu_ib ib = {};
        struct dma_fence *f = NULL;
        uint32_t index;
        uint64_t wb_addr;
        int ret;

        ret = amdgpu_device_wb_get(adev, &index);
        if (ret) {
                dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret);
                return ret;
        }

        adev->wb.wb[index] = 0;
        wb_addr = adev->wb.gpu_addr + (index * 4);

        ret = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
        if (ret)
                goto err0;

        ib.ptr[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0);
        ib.ptr[1] = lower_32_bits(wb_addr);
        ib.ptr[2] = upper_32_bits(wb_addr);
        ib.ptr[3] = test_pattern;
        ib.ptr[4] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
        ib.ptr[5] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
        ib.ptr[6] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
        ib.ptr[7] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
        ib.length_dw = 8;

        ret = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (ret)
                goto err1;

        ret = dma_fence_wait_timeout(f, false, timeout);
        if (ret <= 0) {
                ret = ret ? : -ETIMEDOUT;
                goto err1;
        }

        ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL;

err1:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err0:
        amdgpu_device_wb_free(adev, index);

        return ret;
}

static void vpe_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vpe *vpe = &adev->vpe;

        cancel_delayed_work_sync(&adev->vpe.idle_work);

        /* Power on VPE and notify VPE of new context */
        if (!vpe->context_started) {
                uint32_t context_notify;

                /* Power on VPE */
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_UNGATE);

                /* Indicate that a job from a new context has been submitted
                 * by toggling the context-indicator bit.
                 */
                context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator));
                if ((context_notify & 0x1) == 0)
                        context_notify |= 0x1;
                else
                        context_notify &= ~(0x1);
                WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify);
                vpe->context_started = true;
        }
}

static void vpe_ring_end_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}

static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev,
                                                struct device_attribute *attr,
                                                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        if (!adev)
                return -ENODEV;

        return amdgpu_show_reset_mask(buf, adev->vpe.supported_reset);
}

static DEVICE_ATTR(vpe_reset_mask, 0444,
                   amdgpu_get_vpe_reset_mask, NULL);

int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev)
{
        int r = 0;

        if (adev->vpe.num_instances) {
                r = device_create_file(adev->dev, &dev_attr_vpe_reset_mask);
                if (r)
                        return r;
        }

        return r;
}

void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev)
{
        if (adev->dev->kobj.sd) {
                if (adev->vpe.num_instances)
                        device_remove_file(adev->dev, &dev_attr_vpe_reset_mask);
        }
}

static const struct amdgpu_ring_funcs vpe_ring_funcs = {
        .type = AMDGPU_RING_TYPE_VPE,
        .align_mask = 0xf,
        .nop = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0),
        .support_64bit_ptrs = true,
        .get_rptr = vpe_ring_get_rptr,
        .get_wptr = vpe_ring_get_wptr,
        .set_wptr = vpe_ring_set_wptr,
        .emit_frame_size =
                5 + /* vpe_ring_init_cond_exec */
                6 + /* vpe_ring_emit_pipeline_sync */
                10 + 10 + 10 + /* vpe_ring_emit_fence */
                /* vpe_ring_emit_vm_flush */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6,
        .emit_ib_size = 7 + 6,
        .emit_ib = vpe_ring_emit_ib,
        .emit_pipeline_sync = vpe_ring_emit_pipeline_sync,
        .emit_fence = vpe_ring_emit_fence,
        .emit_vm_flush = vpe_ring_emit_vm_flush,
        .emit_wreg = vpe_ring_emit_wreg,
        .emit_reg_wait = vpe_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
        .insert_nop = vpe_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .test_ring = vpe_ring_test_ring,
        .test_ib = vpe_ring_test_ib,
        .init_cond_exec = vpe_ring_init_cond_exec,
        .preempt_ib = vpe_ring_preempt_ib,
        .begin_use = vpe_ring_begin_use,
        .end_use = vpe_ring_end_use,
};

static void vpe_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->vpe.ring.funcs = &vpe_ring_funcs;
}

const struct amd_ip_funcs vpe_ip_funcs = {
        .name = "vpe_v6_1",
        .early_init = vpe_early_init,
        .sw_init = vpe_sw_init,
        .sw_fini = vpe_sw_fini,
        .hw_init = vpe_hw_init,
        .hw_fini = vpe_hw_fini,
        .suspend = vpe_suspend,
        .resume = vpe_resume,
        .set_clockgating_state = vpe_set_clockgating_state,
        .set_powergating_state = vpe_set_powergating_state,
};

const struct amdgpu_ip_block_version vpe_v6_1_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_VPE,
        .major = 6,
        .minor = 1,
        .rev = 0,
        .funcs = &vpe_ip_funcs,
};