/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef __AMDGPU_JPEG_H__
#define __AMDGPU_JPEG_H__

#include "amdgpu_ras.h"

#define AMDGPU_MAX_JPEG_INSTANCES	4
#define AMDGPU_MAX_JPEG_RINGS		8

#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
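/*
 * DPG (dynamic power gating) LMA register access helpers.
 *
 * WREG32_SOC15_JPEG_DPG_MODE() programs a JPEG register through the
 * UVD_DPG_LMA_DATA/UVD_DPG_LMA_CTL pair when @indirect is 0; when
 * @indirect is non-zero it instead appends the offset/value pair to the
 * per-instance DPG SRAM buffer (dpg_sram_curr_addr), which is typically
 * handed to the PSP later via amdgpu_jpeg_psp_update_sram().
 */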
#define WREG32_SOC15_JPEG_DPG_MODE(inst_idx, offset, value, indirect)		\
	do {									\
		if (!indirect) {						\
			WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx),		\
				     mmUVD_DPG_LMA_DATA, value);		\
			WREG32_SOC15(						\
				JPEG, GET_INST(JPEG, inst_idx),			\
				mmUVD_DPG_LMA_CTL,				\
				(UVD_DPG_LMA_CTL__READ_WRITE_MASK |		\
				 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT | \
				 indirect << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
		} else {							\
			*adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = offset; \
			*adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = value; \
		}								\
	} while (0)
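/*
 * Read a JPEG register through the DPG LMA interface; expands to a
 * statement expression whose value is the register contents.
 */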
#define RREG32_SOC15_JPEG_DPG_MODE(inst_idx, offset, mask_en)			\
	({									\
		WREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_CTL,		\
			(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |		\
			 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |		\
			 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));	\
		RREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_DATA);		\
	})
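/*
 * SOC24 variants of the helpers above: they additionally program
 * UVD_DPG_LMA_MASK and use the reg* register name scheme.
 */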
#define WREG32_SOC24_JPEG_DPG_MODE(inst_idx, offset, value, indirect)		\
	do {									\
		WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx),			\
			     regUVD_DPG_LMA_DATA, value);			\
		WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx),			\
			     regUVD_DPG_LMA_MASK, 0xFFFFFFFF);			\
		WREG32_SOC15(							\
			JPEG, GET_INST(JPEG, inst_idx),				\
			regUVD_DPG_LMA_CTL,					\
			(UVD_DPG_LMA_CTL__READ_WRITE_MASK |			\
			 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT |	\
			 indirect << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT));	\
	} while (0)
#define RREG32_SOC24_JPEG_DPG_MODE(inst_idx, offset, mask_en)			\
	({									\
		WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx),			\
			     regUVD_DPG_LMA_MASK, 0xFFFFFFFF);			\
		WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx),			\
			     regUVD_DPG_LMA_CTL,				\
			     (UVD_DPG_LMA_CTL__MASK_EN_MASK |			\
			      offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
		RREG32_SOC15(JPEG, inst_idx, regUVD_DPG_LMA_DATA);		\
	})
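/*
 * Queue an offset/value pair into the per-instance DPG SRAM buffer
 * without touching the LMA registers (indirect programming only).
 */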
#define ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, offset, value, indirect)		\
	do {									\
		*adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = offset;	\
		*adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = value;	\
	} while (0)
struct amdgpu_jpeg_reg {
	unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS];
};
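/* Per-instance state: decode rings, interrupt sources and DPG SRAM bookkeeping. */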
struct amdgpu_jpeg_inst {
	struct amdgpu_ring ring_dec[AMDGPU_MAX_JPEG_RINGS];
	struct amdgpu_irq_src irq;
	struct amdgpu_irq_src ras_poison_irq;
	struct amdgpu_jpeg_reg external;
	struct amdgpu_bo *dpg_sram_bo;
	struct dpg_pause_state pause_state;
	void *dpg_sram_cpu_addr;
	uint64_t dpg_sram_gpu_addr;
	uint32_t *dpg_sram_curr_addr;
};
struct amdgpu_jpeg_ras {
	struct amdgpu_ras_block_object ras_block;
};
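/* Top-level JPEG IP block state, referenced from the device as adev->jpeg. */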
struct amdgpu_jpeg {
	uint8_t num_jpeg_inst;
	struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
	unsigned num_jpeg_rings;
	struct amdgpu_jpeg_reg internal;
	unsigned harvest_config;
	struct delayed_work idle_work;
	enum amd_powergating_state cur_state;
	struct mutex jpeg_pg_lock;
	atomic_t total_submission_cnt;
	struct ras_common_if *ras_if;
	struct amdgpu_jpeg_ras *ras;

	uint8_t num_inst_per_aid;

	uint32_t supported_reset;
};
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev);
int amdgpu_jpeg_suspend(struct amdgpu_device *adev);
int amdgpu_jpeg_resume(struct amdgpu_device *adev);

void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring);

int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);

int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry);
int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
			      struct ras_common_if *ras_block);
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
				enum AMDGPU_UCODE_ID ucode_id);
void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev);
int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev);
void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev);
#endif /*__AMDGPU_JPEG_H__*/