/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */
6 #ifndef __ETNAVIV_GPU_H__
7 #define __ETNAVIV_GPU_H__
9 #include "etnaviv_cmdbuf.h"
10 #include "etnaviv_gem.h"
11 #include "etnaviv_mmu.h"
12 #include "etnaviv_drv.h"
13 #include "common.xml.h"
14 #include "state.xml.h"
16 struct etnaviv_gem_submit;
17 struct etnaviv_vram_mapping;
19 struct etnaviv_chip_identity {
26 /* Supported feature fields. */
29 /* Supported minor feature fields. */
43 /* Number of streams supported. */
46 /* Total number of temporary registers per thread. */
49 /* Maximum number of threads. */
52 /* Number of shader cores. */
53 u32 shader_core_count;
55 /* Number of Neural Network cores. */
58 /* Size of the vertex cache. */
59 u32 vertex_cache_size;
61 /* Number of entries in the vertex output buffer. */
62 u32 vertex_output_buffer_size;
64 /* Number of pixel pipes. */
67 /* Number of instructions. */
68 u32 instruction_count;
70 /* Number of constants. */
76 /* Number of varyings */
/* Security state of the GPU: no security, kernel-mediated secure mode,
 * or TrustZone-mediated secure mode.
 */
enum etnaviv_sec_mode {
	ETNA_SEC_NONE = 0,
	ETNA_SEC_KERNEL,
	ETNA_SEC_TZ
};
/* Per-event-slot bookkeeping: the fence/submit associated with a GPU
 * event and an optional sync-point callback run when the event fires.
 */
struct etnaviv_event {
	struct dma_fence *fence;
	struct etnaviv_gem_submit *submit;

	/* called from the sync-point worker when this event signals */
	void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};
93 struct etnaviv_cmdbuf_suballoc;
98 #define ETNA_NR_EVENTS 30
/* Lifecycle state machine of one GPU core, advanced during probe,
 * init, runtime operation and hang recovery.
 */
enum etnaviv_gpu_state {
	ETNA_GPU_STATE_UNKNOWN = 0,
	ETNA_GPU_STATE_IDENTIFIED,
	ETNA_GPU_STATE_RESET,
	ETNA_GPU_STATE_INITIALIZED,
	ETNA_GPU_STATE_RUNNING,
	ETNA_GPU_STATE_FAULT,
};
110 struct drm_device *drm;
111 struct thermal_cooling_device *cooling;
114 struct etnaviv_chip_identity identity;
115 enum etnaviv_sec_mode sec_mode;
116 struct workqueue_struct *wq;
117 struct mutex sched_lock;
118 struct drm_gpu_scheduler sched;
119 enum etnaviv_gpu_state state;
122 struct etnaviv_cmdbuf buffer;
125 /* event management: */
126 DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
127 struct etnaviv_event event[ETNA_NR_EVENTS];
128 struct completion event_free;
129 spinlock_t event_spinlock;
133 /* Fencing support */
134 struct xarray user_fences;
138 wait_queue_head_t fence_event;
140 spinlock_t fence_spinlock;
142 /* worker for handling 'sync' points: */
143 struct work_struct sync_point_work;
144 int sync_point_event;
147 u32 hangcheck_dma_addr;
148 u32 hangcheck_primid;
154 struct etnaviv_iommu_context *mmu_context;
155 unsigned int flush_seq;
160 struct clk *clk_core;
161 struct clk *clk_shader;
162 struct reset_control *rst;
164 unsigned int freq_scale;
165 unsigned int fe_waitcycles;
166 unsigned long base_rate_core;
167 unsigned long base_rate_shader;
170 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
172 writel(data, gpu->mmio + reg);
175 static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
177 /* On some variants, such as the GC7000r6009, some FE registers
178 * need two reads to be consistent. Do that extra read here and
179 * throw away the result.
181 if (reg >= VIVS_FE_DMA_STATUS && reg <= VIVS_FE_AUTO_FLUSH)
182 readl(gpu->mmio + reg);
184 return readl(gpu->mmio + reg);
187 static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
189 /* Power registers in GC300 < 2.0 are offset by 0x100 */
190 if (gpu->identity.model == chipModel_GC300 &&
191 gpu->identity.revision < 0x2000)
197 static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
199 writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg));
202 static inline u32 gpu_read_power(struct etnaviv_gpu *gpu, u32 reg)
204 return readl(gpu->mmio + gpu_fix_power_address(gpu, reg));
207 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
209 int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
210 bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);
/* debugfs dump of GPU state; only built when DEBUG_FS is enabled */
#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif
216 void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit);
217 void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
218 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
219 u32 fence, struct drm_etnaviv_timespec *timeout);
220 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
221 struct etnaviv_gem_object *etnaviv_obj,
222 struct drm_etnaviv_timespec *timeout);
223 struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
224 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
225 void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
226 int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
227 void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);
229 extern struct platform_driver etnaviv_gpu_driver;
231 #endif /* __ETNAVIV_GPU_H__ */