/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_GPU_H__
#define __ETNAVIV_GPU_H__

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_drv.h"
#include "common.xml.h"

struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;
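
/*
 * Chip identification and feature data, filled from the hardware
 * registers or the hwdb at probe time.
 */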
struct etnaviv_chip_identity {
	u32 model;
	u32 revision;

	/* Supported feature fields. */
	u32 features;
	/* Supported minor feature fields. */
	u32 minor_features0;
	/* Number of streams supported. */
	u32 stream_count;
	/* Total number of temporary registers per thread. */
	u32 register_max;
	/* Maximum number of threads. */
	u32 thread_count;
	/* Number of shader cores. */
	u32 shader_core_count;
	/* Number of Neural Network cores. */
	u32 nn_core_count;
	/* Size of the vertex cache. */
	u32 vertex_cache_size;
	/* Number of entries in the vertex output buffer. */
	u32 vertex_output_buffer_size;
	/* Number of pixel pipes. */
	u32 pixel_pipes;
	/* Number of instructions. */
	u32 instruction_count;
	/* Number of constants. */
	u32 num_constants;
	/* Number of varyings. */
	u32 varyings_count;
};

enum etnaviv_sec_mode {
	ETNA_SEC_NONE = 0,
	ETNA_SEC_KERNEL,
	ETNA_SEC_TZ
};
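
/*
 * An in-flight hardware event: the fence signalled on completion, the
 * submit that owns it and an optional sync point callback run from the
 * sync_point worker.
 */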
struct etnaviv_event {
	struct dma_fence *fence;
	struct etnaviv_gem_submit *submit;

	void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};

struct etnaviv_cmdbuf_suballoc;
struct regulator;
struct clk;
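
/* Number of GPU event slots; bounds event_bitmap and event[] below. */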
#define ETNA_NR_EVENTS 30
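
/* State of one GPU core. */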
struct etnaviv_gpu {
	struct drm_device *drm;
	struct thermal_cooling_device *cooling;
	struct device *dev;
	struct mutex lock;
	struct etnaviv_chip_identity identity;
	enum etnaviv_sec_mode sec_mode;
	struct workqueue_struct *wq;
	struct mutex sched_lock;
	struct drm_gpu_scheduler sched;

	/* 'ring'-buffer: */
	struct etnaviv_cmdbuf buffer;
	int exec_state;

	/* event management: */
	DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
	struct etnaviv_event event[ETNA_NR_EVENTS];
	struct completion event_free;
	spinlock_t event_spinlock;

	/* Fencing support */
	struct xarray user_fences;
	u32 next_fence;
	u32 completed_fence;
	wait_queue_head_t fence_event;
	u64 fence_context;
	spinlock_t fence_spinlock;

	/* worker for handling 'sync' points: */
	struct work_struct sync_point_work;
	int sync_point_event;

	/* hang detection */
	u32 hangcheck_dma_addr;

	void __iomem *mmio;
	int irq;

	struct etnaviv_iommu_context *mmu_context;
	unsigned int flush_seq;

	/* Power Control: */
	struct clk *clk_bus;
	struct clk *clk_reg;
	struct clk *clk_core;
	struct clk *clk_shader;

	unsigned int freq_scale;
	unsigned long base_rate_core;
	unsigned long base_rate_shader;
};
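
/* MMIO accessors for the GPU register space. */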
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + reg);
}

static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + reg);
}

static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
{
	/* Power registers in GC300 < 2.0 are offset by 0x100 */
	if (gpu->identity.model == chipModel_GC300 &&
	    gpu->identity.revision < 0x2000)
		reg += 0x100;

	return reg;
}
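
/* Power register accessors; these apply the GC300 address fixup above. */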
static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg));
}

static inline u32 gpu_read_power(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + gpu_fix_power_address(gpu, reg));
}

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);

#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif

void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct drm_etnaviv_timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj,
	struct drm_etnaviv_timespec *timeout);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);

extern struct platform_driver etnaviv_gpu_driver;

#endif /* __ETNAVIV_GPU_H__ */