/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2018 Marty E. Plummer <[email protected]> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */
/* Copyright 2023 Collabora ltd. */

#ifndef __PANTHOR_DEVICE_H__
#define __PANTHOR_DEVICE_H__

#include <linux/atomic.h>
#include <linux/io-pgtable.h>
#include <linux/regulator/consumer.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <drm/drm_device.h>
#include <drm/drm_mm.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>

struct panthor_csf;
struct panthor_csf_ctx;
struct panthor_device;
struct panthor_gpu;
struct panthor_group_pool;
struct panthor_heap_pool;
struct panthor_job;
struct panthor_mmu;
struct panthor_fw;
struct panthor_perfcnt;
struct panthor_vm;
struct panthor_vm_pool;

/**
 * enum panthor_device_pm_state - PM state
 */
enum panthor_device_pm_state {
        /** @PANTHOR_DEVICE_PM_STATE_SUSPENDED: Device is suspended. */
        PANTHOR_DEVICE_PM_STATE_SUSPENDED = 0,

        /** @PANTHOR_DEVICE_PM_STATE_RESUMING: Device is being resumed. */
        PANTHOR_DEVICE_PM_STATE_RESUMING,

        /** @PANTHOR_DEVICE_PM_STATE_ACTIVE: Device is active. */
        PANTHOR_DEVICE_PM_STATE_ACTIVE,

        /** @PANTHOR_DEVICE_PM_STATE_SUSPENDING: Device is being suspended. */
        PANTHOR_DEVICE_PM_STATE_SUSPENDING,
};

/**
 * struct panthor_irq - IRQ data
 *
 * Used to automate IRQ handling for the 3 different IRQs we have in this driver.
 */
struct panthor_irq {
        /** @ptdev: Panthor device. */
        struct panthor_device *ptdev;

        /** @irq: IRQ number. */
        int irq;

        /** @mask: Current mask being applied to xxx_INT_MASK. */
        u32 mask;

        /** @suspended: Set to true when the IRQ is suspended. */
        atomic_t suspended;
};

/**
 * enum panthor_device_profiling_flags - Profiling state flags
 */
enum panthor_device_profiling_flags {
        /** @PANTHOR_DEVICE_PROFILING_DISABLED: Profiling is disabled. */
        PANTHOR_DEVICE_PROFILING_DISABLED = 0,

        /** @PANTHOR_DEVICE_PROFILING_CYCLES: Sampling job cycles. */
        PANTHOR_DEVICE_PROFILING_CYCLES = BIT(0),

        /** @PANTHOR_DEVICE_PROFILING_TIMESTAMP: Sampling job timestamp. */
        PANTHOR_DEVICE_PROFILING_TIMESTAMP = BIT(1),

        /** @PANTHOR_DEVICE_PROFILING_ALL: Sampling everything. */
        PANTHOR_DEVICE_PROFILING_ALL =
                PANTHOR_DEVICE_PROFILING_CYCLES |
                PANTHOR_DEVICE_PROFILING_TIMESTAMP,
};
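
/*
 * A minimal sketch of how these flags are meant to be consumed (the
 * sampling helpers are hypothetical, not actual driver code): job
 * submission paths check the user-controlled panthor_device::profile_mask
 * before paying the cost of a sample:
 *
 *        if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES)
 *                sample_job_cycles(job);
 *        if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
 *                sample_job_timestamp(job);
 */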

/**
 * struct panthor_device - Panthor device
 */
struct panthor_device {
        /** @base: Base drm_device. */
        struct drm_device base;

        /** @phys_addr: Physical address of the iomem region. */
        phys_addr_t phys_addr;

        /** @iomem: CPU mapping of the IOMEM region. */
        void __iomem *iomem;

        /** @clks: GPU clocks. */
        struct {
                /** @core: Core clock. */
                struct clk *core;

                /** @stacks: Stacks clock. This clock is optional. */
                struct clk *stacks;

                /** @coregroup: Core group clock. This clock is optional. */
                struct clk *coregroup;
        } clks;

        /** @coherent: True if the CPU/GPU are memory coherent. */
        bool coherent;

        /** @gpu_info: GPU information. */
        struct drm_panthor_gpu_info gpu_info;

        /** @csif_info: Command stream interface information. */
        struct drm_panthor_csif_info csif_info;

        /** @gpu: GPU management data. */
        struct panthor_gpu *gpu;

        /** @fw: FW management data. */
        struct panthor_fw *fw;

        /** @mmu: MMU management data. */
        struct panthor_mmu *mmu;

        /** @scheduler: Scheduler management data. */
        struct panthor_scheduler *scheduler;

        /** @devfreq: Device frequency scaling management data. */
        struct panthor_devfreq *devfreq;

        /** @unplug: Device unplug related fields. */
        struct {
                /** @lock: Lock used to serialize unplug operations. */
                struct mutex lock;

                /**
                 * @done: Completion object signaled when the unplug
                 * operation is done.
                 */
                struct completion done;
        } unplug;

        /** @reset: Reset related fields. */
        struct {
                /** @wq: Ordered workqueue used to schedule reset operations. */
                struct workqueue_struct *wq;

                /** @work: Reset work. */
                struct work_struct work;

                /** @pending: Set to true if a reset is pending. */
                atomic_t pending;
        } reset;

        /** @pm: Power management related data. */
        struct {
                /** @state: Power state. */
                atomic_t state;

                /**
                 * @mmio_lock: Lock protecting MMIO userspace CPU mappings.
                 *
                 * This is needed to ensure we map the dummy IO pages when
                 * the device is being suspended, and the real IO pages when
                 * the device is being resumed. The PM state atomic alone is
                 * not enough to deal with this race (see the usage sketch
                 * after this struct).
                 */
                struct mutex mmio_lock;

                /**
                 * @dummy_latest_flush: Dummy LATEST_FLUSH page.
                 *
                 * Used to replace the real LATEST_FLUSH page when the GPU
                 * is suspended.
                 */
                struct page *dummy_latest_flush;
        } pm;

        /** @profile_mask: User-set profiling flags for job accounting. */
        u32 profile_mask;

        /** @current_frequency: Current device clock frequency. Set by DVFS. */
        unsigned long current_frequency;

        /** @fast_rate: Maximum device clock frequency. Set by DVFS. */
        unsigned long fast_rate;
};

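/*
 * A minimal sketch of the suspend-side use of @pm.mmio_lock (illustrative
 * only, not the actual implementation): the lock makes the PM state change
 * and the zapping of userspace MMIO CPU mappings atomic with respect to the
 * fault handler, so the next fault maps the dummy pages instead of the real
 * IO region:
 *
 *        mutex_lock(&ptdev->pm.mmio_lock);
 *        atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
 *        unmap_mapping_range(ptdev->base.anon_inode->i_mapping, 0, 0, 1);
 *        mutex_unlock(&ptdev->pm.mmio_lock);
 */
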
/**
 * struct panthor_gpu_usage - GPU usage stats
 */
struct panthor_gpu_usage {
        /** @time: Accumulated job execution time. */
        u64 time;
        /** @cycles: Accumulated GPU cycles spent executing jobs. */
        u64 cycles;
};

/**
 * struct panthor_file - Panthor file
 */
struct panthor_file {
        /** @ptdev: Device attached to this file. */
        struct panthor_device *ptdev;

        /** @vms: VM pool attached to this file. */
        struct panthor_vm_pool *vms;

        /** @groups: Scheduling group pool attached to this file. */
        struct panthor_group_pool *groups;

        /** @stats: Cycle and timestamp measures for job execution. */
        struct panthor_gpu_usage stats;
};

int panthor_device_init(struct panthor_device *ptdev);
void panthor_device_unplug(struct panthor_device *ptdev);

/**
 * panthor_device_schedule_reset() - Schedules a reset operation
 * @ptdev: Device to reset.
 *
 * The reset is flagged as pending, but the reset work is only queued if
 * no reset was already pending and the device is active.
 */
static inline void panthor_device_schedule_reset(struct panthor_device *ptdev)
{
        if (!atomic_cmpxchg(&ptdev->reset.pending, 0, 1) &&
            atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE)
                queue_work(ptdev->reset.wq, &ptdev->reset.work);
}

/**
 * panthor_device_reset_is_pending() - Checks if a reset is pending.
 * @ptdev: Device to check.
 *
 * Return: true if a reset is pending, false otherwise.
 */
static inline bool panthor_device_reset_is_pending(struct panthor_device *ptdev)
{
        return atomic_read(&ptdev->reset.pending) != 0;
}
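
/*
 * Illustrative call sites (a sketch, not actual driver code): a fault
 * handler requests a reset, while a timeout path bails out early when a
 * reset is already queued, since the reset work will reschedule things:
 *
 *        panthor_device_schedule_reset(ptdev);
 *
 *        if (panthor_device_reset_is_pending(ptdev))
 *                return;
 */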

int panthor_device_mmap_io(struct panthor_device *ptdev,
                           struct vm_area_struct *vma);

int panthor_device_resume(struct device *dev);
int panthor_device_suspend(struct device *dev);

enum drm_panthor_exception_type {
        DRM_PANTHOR_EXCEPTION_OK = 0x00,
        DRM_PANTHOR_EXCEPTION_TERMINATED = 0x04,
        DRM_PANTHOR_EXCEPTION_KABOOM = 0x05,
        DRM_PANTHOR_EXCEPTION_EUREKA = 0x06,
        DRM_PANTHOR_EXCEPTION_ACTIVE = 0x08,
        DRM_PANTHOR_EXCEPTION_CS_RES_TERM = 0x0f,
        DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT = 0x3f,
        DRM_PANTHOR_EXCEPTION_CS_CONFIG_FAULT = 0x40,
        DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE = 0x41,
        DRM_PANTHOR_EXCEPTION_CS_ENDPOINT_FAULT = 0x44,
        DRM_PANTHOR_EXCEPTION_CS_BUS_FAULT = 0x48,
        DRM_PANTHOR_EXCEPTION_CS_INSTR_INVALID = 0x49,
        DRM_PANTHOR_EXCEPTION_CS_CALL_STACK_OVERFLOW = 0x4a,
        DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT = 0x4b,
        DRM_PANTHOR_EXCEPTION_INSTR_INVALID_PC = 0x50,
        DRM_PANTHOR_EXCEPTION_INSTR_INVALID_ENC = 0x51,
        DRM_PANTHOR_EXCEPTION_INSTR_BARRIER_FAULT = 0x55,
        DRM_PANTHOR_EXCEPTION_DATA_INVALID_FAULT = 0x58,
        DRM_PANTHOR_EXCEPTION_TILE_RANGE_FAULT = 0x59,
        DRM_PANTHOR_EXCEPTION_ADDR_RANGE_FAULT = 0x5a,
        DRM_PANTHOR_EXCEPTION_IMPRECISE_FAULT = 0x5b,
        DRM_PANTHOR_EXCEPTION_OOM = 0x60,
        DRM_PANTHOR_EXCEPTION_CSF_FW_INTERNAL_ERROR = 0x68,
        DRM_PANTHOR_EXCEPTION_CSF_RES_EVICTION_TIMEOUT = 0x69,
        DRM_PANTHOR_EXCEPTION_GPU_BUS_FAULT = 0x80,
        DRM_PANTHOR_EXCEPTION_GPU_SHAREABILITY_FAULT = 0x88,
        DRM_PANTHOR_EXCEPTION_SYS_SHAREABILITY_FAULT = 0x89,
        DRM_PANTHOR_EXCEPTION_GPU_CACHEABILITY_FAULT = 0x8a,
        DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_0 = 0xc0,
        DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_1 = 0xc1,
        DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_2 = 0xc2,
        DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_3 = 0xc3,
        DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_4 = 0xc4,
        DRM_PANTHOR_EXCEPTION_PERM_FAULT_0 = 0xc8,
        DRM_PANTHOR_EXCEPTION_PERM_FAULT_1 = 0xc9,
        DRM_PANTHOR_EXCEPTION_PERM_FAULT_2 = 0xca,
        DRM_PANTHOR_EXCEPTION_PERM_FAULT_3 = 0xcb,
        DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_1 = 0xd9,
        DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_2 = 0xda,
        DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_3 = 0xdb,
        DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_IN = 0xe0,
        DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT0 = 0xe4,
        DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT1 = 0xe5,
        DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT2 = 0xe6,
        DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT3 = 0xe7,
        DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_0 = 0xe8,
        DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_1 = 0xe9,
        DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_2 = 0xea,
        DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_3 = 0xeb,
};

/**
 * panthor_exception_is_fault() - Checks if an exception is a fault.
 * @exception_code: Exception code to check.
 *
 * Exception codes above DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT (0x3f) are
 * considered faults.
 *
 * Return: true if the exception is a fault, false otherwise.
 */
static inline bool
panthor_exception_is_fault(u32 exception_code)
{
        return exception_code > DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT;
}

const char *panthor_exception_name(struct panthor_device *ptdev,
                                   u32 exception_code);

/**
 * PANTHOR_IRQ_HANDLER() - Define interrupt handlers and the interrupt
 * registration function.
 * @__name: Short name of the HW block owning the IRQ; used to name the
 * generated functions.
 * @__reg_prefix: Prefix of the xxx_INT_* interrupt registers accessed by
 * the generated handlers.
 * @__handler: Function called by the threaded handler to process the
 * interrupt status.
 *
 * The boilerplate needed to gracefully deal with shared interrupts is
 * auto-generated. All you have to do is invoke PANTHOR_IRQ_HANDLER()
 * just after the actual handler, whose prototype is:
 *
 *        void (*handler)(struct panthor_device *, u32 status);
 */
#define PANTHOR_IRQ_HANDLER(__name, __reg_prefix, __handler)                                    \
static irqreturn_t panthor_ ## __name ## _irq_raw_handler(int irq, void *data)                  \
{                                                                                               \
        struct panthor_irq *pirq = data;                                                        \
        struct panthor_device *ptdev = pirq->ptdev;                                             \
                                                                                                \
        if (atomic_read(&pirq->suspended))                                                      \
                return IRQ_NONE;                                                                \
        if (!gpu_read(ptdev, __reg_prefix ## _INT_STAT))                                        \
                return IRQ_NONE;                                                                \
                                                                                                \
        gpu_write(ptdev, __reg_prefix ## _INT_MASK, 0);                                         \
        return IRQ_WAKE_THREAD;                                                                 \
}                                                                                               \
                                                                                                \
static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *data)             \
{                                                                                               \
        struct panthor_irq *pirq = data;                                                        \
        struct panthor_device *ptdev = pirq->ptdev;                                             \
        irqreturn_t ret = IRQ_NONE;                                                             \
                                                                                                \
        while (true) {                                                                          \
                u32 status = gpu_read(ptdev, __reg_prefix ## _INT_RAWSTAT) & pirq->mask;        \
                                                                                                \
                if (!status)                                                                    \
                        break;                                                                  \
                                                                                                \
                gpu_write(ptdev, __reg_prefix ## _INT_CLEAR, status);                           \
                                                                                                \
                __handler(ptdev, status);                                                       \
                ret = IRQ_HANDLED;                                                              \
        }                                                                                       \
                                                                                                \
        if (!atomic_read(&pirq->suspended))                                                     \
                gpu_write(ptdev, __reg_prefix ## _INT_MASK, pirq->mask);                        \
                                                                                                \
        return ret;                                                                             \
}                                                                                               \
                                                                                                \
static inline void panthor_ ## __name ## _irq_suspend(struct panthor_irq *pirq)                 \
{                                                                                               \
        pirq->mask = 0;                                                                         \
        gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0);                                   \
        synchronize_irq(pirq->irq);                                                             \
        atomic_set(&pirq->suspended, true);                                                     \
}                                                                                               \
                                                                                                \
static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq, u32 mask)        \
{                                                                                               \
        atomic_set(&pirq->suspended, false);                                                    \
        pirq->mask = mask;                                                                      \
        gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, mask);                               \
        gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, mask);                                \
}                                                                                               \
                                                                                                \
static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev,                     \
                                              struct panthor_irq *pirq,                         \
                                              int irq, u32 mask)                                \
{                                                                                               \
        pirq->ptdev = ptdev;                                                                    \
        pirq->irq = irq;                                                                        \
        panthor_ ## __name ## _irq_resume(pirq, mask);                                          \
                                                                                                \
        return devm_request_threaded_irq(ptdev->base.dev, irq,                                  \
                                         panthor_ ## __name ## _irq_raw_handler,                \
                                         panthor_ ## __name ## _irq_threaded_handler,           \
                                         IRQF_SHARED, KBUILD_MODNAME "-" # __name,              \
                                         pirq);                                                 \
}
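
/*
 * Example (an illustrative sketch: the `foo` block, its FOO_INT_* registers,
 * the foo_irq field and FOO_INTS_MASK are all made up):
 *
 *        static void panthor_foo_irq_handler(struct panthor_device *ptdev,
 *                                            u32 status)
 *        {
 *                ... process the events flagged in status ...
 *        }
 *        PANTHOR_IRQ_HANDLER(foo, FOO, panthor_foo_irq_handler);
 *
 * This generates panthor_foo_irq_raw_handler(),
 * panthor_foo_irq_threaded_handler(), panthor_foo_irq_suspend(),
 * panthor_foo_irq_resume() and panthor_request_foo_irq(), the last one
 * being what probe code calls to register the interrupt:
 *
 *        err = panthor_request_foo_irq(ptdev, &ptdev->foo_irq, irq,
 *                                      FOO_INTS_MASK);
 */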

extern struct workqueue_struct *panthor_cleanup_wq;

#endif /* __PANTHOR_DEVICE_H__ */