]> Git Repo - linux.git/blob - drivers/gpu/drm/panthor/panthor_device.h
Merge tag 'sound-6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai...
[linux.git] / drivers / gpu / drm / panthor / panthor_device.h
1 /* SPDX-License-Identifier: GPL-2.0 or MIT */
2 /* Copyright 2018 Marty E. Plummer <[email protected]> */
3 /* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */
4 /* Copyright 2023 Collabora ltd. */
5
6 #ifndef __PANTHOR_DEVICE_H__
7 #define __PANTHOR_DEVICE_H__
8
9 #include <linux/atomic.h>
10 #include <linux/io-pgtable.h>
11 #include <linux/regulator/consumer.h>
12 #include <linux/sched.h>
13 #include <linux/spinlock.h>
14
15 #include <drm/drm_device.h>
16 #include <drm/drm_mm.h>
17 #include <drm/gpu_scheduler.h>
18 #include <drm/panthor_drm.h>
19
20 struct panthor_csf;
21 struct panthor_csf_ctx;
22 struct panthor_device;
23 struct panthor_gpu;
24 struct panthor_group_pool;
25 struct panthor_heap_pool;
26 struct panthor_job;
27 struct panthor_mmu;
28 struct panthor_fw;
29 struct panthor_perfcnt;
30 struct panthor_vm;
31 struct panthor_vm_pool;
32
33 /**
34  * enum panthor_device_pm_state - PM state
35  */
36 enum panthor_device_pm_state {
37         /** @PANTHOR_DEVICE_PM_STATE_SUSPENDED: Device is suspended. */
38         PANTHOR_DEVICE_PM_STATE_SUSPENDED = 0,
39
40         /** @PANTHOR_DEVICE_PM_STATE_RESUMING: Device is being resumed. */
41         PANTHOR_DEVICE_PM_STATE_RESUMING,
42
43         /** @PANTHOR_DEVICE_PM_STATE_ACTIVE: Device is active. */
44         PANTHOR_DEVICE_PM_STATE_ACTIVE,
45
46         /** @PANTHOR_DEVICE_PM_STATE_SUSPENDING: Device is being suspended. */
47         PANTHOR_DEVICE_PM_STATE_SUSPENDING,
48 };
49
50 /**
51  * struct panthor_irq - IRQ data
52  *
53  * Used to automate IRQ handling for the 3 different IRQs we have in this driver.
54  */
55 struct panthor_irq {
56         /** @ptdev: Panthor device */
57         struct panthor_device *ptdev;
58
59         /** @irq: IRQ number. */
60         int irq;
61
62         /** @mask: Current mask being applied to xxx_INT_MASK. */
63         u32 mask;
64
65         /** @suspended: Set to true when the IRQ is suspended. */
66         atomic_t suspended;
67 };
68
69 /**
70  * struct panthor_device - Panthor device
71  */
72 struct panthor_device {
73         /** @base: Base drm_device. */
74         struct drm_device base;
75
76         /** @phys_addr: Physical address of the iomem region. */
77         phys_addr_t phys_addr;
78
79         /** @iomem: CPU mapping of the IOMEM region. */
80         void __iomem *iomem;
81
82         /** @clks: GPU clocks. */
83         struct {
84                 /** @core: Core clock. */
85                 struct clk *core;
86
87                 /** @stacks: Stacks clock. This clock is optional. */
88                 struct clk *stacks;
89
90                 /** @coregroup: Core group clock. This clock is optional. */
91                 struct clk *coregroup;
92         } clks;
93
94         /** @coherent: True if the CPU/GPU are memory coherent. */
95         bool coherent;
96
97         /** @gpu_info: GPU information. */
98         struct drm_panthor_gpu_info gpu_info;
99
100         /** @csif_info: Command stream interface information. */
101         struct drm_panthor_csif_info csif_info;
102
103         /** @gpu: GPU management data. */
104         struct panthor_gpu *gpu;
105
106         /** @fw: FW management data. */
107         struct panthor_fw *fw;
108
109         /** @mmu: MMU management data. */
110         struct panthor_mmu *mmu;
111
112         /** @scheduler: Scheduler management data. */
113         struct panthor_scheduler *scheduler;
114
115         /** @devfreq: Device frequency scaling management data. */
116         struct panthor_devfreq *devfreq;
117
118         /** @unplug: Device unplug related fields. */
119         struct {
120                 /** @lock: Lock used to serialize unplug operations. */
121                 struct mutex lock;
122
123                 /**
124                  * @done: Completion object signaled when the unplug
125                  * operation is done.
126                  */
127                 struct completion done;
128         } unplug;
129
130         /** @reset: Reset related fields. */
131         struct {
132                 /** @wq: Ordered worqueud used to schedule reset operations. */
133                 struct workqueue_struct *wq;
134
135                 /** @work: Reset work. */
136                 struct work_struct work;
137
138                 /** @pending: Set to true if a reset is pending. */
139                 atomic_t pending;
140         } reset;
141
142         /** @pm: Power management related data. */
143         struct {
144                 /** @state: Power state. */
145                 atomic_t state;
146
147                 /**
148                  * @mmio_lock: Lock protecting MMIO userspace CPU mappings.
149                  *
150                  * This is needed to ensure we map the dummy IO pages when
151                  * the device is being suspended, and the real IO pages when
152                  * the device is being resumed. We can't just do with the
153                  * state atomicity to deal with this race.
154                  */
155                 struct mutex mmio_lock;
156
157                 /**
158                  * @dummy_latest_flush: Dummy LATEST_FLUSH page.
159                  *
160                  * Used to replace the real LATEST_FLUSH page when the GPU
161                  * is suspended.
162                  */
163                 struct page *dummy_latest_flush;
164         } pm;
165 };
166
167 /**
168  * struct panthor_file - Panthor file
169  */
170 struct panthor_file {
171         /** @ptdev: Device attached to this file. */
172         struct panthor_device *ptdev;
173
174         /** @vms: VM pool attached to this file. */
175         struct panthor_vm_pool *vms;
176
177         /** @groups: Scheduling group pool attached to this file. */
178         struct panthor_group_pool *groups;
179 };
180
/* Device initialization and unplug entry points (implemented out of line). */
int panthor_device_init(struct panthor_device *ptdev);
void panthor_device_unplug(struct panthor_device *ptdev);
183
184 /**
185  * panthor_device_schedule_reset() - Schedules a reset operation
186  */
187 static inline void panthor_device_schedule_reset(struct panthor_device *ptdev)
188 {
189         if (!atomic_cmpxchg(&ptdev->reset.pending, 0, 1) &&
190             atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE)
191                 queue_work(ptdev->reset.wq, &ptdev->reset.work);
192 }
193
194 /**
195  * panthor_device_reset_is_pending() - Checks if a reset is pending.
196  *
197  * Return: true if a reset is pending, false otherwise.
198  */
199 static inline bool panthor_device_reset_is_pending(struct panthor_device *ptdev)
200 {
201         return atomic_read(&ptdev->reset.pending) != 0;
202 }
203
/* Maps device IO pages to userspace (see &panthor_device.pm.mmio_lock). */
int panthor_device_mmap_io(struct panthor_device *ptdev,
			   struct vm_area_struct *vma);

/* PM callbacks taking a raw struct device — presumably wired into dev_pm_ops; confirm in the .c file. */
int panthor_device_resume(struct device *dev);
int panthor_device_suspend(struct device *dev);
209
/*
 * enum drm_panthor_exception_type - Exception codes.
 *
 * Codes less than or equal to DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT are not
 * considered faults (see panthor_exception_is_fault()). NOTE(review): the
 * numeric values presumably mirror HW/FW exception codes — do not renumber.
 */
enum drm_panthor_exception_type {
	DRM_PANTHOR_EXCEPTION_OK = 0x00,
	DRM_PANTHOR_EXCEPTION_TERMINATED = 0x04,
	DRM_PANTHOR_EXCEPTION_KABOOM = 0x05,
	DRM_PANTHOR_EXCEPTION_EUREKA = 0x06,
	DRM_PANTHOR_EXCEPTION_ACTIVE = 0x08,
	DRM_PANTHOR_EXCEPTION_CS_RES_TERM = 0x0f,
	DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT = 0x3f,
	DRM_PANTHOR_EXCEPTION_CS_CONFIG_FAULT = 0x40,
	DRM_PANTHOR_EXCEPTION_CS_ENDPOINT_FAULT = 0x44,
	DRM_PANTHOR_EXCEPTION_CS_BUS_FAULT = 0x48,
	DRM_PANTHOR_EXCEPTION_CS_INSTR_INVALID = 0x49,
	DRM_PANTHOR_EXCEPTION_CS_CALL_STACK_OVERFLOW = 0x4a,
	DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT = 0x4b,
	DRM_PANTHOR_EXCEPTION_INSTR_INVALID_PC = 0x50,
	DRM_PANTHOR_EXCEPTION_INSTR_INVALID_ENC = 0x51,
	DRM_PANTHOR_EXCEPTION_INSTR_BARRIER_FAULT = 0x55,
	DRM_PANTHOR_EXCEPTION_DATA_INVALID_FAULT = 0x58,
	DRM_PANTHOR_EXCEPTION_TILE_RANGE_FAULT = 0x59,
	DRM_PANTHOR_EXCEPTION_ADDR_RANGE_FAULT = 0x5a,
	DRM_PANTHOR_EXCEPTION_IMPRECISE_FAULT = 0x5b,
	DRM_PANTHOR_EXCEPTION_OOM = 0x60,
	DRM_PANTHOR_EXCEPTION_CSF_FW_INTERNAL_ERROR = 0x68,
	DRM_PANTHOR_EXCEPTION_CSF_RES_EVICTION_TIMEOUT = 0x69,
	DRM_PANTHOR_EXCEPTION_GPU_BUS_FAULT = 0x80,
	DRM_PANTHOR_EXCEPTION_GPU_SHAREABILITY_FAULT = 0x88,
	DRM_PANTHOR_EXCEPTION_SYS_SHAREABILITY_FAULT = 0x89,
	DRM_PANTHOR_EXCEPTION_GPU_CACHEABILITY_FAULT = 0x8a,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_0 = 0xc0,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_1 = 0xc1,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_2 = 0xc2,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_3 = 0xc3,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_4 = 0xc4,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_0 = 0xc8,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_1 = 0xc9,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_2 = 0xca,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_3 = 0xcb,
	DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_1 = 0xd9,
	DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_2 = 0xda,
	DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_3 = 0xdb,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_IN = 0xe0,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT0 = 0xe4,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT1 = 0xe5,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT2 = 0xe6,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT3 = 0xe7,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_0 = 0xe8,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_1 = 0xe9,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_2 = 0xea,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_3 = 0xeb,
};
260
261 /**
262  * panthor_exception_is_fault() - Checks if an exception is a fault.
263  *
264  * Return: true if the exception is a fault, false otherwise.
265  */
266 static inline bool
267 panthor_exception_is_fault(u32 exception_code)
268 {
269         return exception_code > DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT;
270 }
271
/* Returns a human-readable name for @exception_code. */
const char *panthor_exception_name(struct panthor_device *ptdev,
				   u32 exception_code);
274
275 /**
276  * PANTHOR_IRQ_HANDLER() - Define interrupt handlers and the interrupt
277  * registration function.
278  *
279  * The boiler-plate to gracefully deal with shared interrupts is
280  * auto-generated. All you have to do is call PANTHOR_IRQ_HANDLER()
281  * just after the actual handler. The handler prototype is:
282  *
283  * void (*handler)(struct panthor_device *, u32 status);
284  */
285 #define PANTHOR_IRQ_HANDLER(__name, __reg_prefix, __handler)                                    \
286 static irqreturn_t panthor_ ## __name ## _irq_raw_handler(int irq, void *data)                  \
287 {                                                                                               \
288         struct panthor_irq *pirq = data;                                                        \
289         struct panthor_device *ptdev = pirq->ptdev;                                             \
290                                                                                                 \
291         if (atomic_read(&pirq->suspended))                                                      \
292                 return IRQ_NONE;                                                                \
293         if (!gpu_read(ptdev, __reg_prefix ## _INT_STAT))                                        \
294                 return IRQ_NONE;                                                                \
295                                                                                                 \
296         gpu_write(ptdev, __reg_prefix ## _INT_MASK, 0);                                         \
297         return IRQ_WAKE_THREAD;                                                                 \
298 }                                                                                               \
299                                                                                                 \
300 static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *data)             \
301 {                                                                                               \
302         struct panthor_irq *pirq = data;                                                        \
303         struct panthor_device *ptdev = pirq->ptdev;                                             \
304         irqreturn_t ret = IRQ_NONE;                                                             \
305                                                                                                 \
306         while (true) {                                                                          \
307                 u32 status = gpu_read(ptdev, __reg_prefix ## _INT_RAWSTAT) & pirq->mask;        \
308                                                                                                 \
309                 if (!status)                                                                    \
310                         break;                                                                  \
311                                                                                                 \
312                 gpu_write(ptdev, __reg_prefix ## _INT_CLEAR, status);                           \
313                                                                                                 \
314                 __handler(ptdev, status);                                                       \
315                 ret = IRQ_HANDLED;                                                              \
316         }                                                                                       \
317                                                                                                 \
318         if (!atomic_read(&pirq->suspended))                                                     \
319                 gpu_write(ptdev, __reg_prefix ## _INT_MASK, pirq->mask);                        \
320                                                                                                 \
321         return ret;                                                                             \
322 }                                                                                               \
323                                                                                                 \
324 static inline void panthor_ ## __name ## _irq_suspend(struct panthor_irq *pirq)                 \
325 {                                                                                               \
326         pirq->mask = 0;                                                                         \
327         gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0);                                   \
328         synchronize_irq(pirq->irq);                                                             \
329         atomic_set(&pirq->suspended, true);                                                     \
330 }                                                                                               \
331                                                                                                 \
332 static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq, u32 mask)        \
333 {                                                                                               \
334         atomic_set(&pirq->suspended, false);                                                    \
335         pirq->mask = mask;                                                                      \
336         gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, mask);                               \
337         gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, mask);                                \
338 }                                                                                               \
339                                                                                                 \
340 static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev,                     \
341                                               struct panthor_irq *pirq,                         \
342                                               int irq, u32 mask)                                \
343 {                                                                                               \
344         pirq->ptdev = ptdev;                                                                    \
345         pirq->irq = irq;                                                                        \
346         panthor_ ## __name ## _irq_resume(pirq, mask);                                          \
347                                                                                                 \
348         return devm_request_threaded_irq(ptdev->base.dev, irq,                                  \
349                                          panthor_ ## __name ## _irq_raw_handler,                \
350                                          panthor_ ## __name ## _irq_threaded_handler,           \
351                                          IRQF_SHARED, KBUILD_MODNAME "-" # __name,              \
352                                          pirq);                                                 \
353 }
354
/* Driver-wide workqueue for deferred cleanup work — defined in the .c side; confirm users there. */
extern struct workqueue_struct *panthor_cleanup_wq;
356
357 #endif
This page took 0.057457 seconds and 4 git commands to generate.