/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _QAIC_H_
#define _QAIC_H_
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#define QAIC_DBC_BASE		SZ_128K
#define QAIC_DBC_SIZE		SZ_4K

#define QAIC_NO_PARTITION	-1

#define QAIC_DBC_OFF(i)		((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)
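/*
 * Illustrative example (not from the original source): each DBC owns a 4K
 * register page laid out after the 128K base, so for DBC index 2:
 *
 *	QAIC_DBC_OFF(2) == 2 * SZ_4K + SZ_128K == 0x2000 + 0x20000 == 0x22000
 */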
#define to_qaic_bo(obj) container_of(obj, struct qaic_bo, base)
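/*
 * Illustrative sketch (example_bo_free is a hypothetical callback name):
 * recover the driver BO from the embedded GEM object, e.g. inside a
 * drm_gem_object_funcs hook:
 *
 *	static void example_bo_free(struct drm_gem_object *obj)
 *	{
 *		struct qaic_bo *bo = to_qaic_bo(obj);
 *		...
 *	}
 */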
extern bool datapath_polling;
struct qaic_user {
	/* Uniquely identifies this user for the device */
	int			handle;
	struct kref		ref_count;
	/* Char device opened by this user */
	struct qaic_drm_device	*qddev;
	/* Node in list of users that opened this drm device */
	struct list_head	node;
	/* SRCU used to synchronize this user during cleanup */
	struct srcu_struct	qddev_lock;
	atomic_t		chunk_id;
};
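/*
 * Illustrative sketch (do_something() is a placeholder): readers enter the
 * SRCU read side before dereferencing usr->qddev so that cleanup can
 * synchronize with them via synchronize_srcu():
 *
 *	int idx = srcu_read_lock(&usr->qddev_lock);
 *
 *	if (usr->qddev)
 *		do_something(usr->qddev);
 *	srcu_read_unlock(&usr->qddev_lock, idx);
 */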
struct dma_bridge_chan {
	/* Pointer to device struct maintained by driver */
	struct qaic_device	*qdev;
	/* ID of this DMA bridge channel (DBC) */
	unsigned int		id;
	/* Synchronizes access to xfer_list */
	spinlock_t		xfer_lock;
	/* Base address of request queue */
	void			*req_q_base;
	/* Base address of response queue */
	void			*rsp_q_base;
	/*
	 * Base bus address of request queue. Response queue bus address can be
	 * calculated by adding request queue size to this variable
	 */
	dma_addr_t		dma_addr;
	/* Total size of the request and response queues in bytes */
	u32			total_size;
	/* Capacity of request/response queue */
	u32			nelem;
	/* The user that opened this DBC */
	struct qaic_user	*usr;
	/*
	 * Request ID of next memory handle that goes in request queue. One
	 * memory handle can enqueue more than one request element; all
	 * request elements that belong to the same memory handle share the
	 * same request ID
	 */
	u16			next_req_id;
	/* true: DBC is in use; false: DBC not in use */
	bool			in_use;
	/*
	 * Base address of device registers. Used to read/write request and
	 * response queue's head and tail pointer of this DBC.
	 */
	void __iomem		*dbc_base;
	/* Head of list where each node is a memory handle queued in request queue */
	struct list_head	xfer_list;
	/* Synchronizes DBC readers during cleanup */
	struct srcu_struct	ch_lock;
	/*
	 * When this DBC is released, any thread waiting on this wait queue is
	 * woken up
	 */
	wait_queue_head_t	dbc_release;
	/* Head of list where each node is a bo associated with this DBC */
	struct list_head	bo_lists;
	/* The irq line for this DBC. Used for polling */
	unsigned int		irq;
	/* Polling work item to simulate interrupts */
	struct work_struct	poll_work;
};
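/*
 * Illustrative sketch (assumes the request queue size is nelem times the
 * request element size): per the dma_addr comment above, the response
 * queue's bus address can be derived from the request queue's:
 *
 *	dma_addr_t rsp_bus_addr = dbc->dma_addr +
 *				  dbc->nelem * get_dbc_req_elem_size();
 */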
struct qaic_device {
	/* Pointer to base PCI device struct of our physical device */
	struct pci_dev		*pdev;
	/* Req. ID of request that will be queued next in MHI control device */
	u32			next_seq_num;
	/* Base address of bar 0 */
	void __iomem		*bar_0;
	/* Base address of bar 2 */
	void __iomem		*bar_2;
	/* Controller structure for MHI devices */
	struct mhi_controller	*mhi_cntrl;
	/* MHI control channel device */
	struct mhi_device	*cntl_ch;
	/* List of requests queued in MHI control device */
	struct list_head	cntl_xfer_list;
	/* Synchronizes MHI control device transactions and its xfer list */
	struct mutex		cntl_mutex;
	/* Array of DBC struct of this device */
	struct dma_bridge_chan	*dbc;
	/* Work queue for tasks related to MHI control device */
	struct workqueue_struct	*cntl_wq;
	/* Synchronizes all the users of device during cleanup */
	struct srcu_struct	dev_lock;
	/* true: Device under reset; false: Device not under reset */
	bool			in_reset;
	/*
	 * true: A tx MHI transaction has failed and an rx buffer is still
	 * queued in the control device. Such a buffer is considered a lost
	 * rx buffer
	 * false: No rx buffer is lost in the control device
	 */
	bool			cntl_lost_buf;
	/* Maximum number of DBCs supported by this device */
	u32			num_dbc;
	/* Reference to the drm_device for this device when it is created */
	struct qaic_drm_device	*qddev;
	/* Generate the CRC of a control message */
	u32 (*gen_crc)(void *msg);
	/* Validate the CRC of a control message */
	bool (*valid_crc)(void *msg);
};
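/*
 * Illustrative sketch (the crc field and its placement are hypothetical):
 * the gen_crc/valid_crc hooks keep the CRC algorithm out of the message
 * handling path:
 *
 *	sender:    msg->crc = qdev->gen_crc(msg);
 *	receiver:  if (!qdev->valid_crc(msg))
 *			return -EINVAL;
 */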
struct qaic_drm_device {
	/* Pointer to the root device struct driven by this driver */
	struct qaic_device	*qdev;
	/*
	 * The physical device can be partitioned into a number of logical
	 * devices, and each logical device is given a partition id. This
	 * member stores that id. QAIC_NO_PARTITION is a sentinel used to
	 * mark that this drm device is the actual physical device
	 */
	s32			partition_id;
	/* Pointer to the drm device struct of this drm device */
	struct drm_device	*ddev;
	/* Head in list of users who have opened this drm device */
	struct list_head	users;
	/* Synchronizes access to users list */
	struct mutex		users_mutex;
};
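/*
 * Illustrative sketch (not from the original source): the sentinel lets
 * callers distinguish the physical device from a logical partition:
 *
 *	if (qddev->partition_id == QAIC_NO_PARTITION)
 *		... this drm device represents the physical device ...
 */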
struct qaic_bo {
	struct drm_gem_object	base;
	/* Scatter/gather table for allocated/imported BO */
	struct sg_table		*sgt;
	/* BO size requested by user. GEM object might be bigger in size. */
	u64			size;
	/* Head in list of slices of this BO */
	struct list_head	slices;
	/* Total nents, for all slices of this BO */
	int			total_slice_nents;
	/*
	 * Direction of transfer. It can assume only two values:
	 * DMA_TO_DEVICE and DMA_FROM_DEVICE.
	 */
	int			dir;
	/* The pointer of the DBC which operates on this BO */
	struct dma_bridge_chan	*dbc;
	/* Number of slices that belong to this buffer */
	u32			nr_slice;
	/* Number of slices that have been transferred by the DMA engine */
	u32			nr_slice_xfer_done;
	/* true = BO is queued for execution, false = BO is not queued */
	bool			queued;
	/*
	 * If true then user has attached slicing information to this BO by
	 * calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
	 */
	bool			sliced;
	/* Request ID of this BO if it is queued for execution */
	u16			req_id;
	/* Handle assigned to this BO */
	u32			handle;
	/* Wait on this for completion of DMA transfer of this BO */
	struct completion	xfer_done;
	/*
	 * Node in linked list where head is dbc->xfer_list.
	 * This linked list contains BOs that are queued for DMA transfer.
	 */
	struct list_head	xfer_list;
	/*
	 * Node in linked list where head is dbc->bo_lists.
	 * This linked list contains BOs that are associated with the DBC they
	 * are linked to.
	 */
	struct list_head	bo_list;
	struct {
		/*
		 * Latest timestamp (ns) at which kernel received a request to
		 * execute this BO
		 */
		u64		req_received_ts;
		/*
		 * Latest timestamp (ns) at which kernel enqueued requests of
		 * this BO for execution in DMA queue
		 */
		u64		req_submit_ts;
		/*
		 * Latest timestamp (ns) at which kernel received a completion
		 * interrupt for requests of this BO
		 */
		u64		req_processed_ts;
		/*
		 * Number of elements already enqueued in DMA queue before
		 * enqueuing requests of this BO
		 */
		u32		queue_level_before;
	} perf_stats;
};
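/*
 * Illustrative example (derived from the timestamp definitions above): a
 * user can split the observed latency of an execute request into
 *
 *	queueing delay  = req_submit_ts - req_received_ts
 *	device latency  = req_processed_ts - req_submit_ts
 */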
struct bo_slice {
	/* Mapped pages */
	struct sg_table		*sgt;
	/* Number of requests required to queue in DMA queue */
	int			nents;
	/* See enum dma_data_direction */
	int			dir;
	/* Actual requests that will be copied into the DMA queue */
	struct dbc_req		*reqs;
	struct kref		ref_count;
	/* true: No DMA transfer required */
	bool			no_xfer;
	/* Pointer to the parent BO handle */
	struct qaic_bo		*bo;
	/* Node in list of slices maintained by parent BO */
	struct list_head	slice;
	/* Size of this slice in bytes */
	u64			size;
	/* Offset of this slice in buffer */
	u64			offset;
};
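/*
 * Illustrative example (hypothetical numbers): a 64K BO sliced into two
 * equal halves would put two bo_slice nodes on bo->slices:
 *
 *	slice 0: offset = 0,      size = SZ_32K
 *	slice 1: offset = SZ_32K, size = SZ_32K
 */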
int get_dbc_req_elem_size(void);
int get_dbc_rsp_elem_size(void);
int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor);
int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);
void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);
int qaic_control_open(struct qaic_device *qdev);
void qaic_control_close(struct qaic_device *qdev);
void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr);

irqreturn_t dbc_irq_threaded_fn(int irq, void *data);
irqreturn_t dbc_irq_handler(int irq, void *data);
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);

void wake_all_cntl(struct qaic_device *qdev);
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset);

struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);

int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void irq_polling_work(struct work_struct *work);
#endif /* _QAIC_H_ */