/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2018 Álvaro Fernández Rojas <[email protected]>
 * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com>
 * Written by Mugunthan V N <[email protected]>
 *
 */

#ifndef _DMA_H_
#define _DMA_H_

#include <linux/errno.h>
#include <linux/types.h>

/*
 * enum dma_direction - dma transfer direction indicator
 * @DMA_MEM_TO_MEM: Memcpy mode
 * @DMA_MEM_TO_DEV: From Memory to Device
 * @DMA_DEV_TO_MEM: From Device to Memory
 * @DMA_DEV_TO_DEV: From Device to Device
 */
enum dma_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
};

#define DMA_SUPPORTS_MEM_TO_MEM	BIT(0)
#define DMA_SUPPORTS_MEM_TO_DEV	BIT(1)
#define DMA_SUPPORTS_DEV_TO_MEM	BIT(2)
#define DMA_SUPPORTS_DEV_TO_DEV	BIT(3)

/*
 * struct dma_dev_priv - information about a device used by the uclass
 *
 * @supported: mode of transfers that DMA can support, should be
 *	       one/multiple of DMA_SUPPORTS_*
 */
struct dma_dev_priv {
	u32 supported;
};
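
/*
 * A minimal provider-side sketch (illustrative only, not from a real
 * driver): a DMA driver's probe() would typically advertise the transfer
 * modes it can handle through this uclass-private struct. "my_dma_probe"
 * is a hypothetical name:
 *
 *	static int my_dma_probe(struct udevice *dev)
 *	{
 *		struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
 *
 *		uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM |
 *				     DMA_SUPPORTS_DEV_TO_MEM;
 *		return 0;
 *	}
 */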

#ifdef CONFIG_DMA_CHANNELS
/**
 * A DMA is a feature of computer systems that allows certain hardware
 * subsystems to access main system memory, independent of the CPU.
 * DMA channels are typically generated externally to the HW module
 * consuming them, by an entity this API calls a DMA provider. This API
 * provides a standard means for drivers to enable and disable DMAs, and to
 * copy, send and receive data using DMA.
 *
 * A driver that implements UCLASS_DMA is a DMA provider. A provider will
 * often implement multiple separate DMAs, since the hardware it manages
 * often has this capability. dma-uclass.h describes the interface which
 * DMA providers must implement.
 *
 * DMA consumers/clients are the HW modules driven by the DMA channels. This
 * header file describes the API used by drivers for those HW modules.
 *
 * DMA consumer DMA_MEM_TO_DEV (transmit) usage example (based on networking).
 * Note: dma_send() is always a synchronous operation - it starts the transfer
 * and polls until it completes:
 * - get/request dma channel
 *	struct dma dma_tx;
 *	ret = dma_get_by_name(common->dev, "tx0", &dma_tx);
 *	if (ret) ...
 *
 * - enable dma channel
 *	ret = dma_enable(&dma_tx);
 *	if (ret) ...
 *
 * - dma transmit DMA_MEM_TO_DEV.
 *	struct ti_drv_packet_data packet_data;
 *
 *	packet_data.opt1 = val1;
 *	packet_data.opt2 = val2;
 *	ret = dma_send(&dma_tx, packet, length, &packet_data);
 *	if (ret) ...
 *
 * DMA consumer DMA_DEV_TO_MEM (receive) usage example (based on networking).
 * Note: dma_receive() is always a synchronous operation - it starts the
 * transfer (if required) and polls until it completes (or until any
 * previously configured dev2mem transfer completes):
 * - get/request dma channel
 *	struct dma dma_rx;
 *	ret = dma_get_by_name(common->dev, "rx0", &dma_rx);
 *	if (ret) ...
 *
 * - enable dma channel
 *	ret = dma_enable(&dma_rx);
 *	if (ret) ...
 *
 * - dma receive DMA_DEV_TO_MEM.
 *	struct ti_drv_packet_data packet_data;
 *
 *	len = dma_receive(&dma_rx, (void **)packet, &packet_data);
 *	if (len < 0) ...
 *
 * DMA consumer DMA_DEV_TO_MEM (receive) zero-copy usage example (based on
 * networking). The networking subsystem allows configuring and using several
 * receive buffers (dev2mem), as networking RX DMA channels are usually
 * implemented as a streaming interface:
 * - get/request dma channel
 *	struct dma dma_rx;
 *	ret = dma_get_by_name(common->dev, "rx0", &dma_rx);
 *	if (ret) ...
 *
 *	for (i = 0; i < RX_DESC_NUM; i++) {
 *		ret = dma_prepare_rcv_buf(&dma_rx,
 *					  net_rx_packets[i],
 *					  RX_BUF_SIZE);
 *		if (ret) ...
 *	}
 *
 * - enable dma channel
 *	ret = dma_enable(&dma_rx);
 *	if (ret) ...
 *
 * - dma receive DMA_DEV_TO_MEM.
 *	struct ti_drv_packet_data packet_data;
 *
 *	len = dma_receive(&dma_rx, (void **)packet, &packet_data);
 *	if (len < 0) ...
 *
 * -- process packet --
 *
 * - return the buffer back to the DMA channel
 *	ret = dma_prepare_rcv_buf(&dma_rx,
 *				  net_rx_packets[rx_next],
 *				  RX_BUF_SIZE);
 */

struct udevice;

/**
 * struct dma - A handle to (allowing control of) a single DMA.
 *
 * Clients provide storage for DMA handles. The content of the structure is
 * managed solely by the DMA API and DMA drivers. A DMA struct is
 * initialized by "get"ing the DMA struct. The DMA struct is passed to all
 * other DMA APIs to identify which DMA channel to operate upon.
 *
 * @dev: The device which implements the DMA channel.
 * @id: The DMA channel ID within the provider.
 *
 * Currently, the DMA API assumes that a single integer ID is enough to
 * identify and configure any DMA channel for any DMA provider. If this
 * assumption becomes invalid in the future, the struct could be expanded to
 * either (a) add more fields to allow DMA providers to store additional
 * information, or (b) replace the id field with an opaque pointer, which the
 * provider would dynamically allocate during its .of_xlate op, and process
 * during its .request op. This may require the addition of an extra op to
 * clean up the allocation.
 */
struct dma {
	struct udevice *dev;
	/*
	 * Written by of_xlate. We assume a single id is enough for now. In the
	 * future, we might add more fields here.
	 */
	unsigned long id;
};

# if CONFIG_IS_ENABLED(OF_CONTROL) && CONFIG_IS_ENABLED(DMA)
/**
 * dma_get_by_index - Get/request a DMA by integer index.
 *
 * This looks up and requests a DMA. The index is relative to the client
 * device; each device is assumed to have n DMAs associated with it somehow,
 * and this function finds and requests one of them. The mapping of client
 * device DMA indices to provider DMAs may be via device-tree properties,
 * board-provided mapping tables, or some other mechanism.
 *
 * @dev:	The client device.
 * @index:	The index of the DMA to request, within the client's list of
 *		DMA channels.
 * @dma:	A pointer to a DMA struct to initialize.
 * @return 0 if OK, or a negative error code.
 */
int dma_get_by_index(struct udevice *dev, int index, struct dma *dma);

/**
 * dma_get_by_name - Get/request a DMA by name.
 *
 * This looks up and requests a DMA. The name is relative to the client
 * device; each device is assumed to have n DMAs associated with it somehow,
 * and this function finds and requests one of them. The mapping of client
 * device DMA names to provider DMAs may be via device-tree properties,
 * board-provided mapping tables, or some other mechanism.
 *
 * @dev:	The client device.
 * @name:	The name of the DMA to request, within the client's list of
 *		DMA channels.
 * @dma:	A pointer to a DMA struct to initialize.
 * @return 0 if OK, or a negative error code.
 */
int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma);
# else
static inline int dma_get_by_index(struct udevice *dev, int index,
				   struct dma *dma)
{
	return -ENOSYS;
}

static inline int dma_get_by_name(struct udevice *dev, const char *name,
				  struct dma *dma)
{
	return -ENOSYS;
}
# endif

/**
 * dma_request - Request a DMA by provider-specific ID.
 *
 * This requests a DMA using a provider-specific ID. Generally, this function
 * should not be used, since dma_get_by_index/name() provide an interface that
 * better separates clients from intimate knowledge of DMA providers.
 * However, this function may be useful in core SoC-specific code.
 *
 * @dev:	The DMA provider device.
 * @dma:	A pointer to a DMA struct to initialize. The caller must
 *		have already initialized any field in this struct which the
 *		DMA provider uses to identify the DMA channel.
 * @return 0 if OK, or a negative error code.
 */
int dma_request(struct udevice *dev, struct dma *dma);
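
/*
 * A minimal dma_request() usage sketch (illustrative only). It assumes the
 * caller has already looked up the provider device "dma_dev" and that the
 * provider identifies its channels by a plain integer id (here channel 0):
 *
 *	struct dma dma_ch;
 *
 *	dma_ch.id = 0;
 *	ret = dma_request(dma_dev, &dma_ch);
 *	if (ret) ...
 */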

/**
 * dma_free - Free a previously requested DMA.
 *
 * @dma:	A DMA struct that was previously successfully requested by
 *		dma_request/get_by_*().
 * @return 0 if OK, or a negative error code.
 */
int dma_free(struct dma *dma);

/**
 * dma_enable() - Enable (turn on) a DMA channel.
 *
 * @dma:	A DMA struct that was previously successfully requested by
 *		dma_request/get_by_*().
 * @return zero on success, or -ve error code.
 */
int dma_enable(struct dma *dma);

/**
 * dma_disable() - Disable (turn off) a DMA channel.
 *
 * @dma:	A DMA struct that was previously successfully requested by
 *		dma_request/get_by_*().
 * @return zero on success, or -ve error code.
 */
int dma_disable(struct dma *dma);

/**
 * dma_prepare_rcv_buf() - Prepare/add a receive DMA buffer.
 *
 * This allows implementing zero-copy async DMA_DEV_TO_MEM (receive)
 * transactions if supported by the DMA provider.
 *
 * @dma:	A DMA struct that was previously successfully requested by
 *		dma_request/get_by_*().
 * @dst:	The receive buffer pointer.
 * @size:	The receive buffer size.
 * @return zero on success, or -ve error code.
 */
int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size);

/**
 * dma_receive() - Receive a DMA transfer.
 *
 * @dma:	A DMA struct that was previously successfully requested by
 *		dma_request/get_by_*().
 * @dst:	The destination pointer.
 * @metadata:	DMA driver's channel-specific data.
 * @return length of received data on success, zero if there is no data,
 *	   or -ve error code.
 */
int dma_receive(struct dma *dma, void **dst, void *metadata);

/**
 * dma_send() - Send a DMA transfer.
 *
 * @dma:	A DMA struct that was previously successfully requested by
 *		dma_request/get_by_*().
 * @src:	The source pointer.
 * @len:	Length of the data to be sent (number of bytes).
 * @metadata:	DMA driver's channel-specific data.
 * @return zero on success, or -ve error code.
 */
int dma_send(struct dma *dma, void *src, size_t len, void *metadata);

/**
 * dma_get_cfg() - Get DMA channel configuration for the client's use.
 *
 * @dma:	The DMA channel to manipulate.
 * @cfg_id:	DMA provider-specific ID identifying the configuration data
 *		the client needs.
 * @cfg_data:	Pointer to store a pointer to the DMA driver-specific
 *		configuration data for the given cfg_id (output parameter).
 * @return zero on success, or -ve error code.
 */
int dma_get_cfg(struct dma *dma, u32 cfg_id, void **cfg_data);
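
/*
 * A minimal dma_get_cfg() usage sketch (illustrative only). The cfg_id
 * values and the layout of the returned data are defined by the DMA
 * provider; "DRV_CFG_CHAN_INFO" and "struct drv_chan_cfg" below are
 * hypothetical names:
 *
 *	struct drv_chan_cfg *cfg;
 *
 *	ret = dma_get_cfg(&dma_rx, DRV_CFG_CHAN_INFO, (void **)&cfg);
 *	if (ret) ...
 */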
#endif /* CONFIG_DMA_CHANNELS */

#if CONFIG_IS_ENABLED(DMA)
/*
 * dma_get_device - get a DMA device which supports transfer
 * type of transfer_type
 *
 * @transfer_type - transfer type should be one/multiple of
 *		    DMA_SUPPORTS_*
 * @devp - udevice pointer to return the found device
 * @return - 0 on success, with devp holding the pointer to the found
 *	     device; -ve error code on failure
 */
int dma_get_device(u32 transfer_type, struct udevice **devp);
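
/*
 * A minimal dma_get_device() usage sketch: look up any provider that
 * advertises memory-to-memory support before driving it directly:
 *
 *	struct udevice *dev;
 *
 *	ret = dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dev);
 *	if (ret) ...
 */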

/*
 * dma_memcpy - try to use DMA to do a mem copy which will be
 * much faster than CPU mem copy
 *
 * @dst - destination pointer
 * @src - source pointer
 * @len - data length to be copied
 * @return - on a successful transfer returns the number of bytes
 *	     transferred; on failure returns a -ve error code
 */
int dma_memcpy(void *dst, void *src, size_t len);
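
/*
 * A minimal dma_memcpy() usage sketch: attempt a hardware-assisted copy
 * and fall back to a plain CPU memcpy() when no capable DMA device is
 * available:
 *
 *	if (dma_memcpy(dst, src, len) < 0)
 *		memcpy(dst, src, len);
 */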
#else
static inline int dma_get_device(u32 transfer_type, struct udevice **devp)
{
	return -ENOSYS;
}

static inline int dma_memcpy(void *dst, void *src, size_t len)
{
	return -ENOSYS;
}
#endif /* CONFIG_DMA */
#endif /* _DMA_H_ */