/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/dma-mapping.h>

/**
 * enum dma_state - resource PNP/power management state
 * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
 * @DMA_RESOURCE_RESUME: DMA device returning to full power
 * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
 * @DMA_RESOURCE_REMOVED: DMA device removed from the system
 */
enum dma_state {
	DMA_RESOURCE_SUSPEND,
	DMA_RESOURCE_RESUME,
	DMA_RESOURCE_AVAILABLE,
	DMA_RESOURCE_REMOVED,
};

/**
 * enum dma_state_client - state of the channel in the client
 * @DMA_ACK: client would like to use, or was using this channel
 * @DMA_DUP: client has already seen this channel, or is not using this channel
 * @DMA_NAK: client does not want to see any more channels
 */
enum dma_state_client {
	DMA_ACK,
	DMA_DUP,
	DMA_NAK,
};

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * If dma_cookie_t is > 0 it's a DMA request cookie; if < 0 it's an error code.
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ_XOR,
	DMA_DUAL_XOR,
	DMA_PQ_UPDATE,
	DMA_ZERO_SUM,
	DMA_PQ_ZERO_SUM,
	DMA_MEMSET,
	DMA_MEMCPY_CRC32C,
	DMA_INTERRUPT,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @refcount: local_t used for open-coded "bigref" counting
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	local_t refcount;
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @class_dev: class device for sysfs
 * @refcount: kref, used in "bigref" slow-mode
 * @slow_ref: indicates that the DMA channel is free
 * @rcu: the DMA channel's RCU head
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct class_device class_dev;

	struct kref refcount;
	int slow_ref;
	struct rcu_head rcu;

	struct list_head device_node;
	struct dma_chan_percpu *local;
};

void dma_chan_cleanup(struct kref *kref);

static inline void dma_chan_get(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_get(&chan->refcount);
	else {
		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

static inline void dma_chan_put(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_put(&chan->refcount, dma_chan_cleanup);
	else {
		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

/*
 * typedef dma_event_callback - function pointer to a DMA event callback
 * For each channel added to the system this routine is called for each client.
 * If the client would like to use the channel it returns DMA_ACK to signal
 * the dmaengine core to take out a reference on the channel and its
 * corresponding device. A client must not 'ack' an available channel more
 * than once. When a channel is removed all clients are notified. If a client
 * is using the channel it must 'ack' the removal. A client must not 'ack' a
 * removed channel more than once.
 * @client - 'this' pointer for the client context
 * @chan - channel to be acted upon
 * @state - available or removed
 */
struct dma_client;
typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
		struct dma_chan *chan, enum dma_state state);

/**
 * struct dma_client - info on the entity making use of DMA services
 * @event_callback: func ptr to call when something happens
 * @cap_mask: only return channels that satisfy the requested capabilities;
 *	a value of zero corresponds to any capability
 * @global_node: list_head for global dma_client_list
 */
struct dma_client {
	dma_event_callback event_callback;
	dma_cap_mask_t cap_mask;
	struct list_head global_node;
};
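
/*
 * A minimal client sketch (illustrative only, not part of this header;
 * the "example_" name is hypothetical): the callback acks channels as
 * they become available and acks their removal, per the protocol
 * described above.  The filled-in struct dma_client would then be
 * passed to dma_async_client_register()/dma_async_client_chan_request(),
 * declared further down in this file.
 */
static inline enum dma_state_client
example_dma_event(struct dma_client *client, struct dma_chan *chan,
		enum dma_state state)
{
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		return DMA_ACK;	/* take a reference on this channel */
	case DMA_RESOURCE_REMOVED:
		return DMA_ACK;	/* we were using it; ack the removal */
	default:
		return DMA_DUP;	/* already seen, or not interesting here */
	}
}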

typedef void (*dma_async_tx_callback)(void *dma_async_param);
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @ack: the descriptor cannot be reused until the client acknowledges
 *	receipt, i.e. has had a chance to establish any dependency chains
 * @phys: physical address of the descriptor
 * @tx_list: driver common field for operations that require multiple
 *	descriptors
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @tx_set_dest: set a destination address in a hardware descriptor
 * @tx_set_src: set a source address in a hardware descriptor
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @depend_list: at completion this list of transactions is submitted
 * @depend_node: allow this transaction to be executed after another
 *	transaction has completed, possibly on another channel
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the dependency list
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	int ack;
	dma_addr_t phys;
	struct list_head tx_list;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	void (*tx_set_dest)(dma_addr_t addr,
		struct dma_async_tx_descriptor *tx, int index);
	void (*tx_set_src)(dma_addr_t addr,
		struct dma_async_tx_descriptor *tx, int index);
	dma_async_tx_callback callback;
	void *callback_param;
	struct list_head depend_list;
	struct list_head depend_node;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @refcount: reference count
 * @done: IO completion struct
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares an xor operation
 * @device_prep_dma_zero_sum: prepares a zero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_dependency_added: async_tx notifies the channel about new deps
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {
	unsigned int chancnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	int max_xor;

	struct kref refcount;
	struct completion done;

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, size_t len, int int_en);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, unsigned int src_cnt, size_t len,
		int int_en);
	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
		struct dma_chan *chan, unsigned int src_cnt, size_t len,
		u32 *result, int int_en);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, int value, size_t len, int int_en);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan);

	void (*device_dependency_added)(struct dma_chan *chan);
	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
			dma_cookie_t cookie, dma_cookie_t *last,
			dma_cookie_t *used);
	void (*device_issue_pending)(struct dma_chan *chan);
};

/* --- public DMA engine API --- */

void dma_async_client_register(struct dma_client *client);
void dma_async_client_unregister(struct dma_client *client);
void dma_async_client_chan_request(struct dma_client *client);
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);
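
/*
 * A minimal usage sketch (illustrative only; example_offload_copy is a
 * hypothetical caller): offload a kernel-virtual copy and test the
 * returned cookie with dma_submit_error().
 */
static inline int example_offload_copy(struct dma_chan *chan, void *dest,
		void *src, size_t len)
{
	dma_cookie_t cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);

	if (dma_submit_error(cookie))
		return -EIO;	/* e.g. fall back to memcpy() on the cpu */
	return 0;
}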

static inline void
async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->ack = 1;
}
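
/*
 * A minimal descriptor-level sketch (illustrative only; error handling
 * is elided and @dest/@src are assumed to be already dma-mapped): drive
 * one memcpy through the raw device operations above.  int_en is left
 * 0, i.e. no completion interrupt is requested.
 */
static inline dma_cookie_t
example_issue_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->tx_set_src(src, tx, 0);
	tx->tx_set_dest(dest, tx, 0);
	async_tx_ack(tx);	/* no dependent transactions will be attached */
	return tx->tx_submit(tx);
}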

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask); \
		(cap) < DMA_TX_TYPE_END; \
		(cap) = next_dma_cap((cap), (mask)))

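/*
 * A minimal sketch (illustrative only; dma_example_walk_caps is a
 * hypothetical helper): request memcpy and xor capability in a mask,
 * then walk the bits that were set using the iterators above.
 */
static inline void dma_example_walk_caps(void)
{
	dma_cap_mask_t mask;
	int cap;

	bitmap_zero(mask.bits, DMA_TX_TYPE_END);	/* linux/bitmap.h */
	dma_cap_set(DMA_MEMCPY, mask);
	dma_cap_set(DMA_XOR, mask);

	for_each_dma_cap_mask(cap, mask)
		pr_debug("capability %d requested\n", cap);
}
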
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_is_tx_complete(chan, cookie, last, used);
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
	dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}
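
/*
 * A minimal sketch of the pattern described above (illustrative only;
 * example_check_two is hypothetical): read the channel state once, then
 * test a second cookie against the cached values without re-checking
 * hardware.
 */
static inline enum dma_status
example_check_two(struct dma_chan *chan, dma_cookie_t cookie1,
		dma_cookie_t cookie2)
{
	dma_cookie_t last, used;
	enum dma_status status;

	status = dma_async_memcpy_complete(chan, cookie1, &last, &used);
	if (status != DMA_SUCCESS)
		return status;
	return dma_async_is_complete(cookie2, last, used);
}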

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
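
/*
 * A minimal registration sketch (illustrative only; example_register is
 * hypothetical and channel setup is elided): a driver advertises its
 * capabilities and hands its struct dma_device to
 * dma_async_device_register() from its probe path.
 */
static inline int example_register(struct dma_device *dma_dev,
		struct device *parent)
{
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_dev->dev = parent;
	/* dma_dev->chancnt, dma_dev->channels and the device_* operations
	 * must already be populated */
	return dma_async_device_register(dma_dev);
}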

/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

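/*
 * A minimal sketch (illustrative only; example_copy_to_iovec is
 * hypothetical): pin the user pages behind @iov once, offload the copy,
 * wait for completion with dma_sync_wait(), then unpin.
 */
static inline dma_cookie_t
example_copy_to_iovec(struct dma_chan *chan, struct iovec *iov,
		unsigned char *kdata, size_t len)
{
	struct dma_pinned_list *pinned = dma_pin_iovec_pages(iov, len);
	dma_cookie_t cookie;

	if (!pinned)
		return -ENOMEM;

	cookie = dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);
	if (!dma_submit_error(cookie))
		dma_sync_wait(chan, cookie);	/* block until done */
	dma_unpin_iovec_pages(pinned);
	return cookie;
}
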
#endif /* DMAENGINE_H */