/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	/* protected by vc.lock */
	struct list_head node;
};

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_allocated;
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;

	struct virt_dma_desc *cyclic;
	struct virt_dma_desc *vd_terminated;
};

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

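/*
 * Illustrative sketch only (hypothetical foo_dma driver, not part of this
 * API): a driver normally embeds the two structures above in its own
 * descriptor and channel types, and converts back with container_of():
 *
 *	struct foo_dma_desc {
 *		struct virt_dma_desc vd;
 *		dma_addr_t dev_addr;
 *		size_t len;
 *	};
 *
 *	struct foo_dma_chan {
 *		struct virt_dma_chan vc;
 *		struct foo_dma_desc *desc;
 *		void __iomem *base;
 *	};
 *
 *	static inline struct foo_dma_chan *to_foo_chan(struct dma_chan *c)
 *	{
 *		return container_of(to_virt_chan(c), struct foo_dma_chan, vc);
 *	}
 */
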
void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);

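/*
 * Illustrative sketch only (hypothetical foo_dma driver): at probe time each
 * channel installs a descriptor destructor and registers itself with the
 * dmaengine core via vchan_init(), which initialises the lists, the lock and
 * the completion tasklet.  Here "od->ddev" stands for the driver's
 * struct dma_device:
 *
 *	static void foo_dma_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(container_of(vd, struct foo_dma_desc, vd));
 *	}
 *
 *	c->vc.desc_free = foo_dma_desc_free;
 *	vchan_init(&c->vc, &od->ddev);
 */
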
/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	unsigned long flags;

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);

	return &vd->tx;
}

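/*
 * Illustrative sketch only (hypothetical foo_dma driver): a prepare callback
 * allocates its own descriptor, fills in the hardware-specific parts and
 * hands it to the core through vchan_tx_prep():
 *
 *	static struct dma_async_tx_descriptor *foo_dma_prep_slave_sg(
 *		struct dma_chan *chan, struct scatterlist *sgl,
 *		unsigned int sg_len, enum dma_transfer_direction dir,
 *		unsigned long flags, void *context)
 *	{
 *		struct foo_dma_chan *c = to_foo_chan(chan);
 *		struct foo_dma_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		... fill d->dev_addr, d->len, etc. from sgl and dir ...
 *		return vchan_tx_prep(&c->vc, &d->vd, flags);
 *	}
 */
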
/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}

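/*
 * Illustrative sketch only (hypothetical foo_dma driver): device_issue_pending
 * takes vc.lock, moves submitted work to the issued list and kicks the
 * hardware if it is currently idle.  "c->desc" and foo_dma_start_next() are
 * assumed driver state and helpers:
 *
 *	static void foo_dma_issue_pending(struct dma_chan *chan)
 *	{
 *		struct foo_dma_chan *c = to_foo_chan(chan);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&c->vc.lock, flags);
 *		if (vchan_issue_pending(&c->vc) && !c->desc)
 *			foo_dma_start_next(c);
 *		spin_unlock_irqrestore(&c->vc.lock, flags);
 *	}
 */
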
/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}

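/*
 * Illustrative sketch only (hypothetical foo_dma driver): the interrupt
 * handler reports a finished transfer with vchan_cookie_complete() while
 * holding vc.lock, then starts the next issued descriptor if there is one:
 *
 *	spin_lock_irqsave(&c->vc.lock, flags);
 *	if (c->desc) {
 *		vchan_cookie_complete(&c->desc->vd);
 *		c->desc = NULL;
 *		foo_dma_start_next(c);
 *	}
 *	spin_unlock_irqrestore(&c->vc.lock, flags);
 */
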
/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	if (dmaengine_desc_test_reuse(&vd->tx))
		list_add(&vd->node, &vc->desc_allocated);
	else
		vc->desc_free(vd);
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}

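/*
 * Illustrative sketch only (hypothetical foo_dma driver): for cyclic
 * transfers the interrupt handler reports each completed period instead of
 * completing the descriptor.  "d->cyclic" is an assumed per-descriptor flag:
 *
 *	if (d->cyclic)
 *		vchan_cyclic_callback(&d->vd);
 *	else
 *		vchan_cookie_complete(&d->vd);
 */
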
/**
 * vchan_terminate_vdesc - Disable pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	/* free up stuck descriptor */
	if (vc->vd_terminated)
		vchan_vdesc_fini(vc->vd_terminated);

	vc->vd_terminated = vd;
	if (vc->cyclic == vd)
		vc->cyclic = NULL;
}

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	return list_first_entry_or_null(&vc->desc_issued,
					struct virt_dma_desc, node);
}

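/*
 * Illustrative sketch only (hypothetical foo_dma driver): the hardware start
 * routine, called with vc.lock held, pops the next issued descriptor and
 * programs it into the controller:
 *
 *	static void foo_dma_start_next(struct foo_dma_chan *c)
 *	{
 *		struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
 *
 *		if (!vd)
 *			return;
 *		list_del(&vd->node);
 *		c->desc = container_of(vd, struct foo_dma_desc, vd);
 *		... write c->desc parameters to the hardware registers ...
 *	}
 */
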
/**
 * vchan_get_all_descriptors - obtain all submitted and issued descriptors
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all submitted and issued descriptors from internal lists, and
 * provides a list of all descriptors found
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
	struct list_head *head)
{
	list_splice_tail_init(&vc->desc_allocated, head);
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
}

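/*
 * Illustrative sketch only (hypothetical foo_dma driver): device_terminate_all
 * stops the hardware, detaches the active descriptor with
 * vchan_terminate_vdesc(), collects everything else and frees it outside the
 * lock:
 *
 *	static int foo_dma_terminate_all(struct dma_chan *chan)
 *	{
 *		struct foo_dma_chan *c = to_foo_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&c->vc.lock, flags);
 *		... stop the hardware ...
 *		if (c->desc) {
 *			vchan_terminate_vdesc(&c->desc->vd);
 *			c->desc = NULL;
 *		}
 *		vchan_get_all_descriptors(&c->vc, &head);
 *		spin_unlock_irqrestore(&c->vc.lock, flags);
 *
 *		vchan_dma_desc_free_list(&c->vc, &head);
 *		return 0;
 *	}
 */
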
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	list_for_each_entry(vd, &head, node)
		dmaengine_desc_clear_reuse(&vd->tx);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}

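/*
 * Illustrative sketch only (hypothetical foo_dma driver): usually exposed
 * directly as the device_free_chan_resources callback:
 *
 *	static void foo_dma_free_chan_resources(struct dma_chan *chan)
 *	{
 *		vchan_free_chan_resources(&to_foo_chan(chan)->vc);
 *	}
 */
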
/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are scheduled
 * after the invocation of this function started.
 * Free up the terminated cyclic descriptor to prevent memory leakage.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
	unsigned long flags;

	tasklet_kill(&vc->task);

	spin_lock_irqsave(&vc->lock, flags);
	if (vc->vd_terminated) {
		vchan_vdesc_fini(vc->vd_terminated);
		vc->vd_terminated = NULL;
	}
	spin_unlock_irqrestore(&vc->lock, flags);
}

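/*
 * Illustrative sketch only (hypothetical foo_dma driver): a driver normally
 * wires this straight into its device_synchronize callback:
 *
 *	static void foo_dma_synchronize(struct dma_chan *chan)
 *	{
 *		vchan_synchronize(&to_foo_chan(chan)->vc);
 *	}
 */
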
#endif