/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

/* Map a dmaengine tx descriptor back to its enclosing virt_dma_desc. */
static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}
21 | ||
22 | dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) | |
23 | { | |
24 | struct virt_dma_chan *vc = to_virt_chan(tx->chan); | |
25 | struct virt_dma_desc *vd = to_virt_desc(tx); | |
26 | unsigned long flags; | |
27 | dma_cookie_t cookie; | |
28 | ||
29 | spin_lock_irqsave(&vc->lock, flags); | |
30 | cookie = dma_cookie_assign(tx); | |
31 | ||
13bb26ae | 32 | list_move_tail(&vd->node, &vc->desc_submitted); |
50437bff RK |
33 | spin_unlock_irqrestore(&vc->lock, flags); |
34 | ||
35 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", | |
36 | vc, vd, cookie); | |
37 | ||
38 | return cookie; | |
39 | } | |
40 | EXPORT_SYMBOL_GPL(vchan_tx_submit); | |
41 | ||
13bb26ae RJ |
42 | /** |
43 | * vchan_tx_desc_free - free a reusable descriptor | |
44 | * @tx: the transfer | |
45 | * | |
46 | * This function frees a previously allocated reusable descriptor. The only | |
47 | * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the | |
48 | * transfer. | |
49 | * | |
50 | * Returns 0 upon success | |
51 | */ | |
52 | int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx) | |
53 | { | |
54 | struct virt_dma_chan *vc = to_virt_chan(tx->chan); | |
55 | struct virt_dma_desc *vd = to_virt_desc(tx); | |
56 | unsigned long flags; | |
57 | ||
58 | spin_lock_irqsave(&vc->lock, flags); | |
59 | list_del(&vd->node); | |
60 | spin_unlock_irqrestore(&vc->lock, flags); | |
61 | ||
62 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n", | |
63 | vc, vd, vd->tx.cookie); | |
64 | vc->desc_free(vd); | |
65 | return 0; | |
66 | } | |
67 | EXPORT_SYMBOL_GPL(vchan_tx_desc_free); | |
68 | ||
fe045874 RK |
69 | struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, |
70 | dma_cookie_t cookie) | |
71 | { | |
72 | struct virt_dma_desc *vd; | |
73 | ||
74 | list_for_each_entry(vd, &vc->desc_issued, node) | |
75 | if (vd->tx.cookie == cookie) | |
76 | return vd; | |
77 | ||
78 | return NULL; | |
79 | } | |
80 | EXPORT_SYMBOL_GPL(vchan_find_desc); | |
81 | ||
50437bff RK |
/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	/*
	 * Snapshot the completed list and the pending cyclic descriptor
	 * under the lock, so callbacks below run without it held.
	 */
	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		/* Clear vc->cyclic so each period's callback fires once. */
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		/* No cyclic callback pending; invoke below is a no-op. */
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	/* Cyclic descriptors are not freed; only their callback runs. */
	dmaengine_desc_callback_invoke(&cb, NULL);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		/* Capture the callback before the descriptor may be freed. */
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		/* Reusable descriptors go back to the allocated pool. */
		if (dmaengine_desc_test_reuse(&vd->tx))
			list_add(&vd->node, &vc->desc_allocated);
		else
			vc->desc_free(vd);

		dmaengine_desc_callback_invoke(&cb, NULL);
	}
}
118 | ||
119 | void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) | |
120 | { | |
fdb980fb AS |
121 | struct virt_dma_desc *vd, *_vd; |
122 | ||
123 | list_for_each_entry_safe(vd, _vd, head, node) { | |
13bb26ae RJ |
124 | if (dmaengine_desc_test_reuse(&vd->tx)) { |
125 | list_move_tail(&vd->node, &vc->desc_allocated); | |
126 | } else { | |
127 | dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); | |
128 | list_del(&vd->node); | |
129 | vc->desc_free(vd); | |
130 | } | |
50437bff RK |
131 | } |
132 | } | |
133 | EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); | |
134 | ||
135 | void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) | |
136 | { | |
137 | dma_cookie_init(&vc->chan); | |
138 | ||
139 | spin_lock_init(&vc->lock); | |
13bb26ae | 140 | INIT_LIST_HEAD(&vc->desc_allocated); |
50437bff RK |
141 | INIT_LIST_HEAD(&vc->desc_submitted); |
142 | INIT_LIST_HEAD(&vc->desc_issued); | |
143 | INIT_LIST_HEAD(&vc->desc_completed); | |
144 | ||
145 | tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); | |
146 | ||
147 | vc->chan.device = dmadev; | |
148 | list_add_tail(&vc->chan.device_node, &dmadev->channels); | |
149 | } | |
150 | EXPORT_SYMBOL_GPL(vchan_init); | |

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");