// SPDX-License-Identifier: GPL-2.0+
/*
 * Direct Memory Access U-Class driver
 *
 * Copyright (C) 2018 Álvaro Fernández Rojas <[email protected]>
 * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com>
 * Written by Mugunthan V N <[email protected]>
 *
 * Author: Mugunthan V N <[email protected]>
 */

#include <common.h>
#include <dm.h>
#include <dm/read.h>
#include <dma-uclass.h>
#include <dt-structs.h>
#include <errno.h>

#ifdef CONFIG_DMA_CHANNELS
static inline struct dma_ops *dma_dev_ops(struct udevice *dev)
{
	return (struct dma_ops *)dev->driver->ops;
}

# if CONFIG_IS_ENABLED(OF_CONTROL)
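/*
 * Default "#dma-cells" translation: a specifier may carry at most one
 * cell, which is taken as the channel id (0 when no cell is present).
 */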
static int dma_of_xlate_default(struct dma *dma,
				struct ofnode_phandle_args *args)
{
	debug("%s(dma=%p)\n", __func__, dma);

	if (args->args_count > 1) {
		pr_err("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		dma->id = args->args[0];
	else
		dma->id = 0;

	return 0;
}

int dma_get_by_index(struct udevice *dev, int index, struct dma *dma)
{
	int ret;
	struct ofnode_phandle_args args;
	struct udevice *dev_dma;
	const struct dma_ops *ops;

	debug("%s(dev=%p, index=%d, dma=%p)\n", __func__, dev, index, dma);

	assert(dma);
	dma->dev = NULL;

	ret = dev_read_phandle_with_args(dev, "dmas", "#dma-cells", 0, index,
					 &args);
	if (ret) {
		pr_err("%s: dev_read_phandle_with_args failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	ret = uclass_get_device_by_ofnode(UCLASS_DMA, args.node, &dev_dma);
	if (ret) {
		pr_err("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	dma->dev = dev_dma;

	ops = dma_dev_ops(dev_dma);

	if (ops->of_xlate)
		ret = ops->of_xlate(dma, &args);
	else
		ret = dma_of_xlate_default(dma, &args);
	if (ret) {
		pr_err("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return dma_request(dev_dma, dma);
}
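
/*
 * Illustrative device tree fragment (node and channel names here are
 * hypothetical, following the generic DMA client binding):
 *
 *	dmas = <&dma_ctlr 0>, <&dma_ctlr 1>;
 *	dma-names = "tx", "rx";
 *
 * dma_get_by_index(dev, 1, &dma) resolves the second "dmas" specifier,
 * i.e. channel 1 of dma_ctlr, and then calls dma_request() on it.
 */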

int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma)
{
	int index;

	debug("%s(dev=%p, name=%s, dma=%p)\n", __func__, dev, name, dma);
	dma->dev = NULL;

	index = dev_read_stringlist_search(dev, "dma-names", name);
	if (index < 0) {
		pr_err("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return dma_get_by_index(dev, index, dma);
}
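
/*
 * Sketch of typical client-driver use (variable names are placeholders,
 * error handling trimmed):
 *
 *	struct dma dma_tx;
 *
 *	ret = dma_get_by_name(dev, "tx", &dma_tx);
 *	if (!ret)
 *		ret = dma_enable(&dma_tx);
 */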
# endif /* OF_CONTROL */

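/*
 * The wrappers below dispatch to the driver's dma_ops. A missing
 * request/free op is treated as success; enable, disable, receive and
 * send report -ENOSYS when unimplemented, while prepare_rcv_buf returns
 * a bare -1.
 */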
int dma_request(struct udevice *dev, struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dev);

	debug("%s(dev=%p, dma=%p)\n", __func__, dev, dma);

	dma->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(dma);
}

int dma_free(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->free)
		return 0;

	return ops->free(dma);
}

int dma_enable(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->enable)
		return -ENOSYS;

	return ops->enable(dma);
}

int dma_disable(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->disable)
		return -ENOSYS;

	return ops->disable(dma);
}

int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->prepare_rcv_buf)
		return -1;

	return ops->prepare_rcv_buf(dma, dst, size);
}

int dma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->receive)
		return -ENOSYS;

	return ops->receive(dma, dst, metadata);
}

int dma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->send)
		return -ENOSYS;

	return ops->send(dma, src, len, metadata);
}
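
/*
 * Sketch of a packet-style RX/TX round trip (illustrative only; buffer
 * ownership, metadata, and the meaning of the receive return value are
 * driver-specific — here it is assumed to be the received length):
 *
 *	dma_prepare_rcv_buf(&dma_rx, rx_buf, RX_BUF_SIZE);
 *	len = dma_receive(&dma_rx, &packet, NULL);
 *	if (len > 0)
 *		dma_send(&dma_tx, packet, len, NULL);
 */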
#endif /* CONFIG_DMA_CHANNELS */

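/*
 * Walk every UCLASS_DMA device and return the first whose uclass-private
 * "supported" mask advertises the requested transfer type.
 */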
int dma_get_device(u32 transfer_type, struct udevice **devp)
{
	struct udevice *dev;
	int ret;

	for (ret = uclass_first_device(UCLASS_DMA, &dev); dev && !ret;
	     ret = uclass_next_device(&dev)) {
		struct dma_dev_priv *uc_priv;

		uc_priv = dev_get_uclass_priv(dev);
		if (uc_priv->supported & transfer_type)
			break;
	}

	if (!dev) {
		pr_err("No DMA device found that supports %x type\n",
		       transfer_type);
		return -EPROTONOSUPPORT;
	}

	*devp = dev;

	return ret;
}

int dma_memcpy(void *dst, void *src, size_t len)
{
	struct udevice *dev;
	const struct dma_ops *ops;
	int ret;

	ret = dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dev);
	if (ret < 0)
		return ret;

	ops = device_get_ops(dev);
	if (!ops->transfer)
		return -ENOSYS;

	/* Invalidate the area, so no writeback into the RAM races with DMA */
	invalidate_dcache_range((unsigned long)dst, (unsigned long)dst +
				roundup(len, ARCH_DMA_MINALIGN));

	return ops->transfer(dev, DMA_MEM_TO_MEM, dst, src, len);
}
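
/*
 * Illustrative call (sketch): the destination should be DMA-aligned,
 * e.g. via ALLOC_CACHE_ALIGN_BUFFER, so the invalidate above cannot
 * clip cache lines shared with neighbouring data:
 *
 *	ALLOC_CACHE_ALIGN_BUFFER(u8, buf, 4096);
 *	ret = dma_memcpy(buf, src, 4096);
 */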

UCLASS_DRIVER(dma) = {
	.id = UCLASS_DMA,
	.name = "dma",
	.flags = DM_UC_FLAG_SEQ_ALIAS,
	.per_device_auto_alloc_size = sizeof(struct dma_dev_priv),
};