/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include <stdio.h>
#include "memory.h"
#include "hw/hw.h"
#include "block.h"
#include "kvm.h"

typedef struct DMAContext DMAContext;
typedef struct ScatterGatherEntry ScatterGatherEntry;

typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;

struct QEMUSGList {
    ScatterGatherEntry *sg;
    int nsg;
    int nalloc;
    size_t size;
    DMAContext *dma;
};

#ifndef CONFIG_USER_ONLY

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size. Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64

typedef int DMATranslateFunc(DMAContext *dma,
                             dma_addr_t addr,
                             hwaddr *paddr,
                             hwaddr *len,
                             DMADirection dir);
typedef void *DMAMapFunc(DMAContext *dma,
                         dma_addr_t addr,
                         dma_addr_t *len,
                         DMADirection dir);
typedef void DMAUnmapFunc(DMAContext *dma,
                          void *buffer,
                          dma_addr_t len,
                          DMADirection dir,
                          dma_addr_t access_len);

struct DMAContext {
    AddressSpace *as;
    DMATranslateFunc *translate;
    DMAMapFunc *map;
    DMAUnmapFunc *unmap;
};

/* A global DMA context corresponding to the address_space_memory
 * AddressSpace, for sysbus devices which do DMA.
 */
extern DMAContext dma_context_memory;

static inline void dma_barrier(DMAContext *dma, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    if (kvm_enabled()) {
        smp_mb();
    }
}

static inline bool dma_has_iommu(DMAContext *dma)
{
    return dma && dma->translate;
}

/* Checks that the given range of addresses is valid for DMA. This is
 * useful for certain cases, but usually you should just use
 * dma_memory_{read,write}() and check for errors */
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir);
static inline bool dma_memory_valid(DMAContext *dma,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        return true;
    } else {
        return iommu_dma_memory_valid(dma, addr, len, dir);
    }
}

int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir);
static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        /* Fast-path for no IOMMU */
        address_space_rw(dma->as, addr, buf, len,
                         dir == DMA_DIRECTION_FROM_DEVICE);
        return 0;
    } else {
        return iommu_dma_memory_rw(dma, addr, buf, len, dir);
    }
}

static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}

static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                                void *buf, dma_addr_t len,
                                DMADirection dir)
{
    dma_barrier(dma, dir);

    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
}

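/*
 * Illustrative sketch (not compiled, not part of this header): a device
 * model issuing a run of adjacent accesses can pay the ordering cost
 * once by calling dma_barrier() itself and then using the _relaxed
 * accessors for the individual transfers. The example_* names are
 * hypothetical.
 */
#if 0
static void example_read_pair(DMAContext *dma, dma_addr_t base,
                              uint32_t *lo, uint32_t *hi)
{
    /* One barrier covers both relaxed reads below */
    dma_barrier(dma, DMA_DIRECTION_TO_DEVICE);
    dma_memory_read_relaxed(dma, base, lo, sizeof(*lo));
    dma_memory_read_relaxed(dma, base + 4, hi, sizeof(*hi));
}
#endif
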
static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
                                   const void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE);
}

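/*
 * Illustrative sketch (not compiled): fetching a fixed-size descriptor
 * from guest memory. ExampleDesc and example_fetch_desc are
 * hypothetical. Note the direction convention: a device *reading*
 * guest memory is a TO_DEVICE transfer.
 */
#if 0
typedef struct {
    uint64_t buf_addr;
    uint32_t buf_len;
    uint32_t flags;
} ExampleDesc;

static void example_fetch_desc(DMAContext *dma, dma_addr_t addr,
                               ExampleDesc *desc)
{
    dma_memory_read(dma, addr, desc, sizeof(*desc));
}
#endif
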
int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len);

int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);

void *iommu_dma_memory_map(DMAContext *dma,
                           dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir);
static inline void *dma_memory_map(DMAContext *dma,
                                   dma_addr_t addr, dma_addr_t *len,
                                   DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        hwaddr xlen = *len;
        void *p;

        p = address_space_map(dma->as, addr, &xlen,
                              dir == DMA_DIRECTION_FROM_DEVICE);
        *len = xlen;
        return p;
    } else {
        return iommu_dma_memory_map(dma, addr, len, dir);
    }
}

void iommu_dma_memory_unmap(DMAContext *dma,
                            void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len);
static inline void dma_memory_unmap(DMAContext *dma,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    if (!dma_has_iommu(dma)) {
        address_space_unmap(dma->as, buffer, (hwaddr)len,
                            dir == DMA_DIRECTION_FROM_DEVICE, access_len);
    } else {
        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
    }
}

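/*
 * Illustrative sketch (not compiled): zero-copy access through
 * dma_memory_map(), with a copying fall-back when the mapping fails or
 * comes back shorter than requested. Assumes memcpy() from <string.h>;
 * example_copy_out is hypothetical.
 */
#if 0
static void example_copy_out(DMAContext *dma, dma_addr_t addr,
                             const void *src, dma_addr_t size)
{
    dma_addr_t len = size;
    void *p = dma_memory_map(dma, addr, &len, DMA_DIRECTION_FROM_DEVICE);

    if (p && len == size) {
        memcpy(p, src, size);
        dma_memory_unmap(dma, p, len, DMA_DIRECTION_FROM_DEVICE, len);
    } else {
        if (p) {
            /* Partial mapping: release it, claiming no bytes accessed */
            dma_memory_unmap(dma, p, len, DMA_DIRECTION_FROM_DEVICE, 0);
        }
        /* Fall back to the copying path */
        dma_memory_write(dma, addr, src, size);
    }
}
#endif
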
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
                                                            dma_addr_t addr) \
    { \
        uint##_bits##_t val; \
        dma_memory_read(dma, addr, &val, (_bits) / 8); \
        return _end##_bits##_to_cpu(val); \
    } \
    static inline void st##_sname##_##_end##_dma(DMAContext *dma, \
                                                 dma_addr_t addr, \
                                                 uint##_bits##_t val) \
    { \
        val = cpu_to_##_end##_bits(val); \
        dma_memory_write(dma, addr, &val, (_bits) / 8); \
    }

static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr)
{
    uint8_t val;

    dma_memory_read(dma, addr, &val, 1);
    return val;
}

static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val)
{
    dma_memory_write(dma, addr, &val, 1);
}

DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA

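/*
 * Illustrative sketch (not compiled): the macro invocations above
 * generate lduw/ldl/ldq and stw/stl/stq accessors in both
 * endiannesses, e.g. ldl_le_dma() and stl_le_dma(). The
 * example_ring_* helpers are hypothetical.
 */
#if 0
static uint32_t example_ring_head(DMAContext *dma, dma_addr_t ring)
{
    return ldl_le_dma(dma, ring);        /* 32-bit little-endian load */
}

static void example_ring_set_tail(DMAContext *dma, dma_addr_t ring,
                                  uint32_t tail)
{
    stl_le_dma(dma, ring + 4, tail);     /* 32-bit little-endian store */
}
#endif
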
void dma_context_init(DMAContext *dma, AddressSpace *as,
                      DMATranslateFunc translate, DMAMapFunc map,
                      DMAUnmapFunc unmap);

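/*
 * Illustrative sketch (not compiled): a bus with a simple windowed
 * IOMMU could register a translate callback like this. The example_*
 * names and window constants are hypothetical; passing NULL for
 * map/unmap is assumed (based on the dma-helpers implementation of
 * this era) to make map requests fall back to a bounce-buffer path
 * driven by translate().
 */
#if 0
#define EXAMPLE_WINDOW_BASE 0x100000000ULL
#define EXAMPLE_WINDOW_SIZE 0x40000000ULL

static int example_translate(DMAContext *dma, dma_addr_t addr,
                             hwaddr *paddr, hwaddr *len,
                             DMADirection dir)
{
    if (addr >= EXAMPLE_WINDOW_SIZE) {
        return -1;          /* nonzero indicates a translation fault */
    }
    *paddr = addr + EXAMPLE_WINDOW_BASE;
    /* A real implementation would also clamp *len to the region it
     * actually translated. */
    return 0;
}

static DMAContext example_dma;

static void example_bus_init(void)
{
    dma_context_init(&example_dma, &address_space_memory,
                     example_translate, NULL, NULL);
}
#endif
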
struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
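
/*
 * Illustrative sketch (not compiled): building a scatter/gather list
 * for a transfer split across two guest buffers. Addresses and lengths
 * are arbitrary example values.
 */
#if 0
static void example_build_sg(QEMUSGList *qsg, DMAContext *dma)
{
    qemu_sglist_init(qsg, 2, dma);        /* alloc_hint: 2 entries */
    qemu_sglist_add(qsg, 0x10000, 512);   /* first fragment */
    qemu_sglist_add(qsg, 0x30000, 512);   /* second fragment */
    /* ... submit the list, e.g. via dma_bdrv_read() below;
     * qemu_sglist_destroy() is called once the I/O completes. */
}
#endif
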
#endif

typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                    QEMUIOVector *iov, int nb_sectors,
                                    BlockDriverCompletionFunc *cb, void *opaque);

BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
                              QEMUSGList *sg, uint64_t sector_num,
                              DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
                              void *opaque, DMADirection dir);
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);
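
/*
 * Illustrative sketch (not compiled): issuing an asynchronous
 * scatter/gather read from a block device into guest memory. The
 * example_* names are hypothetical; the callback receives ret < 0 on
 * I/O error.
 */
#if 0
static void example_dma_complete(void *opaque, int ret)
{
    /* ret < 0 reports an I/O error; finish the request here */
}

static void example_start_read(BlockDriverState *bs, QEMUSGList *sg,
                               uint64_t sector, void *req)
{
    dma_bdrv_read(bs, sg, sector, example_dma_complete, req);
}
#endif
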
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

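/*
 * Illustrative sketch (not compiled): draining part of a
 * scatter/gather list into a flat buffer, e.g. for a PIO fall-back
 * path. Treating the return value as the residual (untransferred)
 * byte count of the sg list is an assumption based on the dma-helpers
 * implementation of this era.
 */
#if 0
static void example_drain(QEMUSGList *sg)
{
    uint8_t buf[512];
    uint64_t resid = dma_buf_read(buf, sizeof(buf), sg);
    (void)resid;
}
#endif
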
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

#endif