/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/range.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/* #define DEBUG_IOMMU */

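/*
 * Fill @len bytes of guest memory at @addr with the constant byte @c,
 * going through a small on-stack bounce buffer.  Returns nonzero if any
 * of the underlying writes failed.
 */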
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}

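/*
 * Initialize a scatter-gather list with room for @alloc_hint entries.
 * A reference is taken on @dev so the device stays alive while the list
 * is in use; qemu_sglist_destroy() drops it.
 */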
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

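/*
 * Append one entry to the list, growing the backing array geometrically
 * (2n + 1) when it is full.
 */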
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

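/* Release the entry array, drop the device reference and zero the list. */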
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

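/*
 * State for an in-flight scatter-gather block transfer: the backend and
 * the nested AIOCB, the current position in the QEMUSGList (sg_cur_index
 * and sg_cur_byte), and the iovec of mappings built for the current chunk.
 */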
typedef struct {
    BlockAIOCB common;
    BlockBackend *blk;
    BlockAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_blk_cb(void *opaque, int ret);

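/*
 * When dma_memory_map() cannot map any part of the next chunk (e.g. the
 * bounce buffer is busy), the transfer parks itself with
 * cpu_register_map_client() and is restarted from a bottom half once a
 * mapping becomes available again.
 */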
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

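/* Unmap every buffer mapped for the current chunk and reset the iovec. */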
static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

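/*
 * Finish the whole transfer: unmap, run the caller's completion callback,
 * and release the AIOCB.
 */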
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    qemu_aio_unref(dbs);
}

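/*
 * Core of the transfer loop.  Called once to start and then as the
 * completion callback of each chunk: advance past the chunk that just
 * finished, map as much of the remaining scatter-gather list as possible
 * into dbs->iov, and submit the next chunk via dbs->io_func.
 */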
static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    /* Map as many scatter-gather entries as possible for the next chunk. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped; retry once a mapping frees up. */
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    /* Trim a partial trailing sector so the I/O stays sector-aligned. */
    if (dbs->iov.size & ~BDRV_SECTOR_MASK) {
        qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
    }

    dbs->acb = dbs->io_func(dbs->blk, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_blk_cb, dbs);
    assert(dbs->acb);
}

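/*
 * Cancellation hook: forward the cancel to the nested block-layer AIOCB,
 * if one is currently in flight.
 */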
static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        blk_aio_cancel_async(dbs->acb);
    }
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size = sizeof(DMAAIOCB),
    .cancel_async = dma_aio_cancel,
};

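/*
 * Start a scatter-gather transfer: allocate the AIOCB, record the
 * parameters, and kick the state machine with a faked "previous chunk
 * completed successfully" callback.
 */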
BlockAIOCB *dma_blk_io(
    BlockBackend *blk, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = blk_aio_get(&dma_aiocb_info, blk, cb, opaque);

    trace_dma_blk_io(dbs, blk, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->blk = blk;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_blk_cb(dbs, 0);
    return &dbs->common;
}

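/*
 * Convenience wrappers that plug blk_aio_readv()/blk_aio_writev() into
 * dma_blk_io().  A device model would typically call these from its
 * request path, e.g. (a sketch; "s" is hypothetical device state, not
 * part of this file):
 *
 *     s->acb = dma_blk_read(s->blk, &s->sg, sector, my_complete_cb, s);
 */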
BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t sector,
                         void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk, sg, sector, blk_aio_readv, cb, opaque,
                      DMA_DIRECTION_FROM_DEVICE);
}

BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t sector,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk, sg, sector, blk_aio_writev, cb, opaque,
                      DMA_DIRECTION_TO_DEVICE);
}

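/*
 * Copy between a linear buffer and a scatter-gather list, entry by entry,
 * and return the number of bytes of the list that were *not* transferred
 * (the residual).
 */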
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

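/*
 * Direction is named from the device's point of view: dma_buf_read()
 * copies @ptr into guest memory (the guest reads from the device), while
 * dma_buf_write() copies guest memory into @ptr.
 */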
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

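/* Start I/O accounting for a transfer covering the whole scatter-gather
 * list. */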
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}