/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <hw/hw.h>
#include <hw/pc.h>
#include <hw/pci.h>
#include <hw/isa.h>
#include "block.h"
#include "block_int.h"
#include "dma.h"

#include <hw/ide/pci.h>

#define BMDMA_PAGE_SIZE 4096

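/*
 * Note on the descriptor format parsed below (a reader's summary; the
 * layout is the bus master IDE / SFF-8038i-style Physical Region
 * Descriptor table that bmdma_prepare_buf() and bmdma_rw_buf() walk):
 *
 * Each PRD entry is 8 bytes, little-endian:
 *   dword 0: physical base address of the data buffer
 *   dword 1: bits 0-15  byte count (bit 0 forced even, 0 means 0x10000)
 *            bit  31    end-of-table marker
 *
 * A hypothetical two-entry table describing a 4 KiB + 512 byte transfer:
 *   entry 0: addr = 0x00100000, size = 0x00001000   (4096 bytes)
 *   entry 1: addr = 0x00200000, size = 0x80000200   (512 bytes, last entry)
 *
 * As a fail-safe, parsing stops after one page (BMDMA_PAGE_SIZE) of
 * descriptors even if no end-of-table bit has been seen.
 */
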
static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockDriverCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->unit = s->unit;
    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
    bm->sector_num = ide_get_sector(s);
    bm->nsector = s->nsector;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}

/* return 0 if buffer completed */
static int bmdma_prepare_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    qemu_sglist_init(&s->sg, s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for(;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return s->io_buffer_size != 0;
            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            qemu_sglist_add(&s->sg, bm->cur_prd_addr, l);
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }
    return 1;
}

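/*
 * bmdma_rw_buf() below walks the same PRD table as bmdma_prepare_buf(),
 * but instead of building a scatter/gather list it copies data piecewise
 * between guest memory and s->io_buffer (the buffered path used when the
 * transfer goes through the IDE core's io_buffer). is_write is seen from
 * the guest memory side: non-zero means data is copied into guest RAM.
 */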
/* return 0 if buffer completed */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for(;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0)
            break;
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return 0;
            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len)
            l = bm->cur_prd_len;
        if (l > 0) {
            if (is_write) {
                cpu_physical_memory_write(bm->cur_prd_addr,
                                          s->io_buffer + s->io_buffer_index, l);
            } else {
                cpu_physical_memory_read(bm->cur_prd_addr,
                                         s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}

static int bmdma_set_unit(IDEDMA *dma, int unit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    bm->unit = unit;

    return 0;
}

static int bmdma_add_status(IDEDMA *dma, int status)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    bm->status |= status;

    return 0;
}

static int bmdma_set_inactive(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->status &= ~BM_STATUS_DMAING;
    bm->dma_cb = NULL;
    bm->unit = -1;

    return 0;
}

static void bmdma_restart_dma(BMDMAState *bm, int is_read)
{
    IDEState *s = bmdma_active_if(bm);

    ide_set_sector(s, bm->sector_num);
    s->io_buffer_index = 0;
    s->io_buffer_size = 0;
    s->nsector = bm->nsector;
    s->is_read = is_read;
    bm->cur_addr = bm->addr;
    bm->dma_cb = ide_dma_cb;
    bmdma_start_dma(&bm->dma, s, bm->dma_cb);
}

/* TODO This should be common IDE code */
static void bmdma_restart_bh(void *opaque)
{
    BMDMAState *bm = opaque;
    IDEBus *bus = bm->bus;
    int is_read;

    qemu_bh_delete(bm->bh);
    bm->bh = NULL;

    if (bm->unit == (uint8_t) -1) {
        return;
    }

    is_read = !!(bus->error_status & BM_STATUS_RETRY_READ);

    if (bus->error_status & BM_STATUS_DMA_RETRY) {
        bus->error_status &= ~(BM_STATUS_DMA_RETRY | BM_STATUS_RETRY_READ);
        bmdma_restart_dma(bm, is_read);
    } else if (bus->error_status & BM_STATUS_PIO_RETRY) {
        bus->error_status &= ~(BM_STATUS_PIO_RETRY | BM_STATUS_RETRY_READ);
        if (is_read) {
            ide_sector_read(bmdma_active_if(bm));
        } else {
            ide_sector_write(bmdma_active_if(bm));
        }
    } else if (bus->error_status & BM_STATUS_RETRY_FLUSH) {
        ide_flush_cache(bmdma_active_if(bm));
    }
}

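/*
 * bmdma_restart_cb() is the .restart_cb hook of bmdma_ops; its
 * (opaque, running, reason) signature matches QEMU's VM change state
 * handlers of this era, so it is presumably invoked when the VM
 * transitions back to the running state (e.g. after migration or after an
 * I/O error stopped the guest). The actual retry is deferred to a bottom
 * half; bmdma_restart_bh() then re-issues the interrupted DMA, PIO or
 * flush request based on the BM_STATUS_*_RETRY flags recorded in
 * bus->error_status.
 */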
static void bmdma_restart_cb(void *opaque, int running, int reason)
{
    IDEDMA *dma = opaque;
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    if (!running)
        return;

    if (!bm->bh) {
        bm->bh = qemu_bh_new(bmdma_restart_bh, &bm->dma);
        qemu_bh_schedule(bm->bh);
    }
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma);
    }
}

static int bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

#ifdef DEBUG_IDE
    printf("ide: dma_reset\n");
#endif
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
    bm->sector_num = 0;
    bm->nsector = 0;

    return 0;
}

static int bmdma_start_transfer(IDEDMA *dma)
{
    return 0;
}

static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}

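/*
 * Bus master IDE command register write. Only two bits persist in this
 * model: bit 0 (BM_CMD_START, the Start/Stop bit) and bit 3 (the
 * read/write direction control), which is why the stored value is masked
 * with 0x09 at the end of the function.
 */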
void bmdma_cmd_writeb(void *opaque, uint32_t addr, uint32_t val)
{
    BMDMAState *bm = opaque;
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, val);
#endif

    /* Ignore writes to SSBM if it keeps the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            /*
             * We can't cancel Scatter Gather DMA in the middle of the
             * operation or a partial (not full) DMA transfer would reach
             * the storage, so we wait for completion instead (we behave
             * as if the DMA was completed by the time the guest tried to
             * cancel it with a bmdma_cmd_writeb that has BM_CMD_START
             * cleared).
             *
             * In the future we'll be able to safely cancel the I/O if the
             * whole DMA operation is submitted to disk with a single
             * aio operation with preadv/pwritev.
             */
            if (bm->bus->dma->aiocb) {
                qemu_aio_flush();
                assert(bm->bus->dma->aiocb == NULL);
                assert((bm->status & BM_STATUS_DMAING) == 0);
            }
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb)
                    bm->dma_cb(bmdma_active_if(bm), 0);
            }
        }
    }

    bm->cmd = val & 0x09;
}

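/*
 * The BMDMA descriptor table pointer is a 32-bit register that guests may
 * access with 1-, 2- or 4-byte I/O cycles at any byte offset; the two
 * handlers below reassemble those partial accesses with shift/mask
 * arithmetic. For example, a 16-bit write of 0xcafe at offset 2 replaces
 * only bits 16-31 of bm->addr. The lowest two bits are kept clear, since
 * the PRD table is expected to be dword-aligned.
 */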
static void bmdma_addr_read(IORange *ioport, uint64_t addr,
                            unsigned width, uint64_t *data)
{
    BMDMAState *bm = container_of(ioport, BMDMAState, addr_ioport);
    uint32_t mask = (1ULL << (width * 8)) - 1;

    *data = (bm->addr >> (addr * 8)) & mask;
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)*data);
#endif
}

static void bmdma_addr_write(IORange *ioport, uint64_t addr,
                             unsigned width, uint64_t data)
{
    BMDMAState *bm = container_of(ioport, BMDMAState, addr_ioport);
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    bm->addr &= ~(mask << shift);
    bm->addr |= ((data & mask) << shift) & ~3;
}

const IORangeOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
};

static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static bool ide_bmdma_status_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    /* Older versions abused some bits in the status register for internal
     * error state. If any of these bits are set, we must add a subsection to
     * transfer the real status register */
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    return ((bm->status & abused_bits) != 0);
}

static void ide_bmdma_pre_save(void *opaque)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);
}

/* This function accesses bm->bus->error_status which is loaded only after
 * BMDMA itself. This is why the function is called from ide_pci_post_load
 * instead of being registered with VMState where it would run too early. */
static int ide_bmdma_post_load(void *opaque, int version_id)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }

    return 0;
}

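/*
 * The two VMStateDescriptions below are migration subsections: they are
 * only put on the wire when their .needed callback returns true (an
 * interrupted PRD walk, or status bits that cannot be represented in the
 * compat field). This keeps the migration stream acceptable to older
 * QEMU versions whenever the extra state is not actually required.
 */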
static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .pre_save = ide_bmdma_pre_save,
    .fields = (VMStateField []) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(migration_compat_status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(sector_num, BMDMAState),
        VMSTATE_UINT32(nsector, BMDMAState),
        VMSTATE_UINT8(unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection []) {
        {
            .vmsd = &vmstate_bmdma_current,
            .needed = ide_bmdma_current_needed,
        }, {
            .vmsd = &vmstate_bmdma_status,
            .needed = ide_bmdma_status_needed,
        }, {
            /* empty */
        }
    }
};

static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for(i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions
           stored bigger values. We only need the last bit */
        d->bmdma[i].unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }

    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .post_load = ide_pci_post_load,
    .fields = (VMStateField []) {
        VMSTATE_PCI_DEVICE(dev, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};

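/*
 * hd_table[] holds up to four drives in the conventional BIOS order; the
 * bus[]/unit[] tables below map index 0 to the primary master, 1 to the
 * primary slave, 2 to the secondary master and 3 to the secondary slave.
 */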
void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
    PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, dev);
    static const int bus[4] = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    for (i = 0; i < 4; i++) {
        if (hd_table[i] == NULL)
            continue;
        ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
    }
}

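/*
 * bmdma_ops plugs the functions above into the generic IDEDMA interface.
 * bmdma_init() installs it on an IDE bus and also interposes bmdma_irq()
 * between the bus and the original IRQ line, so that BM_STATUS_INT gets
 * latched in the status register whenever the drive raises an interrupt.
 */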
static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .start_transfer = bmdma_start_transfer,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .set_unit = bmdma_set_unit,
    .add_status = bmdma_add_status,
    .set_inactive = bmdma_set_inactive,
    .restart_cb = bmdma_restart_cb,
    .reset = bmdma_reset,
};

void bmdma_init(IDEBus *bus, BMDMAState *bm)
{
    qemu_irq *irq;

    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    irq = qemu_allocate_irqs(bmdma_irq, bm, 1);
    bus->irq = *irq;
}