2 * QEMU IDE Emulation: MacIO support.
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include "hw/ppc/mac.h"
27 #include "hw/ppc/mac_dbdma.h"
28 #include "block/block.h"
29 #include "sysemu/dma.h"
31 #include <hw/ide/internal.h>
// #define DEBUG_MACIO
/* NOTE(review): the two conflicting debug_macio definitions below are
 * presumably selected by #ifdef DEBUG_MACIO / #else / #endif guards that are
 * not visible in this view of the file — confirm against the full source. */
static const int debug_macio = 1;
static const int debug_macio = 0;
/* Debug printf, compiled in unconditionally but (apparently) gated at
 * runtime on debug_macio inside the elided part of the macro body. */
#define MACIO_DPRINTF(fmt, ...) do { \
        printf(fmt , ## __VA_ARGS__); \
49 /***********************************************************/
50 /* MacIO based PowerPC IDE */
52 #define MACIO_PAGE_SIZE 4096
/*
 * DBDMA completion callback for ATAPI (CD-ROM) DMA transfers.
 *
 * Invoked by the block layer when an async read finishes (ret carries the
 * block-layer status) and also called directly with ret == 0 to fake a
 * completion when no block-layer I/O needed to be issued.
 *
 * NOTE(review): structural lines (braces, if/else, returns) are missing from
 * this view of the file, so the control flow below is incomplete; the
 * comments describe the apparent intent only — verify against the full file.
 */
static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);

    /* Error path: drop the scatter/gather list, report the ATAPI error and
     * forget any cached sub-sector remainder. */
        qemu_sglist_destroy(&s->sg);
        ide_atapi_io_error(s, ret);
        io->remainder_len = 0;

    /* Data not available yet: park this request; DBDMA will restart it. */
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;

    MACIO_DPRINTF("io_buffer_size = %#x\n", s->io_buffer_size);

    /* Account for the chunk that just completed and advance the CD LBA.
     * io_buffer_index is kept modulo 2048 (one CD sector): >> 11 yields the
     * whole 2048-byte sectors consumed, & 0x7ff keeps the intra-sector
     * offset. */
    if (s->io_buffer_size > 0) {
        qemu_sglist_destroy(&s->sg);
        s->packet_transfer_size -= s->io_buffer_size;
        s->io_buffer_index += s->io_buffer_size;
        s->lba += s->io_buffer_index >> 11;
        s->io_buffer_index &= 0x7ff;

    /* Next chunk is bounded by the guest descriptor length (io->len) and by
     * what is left of the ATAPI packet. */
    s->io_buffer_size = MIN(io->len, s->packet_transfer_size);

    MACIO_DPRINTF("remainder: %d io->len: %d size: %d\n", io->remainder_len,
                  io->len, s->packet_transfer_size);
    if (io->remainder_len && io->len) {
        /* guest wants the rest of its previous transfer */
        int remainder_len = MIN(io->remainder_len, io->len);

        MACIO_DPRINTF("copying remainder %d bytes\n", remainder_len);

        /* The cached bytes sit at the tail of the 0x200-byte io->remainder
         * buffer. */
        cpu_physical_memory_write(io->addr, io->remainder + 0x200 -
                                  remainder_len, remainder_len);

        io->addr += remainder_len;
        io->len -= remainder_len;
        s->io_buffer_size = remainder_len;
        io->remainder_len -= remainder_len;
        /* treat remainder as individual transfer, start again */
        qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                         &address_space_memory);
        pmac_ide_atapi_transfer_cb(opaque, 0);

    /* Whole ATAPI packet consumed: the transfer is complete. */
    if (!s->packet_transfer_size) {
        MACIO_DPRINTF("end of transfer\n");

        m->dma_active = false;

        MACIO_DPRINTF("end of DMA\n");

    /* launch next transfer */

    /* handle unaligned accesses first, get them over with and only do the
       remaining bulk transfer using our async DMA helpers */
    unaligned = io->len & 0x1ff;

    /* Sub-sector tail: synchronously read the 512-byte sector holding it,
     * copy just the unaligned bytes into guest memory, and keep the rest in
     * io->remainder for the next descriptor. */
        int sector_num = (s->lba << 2) + (s->io_buffer_index >> 9);
        int nsector = io->len >> 9;

        MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx "\n",
                      unaligned, io->addr + io->len - unaligned);

        bdrv_read(s->bs, sector_num + nsector, io->remainder, 1);
        cpu_physical_memory_write(io->addr + io->len - unaligned,
                                  io->remainder, unaligned);

        io->len -= unaligned;

    MACIO_DPRINTF("io->len = %#x\n", io->len);

    /* Bulk, sector-aligned part: one contiguous sg entry over the guest
     * buffer; the sg size hint assumes MACIO_PAGE_SIZE-sized mappings. */
    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                     &address_space_memory);
    qemu_sglist_add(&s->sg, io->addr, io->len);
    io->addr += s->io_buffer_size;
    io->remainder_len = MIN(s->packet_transfer_size - s->io_buffer_size,
                            (0x200 - unaligned) & 0x1ff);
    MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);

    /* We would read no data from the block layer, thus not get a callback.
       Just fake completion manually. */
        pmac_ide_atapi_transfer_cb(opaque, 0);

    MACIO_DPRINTF("sector_num=%d size=%d, cmd_cmd=%d\n",
                  (s->lba << 2) + (s->io_buffer_index >> 9),
                  s->packet_transfer_size, s->dma_cmd);

    /* s->lba counts 2048-byte CD sectors; << 2 converts to the block
     * layer's 512-byte sectors (consistent with the >>11 / &0x7ff
     * arithmetic above). */
    m->aiocb = dma_bdrv_read(s->bs, &s->sg,
                             (int64_t)(s->lba << 2) + (s->io_buffer_index >> 9),
                             pmac_ide_atapi_transfer_cb, io);

    MACIO_DPRINTF("done DMA\n");
    bdrv_acct_done(s->bs, &s->acct);
/*
 * DBDMA completion callback for hard-disk (non-ATAPI) DMA transfers.
 *
 * Drives IDE_DMA_READ / IDE_DMA_WRITE / (apparently) TRIM requests in
 * chunks, caching sub-sector remainders in io->remainder between guest
 * descriptors. Called by the block layer on async completion and also
 * re-entered directly to continue processing.
 *
 * NOTE(review): structural lines (braces, case labels, returns) are missing
 * from this view of the file; comments describe apparent intent only —
 * verify against the full file.
 */
static void pmac_ide_transfer_cb(void *opaque, int ret)
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);

    /* Error path: tear down the sg list and drop any cached remainder. */
        MACIO_DPRINTF("DMA error\n");

        qemu_sglist_destroy(&s->sg);

        io->remainder_len = 0;

    /* Several async requests may be outstanding (e.g. a remainder write plus
     * the bulk transfer); only the last completion continues. */
    if (--io->requests) {
        /* More requests still in flight */

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;

    sector_num = ide_get_sector(s);
    MACIO_DPRINTF("io_buffer_size = %#x\n", s->io_buffer_size);

    /* Account for the chunk that just completed: round the byte count up to
     * whole 512-byte sectors and advance the drive's current sector.
     * NOTE(review): the updates of sector_num/s->nsector between these two
     * lines appear to be elided from this view. */
    if (s->io_buffer_size > 0) {
        qemu_sglist_destroy(&s->sg);
        n = (s->io_buffer_size + 0x1ff) >> 9;

        ide_set_sector(s, sector_num);

    if (io->finish_remain_read) {
        /* Finish a stale read from the last iteration */
        io->finish_remain_read = false;
        cpu_physical_memory_write(io->finish_addr, io->remainder,

    MACIO_DPRINTF("remainder: %d io->len: %d nsector: %d "
                  "sector_num: %" PRId64 "\n",
                  io->remainder_len, io->len, s->nsector, sector_num);
    if (io->remainder_len && io->len) {
        /* guest wants the rest of its previous transfer */
        int remainder_len = MIN(io->remainder_len, io->len);
        uint8_t *p = &io->remainder[0x200 - remainder_len];

        MACIO_DPRINTF("copying remainder %d bytes at %#" HWADDR_PRIx "\n",
                      remainder_len, io->addr);

        /* Direction depends on the DMA command: device->guest for reads,
         * guest->device (into the remainder cache) for writes. */
        switch (s->dma_cmd) {
            cpu_physical_memory_write(io->addr, p, remainder_len);

            cpu_physical_memory_read(io->addr, p, remainder_len);

        io->addr += remainder_len;
        io->len -= remainder_len;
        io->remainder_len -= remainder_len;

        /* A write remainder is now a full 0x200-byte sector: flush it to the
         * sector just before the current position. */
        if (s->dma_cmd == IDE_DMA_WRITE && !io->remainder_len) {

            qemu_iovec_reset(&io->iov);
            qemu_iovec_add(&io->iov, io->remainder, 0x200);

            m->aiocb = bdrv_aio_writev(s->bs, sector_num - 1, &io->iov, 1,
                                       pmac_ide_transfer_cb, io);

    /* All sectors transferred and nothing cached: signal completion. */
    if (s->nsector == 0 && !io->remainder_len) {
        MACIO_DPRINTF("end of transfer\n");
        s->status = READY_STAT | SEEK_STAT;

        m->dma_active = false;

        MACIO_DPRINTF("end of DMA\n");

    /* launch next transfer */

    s->io_buffer_index = 0;
    s->io_buffer_size = MIN(io->len, s->nsector * 512);

    /* handle unaligned accesses first, get them over with and only do the
       remaining bulk transfer using our async DMA helpers */
    unaligned = io->len & 0x1ff;

        int nsector = io->len >> 9;

        MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx "\n",
                      unaligned, io->addr + io->len - unaligned);

        switch (s->dma_cmd) {
            /* Read: fetch the sector holding the unaligned tail into
             * io->remainder; the guest copy is finished on the next
             * iteration via finish_remain_read above. */
            io->finish_addr = io->addr + io->len - unaligned;
            io->finish_len = unaligned;
            io->finish_remain_read = true;
            qemu_iovec_reset(&io->iov);
            qemu_iovec_add(&io->iov, io->remainder, 0x200);

            m->aiocb = bdrv_aio_readv(s->bs, sector_num + nsector, &io->iov, 1,
                                      pmac_ide_transfer_cb, io);

            /* cache the contents in our io struct */
            cpu_physical_memory_read(io->addr + io->len - unaligned,
                                     io->remainder + io->remainder_len,

    MACIO_DPRINTF("io->len = %#x\n", io->len);

    /* Bulk, sector-aligned part as a single sg entry. */
    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                     &address_space_memory);
    qemu_sglist_add(&s->sg, io->addr, io->len);
    io->addr += io->len + unaligned;
    io->remainder_len = (0x200 - unaligned) & 0x1ff;
    MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);

    /* Only subsector reads happening */
        pmac_ide_transfer_cb(opaque, ret);

    MACIO_DPRINTF("sector_num=%" PRId64 " n=%d, nsector=%d, cmd_cmd=%d\n",
                  sector_num, n, s->nsector, s->dma_cmd);

    /* Dispatch the bulk transfer; case labels (IDE_DMA_READ/WRITE/TRIM,
     * presumably) are elided from this view. */
    switch (s->dma_cmd) {
        m->aiocb = dma_bdrv_read(s->bs, &s->sg, sector_num,
                                 pmac_ide_transfer_cb, io);

        m->aiocb = dma_bdrv_write(s->bs, &s->sg, sector_num,
                                  pmac_ide_transfer_cb, io);

        m->aiocb = dma_bdrv_io(s->bs, &s->sg, sector_num,
                               ide_issue_trim, pmac_ide_transfer_cb, io,
                               DMA_DIRECTION_TO_DEVICE);

    /* Only reads/writes are accounted; TRIM is not. */
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        bdrv_acct_done(s->bs, &s->acct);
/*
 * DBDMA "transfer" entry point registered for this IDE channel.
 *
 * Starts block-accounting for the request and hands off to the appropriate
 * completion callback (ATAPI vs. hard disk), which does the actual work.
 *
 * NOTE(review): braces/returns and some argument lines are elided from this
 * view; comments describe apparent intent only.
 */
static void pmac_ide_transfer(DBDMA_io *io)
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);

    s->io_buffer_size = 0;
    if (s->drive_kind == IDE_CD) {

        /* Handle non-block ATAPI DMA transfers */

            /* Data is already in s->io_buffer (no block-layer I/O needed):
             * copy it straight to guest RAM and finish immediately. */
            s->io_buffer_size = MIN(io->len, s->packet_transfer_size);
            bdrv_acct_start(s->bs, &s->acct, s->io_buffer_size,

            MACIO_DPRINTF("non-block ATAPI DMA transfer size: %d\n",

            /* Copy ATAPI buffer directly to RAM and finish */
            cpu_physical_memory_write(io->addr, s->io_buffer,

            m->dma_active = false;

            MACIO_DPRINTF("end of non-block ATAPI DMA transfer\n");
            bdrv_acct_done(s->bs, &s->acct);

        /* Block-backed ATAPI transfer: account as a read and start the
         * ATAPI state machine with a faked "completion". */
        bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
        pmac_ide_atapi_transfer_cb(io, 0);

    /* Hard disk: account according to direction (case labels elided). */
    switch (s->dma_cmd) {
        bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);

        bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_WRITE);

    /* Kick the hard-disk state machine. */
    pmac_ide_transfer_cb(io, 0);
/*
 * DBDMA "flush" hook for this channel.
 * NOTE(review): the body is elided from this view — presumably it drains any
 * in-flight aiocb (m->aiocb); confirm against the full file.
 */
static void pmac_ide_flush(DBDMA_io *io)
    MACIOIDEState *m = io->opaque;
419 /* PowerMac IDE memory IO */
/*
 * Byte-wide MMIO write: IDE task-file and command registers.
 * Registers are spaced 16 bytes apart within the 4KiB window, hence the
 * (addr & 0xFFF) >> 4 decode. The switch/case dispatch on the decoded index
 * is elided from this view.
 */
static void pmac_ide_writeb (void *opaque,
                             hwaddr addr, uint32_t val)
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;

        ide_ioport_write(&d->bus, addr, val);

        ide_cmd_write(&d->bus, 0, val);
/*
 * Byte-wide MMIO read: IDE task-file and status registers.
 * Same 16-byte register spacing as pmac_ide_writeb; the case labels and the
 * default (presumably 0xFF) path are elided from this view.
 */
static uint32_t pmac_ide_readb (void *opaque,hwaddr addr)
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;

        retval = ide_ioport_read(&d->bus, addr);

        retval = ide_status_read(&d->bus, 0);
/*
 * 16-bit MMIO write to the IDE data register.
 * NOTE(review): a byte-swap of val (matching the bswap16 in pmac_ide_readw)
 * appears to be elided from this view — confirm against the full file.
 */
static void pmac_ide_writew (void *opaque,
                             hwaddr addr, uint32_t val)
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;

        ide_data_writew(&d->bus, 0, val);
/*
 * 16-bit MMIO read from the IDE data register, with a byte swap
 * (guest-endianness fixup for the little-endian IDE data port).
 */
static uint32_t pmac_ide_readw (void *opaque,hwaddr addr)
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;

        retval = ide_data_readw(&d->bus, 0);

    retval = bswap16(retval);
/*
 * 32-bit MMIO write to the IDE data register.
 * NOTE(review): a byte-swap of val (matching the bswap32 in pmac_ide_readl)
 * appears to be elided from this view — confirm against the full file.
 */
static void pmac_ide_writel (void *opaque,
                             hwaddr addr, uint32_t val)
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;

        ide_data_writel(&d->bus, 0, val);
/*
 * 32-bit MMIO read from the IDE data register, with a byte swap
 * (guest-endianness fixup for the little-endian IDE data port).
 */
static uint32_t pmac_ide_readl (void *opaque,hwaddr addr)
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;

        retval = ide_data_readl(&d->bus, 0);

    retval = bswap32(retval);
/* MemoryRegionOps for the 0x1000-byte "pmac-ide" MMIO window; the
 * old_mmio read/write callback table entries are elided from this view. */
static const MemoryRegionOps pmac_ide_ops = {
    .endianness = DEVICE_NATIVE_ENDIAN,
/* Migration state: the IDE bus registers plus both drives' state.
 * NOTE(review): the .name and .version_id initializers are elided from this
 * view of the file. */
static const VMStateDescription vmstate_pmac = {
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(bus, MACIOIDEState),
        VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState),
        VMSTATE_END_OF_LIST()
/* DeviceClass reset handler: reset the IDE bus (and thus both drives). */
static void macio_ide_reset(DeviceState *dev)
    MACIOIDEState *d = MACIO_IDE(dev);

    ide_bus_reset(&d->bus);
/* No-op IDEDMA callbacks: the MacIO DMA engine is driven entirely by DBDMA
 * (see ide_dbdma_start), so these core-IDE hooks do nothing. Their bodies
 * (presumably trivial returns) are elided from this view. */
static int ide_nop(IDEDMA *dma)

static int ide_nop_int(IDEDMA *dma, int x)

static void ide_nop_restart(void *opaque, int x, RunState y)
/*
 * IDEDMA start hook: mark the channel active and kick the DBDMA engine,
 * which will eventually invoke pmac_ide_transfer for this channel.
 */
static void ide_dbdma_start(IDEDMA *dma, IDEState *s,
                            BlockDriverCompletionFunc *cb)
    /* Recover our device state from the embedded IDEDMA member. */
    MACIOIDEState *m = container_of(dma, MACIOIDEState, dma);

    m->dma_active = true;
    DBDMA_kick(m->dbdma);
/* IDEDMA vtable: only start_dma does real work; all other hooks are no-ops
 * because transfers are sequenced by the DBDMA channel. */
static const IDEDMAOps dbdma_ops = {
    .start_dma = ide_dbdma_start,
    .start_transfer = ide_nop,
    .prepare_buf = ide_nop_int,
    .rw_buf = ide_nop_int,
    .set_unit = ide_nop_int,
    .add_status = ide_nop_int,
    .set_inactive = ide_nop,
    .restart_cb = ide_nop_restart,
/* Device realize: finish IDE bus init with our IRQ and wire up the
 * DBDMA-backed DMA ops. */
static void macio_ide_realizefn(DeviceState *dev, Error **errp)
    MACIOIDEState *s = MACIO_IDE(dev);

    ide_init2(&s->bus, s->irq);

    /* Register DMA callbacks */
    s->dma.ops = &dbdma_ops;
    s->bus.dma = &s->dma;
/* Instance init: create a 2-drive IDE bus, the 4KiB MMIO window, and the
 * two sysbus IRQs (IDE interrupt first, then the DMA interrupt). */
static void macio_ide_initfn(Object *obj)
    SysBusDevice *d = SYS_BUS_DEVICE(obj);
    MACIOIDEState *s = MACIO_IDE(obj);

    ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
    memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000);
    sysbus_init_mmio(d, &s->mem);
    sysbus_init_irq(d, &s->irq);
    sysbus_init_irq(d, &s->dma_irq);
/* Class init: hook up realize, reset and migration state. */
static void macio_ide_class_init(ObjectClass *oc, void *data)
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = macio_ide_realizefn;
    dc->reset = macio_ide_reset;
    dc->vmsd = &vmstate_pmac;
/* QOM type registration record for the MacIO IDE sysbus device. */
static const TypeInfo macio_ide_type_info = {
    .name = TYPE_MACIO_IDE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(MACIOIDEState),
    .instance_init = macio_ide_initfn,
    .class_init = macio_ide_class_init,
/* Register the MacIO IDE QOM type (invoked via type_init below). */
static void macio_ide_register_types(void)
    type_register_static(&macio_ide_type_info);
/* hd_table must contain 2 block drivers */
/* Attach up to two drives to the bus. NOTE(review): a NULL check on
 * hd_table[i] appears to be elided between the loop header and the call —
 * confirm against the full file. */
void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table)
    for (i = 0; i < 2; i++) {

            ide_create_drive(&s->bus, i, hd_table[i]);
/* Bind this IDE channel to a DBDMA channel: transfers and flushes on that
 * channel are routed to pmac_ide_transfer / pmac_ide_flush with s as the
 * opaque argument. */
void macio_ide_register_dma(MACIOIDEState *s, void *dbdma, int channel)
    DBDMA_register_channel(dbdma, channel, s->dma_irq,
                           pmac_ide_transfer, pmac_ide_flush, s);
/* Run macio_ide_register_types() at QEMU module-init time. */
type_init(macio_ide_register_types)