/*
 * PowerMac descriptor-based DMA emulation
 *
 * Copyright (c) 2005-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2009 Laurent Vivier
 *
 * some parts from linux-2.6.28, arch/powerpc/include/asm/dbdma.h
 *
 *   Definitions for using the Apple Descriptor-Based DMA controller
 *   in Power Macintosh computers.
 *
 *   Copyright (C) 1996 Paul Mackerras.
 *
 * some parts from mol 0.9.71
 *
 *   Descriptor based DMA emulation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
39 #include "qemu/osdep.h"
41 #include "hw/isa/isa.h"
42 #include "hw/ppc/mac_dbdma.h"
43 #include "qemu/main-loop.h"
/* Debug tracing: compiled to a real printf only when DEBUG_DBDMA is
 * defined; otherwise the macro expands to nothing so release builds
 * pay no cost. */
#ifdef DEBUG_DBDMA
#define DBDMA_DPRINTF(fmt, ...)                                  \
    do { printf("DBDMA: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DBDMA_DPRINTF(fmt, ...)
#endif
59 static DBDMAState *dbdma_from_ch(DBDMA_channel *ch)
61 return container_of(ch, DBDMAState, channels[ch->channel]);
65 static void dump_dbdma_cmd(dbdma_cmd *cmd)
67 printf("dbdma_cmd %p\n", cmd);
68 printf(" req_count 0x%04x\n", le16_to_cpu(cmd->req_count));
69 printf(" command 0x%04x\n", le16_to_cpu(cmd->command));
70 printf(" phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr));
71 printf(" cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep));
72 printf(" res_count 0x%04x\n", le16_to_cpu(cmd->res_count));
73 printf(" xfer_status 0x%04x\n", le16_to_cpu(cmd->xfer_status));
76 static void dump_dbdma_cmd(dbdma_cmd *cmd)
80 static void dbdma_cmdptr_load(DBDMA_channel *ch)
82 DBDMA_DPRINTF("dbdma_cmdptr_load 0x%08x\n",
83 ch->regs[DBDMA_CMDPTR_LO]);
84 cpu_physical_memory_read(ch->regs[DBDMA_CMDPTR_LO],
85 &ch->current, sizeof(dbdma_cmd));
88 static void dbdma_cmdptr_save(DBDMA_channel *ch)
90 DBDMA_DPRINTF("dbdma_cmdptr_save 0x%08x\n",
91 ch->regs[DBDMA_CMDPTR_LO]);
92 DBDMA_DPRINTF("xfer_status 0x%08x res_count 0x%04x\n",
93 le16_to_cpu(ch->current.xfer_status),
94 le16_to_cpu(ch->current.res_count));
95 cpu_physical_memory_write(ch->regs[DBDMA_CMDPTR_LO],
96 &ch->current, sizeof(dbdma_cmd));
99 static void kill_channel(DBDMA_channel *ch)
101 DBDMA_DPRINTF("kill_channel\n");
103 ch->regs[DBDMA_STATUS] |= DEAD;
104 ch->regs[DBDMA_STATUS] &= ~ACTIVE;
106 qemu_irq_raise(ch->irq);
109 static void conditional_interrupt(DBDMA_channel *ch)
111 dbdma_cmd *current = &ch->current;
113 uint16_t sel_mask, sel_value;
117 DBDMA_DPRINTF("%s\n", __func__);
119 intr = le16_to_cpu(current->command) & INTR_MASK;
122 case INTR_NEVER: /* don't interrupt */
124 case INTR_ALWAYS: /* always interrupt */
125 qemu_irq_raise(ch->irq);
126 DBDMA_DPRINTF("%s: raise\n", __func__);
130 status = ch->regs[DBDMA_STATUS] & DEVSTAT;
132 sel_mask = (ch->regs[DBDMA_INTR_SEL] >> 16) & 0x0f;
133 sel_value = ch->regs[DBDMA_INTR_SEL] & 0x0f;
135 cond = (status & sel_mask) == (sel_value & sel_mask);
138 case INTR_IFSET: /* intr if condition bit is 1 */
140 qemu_irq_raise(ch->irq);
141 DBDMA_DPRINTF("%s: raise\n", __func__);
144 case INTR_IFCLR: /* intr if condition bit is 0 */
146 qemu_irq_raise(ch->irq);
147 DBDMA_DPRINTF("%s: raise\n", __func__);
153 static int conditional_wait(DBDMA_channel *ch)
155 dbdma_cmd *current = &ch->current;
157 uint16_t sel_mask, sel_value;
161 DBDMA_DPRINTF("conditional_wait\n");
163 wait = le16_to_cpu(current->command) & WAIT_MASK;
166 case WAIT_NEVER: /* don't wait */
168 case WAIT_ALWAYS: /* always wait */
172 status = ch->regs[DBDMA_STATUS] & DEVSTAT;
174 sel_mask = (ch->regs[DBDMA_WAIT_SEL] >> 16) & 0x0f;
175 sel_value = ch->regs[DBDMA_WAIT_SEL] & 0x0f;
177 cond = (status & sel_mask) == (sel_value & sel_mask);
180 case WAIT_IFSET: /* wait if condition bit is 1 */
184 case WAIT_IFCLR: /* wait if condition bit is 0 */
192 static void next(DBDMA_channel *ch)
196 ch->regs[DBDMA_STATUS] &= ~BT;
198 cp = ch->regs[DBDMA_CMDPTR_LO];
199 ch->regs[DBDMA_CMDPTR_LO] = cp + sizeof(dbdma_cmd);
200 dbdma_cmdptr_load(ch);
203 static void branch(DBDMA_channel *ch)
205 dbdma_cmd *current = &ch->current;
207 ch->regs[DBDMA_CMDPTR_LO] = current->cmd_dep;
208 ch->regs[DBDMA_STATUS] |= BT;
209 dbdma_cmdptr_load(ch);
212 static void conditional_branch(DBDMA_channel *ch)
214 dbdma_cmd *current = &ch->current;
216 uint16_t sel_mask, sel_value;
220 DBDMA_DPRINTF("conditional_branch\n");
222 /* check if we must branch */
224 br = le16_to_cpu(current->command) & BR_MASK;
227 case BR_NEVER: /* don't branch */
230 case BR_ALWAYS: /* always branch */
235 status = ch->regs[DBDMA_STATUS] & DEVSTAT;
237 sel_mask = (ch->regs[DBDMA_BRANCH_SEL] >> 16) & 0x0f;
238 sel_value = ch->regs[DBDMA_BRANCH_SEL] & 0x0f;
240 cond = (status & sel_mask) == (sel_value & sel_mask);
243 case BR_IFSET: /* branch if condition bit is 1 */
249 case BR_IFCLR: /* branch if condition bit is 0 */
258 static void channel_run(DBDMA_channel *ch);
260 static void dbdma_end(DBDMA_io *io)
262 DBDMA_channel *ch = io->channel;
263 dbdma_cmd *current = &ch->current;
265 DBDMA_DPRINTF("%s\n", __func__);
267 if (conditional_wait(ch))
270 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
271 current->res_count = cpu_to_le16(io->len);
272 dbdma_cmdptr_save(ch);
274 ch->regs[DBDMA_STATUS] &= ~FLUSH;
276 conditional_interrupt(ch);
277 conditional_branch(ch);
280 /* Indicate that we're ready for a new DMA round */
281 ch->io.processing = false;
283 if ((ch->regs[DBDMA_STATUS] & RUN) &&
284 (ch->regs[DBDMA_STATUS] & ACTIVE))
288 static void start_output(DBDMA_channel *ch, int key, uint32_t addr,
289 uint16_t req_count, int is_last)
291 DBDMA_DPRINTF("start_output\n");
293 /* KEY_REGS, KEY_DEVICE and KEY_STREAM
294 * are not implemented in the mac-io chip
297 DBDMA_DPRINTF("addr 0x%x key 0x%x\n", addr, key);
298 if (!addr || key > KEY_STREAM3) {
304 ch->io.len = req_count;
305 ch->io.is_last = is_last;
306 ch->io.dma_end = dbdma_end;
307 ch->io.is_dma_out = 1;
308 ch->io.processing = true;
314 static void start_input(DBDMA_channel *ch, int key, uint32_t addr,
315 uint16_t req_count, int is_last)
317 DBDMA_DPRINTF("start_input\n");
319 /* KEY_REGS, KEY_DEVICE and KEY_STREAM
320 * are not implemented in the mac-io chip
323 DBDMA_DPRINTF("addr 0x%x key 0x%x\n", addr, key);
324 if (!addr || key > KEY_STREAM3) {
330 ch->io.len = req_count;
331 ch->io.is_last = is_last;
332 ch->io.dma_end = dbdma_end;
333 ch->io.is_dma_out = 0;
334 ch->io.processing = true;
340 static void load_word(DBDMA_channel *ch, int key, uint32_t addr,
343 dbdma_cmd *current = &ch->current;
346 DBDMA_DPRINTF("load_word\n");
348 /* only implements KEY_SYSTEM */
350 if (key != KEY_SYSTEM) {
351 printf("DBDMA: LOAD_WORD, unimplemented key %x\n", key);
356 cpu_physical_memory_read(addr, &val, len);
359 val = (val << 16) | (current->cmd_dep & 0x0000ffff);
361 val = (val << 24) | (current->cmd_dep & 0x00ffffff);
363 current->cmd_dep = val;
365 if (conditional_wait(ch))
368 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
369 dbdma_cmdptr_save(ch);
370 ch->regs[DBDMA_STATUS] &= ~FLUSH;
372 conditional_interrupt(ch);
376 DBDMA_kick(dbdma_from_ch(ch));
379 static void store_word(DBDMA_channel *ch, int key, uint32_t addr,
382 dbdma_cmd *current = &ch->current;
385 DBDMA_DPRINTF("store_word\n");
387 /* only implements KEY_SYSTEM */
389 if (key != KEY_SYSTEM) {
390 printf("DBDMA: STORE_WORD, unimplemented key %x\n", key);
395 val = current->cmd_dep;
401 cpu_physical_memory_write(addr, &val, len);
403 if (conditional_wait(ch))
406 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
407 dbdma_cmdptr_save(ch);
408 ch->regs[DBDMA_STATUS] &= ~FLUSH;
410 conditional_interrupt(ch);
414 DBDMA_kick(dbdma_from_ch(ch));
417 static void nop(DBDMA_channel *ch)
419 dbdma_cmd *current = &ch->current;
421 if (conditional_wait(ch))
424 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
425 dbdma_cmdptr_save(ch);
427 conditional_interrupt(ch);
428 conditional_branch(ch);
431 DBDMA_kick(dbdma_from_ch(ch));
434 static void stop(DBDMA_channel *ch)
436 ch->regs[DBDMA_STATUS] &= ~(ACTIVE|DEAD|FLUSH);
438 /* the stop command does not increment command pointer */
441 static void channel_run(DBDMA_channel *ch)
443 dbdma_cmd *current = &ch->current;
448 DBDMA_DPRINTF("channel_run\n");
449 dump_dbdma_cmd(current);
451 /* clear WAKE flag at command fetch */
453 ch->regs[DBDMA_STATUS] &= ~WAKE;
455 cmd = le16_to_cpu(current->command) & COMMAND_MASK;
467 key = le16_to_cpu(current->command) & 0x0700;
468 req_count = le16_to_cpu(current->req_count);
469 phy_addr = le32_to_cpu(current->phy_addr);
471 if (key == KEY_STREAM4) {
472 printf("command %x, invalid key 4\n", cmd);
479 start_output(ch, key, phy_addr, req_count, 0);
483 start_output(ch, key, phy_addr, req_count, 1);
487 start_input(ch, key, phy_addr, req_count, 0);
491 start_input(ch, key, phy_addr, req_count, 1);
495 if (key < KEY_REGS) {
496 printf("command %x, invalid key %x\n", cmd, key);
500 /* for LOAD_WORD and STORE_WORD, req_count is on 3 bits
501 * and BRANCH is invalid
504 req_count = req_count & 0x0007;
505 if (req_count & 0x4) {
508 } else if (req_count & 0x2) {
516 load_word(ch, key, phy_addr, req_count);
520 store_word(ch, key, phy_addr, req_count);
525 static void DBDMA_run(DBDMAState *s)
529 for (channel = 0; channel < DBDMA_CHANNELS; channel++) {
530 DBDMA_channel *ch = &s->channels[channel];
531 uint32_t status = ch->regs[DBDMA_STATUS];
532 if (!ch->io.processing && (status & RUN) && (status & ACTIVE)) {
538 static void DBDMA_run_bh(void *opaque)
540 DBDMAState *s = opaque;
542 DBDMA_DPRINTF("DBDMA_run_bh\n");
547 void DBDMA_kick(DBDMAState *dbdma)
549 qemu_bh_schedule(dbdma->bh);
552 void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
553 DBDMA_rw rw, DBDMA_flush flush,
556 DBDMAState *s = dbdma;
557 DBDMA_channel *ch = &s->channels[nchan];
559 DBDMA_DPRINTF("DBDMA_register_channel 0x%x\n", nchan);
567 ch->io.opaque = opaque;
571 dbdma_control_write(DBDMA_channel *ch)
573 uint16_t mask, value;
576 mask = (ch->regs[DBDMA_CONTROL] >> 16) & 0xffff;
577 value = ch->regs[DBDMA_CONTROL] & 0xffff;
579 value &= (RUN | PAUSE | FLUSH | WAKE | DEVSTAT);
581 status = ch->regs[DBDMA_STATUS];
583 status = (value & mask) | (status & ~mask);
593 if ((ch->regs[DBDMA_STATUS] & RUN) && !(status & RUN)) {
595 status &= ~(ACTIVE|DEAD);
598 if ((status & FLUSH) && ch->flush) {
603 DBDMA_DPRINTF(" status 0x%08x\n", status);
605 ch->regs[DBDMA_STATUS] = status;
607 if (status & ACTIVE) {
608 DBDMA_kick(dbdma_from_ch(ch));
612 static void dbdma_write(void *opaque, hwaddr addr,
613 uint64_t value, unsigned size)
615 int channel = addr >> DBDMA_CHANNEL_SHIFT;
616 DBDMAState *s = opaque;
617 DBDMA_channel *ch = &s->channels[channel];
618 int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;
620 DBDMA_DPRINTF("writel 0x" TARGET_FMT_plx " <= 0x%08"PRIx64"\n",
622 DBDMA_DPRINTF("channel 0x%x reg 0x%x\n",
623 (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);
625 /* cmdptr cannot be modified if channel is ACTIVE */
627 if (reg == DBDMA_CMDPTR_LO && (ch->regs[DBDMA_STATUS] & ACTIVE)) {
631 ch->regs[reg] = value;
635 dbdma_control_write(ch);
637 case DBDMA_CMDPTR_LO:
638 /* 16-byte aligned */
639 ch->regs[DBDMA_CMDPTR_LO] &= ~0xf;
640 dbdma_cmdptr_load(ch);
644 case DBDMA_BRANCH_SEL:
648 case DBDMA_XFER_MODE:
649 case DBDMA_CMDPTR_HI:
650 case DBDMA_DATA2PTR_HI:
651 case DBDMA_DATA2PTR_LO:
652 case DBDMA_ADDRESS_HI:
653 case DBDMA_BRANCH_ADDR_HI:
663 static uint64_t dbdma_read(void *opaque, hwaddr addr,
667 int channel = addr >> DBDMA_CHANNEL_SHIFT;
668 DBDMAState *s = opaque;
669 DBDMA_channel *ch = &s->channels[channel];
670 int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;
672 value = ch->regs[reg];
674 DBDMA_DPRINTF("readl 0x" TARGET_FMT_plx " => 0x%08x\n", addr, value);
675 DBDMA_DPRINTF("channel 0x%x reg 0x%x\n",
676 (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);
683 case DBDMA_CMDPTR_LO:
685 case DBDMA_BRANCH_SEL:
689 case DBDMA_XFER_MODE:
690 case DBDMA_CMDPTR_HI:
691 case DBDMA_DATA2PTR_HI:
692 case DBDMA_DATA2PTR_LO:
693 case DBDMA_ADDRESS_HI:
694 case DBDMA_BRANCH_ADDR_HI:
709 static const MemoryRegionOps dbdma_ops = {
711 .write = dbdma_write,
712 .endianness = DEVICE_LITTLE_ENDIAN,
714 .min_access_size = 4,
715 .max_access_size = 4,
719 static const VMStateDescription vmstate_dbdma_io = {
722 .minimum_version_id = 0,
723 .fields = (VMStateField[]) {
724 VMSTATE_UINT64(addr, struct DBDMA_io),
725 VMSTATE_INT32(len, struct DBDMA_io),
726 VMSTATE_INT32(is_last, struct DBDMA_io),
727 VMSTATE_INT32(is_dma_out, struct DBDMA_io),
728 VMSTATE_BOOL(processing, struct DBDMA_io),
729 VMSTATE_END_OF_LIST()
733 static const VMStateDescription vmstate_dbdma_cmd = {
736 .minimum_version_id = 0,
737 .fields = (VMStateField[]) {
738 VMSTATE_UINT16(req_count, dbdma_cmd),
739 VMSTATE_UINT16(command, dbdma_cmd),
740 VMSTATE_UINT32(phy_addr, dbdma_cmd),
741 VMSTATE_UINT32(cmd_dep, dbdma_cmd),
742 VMSTATE_UINT16(res_count, dbdma_cmd),
743 VMSTATE_UINT16(xfer_status, dbdma_cmd),
744 VMSTATE_END_OF_LIST()
748 static const VMStateDescription vmstate_dbdma_channel = {
749 .name = "dbdma_channel",
751 .minimum_version_id = 1,
752 .fields = (VMStateField[]) {
753 VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS),
754 VMSTATE_STRUCT(io, struct DBDMA_channel, 0, vmstate_dbdma_io, DBDMA_io),
755 VMSTATE_STRUCT(current, struct DBDMA_channel, 0, vmstate_dbdma_cmd,
757 VMSTATE_END_OF_LIST()
761 static const VMStateDescription vmstate_dbdma = {
764 .minimum_version_id = 3,
765 .fields = (VMStateField[]) {
766 VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1,
767 vmstate_dbdma_channel, DBDMA_channel),
768 VMSTATE_END_OF_LIST()
772 static void dbdma_reset(void *opaque)
774 DBDMAState *s = opaque;
777 for (i = 0; i < DBDMA_CHANNELS; i++)
778 memset(s->channels[i].regs, 0, DBDMA_SIZE);
781 static void dbdma_unassigned_rw(DBDMA_io *io)
783 DBDMA_channel *ch = io->channel;
784 qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n",
785 __func__, ch->channel);
788 static void dbdma_unassigned_flush(DBDMA_io *io)
790 DBDMA_channel *ch = io->channel;
791 qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n",
792 __func__, ch->channel);
795 void* DBDMA_init (MemoryRegion **dbdma_mem)
800 s = g_malloc0(sizeof(DBDMAState));
802 for (i = 0; i < DBDMA_CHANNELS; i++) {
803 DBDMA_io *io = &s->channels[i].io;
804 DBDMA_channel *ch = &s->channels[i];
805 qemu_iovec_init(&io->iov, 1);
807 ch->rw = dbdma_unassigned_rw;
808 ch->flush = dbdma_unassigned_flush;
813 memory_region_init_io(&s->mem, NULL, &dbdma_ops, s, "dbdma", 0x1000);
814 *dbdma_mem = &s->mem;
815 vmstate_register(NULL, -1, &vmstate_dbdma, s);
816 qemu_register_reset(dbdma_reset, s);
818 s->bh = qemu_bh_new(DBDMA_run_bh, s);