/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

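/* Headers this file relies on; the exact names are assumed from the QEMU tree
   of this period. */
#include "sysbus.h"
#include "scsi.h"
#include "esp.h"

/* debug ESP card: define DEBUG_ESP to enable the DPRINTF tracing below */
//#define DEBUG_ESP
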
/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */

#ifdef DEBUG_ESP
#define DPRINTF(fmt, ...)                                       \
    do { printf("ESP: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

#define ESP_ERROR(fmt, ...)                                     \
    do { printf("ESP ERROR: %s: " fmt, __func__ , ## __VA_ARGS__); } while (0)

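/* Run-time state of one ESP instance: the guest-visible read and write
   register files, the transfer FIFO (ti_buf), the current SCSI request and
   bookkeeping for an in-progress DMA transfer. */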
typedef struct ESPState ESPState;

    uint8_t rregs[ESP_REGS];
    uint8_t wregs[ESP_REGS];
    uint32_t ti_rptr, ti_wptr;
    uint8_t ti_buf[TI_BUFSZ];
    SCSIDevice *current_dev;
    SCSIRequest *current_req;
    uint8_t cmdbuf[TI_BUFSZ];

    /* The amount of data left in the current DMA transfer. */
    uint32_t dma_left;
    /* The size of the current DMA transfer. Zero if no transfer is in
       progress. */
    uint32_t dma_counter;

    ESPDMAMemoryReadWriteFunc dma_memory_read;
    ESPDMAMemoryReadWriteFunc dma_memory_write;
    void (*dma_cb)(ESPState *s);

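/* Register indices within the chip's MMIO window. Several offsets are shared
   between a register that is returned on reads and a different one that is
   latched on writes, which is why the device keeps separate rregs[] and
   wregs[] arrays. */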
#define ESP_WBUSID 0x4
#define ESP_WSYNTP 0x6
#define ESP_RFLAGS 0x7
#define ESP_RRES1  0x9
#define ESP_RRES2  0xa
#define ESP_WTEST  0xa

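/* Command codes written to the command register (ESP_CMD). The low bits select
   the operation (masked with CMD_CMD in esp_mem_writeb); a separate bit in the
   same byte requests DMA rather than PIO execution. */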
#define CMD_FLUSH    0x01
#define CMD_RESET    0x02
#define CMD_BUSRESET 0x03
#define CMD_ICCS     0x11
#define CMD_MSGACC   0x12
#define CMD_SATN     0x1a
#define CMD_SELATN   0x42
#define CMD_SELATNS  0x43
#define CMD_ENSEL    0x44

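/* Bits of the status register (RSTAT), the destination-ID field of WBUSID, the
   interrupt register (RINTR), configuration register 1 and the TCHI
   identification value. */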
#define STAT_PIO_MASK 0x06

#define STAT_INT 0x80

#define BUSID_DID 0x07

#define INTR_RST 0x80

#define CFG1_RESREPT 0x40

#define TCHI_FAS100A 0x4

static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        DPRINTF("Raise IRQ\n");
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        DPRINTF("Lower IRQ\n");
    }
}

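/* GPIO input driven by the board's DMA controller. The selection handlers
   below defer themselves through dma_cb until DMA has been enabled. */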
static void esp_dma_enable(void *opaque, int irq, int level)
    DeviceState *d = opaque;
    ESPState *s = container_of(d, ESPState, busdev.qdev);
        DPRINTF("Raise enable\n");
        DPRINTF("Lower enable\n");

static void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

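/* Fetch the CDB for the currently addressed target, either through the board's
   DMA hooks or out of the FIFO buffer, and return its length. */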
static uint32_t get_cmd(ESPState *s, uint8_t *buf)
    target = s->wregs[ESP_WBUSID] & BUSID_DID;
        dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
        s->dma_memory_read(s->dma_opaque, buf, dmalen);
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = buf[2] >> 5;
    DPRINTF("get_cmd: len %d target %d\n", dmalen, target);
    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    if (target >= ESP_MAX_DEVS || !s->bus.devs[target]) {
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
    s->current_dev = s->bus.devs[target];

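/* Hand a parsed CDB to the SCSI layer for the given bus ID and switch the
   status register to the data-in or data-out phase, depending on the direction
   of the transfer the SCSI layer reports. */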
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
    DPRINTF("do_busid_cmd: busid 0x%x\n", busid);
    s->current_req = scsi_req_new(s->current_dev, 0, lun, buf, NULL);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
        s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RSTAT] |= STAT_DI;
            s->rregs[ESP_RSTAT] |= STAT_DO;
        scsi_req_continue(s->current_req);
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;

static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}

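/* Selection command handlers. If the DMA engine has not been enabled yet, each
   handler parks itself in dma_cb so it can be retried once DMA is enabled. */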
static void handle_satn(ESPState *s)
    if (!s->dma_enabled) {
        s->dma_cb = handle_satn;
    len = get_cmd(s, buf);

static void handle_s_without_atn(ESPState *s)
    if (!s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
    len = get_cmd(s, buf);
        do_busid_cmd(s, buf, 0);

static void handle_satn_stop(ESPState *s)
    if (!s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
    s->cmdlen = get_cmd(s, s->cmdbuf);
        DPRINTF("Set ATN & Stop: cmdlen %d\n", s->cmdlen);
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;

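/* Return the status and message bytes of a completed command to the initiator,
   either through the DMA hooks or by leaving them in the FIFO. */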
static void write_response(ESPState *s)
    DPRINTF("Transfer status (status=%d)\n", s->status);
    s->ti_buf[0] = s->status;
        s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RFLAGS] = 2;

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    esp_raise_irq(s);
}

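/* Move the next chunk of the current transfer between guest memory (via the
   board's DMA hooks) and the SCSI layer's buffer; a negative ti_size means the
   data flows towards the device. */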
static void esp_do_dma(ESPState *s)
    to_device = (s->ti_size < 0);
        DPRINTF("command len %d + %d\n", s->cmdlen, len);
        s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        do_cmd(s, s->cmdbuf);
    if (s->async_len == 0) {
        /* Defer until data is available. */
    if (len > s->async_len) {
        s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        s->dma_memory_write(s->dma_opaque, s->async_buf, len);
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /* If there is still data to be read from the device then
           complete the DMA operation immediately. Otherwise defer
           until the scsi layer has completed. */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {

    /* Partially filled a scsi buffer. Complete immediately. */

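/* Callbacks invoked by the SCSI layer: completion of the current request and
   availability of the next data buffer are translated back into register and
   interrupt state. */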
static void esp_command_complete(SCSIRequest *req, uint32_t status)
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    DPRINTF("SCSI Command complete\n");
    if (s->ti_size != 0) {
        DPRINTF("SCSI command completed unexpectedly\n");
        DPRINTF("Command failed\n");
    s->rregs[ESP_RSTAT] = STAT_ST;
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;

static void esp_transfer_data(SCSIRequest *req, uint32_t len)
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    DPRINTF("transfer %d/%d\n", s->dma_left, s->ti_size);
    s->async_buf = scsi_req_get_buf(req);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /* If this was the last part of a DMA transfer then the
           completion interrupt is deferred to here. */

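/* Start a Transfer Information command. The DMA length is taken from the
   transfer count registers and clamped against the data the SCSI layer still
   expects. */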
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
    s->dma_counter = dmalen;

    if (s->do_cmd)
        minlen = (dmalen < 32) ? dmalen : 32;
    else if (s->ti_size < 0)
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    else
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    DPRINTF("Transfer Information len %d\n", minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        DPRINTF("command len %d\n", s->cmdlen);
        do_cmd(s, s->cmdbuf);
    }
}

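/* Reset helpers: esp_hard_reset restores the power-on register state, while
   the soft reset issued by the CMD_RESET command also drops the interrupt
   line. */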
static void esp_hard_reset(DeviceState *d)
    ESPState *s = container_of(d, ESPState, busdev.qdev);

    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->rregs[ESP_TCHI] = TCHI_FAS100A; /* Indicate fas100a */
    s->rregs[ESP_CFG1] = 7;

static void esp_soft_reset(DeviceState *d)
{
    ESPState *s = container_of(d, ESPState, busdev.qdev);
    qemu_irq_lower(s->irq);
    esp_hard_reset(d);
}

static void parent_esp_reset(void *opaque, int irq, int level)
        esp_soft_reset(opaque);

static void esp_gpio_demux(void *opaque, int irq, int level)
        parent_esp_reset(opaque, irq, level);
        esp_dma_enable(opaque, irq, level);

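/* Memory-mapped register accessors. The register index is derived from the
   guest address shifted down by the board-specific it_shift. */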
static uint32_t esp_mem_readb(void *opaque, target_phys_addr_t addr)
    ESPState *s = opaque;
    uint32_t saddr, old_val;

    saddr = addr >> s->it_shift;
    DPRINTF("read reg[%d]: 0x%2.2x\n", saddr, s->rregs[saddr]);
        if (s->ti_size > 0) {
            if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
                ESP_ERROR("PIO data read not implemented\n");
                s->rregs[ESP_FIFO] = 0;
                s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        if (s->ti_size == 0) {
        /* Reading the interrupt register clears it, clears the TC status bit
           and resets the sequence step. */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
    return s->rregs[saddr];

static void esp_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
    ESPState *s = opaque;
    uint32_t saddr;

    saddr = addr >> s->it_shift;
    DPRINTF("write reg[%d]: 0x%2.2x -> 0x%2.2x\n", saddr, s->wregs[saddr],
            val);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
            s->cmdbuf[s->cmdlen++] = val & 0xff;
        } else if (s->ti_size == TI_BUFSZ - 1) {
            ESP_ERROR("fifo overrun\n");
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        s->rregs[saddr] = val;
            /* Reload DMA counter. */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
        switch (val & CMD_CMD) {
            DPRINTF("NOP (%2.2x)\n", val);
            DPRINTF("Flush FIFO (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            DPRINTF("Chip reset (%2.2x)\n", val);
            esp_soft_reset(&s->busdev.qdev);
            DPRINTF("Bus reset (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            DPRINTF("Initiator Command Complete Sequence (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            DPRINTF("Message Accepted (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            DPRINTF("Transfer padding (%2.2x)\n", val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            DPRINTF("Set ATN (%2.2x)\n", val);
            DPRINTF("Select without ATN (%2.2x)\n", val);
            handle_s_without_atn(s);
            DPRINTF("Select with ATN (%2.2x)\n", val);
            DPRINTF("Select with ATN & stop (%2.2x)\n", val);
            DPRINTF("Enable selection (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = 0;
            ESP_ERROR("Unhandled ESP command (%2.2x)\n", val);
    case ESP_WBUSID ... ESP_WSYNO:
        s->rregs[saddr] = val;
    case ESP_WCCF ... ESP_WTEST:
    case ESP_CFG2 ... ESP_RES4:
        s->rregs[saddr] = val;
        ESP_ERROR("invalid write of 0x%02x at [0x%x]\n", val, saddr);
    s->wregs[saddr] = val;

static CPUReadMemoryFunc * const esp_mem_read[3] = {
    esp_mem_readb,
};

static CPUWriteMemoryFunc * const esp_mem_write[3] = {
    esp_mem_writeb,
};

static const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 3,
    .minimum_version_id = 3,
    .minimum_version_id_old = 3,
    .fields = (VMStateField []) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_BUFFER(cmdbuf, ESPState),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

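/* Legacy helper used by board code to create the device, wire up its interrupt
   and MMIO window, and hand back the reset and DMA-enable GPIO inputs. */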
void esp_init(target_phys_addr_t espaddr, int it_shift,
              ESPDMAMemoryReadWriteFunc dma_memory_read,
              ESPDMAMemoryReadWriteFunc dma_memory_write,
              void *dma_opaque, qemu_irq irq, qemu_irq *reset,
              qemu_irq *dma_enable)
{
    DeviceState *dev;
    SysBusDevice *s;
    ESPState *esp;

    dev = qdev_create(NULL, "esp");
    esp = DO_UPCAST(ESPState, busdev.qdev, dev);
    esp->dma_memory_read = dma_memory_read;
    esp->dma_memory_write = dma_memory_write;
    esp->dma_opaque = dma_opaque;
    esp->it_shift = it_shift;
    /* XXX for now until rc4030 has been changed to use DMA enable signal */
    esp->dma_enabled = 1;
    qdev_init_nofail(dev);
    s = sysbus_from_qdev(dev);
    sysbus_connect_irq(s, 0, irq);
    sysbus_mmio_map(s, 0, espaddr);
    *reset = qdev_get_gpio_in(dev, 0);
    *dma_enable = qdev_get_gpio_in(dev, 1);
}

static const struct SCSIBusOps esp_scsi_ops = {
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static int esp_init1(SysBusDevice *dev)
{
    ESPState *s = FROM_SYSBUS(ESPState, dev);
    int esp_io_memory;

    sysbus_init_irq(dev, &s->irq);
    assert(s->it_shift != -1);

    esp_io_memory = cpu_register_io_memory(esp_mem_read, esp_mem_write, s,
                                           DEVICE_NATIVE_ENDIAN);
    sysbus_init_mmio(dev, ESP_REGS << s->it_shift, esp_io_memory);

    qdev_init_gpio_in(&dev->qdev, esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, &dev->qdev, 0, ESP_MAX_DEVS, &esp_scsi_ops);
    return scsi_bus_legacy_handle_cmdline(&s->bus);
}

static SysBusDeviceInfo esp_info = {
    .init = esp_init1,
    .qdev.name = "esp",
    .qdev.size = sizeof(ESPState),
    .qdev.vmsd = &vmstate_esp,
    .qdev.reset = esp_hard_reset,
    .qdev.props = (Property[]) {

static void esp_register_devices(void)
{
    sysbus_register_withprop(&esp_info);
}

device_init(esp_register_devices)