/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysbus.h"
#include "scsi.h"
#include "esp.h"

/* debug ESP card */
//#define DEBUG_ESP

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */

#ifdef DEBUG_ESP
#define DPRINTF(fmt, ...)                                       \
    do { printf("ESP: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

#define ESP_ERROR(fmt, ...)                                     \
    do { printf("ESP ERROR: %s: " fmt, __func__ , ## __VA_ARGS__); } while (0)

#define ESP_REGS 16
#define TI_BUFSZ 16

typedef struct ESPState ESPState;

struct ESPState {
    SysBusDevice busdev;
    uint32_t it_shift;
    qemu_irq irq;
    uint8_t rregs[ESP_REGS];
    uint8_t wregs[ESP_REGS];
    int32_t ti_size;
    uint32_t ti_rptr, ti_wptr;
    uint8_t ti_buf[TI_BUFSZ];
    uint32_t sense;
    uint32_t dma;
    SCSIBus bus;
    SCSIDevice *current_dev;
    SCSIRequest *current_req;
    uint8_t cmdbuf[TI_BUFSZ];
    uint32_t cmdlen;
    uint32_t do_cmd;

    /* The amount of data left in the current DMA transfer. */
    uint32_t dma_left;
    /* The size of the current DMA transfer. Zero if no transfer is in
       progress. */
    uint32_t dma_counter;
    uint8_t *async_buf;
    uint32_t async_len;

    ESPDMAMemoryReadWriteFunc dma_memory_read;
    ESPDMAMemoryReadWriteFunc dma_memory_write;
    void *dma_opaque;
    int dma_enabled;
    void (*dma_cb)(ESPState *s);
};

#define ESP_WBUSID 0x4
#define ESP_WSYNTP 0x6
#define ESP_RFLAGS 0x7
#define ESP_RRES1  0x9
#define ESP_RRES2  0xa
#define ESP_WTEST  0xa

#define CMD_FLUSH    0x01
#define CMD_RESET    0x02
#define CMD_BUSRESET 0x03
#define CMD_ICCS     0x11
#define CMD_MSGACC   0x12
#define CMD_SATN     0x1a
#define CMD_SELATN   0x42
#define CMD_SELATNS  0x43
#define CMD_ENSEL    0x44

#define STAT_PIO_MASK 0x06
#define STAT_INT      0x80

#define BUSID_DID 0x07

#define INTR_RST 0x80

#define CFG1_RESREPT 0x40

#define TCHI_FAS100A 0x4

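/* Assert the interrupt line and latch STAT_INT in the status register; the
   matching esp_lower_irq() below clears the flag and drops the line. */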
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        DPRINTF("Raise IRQ\n");
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        DPRINTF("Lower IRQ\n");
    }
}

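/* GPIO handler for the board's DMA enable line.  When DMA becomes enabled,
   a command handler that deferred itself in dma_cb is run now. */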
static void esp_dma_enable(void *opaque, int irq, int level)
{
    DeviceState *d = opaque;
    ESPState *s = container_of(d, ESPState, busdev.qdev);

    if (level) {
        s->dma_enabled = 1;
        DPRINTF("Raise enable\n");
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        DPRINTF("Lower enable\n");
        s->dma_enabled = 0;
    }
}

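/* SCSI bus "cancel" callback: drop our reference to a request that the SCSI
   layer has cancelled. */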
static void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

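/* Fetch a CDB from the guest (via DMA or from the FIFO buffer) and select the
   target addressed by the bus ID register.  Returns the command length, or 0
   if the target does not exist. */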
static uint32_t get_cmd(ESPState *s, uint8_t *buf)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
        s->dma_memory_read(s->dma_opaque, buf, dmalen);
    } else {
        dmalen = s->ti_size;
        memcpy(buf, s->ti_buf, dmalen);
    }
    DPRINTF("get_cmd: len %d target %d\n", dmalen, target);

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_dev) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    if (target >= ESP_MAX_DEVS || !s->bus.devs[target]) {
        /* No such drive. */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return 0;
    }
    s->current_dev = s->bus.devs[target];
    return dmalen;
}

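/* Hand the CDB to the SCSI layer for the currently selected target/LUN and
   set up the data phase (data-in or data-out) according to the transfer
   length reported back by the device. */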
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;

    DPRINTF("do_busid_cmd: busid 0x%x\n", busid);
    lun = busid & 7;
    s->current_req = scsi_req_new(s->current_dev, 0, lun);
    datalen = scsi_req_enqueue(s->current_req, buf);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}

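/* Selection command handlers.  If DMA is currently disabled, the handler
   records itself in dma_cb and is re-run from esp_dma_enable() once the DMA
   engine is switched on. */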
static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (!s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    len = get_cmd(s, buf);
    if (len) {
        do_cmd(s, buf);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (!s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    len = get_cmd(s, buf);
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (!s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->cmdlen = get_cmd(s, s->cmdbuf);
    if (s->cmdlen) {
        DPRINTF("Set ATN & Stop: cmdlen %d\n", s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

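/* Return the two-byte status/message response (status byte from the last
   command followed by a message byte) to the guest, either via DMA or
   through the FIFO. */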
static void write_response(ESPState *s)
{
    DPRINTF("Transfer status (sense=%d)\n", s->sense);
    s->ti_buf[0] = s->sense;
    s->ti_buf[1] = 0;
    if (s->dma) {
        s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 0;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

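/* Finish the current DMA transfer: report terminal count, clear the transfer
   counter registers and raise a bus-service interrupt. */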
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    esp_raise_irq(s);
}

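/* Move the next chunk of data between guest memory and the SCSI layer's
   buffer; also used to DMA a command into cmdbuf before it is issued.
   ti_size is negative for transfers towards the device. */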
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    to_device = (s->ti_size < 0);
    len = s->dma_left;
    if (s->do_cmd) {
        DPRINTF("command len %d + %d\n", s->cmdlen, len);
        s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        s->dma_memory_read(s->dma_opaque, s->async_buf, len);
    } else {
        s->dma_memory_write(s->dma_opaque, s->async_buf, len);
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /* If there is still data to be read from the device then
           complete the DMA operation immediately.  Otherwise defer
           until the scsi layer has completed. */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
}

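/* SCSI bus completion callback: called by the SCSI layer either when more
   transfer data is available or when the request has finished. */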
static void esp_command_complete(SCSIRequest *req, int reason, uint32_t arg)
{
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    if (reason == SCSI_REASON_DONE) {
        DPRINTF("SCSI Command complete\n");
        if (s->ti_size != 0)
            DPRINTF("SCSI command completed unexpectedly\n");
        s->ti_size = 0;
        s->dma_left = 0;
        s->async_len = 0;
        if (arg)
            DPRINTF("Command failed\n");
        s->sense = arg;
        s->rregs[ESP_RSTAT] = STAT_ST;
        esp_dma_done(s);
        if (s->current_req) {
            scsi_req_unref(s->current_req);
            s->current_req = NULL;
            s->current_dev = NULL;
        }
    } else {
        DPRINTF("transfer %d/%d\n", s->dma_left, s->ti_size);
        s->async_len = arg;
        s->async_buf = s->current_dev->info->get_buf(req);
        if (s->dma_left) {
            esp_do_dma(s);
        } else if (s->dma_counter != 0 && s->ti_size <= 0) {
            /* If this was the last part of a DMA transfer then the
               completion interrupt is deferred to here. */
            esp_dma_done(s);
        }
    }
}

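/* Handle the Transfer Information command: start a DMA transfer bounded by
   the programmed transfer counter, or issue a command previously collected
   into cmdbuf. */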
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd)
        minlen = (dmalen < 32) ? dmalen : 32;
    else if (s->ti_size < 0)
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    else
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    DPRINTF("Transfer Information len %d\n", minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        DPRINTF("command len %d\n", s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
}

static void esp_hard_reset(DeviceState *d)
{
    ESPState *s = container_of(d, ESPState, busdev.qdev);

    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->rregs[ESP_TCHI] = TCHI_FAS100A;  /* Indicate fas100a */
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(DeviceState *d)
{
    ESPState *s = container_of(d, ESPState, busdev.qdev);

    qemu_irq_lower(s->irq);
    esp_hard_reset(d);
}

static void parent_esp_reset(void *opaque, int irq, int level)
{
    if (level) {
        esp_soft_reset(opaque);
    }
}

static void esp_gpio_demux(void *opaque, int irq, int level)
{
    switch (irq) {
    case 0:
        parent_esp_reset(opaque, irq, level);
        break;
    case 1:
        esp_dma_enable(opaque, irq, level);
        break;
    }
}

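/* Memory-mapped register access.  Registers are spaced 1 << it_shift bytes
   apart and are accessed byte-wide (esp_mem_readb/esp_mem_writeb). */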
static uint32_t esp_mem_readb(void *opaque, target_phys_addr_t addr)
{
    ESPState *s = opaque;
    uint32_t saddr, old_val;

    saddr = addr >> s->it_shift;
    DPRINTF("read reg[%d]: 0x%2.2x\n", saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if (s->ti_size > 0) {
            s->ti_size--;
            if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
                /* Data out. */
                ESP_ERROR("PIO data read not implemented\n");
                s->rregs[ESP_FIFO] = 0;
            } else {
                s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
            }
            esp_raise_irq(s);
        }
        if (s->ti_size == 0) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /* Clear sequence step, interrupt register and all status bits
           except TC */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);

        return old_val;
    default:
        break;
    }
    return s->rregs[saddr];
}

static void esp_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ESPState *s = opaque;
    uint32_t saddr;

    saddr = addr >> s->it_shift;
    DPRINTF("write reg[%d]: 0x%2.2x -> 0x%2.2x\n", saddr, s->wregs[saddr],
            val);
    switch (saddr) {
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            s->cmdbuf[s->cmdlen++] = val & 0xff;
        } else if (s->ti_size == TI_BUFSZ - 1) {
            ESP_ERROR("fifo overrun\n");
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            DPRINTF("NOP (%2.2x)\n", val);
            break;
        case CMD_FLUSH:
            DPRINTF("Flush FIFO (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            DPRINTF("Chip reset (%2.2x)\n", val);
            esp_soft_reset(&s->busdev.qdev);
            break;
        case CMD_BUSRESET:
            DPRINTF("Bus reset (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            DPRINTF("Initiator Command Complete Sequence (%2.2x)\n", val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            DPRINTF("Message Accepted (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            DPRINTF("Transfer padding (%2.2x)\n", val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            DPRINTF("Set ATN (%2.2x)\n", val);
            break;
        case CMD_SEL:
            DPRINTF("Select without ATN (%2.2x)\n", val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            DPRINTF("Select with ATN (%2.2x)\n", val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            DPRINTF("Select with ATN & stop (%2.2x)\n", val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            DPRINTF("Enable selection (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = 0;
            break;
        default:
            ESP_ERROR("Unhandled ESP command (%2.2x)\n", val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    case ESP_CFG2 ... ESP_RES4:
        s->rregs[saddr] = val;
        break;
    default:
        ESP_ERROR("invalid write of 0x%02x at [0x%x]\n", val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static CPUReadMemoryFunc * const esp_mem_read[3] = {
    esp_mem_readb,
    NULL,
    NULL,
};

static CPUWriteMemoryFunc * const esp_mem_write[3] = {
    esp_mem_writeb,
    NULL,
    NULL,
};

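/* Migration state: the register files, FIFO and command buffers and the DMA
   bookkeeping flags; the in-flight SCSIRequest and async buffer pointers are
   not saved. */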
static const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 3,
    .minimum_version_id = 3,
    .minimum_version_id_old = 3,
    .fields = (VMStateField []) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(sense, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_BUFFER(cmdbuf, ESPState),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

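/* Helper for board code: create and wire up an "esp" sysbus device, hook up
   the board's DMA read/write callbacks, IRQ and MMIO mapping, and return the
   reset and DMA-enable GPIO inputs. */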
void esp_init(target_phys_addr_t espaddr, int it_shift,
              ESPDMAMemoryReadWriteFunc dma_memory_read,
              ESPDMAMemoryReadWriteFunc dma_memory_write,
              void *dma_opaque, qemu_irq irq, qemu_irq *reset,
              qemu_irq *dma_enable)
{
    DeviceState *dev;
    SysBusDevice *s;
    ESPState *esp;

    dev = qdev_create(NULL, "esp");
    esp = DO_UPCAST(ESPState, busdev.qdev, dev);
    esp->dma_memory_read = dma_memory_read;
    esp->dma_memory_write = dma_memory_write;
    esp->dma_opaque = dma_opaque;
    esp->it_shift = it_shift;
    /* XXX for now until rc4030 has been changed to use DMA enable signal */
    esp->dma_enabled = 1;
    qdev_init_nofail(dev);
    s = sysbus_from_qdev(dev);
    sysbus_connect_irq(s, 0, irq);
    sysbus_mmio_map(s, 0, espaddr);
    *reset = qdev_get_gpio_in(dev, 0);
    *dma_enable = qdev_get_gpio_in(dev, 1);
}

static const struct SCSIBusOps esp_scsi_ops = {
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

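/* qdev init: register the MMIO region, IRQ, the two GPIO inputs (reset and
   DMA enable) and the SCSI bus, then attach any drives given on the command
   line. */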
static int esp_init1(SysBusDevice *dev)
{
    ESPState *s = FROM_SYSBUS(ESPState, dev);
    int esp_io_memory;

    sysbus_init_irq(dev, &s->irq);
    assert(s->it_shift != -1);

    esp_io_memory = cpu_register_io_memory(esp_mem_read, esp_mem_write, s,
                                           DEVICE_NATIVE_ENDIAN);
    sysbus_init_mmio(dev, ESP_REGS << s->it_shift, esp_io_memory);

    qdev_init_gpio_in(&dev->qdev, esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, &dev->qdev, 0, ESP_MAX_DEVS, &esp_scsi_ops);
    return scsi_bus_legacy_handle_cmdline(&s->bus);
}

static SysBusDeviceInfo esp_info = {
    .init = esp_init1,
    .qdev.name = "esp",
    .qdev.size = sizeof(ESPState),
    .qdev.vmsd = &vmstate_esp,
    .qdev.reset = esp_hard_reset,
    .qdev.props = (Property[]) {
        DEFINE_PROP_UINT32("it_shift", ESPState, it_shift, -1),
        DEFINE_PROP_END_OF_LIST(),
    }
};

static void esp_register_devices(void)
{
    sysbus_register_withprop(&esp_info);
}

device_init(esp_register_devices)