2 * QEMU ESP/NCR53C9x emulation
4 * Copyright (c) 2005-2006 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
31 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
32 * also produced as NCR89C100. See
33 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
35 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
/* Diagnostic for guest programming this emulation does not support;
 * prefixes the message with the calling function's name. */
#define ESP_ERROR(fmt, ...) \
    do { printf("ESP ERROR: %s: " fmt, __func__ , ## __VA_ARGS__); } while (0)
typedef struct ESPState ESPState;

/* NOTE(review): only a subset of the ESPState fields is visible in this
 * excerpt; the struct declaration itself is elided. */
uint8_t rregs[ESP_REGS];    /* guest-readable register file */
uint8_t wregs[ESP_REGS];    /* shadow of guest-written register values */
uint32_t ti_rptr, ti_wptr;  /* read/write cursors into ti_buf (PIO FIFO) */
uint8_t ti_buf[TI_BUFSZ];   /* transfer-information (FIFO) buffer */
SCSIDevice *current_dev;    /* target selected by the last selection command */
SCSIRequest *current_req;   /* in-flight SCSI request, NULL when idle */
uint8_t cmdbuf[TI_BUFSZ];   /* CDB bytes accumulated via FIFO writes */
/* The amount of data left in the current DMA transfer. */
/* The size of the current DMA transfer. Zero if no transfer is in
   progress. */
ESPDMAMemoryReadWriteFunc dma_memory_read;   /* board-supplied DMA helper */
ESPDMAMemoryReadWriteFunc dma_memory_write;  /* board-supplied DMA helper */
void (*dma_cb)(ESPState *s);  /* deferred handler run once DMA is enabled */
/* Register-file indices (offsets into rregs/wregs). */
#define ESP_WBUSID 0x4    /* write: destination bus ID */
#define ESP_WSYNTP 0x6    /* write: synchronous transfer period */
#define ESP_RFLAGS 0x7    /* read: FIFO flags / sequence step */

/* Command-register opcodes (low bits selected via CMD_CMD mask). */
#define CMD_FLUSH 0x01    /* flush FIFO */
#define CMD_RESET 0x02    /* soft-reset the chip */
#define CMD_BUSRESET 0x03 /* assert SCSI bus reset */
#define CMD_ICCS 0x11     /* initiator command-complete sequence */
#define CMD_MSGACC 0x12   /* message accepted */
#define CMD_SATN 0x1a     /* set ATN */
#define CMD_SELATN 0x42   /* select with ATN */
#define CMD_SELATNS 0x43  /* select with ATN and stop */
#define CMD_ENSEL 0x44    /* enable (re)selection */

/* Status-register bits. */
#define STAT_PIO_MASK 0x06 /* phase bits consulted on PIO FIFO reads */
#define STAT_INT 0x80      /* interrupt pending; mirrors the IRQ line state */

#define BUSID_DID 0x07     /* destination-ID field of the bus-ID register */

/* Interrupt-register bits. */
#define INTR_RST 0x80      /* SCSI bus reset observed */

/* Configuration-register-1 bits. */
#define CFG1_RESREPT 0x40  /* suppress reset-interrupt reporting */

/* TCHI value identifying the chip as a FAS100A to guest drivers. */
#define TCHI_FAS100A 0x4
148 static void esp_raise_irq(ESPState *s)
150 if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
151 s->rregs[ESP_RSTAT] |= STAT_INT;
152 qemu_irq_raise(s->irq);
153 trace_esp_raise_irq();
157 static void esp_lower_irq(ESPState *s)
159 if (s->rregs[ESP_RSTAT] & STAT_INT) {
160 s->rregs[ESP_RSTAT] &= ~STAT_INT;
161 qemu_irq_lower(s->irq);
162 trace_esp_lower_irq();
/*
 * GPIO input handler: the board raises/lowers this line to gate DMA.
 * (Fragment: the level-dependent branches — which set s->dma_enabled and
 * run any deferred s->dma_cb when DMA becomes available — are not visible
 * in this excerpt.)
 */
static void esp_dma_enable(void *opaque, int irq, int level)
    DeviceState *d = opaque;
    ESPState *s = container_of(d, ESPState, busdev.qdev);
    /* presumably the level != 0 path — confirm against full source */
    trace_esp_dma_enable();
    /* presumably the level == 0 path — confirm against full source */
    trace_esp_dma_disable();
184 static void esp_request_cancelled(SCSIRequest *req)
186 ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);
188 if (req == s->current_req) {
189 scsi_req_unref(s->current_req);
190 s->current_req = NULL;
191 s->current_dev = NULL;
/*
 * Collect the CDB for a selection into buf and locate the target device.
 * (Fragment: the DMA/PIO branch structure and the final return value are
 * not visible in this excerpt.)
 */
static uint32_t get_cmd(ESPState *s, uint8_t *buf)
    /* Target ID is the low bits of the guest-written bus-ID register. */
    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    /* DMA path: length comes from the guest-programmed 16-bit transfer
     * counter.  NOTE(review): dmalen is guest-controlled and no bounds
     * check against the caller's buffer is visible here — confirm buf is
     * large enough before this read fills it. */
    dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
    s->dma_memory_read(s->dma_opaque, buf, dmalen);
    /* PIO path: the CDB was accumulated in the FIFO buffer. */
    memcpy(buf, s->ti_buf, dmalen);
    /* presumably extracts a field from the third FIFO byte into buf[0]
     * for the PIO case — confirm against full source */
    buf[0] = buf[2] >> 5;
    trace_esp_get_cmd(dmalen, target);

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No device at that ID: latch a disconnect interrupt. */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
/*
 * Issue a parsed CDB to the currently selected target/LUN and latch the
 * resulting bus phase.  (Fragment: the derivation of 'lun' from busid and
 * the branch structure around the STAT_DI/STAT_DO assignments are not
 * visible in this excerpt.)
 */
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    /* 'lun' is presumably extracted from busid — assignment not visible */
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, NULL);
    datalen = scsi_req_enqueue(s->current_req);
    /* Sign of ti_size encodes direction: > 0 device->initiator,
     * < 0 initiator->device (see esp_do_dma / handle_ti). */
    s->ti_size = datalen;
    s->rregs[ESP_RSTAT] = STAT_TC;
    s->rregs[ESP_RSTAT] |= STAT_DI;   /* data-in phase */
    s->rregs[ESP_RSTAT] |= STAT_DO;   /* data-out phase */
    scsi_req_continue(s->current_req);
    /* Selection done: bus-service + function-complete interrupts. */
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
261 static void do_cmd(ESPState *s, uint8_t *buf)
263 uint8_t busid = buf[0];
265 do_busid_cmd(s, &buf[1], busid);
/*
 * "Select with ATN" command: fetch the CDB and start the request.
 * If DMA is not yet enabled, defer ourselves via dma_cb and retry when
 * the board raises the DMA-enable line.  (Fragment: local declarations,
 * the early return, and the do_cmd dispatch are not visible here.)
 */
static void handle_satn(ESPState *s)
    if (!s->dma_enabled) {
        /* Re-run this handler once esp_dma_enable fires. */
        s->dma_cb = handle_satn;
    len = get_cmd(s, buf);
/*
 * "Select without ATN" command: no IDENTIFY message precedes the CDB, so
 * the command is dispatched with busid 0.  Deferred via dma_cb when DMA
 * is disabled, like handle_satn.  (Fragment: local declarations and the
 * early return are not visible here.)
 */
static void handle_s_without_atn(ESPState *s)
    if (!s->dma_enabled) {
        /* Re-run this handler once esp_dma_enable fires. */
        s->dma_cb = handle_s_without_atn;
    len = get_cmd(s, buf);
    /* No IDENTIFY byte: pass busid 0 straight through. */
    do_busid_cmd(s, buf, 0);
/*
 * "Select with ATN and stop": latch the CDB into cmdbuf but stop before
 * executing it; the guest finishes with a Transfer Information command
 * (see handle_ti's do_cmd branch).  Deferred via dma_cb when DMA is
 * disabled.  (Fragment: the early return and the branch around the
 * register updates are not visible here.)
 */
static void handle_satn_stop(ESPState *s)
    if (!s->dma_enabled) {
        /* Re-run this handler once esp_dma_enable fires. */
        s->dma_cb = handle_satn_stop;
    s->cmdlen = get_cmd(s, s->cmdbuf);
    trace_esp_handle_satn_stop(s->cmdlen);
    /* Stop in command phase and interrupt the guest. */
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
/*
 * Deliver the status + message bytes that terminate a command.  The DMA
 * path pushes two bytes through dma_memory_write and latches a
 * status-phase interrupt; the PIO path leaves them pending in the FIFO
 * with RFLAGS = 2.  (Fragment: the second response byte's assignment and
 * the DMA/PIO branch structure are not visible here.)
 */
static void write_response(ESPState *s)
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;   /* byte 0: SCSI status code */
    /* DMA branch: write both response bytes to guest memory. */
    s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    /* PIO branch: two bytes now readable from the FIFO. */
    s->rregs[ESP_RFLAGS] = 2;
/*
 * Mark the current DMA transfer finished: set terminal count, latch a
 * bus-service interrupt and clear the sequence step, FIFO flags and
 * transfer counter.  (Fragment: presumably followed by esp_raise_irq —
 * confirm against full source.)
 */
static void esp_dma_done(ESPState *s)
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
/*
 * Move the next chunk of the active DMA transfer between guest memory
 * and the SCSI layer's buffer.  Also handles the deferred-command case
 * where the CDB itself arrives by DMA (do_cmd branch).  (Fragment: the
 * computation of 'len', the do_cmd gating, and several branch closures
 * are not visible in this excerpt.)
 */
static void esp_do_dma(ESPState *s)
    /* Negative ti_size means initiator -> device (write). */
    to_device = (s->ti_size < 0);
    /* CDB-by-DMA case: append to cmdbuf, then dispatch. */
    trace_esp_do_dma(s->cmdlen, len);
    s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
    do_cmd(s, s->cmdbuf);
    if (s->async_len == 0) {
        /* Defer until data is available. */
    /* Clamp the chunk to what the SCSI layer currently offers. */
    if (len > s->async_len) {
    /* to_device: guest memory -> SCSI buffer; else the reverse. */
    s->dma_memory_read(s->dma_opaque, s->async_buf, len);
    s->dma_memory_write(s->dma_opaque, s->async_buf, len);
    /* Consumed the whole SCSI buffer: ask for more. */
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /* If there is still data to be read from the device then
           complete the DMA operation immediately. Otherwise defer
           until the scsi layer has completed. */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
    /* Partially filled a scsi buffer. Complete immediately. */
/*
 * SCSI-layer callback: the current request finished with 'status'.
 * Latches status-phase state and releases our reference to the request.
 * (Fragment: the assignments recording status and resetting the transfer
 * counters are not visible in this excerpt.)
 */
static void esp_command_complete(SCSIRequest *req, uint32_t status)
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    trace_esp_command_complete();
    /* Completion with data still outstanding is unexpected. */
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    /* presumably the status != 0 path — confirm against full source */
    trace_esp_command_complete_fail();
    s->rregs[ESP_RSTAT] = STAT_ST;   /* enter status phase */
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
/*
 * SCSI-layer callback: 'len' bytes are now available in the request's
 * buffer.  (Fragment: the recording of async_len, the dma_left branch
 * that resumes esp_do_dma, and the call that raises the deferred
 * completion are not visible in this excerpt.)
 */
static void esp_transfer_data(SCSIRequest *req, uint32_t len)
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_buf = scsi_req_get_buf(req);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /* If this was the last part of a DMA transfer then the
           completion interrupt is deferred to here. */
/*
 * "Transfer Information" command: start a DMA data transfer of up to the
 * guest-programmed counter, or — if a CDB was latched by
 * handle_satn_stop (do_cmd set) — dispatch that deferred command.
 * (Fragment: the branch selecting the first 'minlen' clamp and the DMA
 * kick-off are not visible in this excerpt.)
 */
static void handle_ti(ESPState *s)
    uint32_t dmalen, minlen;

    /* 16-bit transfer counter programmed by the guest. */
    dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
    s->dma_counter = dmalen;

    /* Command bytes: at most 32.  Otherwise clamp to the pending data,
     * whose direction is encoded in the sign of ti_size. */
    minlen = (dmalen < 32) ? dmalen : 32;
    else if (s->ti_size < 0)
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    trace_esp_handle_ti(minlen);
    /* DMA path: arm the transfer and clear terminal count. */
    s->dma_left = minlen;
    s->rregs[ESP_RSTAT] &= ~STAT_TC;
    } else if (s->do_cmd) {
    /* PIO path with a latched CDB: run it now. */
    trace_esp_handle_ti_cmd(s->cmdlen);
    do_cmd(s, s->cmdbuf);
/*
 * Full device reset: clear both register files, then restore the
 * identification and configuration defaults.  (Fragment: the resets of
 * the transfer-state fields between these lines are not visible here.)
 */
static void esp_hard_reset(DeviceState *d)
    ESPState *s = container_of(d, ESPState, busdev.qdev);

    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->rregs[ESP_TCHI] = TCHI_FAS100A; // Indicate fas100a
    /* Default CFG1 value of 7 — presumably the power-on bus ID; confirm. */
    s->rregs[ESP_CFG1] = 7;
/*
 * Guest-initiated (CMD_RESET) reset: drop the interrupt line.
 * (Fragment: presumably followed by a call into the hard-reset register
 * initialisation — confirm against full source.)
 */
static void esp_soft_reset(DeviceState *d)
    ESPState *s = container_of(d, ESPState, busdev.qdev);

    qemu_irq_lower(s->irq);
/*
 * GPIO handler for the board-level reset line.  (Fragment: presumably
 * gated on 'level' being asserted — the condition is not visible here.)
 */
static void parent_esp_reset(void *opaque, int irq, int level)
    esp_soft_reset(opaque);
/*
 * Demultiplex the device's two GPIO inputs: line 0 is chip reset,
 * line 1 is the DMA-enable gate (see esp_init's qdev_get_gpio_in calls).
 * (Fragment: the switch on 'irq' is not visible in this excerpt.)
 */
static void esp_gpio_demux(void *opaque, int irq, int level)
    /* irq 0: reset line */
    parent_esp_reset(opaque, irq, level);
    /* irq 1: DMA enable line */
    esp_dma_enable(opaque, irq, level);
/*
 * MMIO read handler.  Register index is derived from the address via
 * it_shift.  FIFO reads pull the next PIO byte; RINTR reads clear the
 * interrupt state (read-to-clear semantics).  (Fragment: the switch on
 * saddr and several branch closures are not visible in this excerpt.)
 */
static uint64_t esp_mem_read(void *opaque, target_phys_addr_t addr,
    ESPState *s = opaque;
    uint32_t saddr, old_val;

    saddr = addr >> s->it_shift;   /* register index from byte address */
    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    /* FIFO read while PIO data is pending. */
    if (s->ti_size > 0) {
    if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
        /* No PIO data-phase support: return a zero byte. */
        ESP_ERROR("PIO data read not implemented\n");
        s->rregs[ESP_FIFO] = 0;
    /* Next buffered FIFO byte. */
    s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
    /* Buffer drained — presumably the IRQ is lowered here; confirm. */
    if (s->ti_size == 0) {
    /* Clear sequence step, interrupt register and all status bits
    old_val = s->rregs[ESP_RINTR];   /* value returned to the guest */
    s->rregs[ESP_RINTR] = 0;
    s->rregs[ESP_RSTAT] &= ~STAT_TC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return s->rregs[saddr];
/*
 * MMIO write handler.  Register index is derived from the address via
 * it_shift.  FIFO writes accumulate either CDB bytes (do_cmd) or PIO
 * data; command-register writes dispatch on the opcode.  (Fragment: the
 * outer switch, case labels, break statements and brace structure are
 * largely elided in this excerpt.)
 */
static void esp_mem_write(void *opaque, target_phys_addr_t addr,
                          uint64_t val, unsigned size)
    ESPState *s = opaque;

    saddr = addr >> s->it_shift;   /* register index from byte address */
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    /* Transfer-count writes clear terminal count. */
    s->rregs[ESP_RSTAT] &= ~STAT_TC;
    /* FIFO while collecting a CDB: append to cmdbuf.
     * NOTE(review): no bounds check on cmdlen is visible here — a guest
     * writing more than TI_BUFSZ bytes would overflow cmdbuf; confirm
     * against full source. */
    s->cmdbuf[s->cmdlen++] = val & 0xff;
    /* NOTE(review): overrun guard tests ti_size, not ti_wptr — verify
     * this matches the intended FIFO bound. */
    } else if (s->ti_size == TI_BUFSZ - 1) {
    ESP_ERROR("fifo overrun\n");
    /* FIFO data byte for PIO transfers. */
    s->ti_buf[s->ti_wptr++] = val & 0xff;
    /* Command register: value is latched, then decoded. */
    s->rregs[saddr] = val;
    /* Reload DMA counter. */
    s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
    s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
    switch(val & CMD_CMD) {
    /* NOP */
    trace_esp_mem_writeb_cmd_nop(val);
    /* FLUSH: clear FIFO state and report function complete. */
    trace_esp_mem_writeb_cmd_flush(val);
    s->rregs[ESP_RINTR] = INTR_FC;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    /* RESET: chip soft reset. */
    trace_esp_mem_writeb_cmd_reset(val);
    esp_soft_reset(&s->busdev.qdev);
    /* BUSRESET: report unless reset reporting is masked in CFG1. */
    trace_esp_mem_writeb_cmd_bus_reset(val);
    s->rregs[ESP_RINTR] = INTR_RST;
    if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
    /* ICCS: initiator command-complete sequence -> message-in phase. */
    trace_esp_mem_writeb_cmd_iccs(val);
    s->rregs[ESP_RINTR] = INTR_FC;
    s->rregs[ESP_RSTAT] |= STAT_MI;
    /* MSGACC: message accepted -> disconnect. */
    trace_esp_mem_writeb_cmd_msgacc(val);
    s->rregs[ESP_RINTR] = INTR_DC;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    /* PAD: transfer padding completes immediately. */
    trace_esp_mem_writeb_cmd_pad(val);
    s->rregs[ESP_RSTAT] = STAT_TC;
    s->rregs[ESP_RINTR] = INTR_FC;
    s->rregs[ESP_RSEQ] = 0;
    /* SATN / selection commands dispatch to their handlers. */
    trace_esp_mem_writeb_cmd_satn(val);
    trace_esp_mem_writeb_cmd_sel(val);
    handle_s_without_atn(s);
    trace_esp_mem_writeb_cmd_selatn(val);
    trace_esp_mem_writeb_cmd_selatns(val);
    /* ENSEL: enable reselection; just clears pending interrupts here. */
    trace_esp_mem_writeb_cmd_ensel(val);
    s->rregs[ESP_RINTR] = 0;
    ESP_ERROR("Unhandled ESP command (%2.2x)\n", (unsigned)val);
    /* Write-only setup registers are mirrored into rregs. */
    case ESP_WBUSID ... ESP_WSYNO:
    s->rregs[saddr] = val;
    case ESP_WCCF ... ESP_WTEST:
    case ESP_CFG2 ... ESP_RES4:
    s->rregs[saddr] = val;
    ESP_ERROR("invalid write of 0x%02x at [0x%x]\n", (unsigned)val, saddr);
    /* Remember every guest write in the shadow register file. */
    s->wregs[saddr] = val;
668 static bool esp_mem_accepts(void *opaque, target_phys_addr_t addr,
669 unsigned size, bool is_write)
671 return (size == 1) || (is_write && size == 4);
/* MMIO dispatch table for the register bank; access sizes are vetted by
 * esp_mem_accepts before read/write are invoked. */
static const MemoryRegionOps esp_mem_ops = {
    .read = esp_mem_read,
    .write = esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
/* Migration description: register files plus the PIO/DMA transfer state
 * needed to resume an in-flight operation.  (Fragment: .name/.version_id
 * are not visible in this excerpt.) */
static const VMStateDescription vmstate_esp = {
    .minimum_version_id = 3,
    .minimum_version_id_old = 3,
    .fields = (VMStateField []) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_BUFFER(cmdbuf, ESPState),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
/*
 * Board-facing constructor: create an "esp" sysbus device, hook up the
 * board's DMA helpers, map its registers at espaddr and hand back the
 * two GPIO control lines.  (Fragment: the local declarations of dev,
 * esp and s are not visible in this excerpt.)
 */
void esp_init(target_phys_addr_t espaddr, int it_shift,
              ESPDMAMemoryReadWriteFunc dma_memory_read,
              ESPDMAMemoryReadWriteFunc dma_memory_write,
              void *dma_opaque, qemu_irq irq, qemu_irq *reset,
              qemu_irq *dma_enable)
    dev = qdev_create(NULL, "esp");
    esp = DO_UPCAST(ESPState, busdev.qdev, dev);
    esp->dma_memory_read = dma_memory_read;
    esp->dma_memory_write = dma_memory_write;
    esp->dma_opaque = dma_opaque;
    esp->it_shift = it_shift;   /* register stride: addr >> it_shift */
    /* XXX for now until rc4030 has been changed to use DMA enable signal */
    esp->dma_enabled = 1;
    qdev_init_nofail(dev);
    s = sysbus_from_qdev(dev);
    sysbus_connect_irq(s, 0, irq);
    sysbus_mmio_map(s, 0, espaddr);
    /* GPIO 0 = reset line, GPIO 1 = DMA enable (see esp_gpio_demux). */
    *reset = qdev_get_gpio_in(dev, 0);
    *dma_enable = qdev_get_gpio_in(dev, 1);
/* Callbacks the SCSI core uses to talk back to this adapter. */
static const struct SCSIBusInfo esp_scsi_info = {
    .max_target = ESP_MAX_DEVS,
    .transfer_data = esp_transfer_data,     /* data buffer ready */
    .complete = esp_command_complete,       /* request finished */
    .cancel = esp_request_cancelled         /* request torn down */
/*
 * qdev realize hook: set up IRQ, MMIO region sized by it_shift, the two
 * GPIO control inputs and the child SCSI bus, then attach any drives
 * given on the command line.
 */
static int esp_init1(SysBusDevice *dev)
    ESPState *s = FROM_SYSBUS(ESPState, dev);

    sysbus_init_irq(dev, &s->irq);
    /* it_shift must have been configured (property or esp_init). */
    assert(s->it_shift != -1);

    memory_region_init_io(&s->iomem, &esp_mem_ops, s,
                          "esp", ESP_REGS << s->it_shift);
    sysbus_init_mmio(dev, &s->iomem);

    /* GPIO 0 = reset, GPIO 1 = DMA enable (demuxed in esp_gpio_demux). */
    qdev_init_gpio_in(&dev->qdev, esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, &dev->qdev, &esp_scsi_info);
    return scsi_bus_legacy_handle_cmdline(&s->bus);
/* qdev properties (entries not visible in this excerpt — presumably
 * it_shift plus the end-of-list terminator; confirm). */
static Property esp_properties[] = {
/* Class initialiser: wires the sysbus init hook (assignment to k not
 * visible in this excerpt — presumably k->init = esp_init1; confirm). */
static void esp_class_init(ObjectClass *klass, void *data)
    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
/* qdev registration record tying together state size, migration,
 * reset handler, properties and class init. */
static DeviceInfo esp_info = {
    .size = sizeof(ESPState),
    .vmsd = &vmstate_esp,
    .reset = esp_hard_reset,
    .props = esp_properties,
    .class_init = esp_class_init,
/* Register the "esp" device type with the sysbus core at startup. */
static void esp_register_devices(void)
    sysbus_register_withprop(&esp_info);

device_init(esp_register_devices)