2 * QEMU ESP/NCR53C9x emulation
4 * Copyright (c) 2005-2006 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
31 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
32 * also produced as NCR89C100. See
33 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
35 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
/* Log an emulation error to stdout, tagged with the reporting function's
   name.  Wrapped in do { } while (0) so it expands to a single statement. */
#define ESP_ERROR(fmt, ...) \
    do { printf("ESP ERROR: %s: " fmt, __func__ , ## __VA_ARGS__); } while (0)
44 typedef struct ESPState ESPState;
49 uint8_t rregs[ESP_REGS];
50 uint8_t wregs[ESP_REGS];
54 uint32_t ti_rptr, ti_wptr;
57 uint8_t ti_buf[TI_BUFSZ];
59 SCSIDevice *current_dev;
60 SCSIRequest *current_req;
61 uint8_t cmdbuf[TI_BUFSZ];
65 /* The amount of data left in the current DMA transfer. */
67 /* The size of the current DMA transfer. Zero if no transfer is in
75 ESPDMAMemoryReadWriteFunc dma_memory_read;
76 ESPDMAMemoryReadWriteFunc dma_memory_write;
78 void (*dma_cb)(ESPState *s);
86 #define ESP_WBUSID 0x4
90 #define ESP_WSYNTP 0x6
91 #define ESP_RFLAGS 0x7
108 #define CMD_FLUSH 0x01
109 #define CMD_RESET 0x02
110 #define CMD_BUSRESET 0x03
112 #define CMD_ICCS 0x11
113 #define CMD_MSGACC 0x12
115 #define CMD_SATN 0x1a
117 #define CMD_SELATN 0x42
118 #define CMD_SELATNS 0x43
119 #define CMD_ENSEL 0x44
127 #define STAT_PIO_MASK 0x06
132 #define STAT_INT 0x80
134 #define BUSID_DID 0x07
139 #define INTR_RST 0x80
144 #define CFG1_RESREPT 0x40
146 #define TCHI_FAS100A 0x4
148 static void esp_raise_irq(ESPState *s)
150 if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
151 s->rregs[ESP_RSTAT] |= STAT_INT;
152 qemu_irq_raise(s->irq);
153 trace_esp_raise_irq();
157 static void esp_lower_irq(ESPState *s)
159 if (s->rregs[ESP_RSTAT] & STAT_INT) {
160 s->rregs[ESP_RSTAT] &= ~STAT_INT;
161 qemu_irq_lower(s->irq);
162 trace_esp_lower_irq();
166 static void esp_dma_enable(void *opaque, int irq, int level)
168 DeviceState *d = opaque;
169 ESPState *s = container_of(d, ESPState, busdev.qdev);
173 trace_esp_dma_enable();
179 trace_esp_dma_disable();
184 static void esp_request_cancelled(SCSIRequest *req)
186 ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);
188 if (req == s->current_req) {
189 scsi_req_unref(s->current_req);
190 s->current_req = NULL;
191 s->current_dev = NULL;
195 static uint32_t get_cmd(ESPState *s, uint8_t *buf)
200 target = s->wregs[ESP_WBUSID] & BUSID_DID;
202 dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
203 s->dma_memory_read(s->dma_opaque, buf, dmalen);
206 memcpy(buf, s->ti_buf, dmalen);
207 buf[0] = buf[2] >> 5;
209 trace_esp_get_cmd(dmalen, target);
215 if (s->current_req) {
216 /* Started a new command before the old one finished. Cancel it. */
217 scsi_req_cancel(s->current_req);
221 s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
222 if (!s->current_dev) {
224 s->rregs[ESP_RSTAT] = 0;
225 s->rregs[ESP_RINTR] = INTR_DC;
226 s->rregs[ESP_RSEQ] = SEQ_0;
233 static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
237 SCSIDevice *current_lun;
239 trace_esp_do_busid_cmd(busid);
241 current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
242 s->current_req = scsi_req_new(current_lun, 0, lun, buf, NULL);
243 datalen = scsi_req_enqueue(s->current_req);
244 s->ti_size = datalen;
246 s->rregs[ESP_RSTAT] = STAT_TC;
250 s->rregs[ESP_RSTAT] |= STAT_DI;
252 s->rregs[ESP_RSTAT] |= STAT_DO;
254 scsi_req_continue(s->current_req);
256 s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
257 s->rregs[ESP_RSEQ] = SEQ_CD;
261 static void do_cmd(ESPState *s, uint8_t *buf)
263 uint8_t busid = buf[0];
265 do_busid_cmd(s, &buf[1], busid);
268 static void handle_satn(ESPState *s)
273 if (!s->dma_enabled) {
274 s->dma_cb = handle_satn;
277 len = get_cmd(s, buf);
282 static void handle_s_without_atn(ESPState *s)
287 if (!s->dma_enabled) {
288 s->dma_cb = handle_s_without_atn;
291 len = get_cmd(s, buf);
293 do_busid_cmd(s, buf, 0);
297 static void handle_satn_stop(ESPState *s)
299 if (!s->dma_enabled) {
300 s->dma_cb = handle_satn_stop;
303 s->cmdlen = get_cmd(s, s->cmdbuf);
305 trace_esp_handle_satn_stop(s->cmdlen);
307 s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
308 s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
309 s->rregs[ESP_RSEQ] = SEQ_CD;
314 static void write_response(ESPState *s)
316 trace_esp_write_response(s->status);
317 s->ti_buf[0] = s->status;
320 s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
321 s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
322 s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
323 s->rregs[ESP_RSEQ] = SEQ_CD;
328 s->rregs[ESP_RFLAGS] = 2;
333 static void esp_dma_done(ESPState *s)
335 s->rregs[ESP_RSTAT] |= STAT_TC;
336 s->rregs[ESP_RINTR] = INTR_BS;
337 s->rregs[ESP_RSEQ] = 0;
338 s->rregs[ESP_RFLAGS] = 0;
339 s->rregs[ESP_TCLO] = 0;
340 s->rregs[ESP_TCMID] = 0;
344 static void esp_do_dma(ESPState *s)
349 to_device = (s->ti_size < 0);
352 trace_esp_do_dma(s->cmdlen, len);
353 s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
357 do_cmd(s, s->cmdbuf);
360 if (s->async_len == 0) {
361 /* Defer until data is available. */
364 if (len > s->async_len) {
368 s->dma_memory_read(s->dma_opaque, s->async_buf, len);
370 s->dma_memory_write(s->dma_opaque, s->async_buf, len);
379 if (s->async_len == 0) {
380 scsi_req_continue(s->current_req);
381 /* If there is still data to be read from the device then
382 complete the DMA operation immediately. Otherwise defer
383 until the scsi layer has completed. */
384 if (to_device || s->dma_left != 0 || s->ti_size == 0) {
389 /* Partially filled a scsi buffer. Complete immediately. */
393 static void esp_command_complete(SCSIRequest *req, uint32_t status,
396 ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);
398 trace_esp_command_complete();
399 if (s->ti_size != 0) {
400 trace_esp_command_complete_unexpected();
406 trace_esp_command_complete_fail();
409 s->rregs[ESP_RSTAT] = STAT_ST;
411 if (s->current_req) {
412 scsi_req_unref(s->current_req);
413 s->current_req = NULL;
414 s->current_dev = NULL;
418 static void esp_transfer_data(SCSIRequest *req, uint32_t len)
420 ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);
422 trace_esp_transfer_data(s->dma_left, s->ti_size);
424 s->async_buf = scsi_req_get_buf(req);
427 } else if (s->dma_counter != 0 && s->ti_size <= 0) {
428 /* If this was the last part of a DMA transfer then the
429 completion interrupt is deferred to here. */
434 static void handle_ti(ESPState *s)
436 uint32_t dmalen, minlen;
438 dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
442 s->dma_counter = dmalen;
445 minlen = (dmalen < 32) ? dmalen : 32;
446 else if (s->ti_size < 0)
447 minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
449 minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
450 trace_esp_handle_ti(minlen);
452 s->dma_left = minlen;
453 s->rregs[ESP_RSTAT] &= ~STAT_TC;
455 } else if (s->do_cmd) {
456 trace_esp_handle_ti_cmd(s->cmdlen);
460 do_cmd(s, s->cmdbuf);
465 static void esp_hard_reset(DeviceState *d)
467 ESPState *s = container_of(d, ESPState, busdev.qdev);
469 memset(s->rregs, 0, ESP_REGS);
470 memset(s->wregs, 0, ESP_REGS);
471 s->rregs[ESP_TCHI] = TCHI_FAS100A; // Indicate fas100a
479 s->rregs[ESP_CFG1] = 7;
482 static void esp_soft_reset(DeviceState *d)
484 ESPState *s = container_of(d, ESPState, busdev.qdev);
486 qemu_irq_lower(s->irq);
/* GPIO handler: reset request from the parent device; act on assert only. */
static void parent_esp_reset(void *opaque, int irq, int level)
{
    if (level) {
        esp_soft_reset(opaque);
    }
}
/* Demultiplex the two input GPIO lines: 0 = reset, 1 = DMA enable. */
static void esp_gpio_demux(void *opaque, int irq, int level)
{
    switch (irq) {
    case 0:
        parent_esp_reset(opaque, irq, level);
        break;
    case 1:
        esp_dma_enable(opaque, irq, level);
        break;
    default:
        break;
    }
}
509 static uint64_t esp_mem_read(void *opaque, target_phys_addr_t addr,
512 ESPState *s = opaque;
513 uint32_t saddr, old_val;
515 saddr = addr >> s->it_shift;
516 trace_esp_mem_readb(saddr, s->rregs[saddr]);
519 if (s->ti_size > 0) {
521 if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
523 ESP_ERROR("PIO data read not implemented\n");
524 s->rregs[ESP_FIFO] = 0;
526 s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
530 if (s->ti_size == 0) {
536 /* Clear sequence step, interrupt register and all status bits
538 old_val = s->rregs[ESP_RINTR];
539 s->rregs[ESP_RINTR] = 0;
540 s->rregs[ESP_RSTAT] &= ~STAT_TC;
541 s->rregs[ESP_RSEQ] = SEQ_CD;
548 return s->rregs[saddr];
551 static void esp_mem_write(void *opaque, target_phys_addr_t addr,
552 uint64_t val, unsigned size)
554 ESPState *s = opaque;
557 saddr = addr >> s->it_shift;
558 trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
562 s->rregs[ESP_RSTAT] &= ~STAT_TC;
566 s->cmdbuf[s->cmdlen++] = val & 0xff;
567 } else if (s->ti_size == TI_BUFSZ - 1) {
568 ESP_ERROR("fifo overrun\n");
571 s->ti_buf[s->ti_wptr++] = val & 0xff;
575 s->rregs[saddr] = val;
578 /* Reload DMA counter. */
579 s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
580 s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
584 switch(val & CMD_CMD) {
586 trace_esp_mem_writeb_cmd_nop(val);
589 trace_esp_mem_writeb_cmd_flush(val);
591 s->rregs[ESP_RINTR] = INTR_FC;
592 s->rregs[ESP_RSEQ] = 0;
593 s->rregs[ESP_RFLAGS] = 0;
596 trace_esp_mem_writeb_cmd_reset(val);
597 esp_soft_reset(&s->busdev.qdev);
600 trace_esp_mem_writeb_cmd_bus_reset(val);
601 s->rregs[ESP_RINTR] = INTR_RST;
602 if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
610 trace_esp_mem_writeb_cmd_iccs(val);
612 s->rregs[ESP_RINTR] = INTR_FC;
613 s->rregs[ESP_RSTAT] |= STAT_MI;
616 trace_esp_mem_writeb_cmd_msgacc(val);
617 s->rregs[ESP_RINTR] = INTR_DC;
618 s->rregs[ESP_RSEQ] = 0;
619 s->rregs[ESP_RFLAGS] = 0;
623 trace_esp_mem_writeb_cmd_pad(val);
624 s->rregs[ESP_RSTAT] = STAT_TC;
625 s->rregs[ESP_RINTR] = INTR_FC;
626 s->rregs[ESP_RSEQ] = 0;
629 trace_esp_mem_writeb_cmd_satn(val);
632 trace_esp_mem_writeb_cmd_sel(val);
633 handle_s_without_atn(s);
636 trace_esp_mem_writeb_cmd_selatn(val);
640 trace_esp_mem_writeb_cmd_selatns(val);
644 trace_esp_mem_writeb_cmd_ensel(val);
645 s->rregs[ESP_RINTR] = 0;
648 ESP_ERROR("Unhandled ESP command (%2.2x)\n", (unsigned)val);
652 case ESP_WBUSID ... ESP_WSYNO:
655 s->rregs[saddr] = val;
657 case ESP_WCCF ... ESP_WTEST:
659 case ESP_CFG2 ... ESP_RES4:
660 s->rregs[saddr] = val;
663 ESP_ERROR("invalid write of 0x%02x at [0x%x]\n", (unsigned)val, saddr);
666 s->wregs[saddr] = val;
669 static bool esp_mem_accepts(void *opaque, target_phys_addr_t addr,
670 unsigned size, bool is_write)
672 return (size == 1) || (is_write && size == 4);
675 static const MemoryRegionOps esp_mem_ops = {
676 .read = esp_mem_read,
677 .write = esp_mem_write,
678 .endianness = DEVICE_NATIVE_ENDIAN,
679 .valid.accepts = esp_mem_accepts,
682 static const VMStateDescription vmstate_esp = {
685 .minimum_version_id = 3,
686 .minimum_version_id_old = 3,
687 .fields = (VMStateField []) {
688 VMSTATE_BUFFER(rregs, ESPState),
689 VMSTATE_BUFFER(wregs, ESPState),
690 VMSTATE_INT32(ti_size, ESPState),
691 VMSTATE_UINT32(ti_rptr, ESPState),
692 VMSTATE_UINT32(ti_wptr, ESPState),
693 VMSTATE_BUFFER(ti_buf, ESPState),
694 VMSTATE_UINT32(status, ESPState),
695 VMSTATE_UINT32(dma, ESPState),
696 VMSTATE_BUFFER(cmdbuf, ESPState),
697 VMSTATE_UINT32(cmdlen, ESPState),
698 VMSTATE_UINT32(do_cmd, ESPState),
699 VMSTATE_UINT32(dma_left, ESPState),
700 VMSTATE_END_OF_LIST()
704 void esp_init(target_phys_addr_t espaddr, int it_shift,
705 ESPDMAMemoryReadWriteFunc dma_memory_read,
706 ESPDMAMemoryReadWriteFunc dma_memory_write,
707 void *dma_opaque, qemu_irq irq, qemu_irq *reset,
708 qemu_irq *dma_enable)
714 dev = qdev_create(NULL, "esp");
715 esp = DO_UPCAST(ESPState, busdev.qdev, dev);
716 esp->dma_memory_read = dma_memory_read;
717 esp->dma_memory_write = dma_memory_write;
718 esp->dma_opaque = dma_opaque;
719 esp->it_shift = it_shift;
720 /* XXX for now until rc4030 has been changed to use DMA enable signal */
721 esp->dma_enabled = 1;
722 qdev_init_nofail(dev);
723 s = sysbus_from_qdev(dev);
724 sysbus_connect_irq(s, 0, irq);
725 sysbus_mmio_map(s, 0, espaddr);
726 *reset = qdev_get_gpio_in(dev, 0);
727 *dma_enable = qdev_get_gpio_in(dev, 1);
730 static const struct SCSIBusInfo esp_scsi_info = {
732 .max_target = ESP_MAX_DEVS,
735 .transfer_data = esp_transfer_data,
736 .complete = esp_command_complete,
737 .cancel = esp_request_cancelled
740 static int esp_init1(SysBusDevice *dev)
742 ESPState *s = FROM_SYSBUS(ESPState, dev);
744 sysbus_init_irq(dev, &s->irq);
745 assert(s->it_shift != -1);
747 memory_region_init_io(&s->iomem, &esp_mem_ops, s,
748 "esp", ESP_REGS << s->it_shift);
749 sysbus_init_mmio(dev, &s->iomem);
751 qdev_init_gpio_in(&dev->qdev, esp_gpio_demux, 2);
753 scsi_bus_new(&s->bus, &dev->qdev, &esp_scsi_info);
754 return scsi_bus_legacy_handle_cmdline(&s->bus);
757 static Property esp_properties[] = {
761 static void esp_class_init(ObjectClass *klass, void *data)
763 DeviceClass *dc = DEVICE_CLASS(klass);
764 SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
767 dc->reset = esp_hard_reset;
768 dc->vmsd = &vmstate_esp;
769 dc->props = esp_properties;
772 static TypeInfo esp_info = {
774 .parent = TYPE_SYS_BUS_DEVICE,
775 .instance_size = sizeof(ESPState),
776 .class_init = esp_class_init,
779 static void esp_register_types(void)
781 type_register_static(&esp_info);
784 type_init(esp_register_types)