/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "scsi-disk.h"
/* FIXME: Only needed for MAX_DISKS, which is probably wrong. */

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */

#ifdef DEBUG_ESP
#define DPRINTF(fmt, args...) \
do { printf("ESP: " fmt , ##args); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif

#define ESP_SIZE (ESP_REGS * 4)
typedef struct ESPState ESPState;

struct ESPState {
    uint8_t rregs[ESP_REGS];
    uint8_t wregs[ESP_REGS];
    uint32_t ti_rptr, ti_wptr;
    uint8_t ti_buf[TI_BUFSZ];
    SCSIDevice *scsi_dev[ESP_MAX_DEVS];
    SCSIDevice *current_dev;
    uint8_t cmdbuf[TI_BUFSZ];
    /* The amount of data left in the current DMA transfer. */
    /* The size of the current DMA transfer. Zero if no transfer is in
       progress. */

#define ESP_WBUSID 0x4
#define ESP_WSYNTP 0x6
#define ESP_RFLAGS 0x7
#define CMD_FLUSH 0x01
#define CMD_RESET 0x02
#define CMD_BUSRESET 0x03
#define CMD_ICCS 0x11
#define CMD_MSGACC 0x12
#define CMD_SATN 0x1a
#define CMD_SELATN 0x42
#define CMD_SELATNS 0x43
#define CMD_ENSEL 0x44

#define STAT_PIO_MASK 0x06

#define INTR_RST 0x80

#define CFG1_RESREPT 0x40

#define CFG2_MASK 0x15

#define TCHI_FAS100A 0x4
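/*
 * Gather a SCSI command into buf, either via DMA or from the PIO FIFO
 * (ti_buf), cancel any command still in flight, and select the target
 * addressed by the low bits of the bus ID register.  The return paths are
 * elided in this excerpt; presumably the command length on success and 0
 * when no device answers at that target.
 */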
static int get_cmd(ESPState *s, uint8_t *buf)
    dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
    target = s->wregs[ESP_WBUSID] & 7;
    DPRINTF("get_cmd: len %d target %d\n", dmalen, target);
        espdma_memory_read(s->dma_opaque, buf, dmalen);
        memcpy(&buf[1], s->ti_buf, dmalen);
    if (s->current_dev) {
        /* Started a new command before the old one finished. Cancel it. */
        s->current_dev->cancel_io(s->current_dev, 0);
    if (target >= ESP_MAX_DEVS || !s->scsi_dev[target]) {
        s->rregs[ESP_RSTAT] = STAT_IN;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        qemu_irq_raise(s->irq);
    s->current_dev = s->scsi_dev[target];
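/*
 * Pass an assembled command to the SCSI layer.  buf[0] is the bus ID byte
 * (the DPRINTF below) and the CDB proper starts at buf[1].  Judging by the
 * STAT_DI/STAT_DO branches, a positive transfer length reported by
 * send_command means data-in and a negative one data-out; ti_size follows
 * the same sign convention throughout this file.
 */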
static void do_cmd(ESPState *s, uint8_t *buf)
    DPRINTF("do_cmd: busid 0x%x\n", buf[0]);
    datalen = s->current_dev->send_command(s->current_dev, 0, &buf[1], lun);
    s->ti_size = datalen;
        s->rregs[ESP_RSTAT] = STAT_IN | STAT_TC;
            s->rregs[ESP_RSTAT] |= STAT_DI;
            s->current_dev->read_data(s->current_dev, 0);
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->current_dev->write_data(s->current_dev, 0);
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    qemu_irq_raise(s->irq);
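/* "Select with ATN" sequence: fetch the command and submit it right away. */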
static void handle_satn(ESPState *s)
    len = get_cmd(s, buf);

static void handle_satn_stop(ESPState *s)
    s->cmdlen = get_cmd(s, s->cmdbuf);
        DPRINTF("Set ATN & Stop: cmdlen %d\n", s->cmdlen);
        s->rregs[ESP_RSTAT] = STAT_IN | STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        qemu_irq_raise(s->irq);
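/*
 * Status phase: return the status byte (sense) and a message byte to the
 * initiator, via DMA when it is enabled or through the FIFO otherwise.
 */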
static void write_response(ESPState *s)
    DPRINTF("Transfer status (sense=%d)\n", s->sense);
    s->ti_buf[0] = s->sense;
        espdma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[ESP_RSTAT] = STAT_IN | STAT_TC | STAT_ST;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RFLAGS] = 2;
    qemu_irq_raise(s->irq);
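/* Latch terminal count status, clear the transfer counter and FIFO flags,
   and raise the bus-service interrupt. */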
static void esp_dma_done(ESPState *s)
    s->rregs[ESP_RSTAT] |= STAT_IN | STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    qemu_irq_raise(s->irq);
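/*
 * Move up to dma_left bytes between the DMA engine and the SCSI layer's
 * asynchronous buffer.  While a command is still being assembled (do_cmd)
 * the bytes are appended to cmdbuf instead.  ti_size is negative for
 * transfers toward the device and positive for transfers from it.
 */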
static void esp_do_dma(ESPState *s)
    to_device = (s->ti_size < 0);
        DPRINTF("command len %d + %d\n", s->cmdlen, len);
        espdma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        do_cmd(s, s->cmdbuf);
    if (s->async_len == 0) {
        /* Defer until data is available. */
    if (len > s->async_len) {
        espdma_memory_read(s->dma_opaque, s->async_buf, len);
        espdma_memory_write(s->dma_opaque, s->async_buf, len);
    if (s->async_len == 0) {
            // ti_size is negative
            s->current_dev->write_data(s->current_dev, 0);
            s->current_dev->read_data(s->current_dev, 0);
            /* If there is still data to be read from the device then
               complete the DMA operation immediately. Otherwise defer
               until the scsi layer has completed. */
            if (s->dma_left == 0 && s->ti_size > 0) {
        /* Partially filled a scsi buffer. Complete immediately. */
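/*
 * Callback from the SCSI layer: either more data has become available (a
 * fresh async buffer) or the command has finished, in which case the
 * controller switches to the status phase (STAT_ST) and forgets the
 * current device.
 */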
static void esp_command_complete(void *opaque, int reason, uint32_t tag,
                                 uint32_t arg)
    ESPState *s = (ESPState *)opaque;

    if (reason == SCSI_REASON_DONE) {
        DPRINTF("SCSI Command complete\n");
            DPRINTF("SCSI command completed unexpectedly\n");
            DPRINTF("Command failed\n");
        s->rregs[ESP_RSTAT] = STAT_ST;
        s->current_dev = NULL;
        DPRINTF("transfer %d/%d\n", s->dma_left, s->ti_size);
        s->async_buf = s->current_dev->get_buf(s->current_dev, 0);
        } else if (s->dma_counter != 0 && s->ti_size <= 0) {
            /* If this was the last part of a DMA transfer then the
               completion interrupt is deferred to here. */
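/*
 * "Transfer Information" command: compute how much the current phase may
 * move (bounded by the transfer counter and ti_size), then either start
 * DMA or, in PIO command mode, execute the buffered command.
 */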
static void handle_ti(ESPState *s)
    uint32_t dmalen, minlen;

    dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
    s->dma_counter = dmalen;
        minlen = (dmalen < 32) ? dmalen : 32;
    else if (s->ti_size < 0)
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    DPRINTF("Transfer Information len %d\n", minlen);
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
    } else if (s->do_cmd) {
        DPRINTF("command len %d\n", s->cmdlen);
        do_cmd(s, s->cmdbuf);
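/* Reset the chip to its power-on register state; TCHI then reports the
   FAS100A family ID so guests can identify the part. */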
static void esp_reset(void *opaque)
    ESPState *s = opaque;

    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->rregs[ESP_TCHI] = TCHI_FAS100A; // Indicate fas100a
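/* Reset line driven by the parent device (the DMA controller on Sparc32);
   the elided body presumably calls esp_reset when the line is asserted. */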
static void parent_esp_reset(void *opaque, int irq, int level)
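/* Register reads.  Reading the FIFO pops one byte of PIO data; reading the
   interrupt register (in an elided case below) clears the latched status
   bits and drops the IRQ line. */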
static uint32_t esp_mem_readb(void *opaque, target_phys_addr_t addr)
    ESPState *s = opaque;

    saddr = (addr & ESP_MASK) >> 2;
    DPRINTF("read reg[%d]: 0x%2.2x\n", saddr, s->rregs[saddr]);
        if (s->ti_size > 0) {
            if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
                fprintf(stderr, "esp: PIO data read not implemented\n");
                s->rregs[ESP_FIFO] = 0;
                s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
            qemu_irq_raise(s->irq);
        if (s->ti_size == 0) {
        // Clear interrupt/error status bits
        s->rregs[ESP_RSTAT] &= ~(STAT_IN | STAT_GE | STAT_PE);
        qemu_irq_lower(s->irq);
    return s->rregs[saddr];
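/* Register writes.  FIFO writes land either in the command buffer (while a
   command is being built up) or in the PIO buffer; a write to the command
   register dispatches to the individual ESP commands in the switch below. */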
static void esp_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
    ESPState *s = opaque;

    saddr = (addr & ESP_MASK) >> 2;
    DPRINTF("write reg[%d]: 0x%2.2x -> 0x%2.2x\n", saddr, s->wregs[saddr],
            val);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
            s->cmdbuf[s->cmdlen++] = val & 0xff;
        } else if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            fprintf(stderr, "esp: PIO data write not implemented\n");
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        s->rregs[saddr] = val;
            /* Reload DMA counter. */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
        switch(val & CMD_CMD) {
            DPRINTF("NOP (%2.2x)\n", val);
            DPRINTF("Flush FIFO (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            DPRINTF("Chip reset (%2.2x)\n", val);
            DPRINTF("Bus reset (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                qemu_irq_raise(s->irq);
            DPRINTF("Initiator Command Complete Sequence (%2.2x)\n", val);
            DPRINTF("Message Accepted (%2.2x)\n", val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            DPRINTF("Set ATN (%2.2x)\n", val);
510 DPRINTF("Set ATN (%2.2x)\n", val);
514 DPRINTF("Set ATN & stop (%2.2x)\n", val);
518 DPRINTF("Enable selection (%2.2x)\n", val);
521 DPRINTF("Unhandled ESP command (%2.2x)\n", val);
525 case ESP_WBUSID ... ESP_WSYNO:
528 s->rregs[saddr] = val;
530 case ESP_WCCF ... ESP_WTEST:
533 s->rregs[saddr] = val & CFG2_MASK;
535 case ESP_CFG3 ... ESP_RES4:
536 s->rregs[saddr] = val;
541 s->wregs[saddr] = val;
static CPUReadMemoryFunc *esp_mem_read[3] = {

static CPUWriteMemoryFunc *esp_mem_write[3] = {
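/* Savevm support: the register file, the PIO/command buffers and the
   transfer bookkeeping are migrated.  dma_counter is deliberately omitted
   because no transfer should be in progress at save time (see the note
   below). */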
static void esp_save(QEMUFile *f, void *opaque)
    ESPState *s = opaque;

    qemu_put_buffer(f, s->rregs, ESP_REGS);
    qemu_put_buffer(f, s->wregs, ESP_REGS);
    qemu_put_be32s(f, &s->ti_size);
    qemu_put_be32s(f, &s->ti_rptr);
    qemu_put_be32s(f, &s->ti_wptr);
    qemu_put_buffer(f, s->ti_buf, TI_BUFSZ);
    qemu_put_be32s(f, &s->sense);
    qemu_put_be32s(f, &s->dma);
    qemu_put_buffer(f, s->cmdbuf, TI_BUFSZ);
    qemu_put_be32s(f, &s->cmdlen);
    qemu_put_be32s(f, &s->do_cmd);
    qemu_put_be32s(f, &s->dma_left);
    // There should be no transfers in progress, so dma_counter is not saved
static int esp_load(QEMUFile *f, void *opaque, int version_id)
    ESPState *s = opaque;

        return -EINVAL; // Cannot emulate 2

    qemu_get_buffer(f, s->rregs, ESP_REGS);
    qemu_get_buffer(f, s->wregs, ESP_REGS);
    qemu_get_be32s(f, &s->ti_size);
    qemu_get_be32s(f, &s->ti_rptr);
    qemu_get_be32s(f, &s->ti_wptr);
    qemu_get_buffer(f, s->ti_buf, TI_BUFSZ);
    qemu_get_be32s(f, &s->sense);
    qemu_get_be32s(f, &s->dma);
    qemu_get_buffer(f, s->cmdbuf, TI_BUFSZ);
    qemu_get_be32s(f, &s->cmdlen);
    qemu_get_be32s(f, &s->do_cmd);
    qemu_get_be32s(f, &s->dma_left);
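/* Attach a SCSI disk at the given target ID.  The loop below searches for a
   free slot (presumably when the caller passes a negative ID, in an elided
   check), and any device already present at the chosen ID is destroyed
   first. */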
void esp_scsi_attach(void *opaque, BlockDriverState *bd, int id)
    ESPState *s = (ESPState *)opaque;

        for (id = 0; id < ESP_MAX_DEVS; id++) {
            if (s->scsi_dev[id] == NULL)
    if (id >= ESP_MAX_DEVS) {
        DPRINTF("Bad Device ID %d\n", id);
    if (s->scsi_dev[id]) {
        DPRINTF("Destroying device %d\n", id);
        s->scsi_dev[id]->destroy(s->scsi_dev[id]);
    DPRINTF("Attaching block device %d\n", id);
    /* Command queueing is not implemented. */
    s->scsi_dev[id] = scsi_disk_init(bd, 0, esp_command_complete, s);
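/* Create the controller: allocate state, map the register bank at espaddr,
   register savevm and reset handlers, and hand back an IRQ that the parent
   can pulse to reset the chip. */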
void *esp_init(target_phys_addr_t espaddr,
               void *dma_opaque, qemu_irq irq, qemu_irq *reset)
    s = qemu_mallocz(sizeof(ESPState));
    s->dma_opaque = dma_opaque;

    esp_io_memory = cpu_register_io_memory(0, esp_mem_read, esp_mem_write, s);
    cpu_register_physical_memory(espaddr, ESP_SIZE, esp_io_memory);

    register_savevm("esp", espaddr, 3, esp_save, esp_load, s);
    qemu_register_reset(esp_reset, s);

    *reset = *qemu_allocate_irqs(parent_esp_reset, s, 1);