/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */

static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

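/*
 * DRQ (data request) drives the pseudo-DMA handshake: it asks the
 * machine glue to move the next bytes through the PDMA data port.
 */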
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
}

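/*
 * Gate DMA availability.  When DMA becomes enabled, a command that was
 * deferred while DMA was unavailable (saved in dma_cb) is run now.
 */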
void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

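/*
 * Pseudo-DMA bookkeeping: pdma_origin selects which buffer the PDMA
 * port operates on, pdma_start/pdma_cur bracket the bytes moved so far
 * and pdma_len counts the bytes still outstanding.
 */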
static void set_pdma(ESPState *s, enum pdma_origin_id origin,
                     uint32_t index, uint32_t len)
{
    s->pdma_origin = origin;
    s->pdma_start = index;
    s->pdma_cur = index;
    s->pdma_len = len;
}

static uint8_t *get_pdma_buf(ESPState *s)
{
    switch (s->pdma_origin) {
    case PDMA:
        return s->pdma_buf;
    case TI:
        return s->ti_buf;
    case CMD:
        return s->cmdbuf;
    case ASYNC:
        return s->async_buf;
    }
    return NULL;
}

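/*
 * Common tail of a selection: reset the transfer buffer, cancel any
 * request still in flight and look up the selected target.  Returns -1
 * (after raising a disconnect interrupt) if no device answers.
 */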
static int get_cmd_cb(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }
    return 0;
}

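/*
 * Gather the CDB either from DMA memory or from the FIFO buffer.
 * Returns the number of command bytes collected, or 0 if the transfer
 * failed or was handed off to pseudo-DMA (pdma_cb finishes it later).
 */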
static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = s->rregs[ESP_TCLO];
        dmalen |= s->rregs[ESP_TCMID] << 8;
        dmalen |= s->rregs[ESP_TCHI] << 16;
        if (dmalen > buflen) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
        } else {
            memcpy(s->pdma_buf, buf, dmalen);
            set_pdma(s, PDMA, 0, dmalen);
            esp_raise_drq(s);
            return 0;
        }
    } else {
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    if (get_cmd_cb(s) < 0) {
        return 0;
    }
    return dmalen;
}

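/*
 * Submit a parsed CDB to the scsi layer; the low bits of busid carry
 * the LUN.  A non-zero return from scsi_req_enqueue() means data will
 * move, so the data-in/data-out phase bit is set before continuing.
 */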
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}

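/*
 * Selection handlers.  Each one parks itself in dma_cb while DMA is
 * disabled and installs a pdma_cb so that command bytes arriving via
 * the PDMA port can complete the selection later.
 */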
static void satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_cmd(s, get_pdma_buf(s) + s->pdma_start);
    }
}

static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_cmd(s, buf);
    }
}

static void s_without_satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_busid_cmd(s, get_pdma_buf(s) + s->pdma_start, 0);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}

static void satn_stop_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    s->cmdlen = s->pdma_cur - s->pdma_start;
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

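/*
 * Return the two-byte status/message-in response, via the DMA backend,
 * via pseudo-DMA, or by parking it in the FIFO for PIO reads.
 */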
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            set_pdma(s, TI, 0, 2);
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

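/*
 * Signal DMA completion: set terminal count, clear the transfer counter
 * registers and raise the completion interrupt.
 */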
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    s->rregs[ESP_TCHI] = 0;
    esp_raise_irq(s);
}

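/*
 * Invoked when a pseudo-DMA chunk has been moved by the CPU: account
 * for the transferred bytes, then either dispatch the gathered command
 * or continue the data transfer as esp_do_dma() would.
 */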
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = (s->ti_size < 0);
    int len = s->pdma_cur - s->pdma_start;

    if (s->do_cmd) {
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
}

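/*
 * Main transfer engine.  Moves up to dma_left bytes between the scsi
 * layer's buffer and the DMA backend; with no backend wired up it arms
 * pseudo-DMA instead and returns until the CPU has moved the bytes.
 */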
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    len = s->dma_left;
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        trace_esp_do_dma(s->cmdlen, len);
        assert(s->cmdlen <= sizeof(s->cmdbuf) &&
               len <= sizeof(s->cmdbuf) - s->cmdlen);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        } else {
            set_pdma(s, CMD, s->cmdlen, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    to_device = (s->ti_size < 0);
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            set_pdma(s, ASYNC, 0, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            set_pdma(s, ASYNC, 0, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
}

static void esp_report_command_complete(ESPState *s, uint32_t status)
{
    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_command_complete(SCSIRequest *req, uint32_t status,
                          size_t resid)
{
    ESPState *s = req->hba_private;

    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        /*
         * Defer handling command complete until the previous
         * interrupt has been handled.
         */
        trace_esp_command_complete_deferred();
        s->deferred_status = status;
        s->deferred_complete = true;
        return;
    }
    esp_report_command_complete(s, status);
}

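/*
 * scsi layer callback: a request buffer is ready.  Resume a DMA
 * transfer that was waiting for data, or raise the deferred completion
 * interrupt if this was the tail of a DMA transfer.
 */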
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;

    assert(!s->do_cmd);
    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /*
         * If this was the last part of a DMA transfer then the
         * completion interrupt is deferred to here.
         */
        esp_dma_done(s);
    }
}

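/*
 * CMD_TI (transfer information): clamp the programmed transfer count
 * to what the current phase can actually move, then start the DMA
 * engine or dispatch the command bytes accumulated in cmdbuf.
 */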
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd) {
        minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
    } else if (s->ti_size < 0) {
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    } else {
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    }
    trace_esp_handle_ti(minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t old_val;

    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        return old_val;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            return s->chip_id;
        }
        /* fall through */
    default:
        break;
    }
    return s->rregs[saddr];
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
            s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_pdma_needed(void *opaque)
{
    ESPState *s = opaque;

    return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
           s->dma_enabled;
}

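/* The PDMA state only matters when no DMA backend is wired up. */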
static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = esp_pdma_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(pdma_buf, ESPState),
        VMSTATE_INT32(pdma_origin, ESPState),
        VMSTATE_UINT32(pdma_len, ESPState),
        VMSTATE_UINT32(pdma_start, ESPState),
        VMSTATE_UINT32(pdma_cur, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 4,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_esp_pdma,
        NULL
    }
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(&sysbus->esp, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(&sysbus->esp, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

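/*
 * PDMA data port: the CPU moves transfer data through here one or two
 * bytes at a time.  Each access advances pdma_cur and updates the
 * chip's transfer counter; once pdma_len reaches zero, DRQ is dropped
 * and the registered pdma_cb completes the operation.
 */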
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = &sysbus->esp;
    uint32_t dmalen;
    uint8_t *buf = get_pdma_buf(s);

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0 || s->pdma_len == 0) {
        return;
    }
    switch (size) {
    case 1:
        buf[s->pdma_cur++] = val;
        s->pdma_len--;
        dmalen--;
        break;
    case 2:
        buf[s->pdma_cur++] = val >> 8;
        buf[s->pdma_cur++] = val;
        s->pdma_len -= 2;
        dmalen -= 2;
        break;
    }
    s->rregs[ESP_TCLO] = dmalen & 0xff;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
    if (s->pdma_len == 0 && s->pdma_cb) {
        esp_lower_drq(s);
        s->pdma_cb(s);
        s->pdma_cb = NULL;
    }
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = &sysbus->esp;
    uint8_t *buf = get_pdma_buf(s);
    uint64_t val = 0;

    if (s->pdma_len == 0) {
        return 0;
    }
    switch (size) {
    case 1:
        val = buf[s->pdma_cur++];
        s->pdma_len--;
        break;
    case 2:
        val = buf[s->pdma_cur++];
        val = (val << 8) | buf[s->pdma_cur++];
        s->pdma_len -= 2;
        break;
    }

    if (s->pdma_len == 0 && s->pdma_cb) {
        esp_lower_drq(s);
        s->pdma_cb(s);
        s->pdma_cb = NULL;
    }
    return val;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

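/*
 * Two inbound GPIO lines from the parent machine: line 0 resets the
 * chip, line 1 gates DMA availability.
 */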
static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = ESP(opaque);
    ESPState *s = &sysbus->esp;

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        /* Pass the ESPState, not the SysBusESPState wrapper. */
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = ESP(dev);
    ESPState *s = &sysbus->esp;

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 2);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = ESP(dev);

    esp_hard_reset(&sysbus->esp);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
}

type_init(esp_register_types)