/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysbus.h"
#include "scsi.h"
#include "esp.h"
#include "trace.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */

#define ESP_ERROR(fmt, ...) \
    do { printf("ESP ERROR: %s: " fmt, __func__ , ## __VA_ARGS__); } while (0)

#define ESP_REGS 16
#define TI_BUFSZ 16

typedef struct ESPState ESPState;

struct ESPState {
    SysBusDevice busdev;
    MemoryRegion iomem;
    uint8_t rregs[ESP_REGS];
    uint8_t wregs[ESP_REGS];
    qemu_irq irq;
    uint32_t it_shift;
    int32_t ti_size;
    uint32_t ti_rptr, ti_wptr;
    uint32_t status;
    uint32_t dma;
    uint8_t ti_buf[TI_BUFSZ];
    SCSIBus bus;
    SCSIDevice *current_dev;
    SCSIRequest *current_req;
    uint8_t cmdbuf[TI_BUFSZ];
    uint32_t cmdlen;
    uint32_t do_cmd;

    /* The amount of data left in the current DMA transfer. */
    uint32_t dma_left;
    /* The size of the current DMA transfer. Zero if no transfer is in
       progress. */
    uint32_t dma_counter;
    int dma_enabled;

    uint32_t async_len;
    uint8_t *async_buf;

    ESPDMAMemoryReadWriteFunc dma_memory_read;
    ESPDMAMemoryReadWriteFunc dma_memory_write;
    void *dma_opaque;
    void (*dma_cb)(ESPState *s);
};

#define ESP_TCLO 0x0
#define ESP_TCMID 0x1
#define ESP_FIFO 0x2
#define ESP_CMD 0x3
#define ESP_RSTAT 0x4
#define ESP_WBUSID 0x4
#define ESP_RINTR 0x5
#define ESP_WSEL 0x5
#define ESP_RSEQ 0x6
#define ESP_WSYNTP 0x6
#define ESP_RFLAGS 0x7
#define ESP_WSYNO 0x7
#define ESP_CFG1 0x8
#define ESP_RRES1 0x9
#define ESP_WCCF 0x9
#define ESP_RRES2 0xa
#define ESP_WTEST 0xa
#define ESP_CFG2 0xb
#define ESP_CFG3 0xc
#define ESP_RES3 0xd
#define ESP_TCHI 0xe
#define ESP_RES4 0xf

#define CMD_DMA 0x80
#define CMD_CMD 0x7f

#define CMD_NOP 0x00
#define CMD_FLUSH 0x01
#define CMD_RESET 0x02
#define CMD_BUSRESET 0x03
#define CMD_TI 0x10
#define CMD_ICCS 0x11
#define CMD_MSGACC 0x12
#define CMD_PAD 0x18
#define CMD_SATN 0x1a
#define CMD_SEL 0x41
#define CMD_SELATN 0x42
#define CMD_SELATNS 0x43
#define CMD_ENSEL 0x44

#define STAT_DO 0x00
#define STAT_DI 0x01
#define STAT_CD 0x02
#define STAT_ST 0x03
#define STAT_MO 0x06
#define STAT_MI 0x07
#define STAT_PIO_MASK 0x06

#define STAT_TC 0x10
#define STAT_PE 0x20
#define STAT_GE 0x40
#define STAT_INT 0x80

#define BUSID_DID 0x07

#define INTR_FC 0x08
#define INTR_BS 0x10
#define INTR_DC 0x20
#define INTR_RST 0x80

#define SEQ_0 0x0
#define SEQ_CD 0x4

#define CFG1_RESREPT 0x40

#define TCHI_FAS100A 0x4

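/* Latch STAT_INT and assert the interrupt line; a no-op if an interrupt
   is already pending. esp_lower_irq() below does the reverse. */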
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_dma_enable(void *opaque, int irq, int level)
{
    DeviceState *d = opaque;
    ESPState *s = container_of(d, ESPState, busdev.qdev);

    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

static void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

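/* Fetch a SCSI command from the initiator into buf, either via DMA or from
   the PIO transfer buffer, and look up the addressed target. Returns the
   command length, or 0 (with a disconnect interrupt) if no such target. */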
static uint32_t get_cmd(ESPState *s, uint8_t *buf)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
        s->dma_memory_read(s->dma_opaque, buf, dmalen);
    } else {
        dmalen = s->ti_size;
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return 0;
    }
    return dmalen;
}

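/* Queue the SCSI command in buf on the LUN encoded in busid and update the
   status, interrupt and sequence registers for the resulting data phase. */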
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, NULL);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}

static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (!s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    len = get_cmd(s, buf);
    if (len) {
        do_cmd(s, buf);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (!s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    len = get_cmd(s, buf);
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (!s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->cmdlen = get_cmd(s, s->cmdbuf);
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

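/* Return the status and message-in bytes of the completed command to the
   initiator, either through DMA or by staging them in the PIO buffer. */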
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 0;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    esp_raise_irq(s);
}

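/* Move up to dma_left bytes between guest memory and the current SCSI
   request's buffer (direction follows the sign of ti_size), or fetch the
   remainder of a queued command when do_cmd is set. */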
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    to_device = (s->ti_size < 0);
    len = s->dma_left;
    if (s->do_cmd) {
        trace_esp_do_dma(s->cmdlen, len);
        s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        s->dma_memory_read(s->dma_opaque, s->async_buf, len);
    } else {
        s->dma_memory_write(s->dma_opaque, s->async_buf, len);
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /* If there is still data to be read from the device then
           complete the DMA operation immediately. Otherwise defer
           until the scsi layer has completed. */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
}

static void esp_command_complete(SCSIRequest *req, uint32_t status)
{
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

static void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /* If this was the last part of a DMA transfer then the
           completion interrupt is deferred to here. */
        esp_dma_done(s);
    }
}

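/* Handle the Transfer Information command: latch the DMA counter and either
   start a DMA transfer or execute a command collected through the FIFO. */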
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd) {
        minlen = (dmalen < 32) ? dmalen : 32;
    } else if (s->ti_size < 0) {
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    } else {
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    }
    trace_esp_handle_ti(minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
}

static void esp_hard_reset(DeviceState *d)
{
    ESPState *s = container_of(d, ESPState, busdev.qdev);

    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->rregs[ESP_TCHI] = TCHI_FAS100A; /* Indicate fas100a */
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(DeviceState *d)
{
    ESPState *s = container_of(d, ESPState, busdev.qdev);

    qemu_irq_lower(s->irq);
    esp_hard_reset(d);
}

static void parent_esp_reset(void *opaque, int irq, int level)
{
    if (level) {
        esp_soft_reset(opaque);
    }
}

static void esp_gpio_demux(void *opaque, int irq, int level)
{
    switch (irq) {
    case 0:
        parent_esp_reset(opaque, irq, level);
        break;
    case 1:
        esp_dma_enable(opaque, irq, level);
        break;
    }
}

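/* MMIO read handler: a FIFO read pops one byte from the transfer buffer,
   and a RINTR read returns the latched interrupt bits, clears them and
   lowers the IRQ line; other registers are returned from rregs. */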
static uint64_t esp_mem_read(void *opaque, target_phys_addr_t addr,
                             unsigned size)
{
    ESPState *s = opaque;
    uint32_t saddr, old_val;

    saddr = addr >> s->it_shift;
    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if (s->ti_size > 0) {
            s->ti_size--;
            if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
                /* Data out. */
                ESP_ERROR("PIO data read not implemented\n");
                s->rregs[ESP_FIFO] = 0;
            } else {
                s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
            }
            esp_raise_irq(s);
        }
        if (s->ti_size == 0) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /* Clear sequence step, interrupt register and all status bits
           except TC */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);

        return old_val;
    default:
        break;
    }
    return s->rregs[saddr];
}

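/* MMIO write handler: FIFO writes queue command or transfer bytes, writes
   to the command register dispatch the individual ESP commands, and most
   other writable registers are simply latched into wregs. */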
static void esp_mem_write(void *opaque, target_phys_addr_t addr,
                          uint64_t val, unsigned size)
{
    ESPState *s = opaque;
    uint32_t saddr;

    saddr = addr >> s->it_shift;
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            s->cmdbuf[s->cmdlen++] = val & 0xff;
        } else if (s->ti_size == TI_BUFSZ - 1) {
            ESP_ERROR("fifo overrun\n");
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            /*s->ti_size = 0;*/
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(&s->busdev.qdev);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        default:
            ESP_ERROR("Unhandled ESP command (%2.2x)\n", (unsigned)val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    case ESP_CFG2 ... ESP_RES4:
        s->rregs[saddr] = val;
        break;
    default:
        ESP_ERROR("invalid write of 0x%02x at [0x%x]\n", (unsigned)val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, target_phys_addr_t addr,
                            unsigned size, bool is_write)
{
    return (size == 1) || (is_write && size == 4);
}

static const MemoryRegionOps esp_mem_ops = {
    .read = esp_mem_read,
    .write = esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 3,
    .minimum_version_id = 3,
    .minimum_version_id_old = 3,
    .fields = (VMStateField []) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_BUFFER(cmdbuf, ESPState),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

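/* Board-facing helper: create and map an "esp" sysbus device, wire up the
   provided DMA access callbacks and IRQ, and return the reset and
   DMA-enable GPIO inputs through *reset and *dma_enable. */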
void esp_init(target_phys_addr_t espaddr, int it_shift,
              ESPDMAMemoryReadWriteFunc dma_memory_read,
              ESPDMAMemoryReadWriteFunc dma_memory_write,
              void *dma_opaque, qemu_irq irq, qemu_irq *reset,
              qemu_irq *dma_enable)
{
    DeviceState *dev;
    SysBusDevice *s;
    ESPState *esp;

    dev = qdev_create(NULL, "esp");
    esp = DO_UPCAST(ESPState, busdev.qdev, dev);
    esp->dma_memory_read = dma_memory_read;
    esp->dma_memory_write = dma_memory_write;
    esp->dma_opaque = dma_opaque;
    esp->it_shift = it_shift;
    /* XXX for now until rc4030 has been changed to use DMA enable signal */
    esp->dma_enabled = 1;
    qdev_init_nofail(dev);
    s = sysbus_from_qdev(dev);
    sysbus_connect_irq(s, 0, irq);
    sysbus_mmio_map(s, 0, espaddr);
    *reset = qdev_get_gpio_in(dev, 0);
    *dma_enable = qdev_get_gpio_in(dev, 1);
}

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

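/* qdev initfn: set up the IRQ, MMIO region, GPIO inputs and the SCSI bus,
   then attach any drives given on the command line. */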
static int esp_init1(SysBusDevice *dev)
{
    ESPState *s = FROM_SYSBUS(ESPState, dev);

    sysbus_init_irq(dev, &s->irq);
    assert(s->it_shift != -1);

    memory_region_init_io(&s->iomem, &esp_mem_ops, s,
                          "esp", ESP_REGS << s->it_shift);
    sysbus_init_mmio(dev, &s->iomem);

    qdev_init_gpio_in(&dev->qdev, esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, &dev->qdev, &esp_scsi_info);
    return scsi_bus_legacy_handle_cmdline(&s->bus);
}

static SysBusDeviceInfo esp_info = {
    .init = esp_init1,
    .qdev.name = "esp",
    .qdev.size = sizeof(ESPState),
    .qdev.vmsd = &vmstate_esp,
    .qdev.reset = esp_hard_reset,
    .qdev.props = (Property[]) {
        {.name = NULL}
    }
};

static void esp_register_devices(void)
{
    sysbus_register_withprop(&esp_info);
}

device_init(esp_register_devices)