/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "vl.h"

/* debug ESP card */
//#define DEBUG_ESP

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O), also
 * produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */

#ifdef DEBUG_ESP
#define DPRINTF(fmt, args...) \
    do { printf("ESP: " fmt , ##args); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif

#define ESP_MASK 0x3f
#define ESP_REGS 16
#define ESP_SIZE (ESP_REGS * 4)
#define TI_BUFSZ 32
/* The HBA is ID 7, so for simplicity limit to 7 devices. */
#define ESP_MAX_DEVS 7

typedef struct ESPState ESPState;

struct ESPState {
    qemu_irq irq;
    BlockDriverState **bd;
    uint8_t rregs[ESP_REGS];
    uint8_t wregs[ESP_REGS];
    int32_t ti_size;
    uint32_t ti_rptr, ti_wptr;
    uint8_t ti_buf[TI_BUFSZ];
    int sense;
    int dma;
    SCSIDevice *scsi_dev[MAX_DISKS];
    SCSIDevice *current_dev;
    uint8_t cmdbuf[TI_BUFSZ];
    int cmdlen;
    int do_cmd;

    /* The amount of data left in the current DMA transfer. */
    uint32_t dma_left;
    /* The size of the current DMA transfer. Zero if no transfer is in
       progress. */
    uint32_t dma_counter;
    uint8_t *async_buf;
    uint32_t async_len;
    void *dma_opaque;
};

#define STAT_DO 0x00
#define STAT_DI 0x01
#define STAT_CD 0x02
#define STAT_ST 0x03
#define STAT_MI 0x06
#define STAT_MO 0x07

#define STAT_TC 0x10
#define STAT_PE 0x20
#define STAT_GE 0x40
#define STAT_IN 0x80

#define INTR_FC 0x08
#define INTR_BS 0x10
#define INTR_DC 0x20
#define INTR_RST 0x80

#define SEQ_0 0x0
#define SEQ_CD 0x4

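/* Fetch the command bytes for the currently selected target, either via DMA
   or from the FIFO buffer, cancel any request still in flight and reset the
   transfer state.  Returns the command length, or 0 if no device answers at
   the target ID. */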
static int get_cmd(ESPState *s, uint8_t *buf)
{
    uint32_t dmalen;
    int target;

    dmalen = s->rregs[0] | (s->rregs[1] << 8);
    target = s->wregs[4] & 7;
    DPRINTF("get_cmd: len %d target %d\n", dmalen, target);
    if (s->dma) {
        espdma_memory_read(s->dma_opaque, buf, dmalen);
    } else {
        buf[0] = 0;
        memcpy(&buf[1], s->ti_buf, dmalen);
        dmalen++;
    }

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_dev) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_cancel_io(s->current_dev, 0);
        s->async_len = 0;
    }

    if (target >= MAX_DISKS || !s->scsi_dev[target]) {
        // No such drive
        s->rregs[4] = STAT_IN;
        s->rregs[5] = INTR_DC;
        s->rregs[6] = SEQ_0;
        qemu_irq_raise(s->irq);
        return 0;
    }
    s->current_dev = s->scsi_dev[target];
    return dmalen;
}

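/* Hand a fully assembled command to the SCSI layer, start the data-in or
   data-out phase it implies and raise the completion interrupt. */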
static void do_cmd(ESPState *s, uint8_t *buf)
{
    int32_t datalen;
    int lun;

    DPRINTF("do_cmd: busid 0x%x\n", buf[0]);
    lun = buf[0] & 7;
    datalen = scsi_send_command(s->current_dev, 0, &buf[1], lun);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[4] = STAT_IN | STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[4] |= STAT_DI;
            scsi_read_data(s->current_dev, 0);
        } else {
            s->rregs[4] |= STAT_DO;
            scsi_write_data(s->current_dev, 0);
        }
    }
    s->rregs[5] = INTR_BS | INTR_FC;
    s->rregs[6] = SEQ_CD;
    qemu_irq_raise(s->irq);
}

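/* Select with ATN: fetch the command and, if one was transferred, execute it
   immediately. */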
static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    len = get_cmd(s, buf);
    if (len)
        do_cmd(s, buf);
}

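/* Select with ATN and stop: buffer the command bytes and mark them as pending
   (do_cmd) so a later Transfer Information command can execute them. */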
static void handle_satn_stop(ESPState *s)
{
    s->cmdlen = get_cmd(s, s->cmdbuf);
    if (s->cmdlen) {
        DPRINTF("Set ATN & Stop: cmdlen %d\n", s->cmdlen);
        s->do_cmd = 1;
        s->rregs[4] = STAT_IN | STAT_TC | STAT_CD;
        s->rregs[5] = INTR_BS | INTR_FC;
        s->rregs[6] = SEQ_CD;
        qemu_irq_raise(s->irq);
    }
}

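/* Return the two-byte status/message response (sense code followed by zero),
   either via DMA or through the FIFO. */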
static void write_response(ESPState *s)
{
    DPRINTF("Transfer status (sense=%d)\n", s->sense);
    s->ti_buf[0] = s->sense;
    s->ti_buf[1] = 0;
    if (s->dma) {
        espdma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[4] = STAT_IN | STAT_TC | STAT_ST;
        s->rregs[5] = INTR_BS | INTR_FC;
        s->rregs[6] = SEQ_CD;
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 0;
        s->rregs[7] = 2;
    }
    qemu_irq_raise(s->irq);
}

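/* Complete a DMA transfer: report terminal count, raise the bus service
   interrupt and clear the transfer counter registers. */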
static void esp_dma_done(ESPState *s)
{
    s->rregs[4] |= STAT_IN | STAT_TC;
    s->rregs[5] = INTR_BS;
    s->rregs[6] = 0;
    s->rregs[7] = 0;
    s->rregs[0] = 0;
    s->rregs[1] = 0;
    qemu_irq_raise(s->irq);
}

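/* Move data between the DMA engine and the SCSI layer's buffer.  A pending
   command (do_cmd) is completed first; otherwise transfer as much as both the
   remaining DMA count and the current SCSI buffer allow. */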
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    to_device = (s->ti_size < 0);
    len = s->dma_left;
    if (s->do_cmd) {
        DPRINTF("command len %d + %d\n", s->cmdlen, len);
        espdma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        espdma_memory_read(s->dma_opaque, s->async_buf, len);
    } else {
        espdma_memory_write(s->dma_opaque, s->async_buf, len);
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device)
        s->ti_size += len;
    else
        s->ti_size -= len;
    if (s->async_len == 0) {
        if (to_device) {
            // ti_size is negative
            scsi_write_data(s->current_dev, 0);
        } else {
            scsi_read_data(s->current_dev, 0);
            /* If there is still data to be read from the device then
               complete the DMA operation immediately. Otherwise defer
               until the scsi layer has completed. */
            if (s->dma_left == 0 && s->ti_size > 0) {
                esp_dma_done(s);
            }
        }
    } else {
        /* Partially filled a scsi buffer. Complete immediately. */
        esp_dma_done(s);
    }
}

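/* Callback from the SCSI layer, invoked both when a request has finished
   (SCSI_REASON_DONE) and when another chunk of data is ready for transfer. */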
static void esp_command_complete(void *opaque, int reason, uint32_t tag,
                                 uint32_t arg)
{
    ESPState *s = (ESPState *)opaque;

    if (reason == SCSI_REASON_DONE) {
        DPRINTF("SCSI Command complete\n");
        if (s->ti_size != 0)
            DPRINTF("SCSI command completed unexpectedly\n");
        s->ti_size = 0;
        s->dma_left = 0;
        s->async_len = 0;
        if (arg)
            DPRINTF("Command failed\n");
        s->sense = arg;
        s->rregs[4] = STAT_ST;
        esp_dma_done(s);
        s->current_dev = NULL;
    } else {
        DPRINTF("transfer %d/%d\n", s->dma_left, s->ti_size);
        s->async_len = arg;
        s->async_buf = scsi_get_buf(s->current_dev, 0);
        if (s->dma_left) {
            esp_do_dma(s);
        } else if (s->dma_counter != 0 && s->ti_size <= 0) {
            /* If this was the last part of a DMA transfer then the
               completion interrupt is deferred to here. */
            esp_dma_done(s);
        }
    }
}

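/* Transfer Information command: compute the transfer length from the counter
   registers (0 means 64k) and either start a DMA transfer or execute a
   buffered command. */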
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    dmalen = s->rregs[0] | (s->rregs[1] << 8);
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd)
        minlen = (dmalen < 32) ? dmalen : 32;
    else if (s->ti_size < 0)
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    else
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    DPRINTF("Transfer Information len %d\n", minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[4] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        DPRINTF("command len %d\n", s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
}

static void esp_reset(void *opaque)
{
    ESPState *s = opaque;

    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->rregs[0x0e] = 0x4; // Indicate fas100a
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
}

static void parent_esp_reset(void *opaque, int irq, int level)
{
    if (level)
        esp_reset(opaque);
}

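/* Register read.  Reading the FIFO (register 2) consumes a byte from ti_buf;
   reading the interrupt register (5) clears the interrupt and error status
   bits and lowers the IRQ. */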
static uint32_t esp_mem_readb(void *opaque, target_phys_addr_t addr)
{
    ESPState *s = opaque;
    uint32_t saddr;

    saddr = (addr & ESP_MASK) >> 2;
    DPRINTF("read reg[%d]: 0x%2.2x\n", saddr, s->rregs[saddr]);
    switch (saddr) {
    case 2:
        // FIFO
        if (s->ti_size > 0) {
            s->ti_size--;
            if ((s->rregs[4] & 6) == 0) {
                /* Data in/out. */
                fprintf(stderr, "esp: PIO data read not implemented\n");
                s->rregs[2] = 0;
            } else {
                s->rregs[2] = s->ti_buf[s->ti_rptr++];
            }
            qemu_irq_raise(s->irq);
        }
        if (s->ti_size == 0) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case 5:
        // interrupt
        // Clear interrupt/error status bits
        s->rregs[4] &= ~(STAT_IN | STAT_GE | STAT_PE);
        qemu_irq_lower(s->irq);
        break;
    default:
        break;
    }
    return s->rregs[saddr];
}

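/* Register write.  Writes to register 3 are decoded as chip commands; bit 7
   of the command selects DMA mode and reloads the transfer counter. */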
static void esp_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ESPState *s = opaque;
    uint32_t saddr;

    saddr = (addr & ESP_MASK) >> 2;
    DPRINTF("write reg[%d]: 0x%2.2x -> 0x%2.2x\n", saddr, s->wregs[saddr], val);
    switch (saddr) {
    case 0:
    case 1:
        s->rregs[4] &= ~STAT_TC;
        break;
    case 2:
        // FIFO
        if (s->do_cmd) {
            s->cmdbuf[s->cmdlen++] = val & 0xff;
        } else if ((s->rregs[4] & 6) == 0) {
            uint8_t buf;
            buf = val & 0xff;
            s->ti_size--;
            fprintf(stderr, "esp: PIO data write not implemented\n");
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case 3:
        s->rregs[saddr] = val;
        // Command
        if (val & 0x80) {
            s->dma = 1;
            /* Reload DMA counter. */
            s->rregs[0] = s->wregs[0];
            s->rregs[1] = s->wregs[1];
        } else {
            s->dma = 0;
        }
        switch (val & 0x7f) {
        case 0:
            DPRINTF("NOP (%2.2x)\n", val);
            break;
        case 1:
            DPRINTF("Flush FIFO (%2.2x)\n", val);
            //s->ti_size = 0;
            s->rregs[5] = INTR_FC;
            s->rregs[6] = 0;
            break;
        case 2:
            DPRINTF("Chip reset (%2.2x)\n", val);
            esp_reset(s);
            break;
        case 3:
            DPRINTF("Bus reset (%2.2x)\n", val);
            s->rregs[5] = INTR_RST;
            if (!(s->wregs[8] & 0x40)) {
                qemu_irq_raise(s->irq);
            }
            break;
        case 0x10:
            handle_ti(s);
            break;
        case 0x11:
            DPRINTF("Initiator Command Complete Sequence (%2.2x)\n", val);
            write_response(s);
            break;
        case 0x12:
            DPRINTF("Message Accepted (%2.2x)\n", val);
            write_response(s);
            s->rregs[5] = INTR_DC;
            s->rregs[6] = 0;
            break;
        case 0x1a:
            DPRINTF("Set ATN (%2.2x)\n", val);
            break;
        case 0x42:
            DPRINTF("Set ATN (%2.2x)\n", val);
            handle_satn(s);
            break;
        case 0x43:
            DPRINTF("Set ATN & stop (%2.2x)\n", val);
            handle_satn_stop(s);
            break;
        case 0x44:
            DPRINTF("Enable selection (%2.2x)\n", val);
            break;
        default:
            DPRINTF("Unhandled ESP command (%2.2x)\n", val);
            break;
        }
        break;
    case 4 ... 7:
        break;
    case 8:
        s->rregs[saddr] = val;
        break;
    case 9 ... 10:
        break;
    case 11:
        s->rregs[saddr] = val & 0x15;
        break;
    case 12 ... 15:
        s->rregs[saddr] = val;
        break;
    default:
        break;
    }
    s->wregs[saddr] = val;
}

static CPUReadMemoryFunc *esp_mem_read[3] = {
    esp_mem_readb,
    esp_mem_readb,
    esp_mem_readb,
};

static CPUWriteMemoryFunc *esp_mem_write[3] = {
    esp_mem_writeb,
    esp_mem_writeb,
    esp_mem_writeb,
};

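/* Savevm/loadvm support, format version 3 (registered in esp_init). */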
static void esp_save(QEMUFile *f, void *opaque)
{
    ESPState *s = opaque;

    qemu_put_buffer(f, s->rregs, ESP_REGS);
    qemu_put_buffer(f, s->wregs, ESP_REGS);
    qemu_put_be32s(f, &s->ti_size);
    qemu_put_be32s(f, &s->ti_rptr);
    qemu_put_be32s(f, &s->ti_wptr);
    qemu_put_buffer(f, s->ti_buf, TI_BUFSZ);
    qemu_put_be32s(f, &s->sense);
    qemu_put_be32s(f, &s->dma);
    qemu_put_buffer(f, s->cmdbuf, TI_BUFSZ);
    qemu_put_be32s(f, &s->cmdlen);
    qemu_put_be32s(f, &s->do_cmd);
    qemu_put_be32s(f, &s->dma_left);
    // There should be no transfers in progress, so dma_counter is not saved
}

static int esp_load(QEMUFile *f, void *opaque, int version_id)
{
    ESPState *s = opaque;

    if (version_id != 3)
        return -EINVAL; // Cannot emulate 2

    qemu_get_buffer(f, s->rregs, ESP_REGS);
    qemu_get_buffer(f, s->wregs, ESP_REGS);
    qemu_get_be32s(f, &s->ti_size);
    qemu_get_be32s(f, &s->ti_rptr);
    qemu_get_be32s(f, &s->ti_wptr);
    qemu_get_buffer(f, s->ti_buf, TI_BUFSZ);
    qemu_get_be32s(f, &s->sense);
    qemu_get_be32s(f, &s->dma);
    qemu_get_buffer(f, s->cmdbuf, TI_BUFSZ);
    qemu_get_be32s(f, &s->cmdlen);
    qemu_get_be32s(f, &s->do_cmd);
    qemu_get_be32s(f, &s->dma_left);

    return 0;
}

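/* Attach a block device as SCSI target 'id'; a negative id selects the first
   free slot.  Any device already present at that id is destroyed first. */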
void esp_scsi_attach(void *opaque, BlockDriverState *bd, int id)
{
    ESPState *s = (ESPState *)opaque;

    if (id < 0) {
        for (id = 0; id < ESP_MAX_DEVS; id++) {
            if (s->scsi_dev[id] == NULL)
                break;
        }
    }
    if (id >= ESP_MAX_DEVS) {
        DPRINTF("Bad Device ID %d\n", id);
        return;
    }
    if (s->scsi_dev[id]) {
        DPRINTF("Destroying device %d\n", id);
        scsi_disk_destroy(s->scsi_dev[id]);
    }
    DPRINTF("Attaching block device %d\n", id);
    /* Command queueing is not implemented. */
    s->scsi_dev[id] = scsi_disk_init(bd, 0, esp_command_complete, s);
}

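/* Create an ESP controller, map its registers at 'espaddr', register
   save/load and reset handlers and hand back an IRQ that resets the chip
   when raised. */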
void *esp_init(BlockDriverState **bd, target_phys_addr_t espaddr,
               void *dma_opaque, qemu_irq irq, qemu_irq *reset)
{
    ESPState *s;
    int esp_io_memory;

    s = qemu_mallocz(sizeof(ESPState));
    if (!s)
        return NULL;

    s->bd = bd;
    s->irq = irq;
    s->dma_opaque = dma_opaque;

    esp_io_memory = cpu_register_io_memory(0, esp_mem_read, esp_mem_write, s);
    cpu_register_physical_memory(espaddr, ESP_SIZE, esp_io_memory);

    esp_reset(s);

    register_savevm("esp", espaddr, 3, esp_save, esp_load, s);
    qemu_register_reset(esp_reset, s);

    *reset = *qemu_allocate_irqs(parent_esp_reset, s, 1);

    return s;
}