/*
 * QEMU PowerMac CUDA device support
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/ppc/mac.h"
#include "hw/input/adb.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "qemu/cutils.h"

/* XXX: implement all timer modes */

/* debug CUDA */
//#define DEBUG_CUDA

/* debug CUDA packets */
//#define DEBUG_CUDA_PACKET

#ifdef DEBUG_CUDA
#define CUDA_DPRINTF(fmt, ...)                                  \
    do { printf("CUDA: " fmt , ## __VA_ARGS__); } while (0)
#else
#define CUDA_DPRINTF(fmt, ...)
#endif

/* Bits in B data register: all active low */
#define TREQ            0x08    /* Transfer request (input) */
#define TACK            0x10    /* Transfer acknowledge (output) */
#define TIP             0x20    /* Transfer in progress (output) */

/* Bits in ACR */
#define SR_CTRL         0x1c    /* Shift register control bits */
#define SR_EXT          0x0c    /* Shift on external clock */
#define SR_OUT          0x10    /* Shift out if 1 */

/* Bits in IFR and IER */
#define IER_SET         0x80    /* set bits in IER */
#define IER_CLR         0       /* clear bits in IER */
#define SR_INT          0x04    /* Shift register full/empty */
#define SR_DATA_INT     0x08
#define SR_CLOCK_INT    0x10
#define T1_INT          0x40    /* Timer 1 interrupt */
#define T2_INT          0x20    /* Timer 2 interrupt */

/* Bits in ACR */
#define T1MODE          0xc0    /* Timer 1 mode */
#define T1MODE_CONT     0x40    /* continuous interrupts */

/* commands (1st byte) */
#define ADB_PACKET      0
#define CUDA_PACKET     1
#define ERROR_PACKET    2
#define TIMER_PACKET    3
#define POWER_PACKET    4
#define MACIIC_PACKET   5
#define PMU_PACKET      6


/* CUDA commands (2nd byte) */
#define CUDA_WARM_START            0x0
#define CUDA_AUTOPOLL              0x1
#define CUDA_GET_6805_ADDR         0x2
#define CUDA_GET_TIME              0x3
#define CUDA_GET_PRAM              0x7
#define CUDA_SET_6805_ADDR         0x8
#define CUDA_SET_TIME              0x9
#define CUDA_POWERDOWN             0xa
#define CUDA_POWERUP_TIME          0xb
#define CUDA_SET_PRAM              0xc
#define CUDA_MS_RESET              0xd
#define CUDA_SEND_DFAC             0xe
#define CUDA_BATTERY_SWAP_SENSE    0x10
#define CUDA_RESET_SYSTEM          0x11
#define CUDA_SET_IPL               0x12
#define CUDA_FILE_SERVER_FLAG      0x13
#define CUDA_SET_AUTO_RATE         0x14
#define CUDA_GET_AUTO_RATE         0x16
#define CUDA_SET_DEVICE_LIST       0x19
#define CUDA_GET_DEVICE_LIST       0x1a
#define CUDA_SET_ONE_SECOND_MODE   0x1b
#define CUDA_SET_POWER_MESSAGES    0x21
#define CUDA_GET_SET_IIC           0x22
#define CUDA_WAKEUP                0x23
#define CUDA_TIMER_TICKLE          0x24
#define CUDA_COMBINED_FORMAT_IIC   0x25

#define CUDA_TIMER_FREQ (4700000 / 6)

/* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */
#define RTC_OFFSET 2082844800

/* CUDA registers */
#define CUDA_REG_B       0x00
#define CUDA_REG_A       0x01
#define CUDA_REG_DIRB    0x02
#define CUDA_REG_DIRA    0x03
#define CUDA_REG_T1CL    0x04
#define CUDA_REG_T1CH    0x05
#define CUDA_REG_T1LL    0x06
#define CUDA_REG_T1LH    0x07
#define CUDA_REG_T2CL    0x08
#define CUDA_REG_T2CH    0x09
#define CUDA_REG_SR      0x0a
#define CUDA_REG_ACR     0x0b
#define CUDA_REG_PCR     0x0c
#define CUDA_REG_IFR     0x0d
#define CUDA_REG_IER     0x0e
#define CUDA_REG_ANH     0x0f

static void cuda_update(CUDAState *s);
static void cuda_receive_packet_from_host(CUDAState *s,
                                          const uint8_t *data, int len);
static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
                              int64_t current_time);

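/* Reflect the enabled and pending interrupt sources (SR, T1, T2) on the VIA IRQ line */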
static void cuda_update_irq(CUDAState *s)
{
    if (s->ifr & s->ier & (SR_INT | T1_INT | T2_INT)) {
        qemu_irq_raise(s->irq);
    } else {
        qemu_irq_lower(s->irq);
    }
}

static uint64_t get_tb(uint64_t time, uint64_t freq)
{
    return muldiv64(time, freq, NANOSECONDS_PER_SECOND);
}

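/*
 * Derive the current 16-bit down-counter value from the virtual clock, the
 * timebase recorded at the last counter load and the timer frequency.
 */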
static unsigned int get_counter(CUDATimer *ti)
{
    int64_t d;
    unsigned int counter;
    uint64_t tb_diff;
    uint64_t current_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    /* Reverse of the tb calculation algorithm that Mac OS X uses on bootup. */
    tb_diff = get_tb(current_time, ti->frequency) - ti->load_time;
    d = (tb_diff * 0xBF401675E5DULL) / (ti->frequency << 24);

    if (ti->index == 0) {
        /* the timer goes down from latch to -1 (period of latch + 2) */
        if (d <= (ti->counter_value + 1)) {
            counter = (ti->counter_value - d) & 0xffff;
        } else {
            counter = (d - (ti->counter_value + 1)) % (ti->latch + 2);
            counter = (ti->latch - counter) & 0xffff;
        }
    } else {
        counter = (ti->counter_value - d) & 0xffff;
    }
    return counter;
}

static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
{
    CUDA_DPRINTF("T%d.counter=%d\n", 1 + ti->index, val);
    ti->load_time = get_tb(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           s->frequency);
    ti->counter_value = val;
    cuda_timer_update(s, ti, ti->load_time);
}

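/*
 * Compute the absolute virtual-clock deadline (in ns) at which timer 1 next
 * underflows, i.e. when the next T1 interrupt should be raised.
 */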
static int64_t get_next_irq_time(CUDATimer *s, int64_t current_time)
{
    int64_t d, next_time;
    unsigned int counter;

    /* current counter value */
    d = muldiv64(current_time - s->load_time,
                 CUDA_TIMER_FREQ, NANOSECONDS_PER_SECOND);
    /* the timer goes down from latch to -1 (period of latch + 2) */
    if (d <= (s->counter_value + 1)) {
        counter = (s->counter_value - d) & 0xffff;
    } else {
        counter = (d - (s->counter_value + 1)) % (s->latch + 2);
        counter = (s->latch - counter) & 0xffff;
    }

    /* Note: we assume the IRQ is raised when the counter reaches 0 */
    if (counter == 0xffff) {
        next_time = d + s->latch + 1;
    } else if (counter == 0) {
        next_time = d + s->latch + 2;
    } else {
        next_time = d + counter;
    }
    CUDA_DPRINTF("latch=%d counter=%" PRId64 " delta_next=%" PRId64 "\n",
                 s->latch, d, next_time - d);
    next_time = muldiv64(next_time, NANOSECONDS_PER_SECOND, CUDA_TIMER_FREQ) +
        s->load_time;
    if (next_time <= current_time)
        next_time = current_time + 1;
    return next_time;
}

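/*
 * (Re)arm the QEMU timer behind a VIA timer; timer 1 is only kept running
 * when the ACR selects continuous interrupt mode.
 */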
static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
                              int64_t current_time)
{
    if (!ti->timer)
        return;
    if (ti->index == 0 && (s->acr & T1MODE) != T1MODE_CONT) {
        timer_del(ti->timer);
    } else {
        ti->next_irq_time = get_next_irq_time(ti, current_time);
        timer_mod(ti->timer, ti->next_irq_time);
    }
}

static void cuda_timer1(void *opaque)
{
    CUDAState *s = opaque;
    CUDATimer *ti = &s->timers[0];

    cuda_timer_update(s, ti, ti->next_irq_time);
    s->ifr |= T1_INT;
    cuda_update_irq(s);
}

static void cuda_timer2(void *opaque)
{
    CUDAState *s = opaque;
    CUDATimer *ti = &s->timers[1];

    cuda_timer_update(s, ti, ti->next_irq_time);
    s->ifr |= T2_INT;
    cuda_update_irq(s);
}

static void cuda_set_sr_int(void *opaque)
{
    CUDAState *s = opaque;

    CUDA_DPRINTF("CUDA: %s:%d\n", __func__, __LINE__);
    s->ifr |= SR_INT;
    cuda_update_irq(s);
}

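/*
 * Raise the shift-register interrupt after a short (~300 us) delay rather
 * than immediately; presumably this models the response latency of the real
 * CUDA microcontroller, which Mac OS appears to rely on (see the DIRB check
 * below, which bypasses the delay for other guests).
 */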
static void cuda_delay_set_sr_int(CUDAState *s)
{
    int64_t expire;

    if (s->dirb == 0xff) {
        /* Not in Mac OS, fire the IRQ directly */
        cuda_set_sr_int(s);
        return;
    }

    CUDA_DPRINTF("CUDA: %s:%d\n", __func__, __LINE__);

    expire = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 300 * SCALE_US;
    timer_mod(s->sr_delay_timer, expire);
}

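/*
 * MMIO access: the 16 VIA registers are spaced 0x200 bytes apart inside the
 * 0x2000-byte window, hence the (addr >> 9) & 0xf decode below.
 */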
static uint32_t cuda_readb(void *opaque, hwaddr addr)
{
    CUDAState *s = opaque;
    uint32_t val;

    addr = (addr >> 9) & 0xf;
    switch(addr) {
    case CUDA_REG_B:
        val = s->b;
        break;
    case CUDA_REG_A:
        val = s->a;
        break;
    case CUDA_REG_DIRB:
        val = s->dirb;
        break;
    case CUDA_REG_DIRA:
        val = s->dira;
        break;
    case CUDA_REG_T1CL:
        val = get_counter(&s->timers[0]) & 0xff;
        s->ifr &= ~T1_INT;
        cuda_update_irq(s);
        break;
    case CUDA_REG_T1CH:
        val = get_counter(&s->timers[0]) >> 8;
        cuda_update_irq(s);
        break;
    case CUDA_REG_T1LL:
        val = s->timers[0].latch & 0xff;
        break;
    case CUDA_REG_T1LH:
        /* XXX: check this */
        val = (s->timers[0].latch >> 8) & 0xff;
        break;
    case CUDA_REG_T2CL:
        val = get_counter(&s->timers[1]) & 0xff;
        s->ifr &= ~T2_INT;
        cuda_update_irq(s);
        break;
    case CUDA_REG_T2CH:
        val = get_counter(&s->timers[1]) >> 8;
        break;
    case CUDA_REG_SR:
        val = s->sr;
        s->ifr &= ~(SR_INT | SR_CLOCK_INT | SR_DATA_INT);
        cuda_update_irq(s);
        break;
    case CUDA_REG_ACR:
        val = s->acr;
        break;
    case CUDA_REG_PCR:
        val = s->pcr;
        break;
    case CUDA_REG_IFR:
        val = s->ifr;
        if (s->ifr & s->ier) {
            val |= 0x80;
        }
        break;
    case CUDA_REG_IER:
        val = s->ier | 0x80;
        break;
    default:
    case CUDA_REG_ANH:
        val = s->anh;
        break;
    }
    if (addr != CUDA_REG_IFR || val != 0) {
        CUDA_DPRINTF("read: reg=0x%x val=%02x\n", (int)addr, val);
    }

    return val;
}

static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
{
    CUDAState *s = opaque;

    addr = (addr >> 9) & 0xf;
    CUDA_DPRINTF("write: reg=0x%x val=%02x\n", (int)addr, val);

    switch(addr) {
    case CUDA_REG_B:
        s->b = val;
        cuda_update(s);
        break;
    case CUDA_REG_A:
        s->a = val;
        break;
    case CUDA_REG_DIRB:
        s->dirb = val;
        break;
    case CUDA_REG_DIRA:
        s->dira = val;
        break;
    case CUDA_REG_T1CL:
        s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
        cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        break;
    case CUDA_REG_T1CH:
        s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
        s->ifr &= ~T1_INT;
        set_counter(s, &s->timers[0], s->timers[0].latch);
        break;
    case CUDA_REG_T1LL:
        s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
        cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        break;
    case CUDA_REG_T1LH:
        s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
        s->ifr &= ~T1_INT;
        cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        break;
    case CUDA_REG_T2CL:
        s->timers[1].latch = (s->timers[1].latch & 0xff00) | val;
        break;
    case CUDA_REG_T2CH:
        /* To ensure T2 generates an interrupt on zero crossing with the
           common timer code, write the value directly from the latch to
           the counter */
        s->timers[1].latch = (s->timers[1].latch & 0xff) | (val << 8);
        s->ifr &= ~T2_INT;
        set_counter(s, &s->timers[1], s->timers[1].latch);
        break;
    case CUDA_REG_SR:
        s->sr = val;
        break;
    case CUDA_REG_ACR:
        s->acr = val;
        cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        cuda_update(s);
        break;
    case CUDA_REG_PCR:
        s->pcr = val;
        break;
    case CUDA_REG_IFR:
        /* reset bits */
        s->ifr &= ~val;
        cuda_update_irq(s);
        break;
    case CUDA_REG_IER:
        if (val & IER_SET) {
            /* set bits */
            s->ier |= val & 0x7f;
        } else {
            /* reset bits */
            s->ier &= ~val;
        }
        cuda_update_irq(s);
        break;
    default:
    case CUDA_REG_ANH:
        s->anh = val;
        break;
    }
}

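/*
 * Run the port B handshake state machine after a write to register B or ACR:
 * TIP low means a transfer is in progress, each TACK toggle moves one byte
 * through the shift register, and TREQ is driven low while CUDA still has
 * response bytes queued for the host.
 */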
/* NOTE: TIP and TREQ are negated */
static void cuda_update(CUDAState *s)
{
    int packet_received, len;

    packet_received = 0;
    if (!(s->b & TIP)) {
        /* transfer requested from host */

        if (s->acr & SR_OUT) {
            /* data output */
            if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
                if (s->data_out_index < sizeof(s->data_out)) {
                    CUDA_DPRINTF("send: %02x\n", s->sr);
                    s->data_out[s->data_out_index++] = s->sr;
                    cuda_delay_set_sr_int(s);
                }
            }
        } else {
            if (s->data_in_index < s->data_in_size) {
                /* data input */
                if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
                    s->sr = s->data_in[s->data_in_index++];
                    CUDA_DPRINTF("recv: %02x\n", s->sr);
                    /* indicate end of transfer */
                    if (s->data_in_index >= s->data_in_size) {
                        s->b = (s->b | TREQ);
                    }
                    cuda_delay_set_sr_int(s);
                }
            }
        }
    } else {
        /* no transfer requested: handle sync case */
        if ((s->last_b & TIP) && (s->b & TACK) != (s->last_b & TACK)) {
            /* update TREQ state each time TACK changes state */
            if (s->b & TACK)
                s->b = (s->b | TREQ);
            else
                s->b = (s->b & ~TREQ);
            cuda_delay_set_sr_int(s);
        } else {
            if (!(s->last_b & TIP)) {
                /* handle end of host-to-CUDA transfer */
                packet_received = (s->data_out_index > 0);
                /* always an IRQ at the end of transfer */
                cuda_delay_set_sr_int(s);
            }
            /* signal if there is data to read */
            if (s->data_in_index < s->data_in_size) {
                s->b = (s->b & ~TREQ);
            }
        }
    }

    s->last_acr = s->acr;
    s->last_b = s->b;

    /* NOTE: cuda_receive_packet_from_host() can call cuda_update()
       recursively */
    if (packet_received) {
        len = s->data_out_index;
        s->data_out_index = 0;
        cuda_receive_packet_from_host(s, s->data_out, len);
    }
}

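/* Queue a response in the data_in buffer and notify the guest via SR_INT */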
static void cuda_send_packet_to_host(CUDAState *s,
                                     const uint8_t *data, int len)
{
#ifdef DEBUG_CUDA_PACKET
    {
        int i;
        printf("cuda_send_packet_to_host:\n");
        for(i = 0; i < len; i++)
            printf(" %02x", data[i]);
        printf("\n");
    }
#endif
    memcpy(s->data_in, data, len);
    s->data_in_size = len;
    s->data_in_index = 0;
    cuda_update(s);
    cuda_delay_set_sr_int(s);
}

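/*
 * Autopoll timer callback: poll the ADB bus and, if a device returned data,
 * forward it to the host as an ADB_PACKET with the "polled data" flag set,
 * then rearm the timer for the next poll interval.
 */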
static void cuda_adb_poll(void *opaque)
{
    CUDAState *s = opaque;
    uint8_t obuf[ADB_MAX_OUT_LEN + 2];
    int olen;

    olen = adb_poll(&s->adb_bus, obuf + 2, s->adb_poll_mask);
    if (olen > 0) {
        obuf[0] = ADB_PACKET;
        obuf[1] = 0x40; /* polled data */
        cuda_send_packet_to_host(s, obuf, olen + 2);
    }
    timer_mod(s->adb_poll_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              (NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms)));
}

/* description of commands */
typedef struct CudaCommand {
    uint8_t command;
    const char *name;
    bool (*handler)(CUDAState *s,
                    const uint8_t *in_args, int in_len,
                    uint8_t *out_args, int *out_len);
} CudaCommand;

static bool cuda_cmd_autopoll(CUDAState *s,
                              const uint8_t *in_data, int in_len,
                              uint8_t *out_data, int *out_len)
{
    int autopoll;

    if (in_len != 1) {
        return false;
    }

    autopoll = (in_data[0] != 0);
    if (autopoll != s->autopoll) {
        s->autopoll = autopoll;
        if (autopoll) {
            timer_mod(s->adb_poll_timer,
                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                      (NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms)));
        } else {
            timer_del(s->adb_poll_timer);
        }
    }
    return true;
}

static bool cuda_cmd_set_autorate(CUDAState *s,
                                  const uint8_t *in_data, int in_len,
                                  uint8_t *out_data, int *out_len)
{
    if (in_len != 1) {
        return false;
    }

    /* we don't want a period of 0 ms */
    /* FIXME: check what real hardware does */
    if (in_data[0] == 0) {
        return false;
    }

    s->autopoll_rate_ms = in_data[0];
    if (s->autopoll) {
        timer_mod(s->adb_poll_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms)));
    }
    return true;
}

static bool cuda_cmd_set_device_list(CUDAState *s,
                                     const uint8_t *in_data, int in_len,
                                     uint8_t *out_data, int *out_len)
{
    if (in_len != 2) {
        return false;
    }

    s->adb_poll_mask = (((uint16_t)in_data[0]) << 8) | in_data[1];
    return true;
}

static bool cuda_cmd_powerdown(CUDAState *s,
                               const uint8_t *in_data, int in_len,
                               uint8_t *out_data, int *out_len)
{
    if (in_len != 0) {
        return false;
    }

    qemu_system_shutdown_request();
    return true;
}

static bool cuda_cmd_reset_system(CUDAState *s,
                                  const uint8_t *in_data, int in_len,
                                  uint8_t *out_data, int *out_len)
{
    if (in_len != 0) {
        return false;
    }

    qemu_system_reset_request();
    return true;
}

static bool cuda_cmd_set_file_server_flag(CUDAState *s,
                                          const uint8_t *in_data, int in_len,
                                          uint8_t *out_data, int *out_len)
{
    if (in_len != 1) {
        return false;
    }

    qemu_log_mask(LOG_UNIMP,
                  "CUDA: unimplemented command FILE_SERVER_FLAG %d\n",
                  in_data[0]);
    return true;
}

static bool cuda_cmd_set_power_message(CUDAState *s,
                                       const uint8_t *in_data, int in_len,
                                       uint8_t *out_data, int *out_len)
{
    if (in_len != 1) {
        return false;
    }

    qemu_log_mask(LOG_UNIMP,
                  "CUDA: unimplemented command SET_POWER_MESSAGE %d\n",
                  in_data[0]);
    return true;
}

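/*
 * GET_TIME/SET_TIME: the CUDA RTC counts seconds since Jan 1 1904, so the
 * value exchanged with the guest differs from host time by RTC_OFFSET;
 * tick_offset holds the guest time corresponding to virtual-clock zero.
 */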
static bool cuda_cmd_get_time(CUDAState *s,
                              const uint8_t *in_data, int in_len,
                              uint8_t *out_data, int *out_len)
{
    uint32_t ti;

    if (in_len != 0) {
        return false;
    }

    ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
                           / NANOSECONDS_PER_SECOND);
    out_data[0] = ti >> 24;
    out_data[1] = ti >> 16;
    out_data[2] = ti >> 8;
    out_data[3] = ti;
    *out_len = 4;
    return true;
}

static bool cuda_cmd_set_time(CUDAState *s,
                              const uint8_t *in_data, int in_len,
                              uint8_t *out_data, int *out_len)
{
    uint32_t ti;

    if (in_len != 4) {
        return false;
    }

    /* in_data holds just the 4 time bytes (the command byte was stripped) */
    ti = (((uint32_t)in_data[0]) << 24) + (((uint32_t)in_data[1]) << 16)
         + (((uint32_t)in_data[2]) << 8) + in_data[3];
    s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
                           / NANOSECONDS_PER_SECOND);
    return true;
}

static const CudaCommand handlers[] = {
    { CUDA_AUTOPOLL, "AUTOPOLL", cuda_cmd_autopoll },
    { CUDA_SET_AUTO_RATE, "SET_AUTO_RATE", cuda_cmd_set_autorate },
    { CUDA_SET_DEVICE_LIST, "SET_DEVICE_LIST", cuda_cmd_set_device_list },
    { CUDA_POWERDOWN, "POWERDOWN", cuda_cmd_powerdown },
    { CUDA_RESET_SYSTEM, "RESET_SYSTEM", cuda_cmd_reset_system },
    { CUDA_FILE_SERVER_FLAG, "FILE_SERVER_FLAG",
      cuda_cmd_set_file_server_flag },
    { CUDA_SET_POWER_MESSAGES, "SET_POWER_MESSAGES",
      cuda_cmd_set_power_message },
    { CUDA_GET_TIME, "GET_TIME", cuda_cmd_get_time },
    { CUDA_SET_TIME, "SET_TIME", cuda_cmd_set_time },
};

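/*
 * Dispatch one CUDA command (the first byte selects a handler from the table
 * above) and send back either a reply or an ERROR_PACKET. For example, a
 * GET_TIME request arrives here as { CUDA_GET_TIME } and the reply sent to
 * the host is { CUDA_PACKET, 0, CUDA_GET_TIME, t >> 24, t >> 16, t >> 8, t }.
 */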
static void cuda_receive_packet(CUDAState *s,
                                const uint8_t *data, int len)
{
    uint8_t obuf[16] = { CUDA_PACKET, 0, data[0] };
    int i, out_len = 0;

    for (i = 0; i < ARRAY_SIZE(handlers); i++) {
        const CudaCommand *desc = &handlers[i];
        if (desc->command == data[0]) {
            CUDA_DPRINTF("handling command %s\n", desc->name);
            out_len = 0;
            if (desc->handler(s, data + 1, len - 1, obuf + 3, &out_len)) {
                cuda_send_packet_to_host(s, obuf, 3 + out_len);
            } else {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "CUDA: %s: wrong parameters %d\n",
                              desc->name, len);
                obuf[0] = ERROR_PACKET;
                obuf[1] = 0x5; /* bad parameters */
                obuf[2] = CUDA_PACKET;
                obuf[3] = data[0];
                cuda_send_packet_to_host(s, obuf, 4);
            }
            return;
        }
    }

    qemu_log_mask(LOG_GUEST_ERROR, "CUDA: unknown command 0x%02x\n", data[0]);
    obuf[0] = ERROR_PACKET;
    obuf[1] = 0x2; /* unknown command */
    obuf[2] = CUDA_PACKET;
    obuf[3] = data[0];
    cuda_send_packet_to_host(s, obuf, 4);
}

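/*
 * Entry point for a complete packet shifted in from the host: byte 0 selects
 * the transport (ADB_PACKET is forwarded to the ADB bus, CUDA_PACKET is
 * handled by cuda_receive_packet() above); other packet types are ignored.
 */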
static void cuda_receive_packet_from_host(CUDAState *s,
                                          const uint8_t *data, int len)
{
#ifdef DEBUG_CUDA_PACKET
    {
        int i;
        printf("cuda_receive_packet_from_host:\n");
        for(i = 0; i < len; i++)
            printf(" %02x", data[i]);
        printf("\n");
    }
#endif
    switch(data[0]) {
    case ADB_PACKET:
        {
            uint8_t obuf[ADB_MAX_OUT_LEN + 3];
            int olen;
            olen = adb_request(&s->adb_bus, obuf + 2, data + 1, len - 1);
            if (olen > 0) {
                obuf[0] = ADB_PACKET;
                obuf[1] = 0x00;
                cuda_send_packet_to_host(s, obuf, olen + 2);
            } else {
                /* error */
                obuf[0] = ADB_PACKET;
                obuf[1] = -olen;
                obuf[2] = data[1];
                olen = 0;
                cuda_send_packet_to_host(s, obuf, olen + 3);
            }
        }
        break;
    case CUDA_PACKET:
        cuda_receive_packet(s, data + 1, len - 1);
        break;
    }
}

static void cuda_writew (void *opaque, hwaddr addr, uint32_t value)
{
}

static void cuda_writel (void *opaque, hwaddr addr, uint32_t value)
{
}

static uint32_t cuda_readw (void *opaque, hwaddr addr)
{
    return 0;
}

static uint32_t cuda_readl (void *opaque, hwaddr addr)
{
    return 0;
}

static const MemoryRegionOps cuda_ops = {
    .old_mmio = {
        .write = {
            cuda_writeb,
            cuda_writew,
            cuda_writel,
        },
        .read = {
            cuda_readb,
            cuda_readw,
            cuda_readl,
        },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static bool cuda_timer_exist(void *opaque, int version_id)
{
    CUDATimer *s = opaque;

    return s->timer != NULL;
}

static const VMStateDescription vmstate_cuda_timer = {
    .name = "cuda_timer",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(latch, CUDATimer),
        VMSTATE_UINT16(counter_value, CUDATimer),
        VMSTATE_INT64(load_time, CUDATimer),
        VMSTATE_INT64(next_irq_time, CUDATimer),
        VMSTATE_TIMER_PTR_TEST(timer, CUDATimer, cuda_timer_exist),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_cuda = {
    .name = "cuda",
    .version_id = 4,
    .minimum_version_id = 4,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(a, CUDAState),
        VMSTATE_UINT8(b, CUDAState),
        VMSTATE_UINT8(last_b, CUDAState),
        VMSTATE_UINT8(dira, CUDAState),
        VMSTATE_UINT8(dirb, CUDAState),
        VMSTATE_UINT8(sr, CUDAState),
        VMSTATE_UINT8(acr, CUDAState),
        VMSTATE_UINT8(last_acr, CUDAState),
        VMSTATE_UINT8(pcr, CUDAState),
        VMSTATE_UINT8(ifr, CUDAState),
        VMSTATE_UINT8(ier, CUDAState),
        VMSTATE_UINT8(anh, CUDAState),
        VMSTATE_INT32(data_in_size, CUDAState),
        VMSTATE_INT32(data_in_index, CUDAState),
        VMSTATE_INT32(data_out_index, CUDAState),
        VMSTATE_UINT8(autopoll, CUDAState),
        VMSTATE_UINT8(autopoll_rate_ms, CUDAState),
        VMSTATE_UINT16(adb_poll_mask, CUDAState),
        VMSTATE_BUFFER(data_in, CUDAState),
        VMSTATE_BUFFER(data_out, CUDAState),
        VMSTATE_UINT32(tick_offset, CUDAState),
        VMSTATE_STRUCT_ARRAY(timers, CUDAState, 2, 1,
                             vmstate_cuda_timer, CUDATimer),
        VMSTATE_TIMER_PTR(adb_poll_timer, CUDAState),
        VMSTATE_TIMER_PTR(sr_delay_timer, CUDAState),
        VMSTATE_END_OF_LIST()
    }
};

static void cuda_reset(DeviceState *dev)
{
    CUDAState *s = CUDA(dev);

    s->b = 0;
    s->a = 0;
    s->dirb = 0xff;
    s->dira = 0;
    s->sr = 0;
    s->acr = 0;
    s->pcr = 0;
    s->ifr = 0;
    s->ier = 0;
//    s->ier = T1_INT | SR_INT;
    s->anh = 0;
    s->data_in_size = 0;
    s->data_in_index = 0;
    s->data_out_index = 0;
    s->autopoll = 0;

    s->timers[0].latch = 0xffff;
    set_counter(s, &s->timers[0], 0xffff);

    s->timers[1].latch = 0xffff;

    s->sr_delay_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_set_sr_int, s);
}

static void cuda_realizefn(DeviceState *dev, Error **errp)
{
    CUDAState *s = CUDA(dev);
    struct tm tm;

    s->timers[0].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer1, s);
    s->timers[0].frequency = s->frequency;
    s->timers[1].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer2, s);
    s->timers[1].frequency = (SCALE_US * 6000) / 4700;

    qemu_get_timedate(&tm, 0);
    s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;

    s->adb_poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_adb_poll, s);
    s->autopoll_rate_ms = 20;
    s->adb_poll_mask = 0xffff;
}

static void cuda_initfn(Object *obj)
{
    SysBusDevice *d = SYS_BUS_DEVICE(obj);
    CUDAState *s = CUDA(obj);
    int i;

    memory_region_init_io(&s->mem, obj, &cuda_ops, s, "cuda", 0x2000);
    sysbus_init_mmio(d, &s->mem);
    sysbus_init_irq(d, &s->irq);

    for (i = 0; i < ARRAY_SIZE(s->timers); i++) {
        s->timers[i].index = i;
    }

    qbus_create_inplace(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS,
                        DEVICE(obj), "adb.0");
}

static Property cuda_properties[] = {
    DEFINE_PROP_UINT64("frequency", CUDAState, frequency, 0),
    DEFINE_PROP_END_OF_LIST()
};

static void cuda_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = cuda_realizefn;
    dc->reset = cuda_reset;
    dc->vmsd = &vmstate_cuda;
    dc->props = cuda_properties;
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}

static const TypeInfo cuda_type_info = {
    .name = TYPE_CUDA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(CUDAState),
    .instance_init = cuda_initfn,
    .class_init = cuda_class_init,
};

static void cuda_register_types(void)
{
    type_register_static(&cuda_type_info);
}

type_init(cuda_register_types)