]>
Commit | Line | Data |
---|---|---|
9ee6e8bb PB |
1 | /* |
2 | * ARM MPCore internal peripheral emulation. | |
3 | * | |
4 | * Copyright (c) 2006-2007 CodeSourcery. | |
5 | * Written by Paul Brook | |
6 | * | |
7 | * This code is licenced under the GPL. | |
8 | */ | |
9 | ||
87ecb68b PB |
10 | #include "hw.h" |
11 | #include "qemu-timer.h" | |
12 | #include "primecell.h" | |
9ee6e8bb PB |
13 | |
14 | #define MPCORE_PRIV_BASE 0x10100000 | |
15 | #define NCPU 4 | |
16 | /* ??? The MPCore TRM says the on-chip controller has 224 external IRQ lines | |
17 | (+ 32 internal). However my test chip only exposes/reports 32. | |
18 | More importantly Linux falls over if more than 32 are present! */ | |
19 | #define GIC_NIRQ 64 | |
20 | ||
21 | static inline int | |
22 | gic_get_current_cpu(void) | |
23 | { | |
24 | return cpu_single_env->cpu_index; | |
25 | } | |
26 | ||
27 | #include "arm_gic.c" | |
28 | ||
29 | /* MPCore private memory region. */ | |
30 | ||
31 | typedef struct { | |
32 | uint32_t count; | |
33 | uint32_t load; | |
34 | uint32_t control; | |
35 | uint32_t status; | |
36 | uint32_t old_status; | |
37 | int64_t tick; | |
38 | QEMUTimer *timer; | |
39 | struct mpcore_priv_state *mpcore; | |
40 | int id; /* Encodes both timer/watchdog and CPU. */ | |
41 | } mpcore_timer_state; | |
42 | ||
43 | typedef struct mpcore_priv_state { | |
44 | gic_state *gic; | |
45 | uint32_t scu_control; | |
46 | mpcore_timer_state timer[8]; | |
47 | } mpcore_priv_state; | |
48 | ||
49 | /* Per-CPU Timers. */ | |
50 | ||
51 | static inline void mpcore_timer_update_irq(mpcore_timer_state *s) | |
52 | { | |
53 | if (s->status & ~s->old_status) { | |
54 | gic_set_pending_private(s->mpcore->gic, s->id >> 1, 29 + (s->id & 1)); | |
55 | } | |
56 | s->old_status = s->status; | |
57 | } | |
58 | ||
59 | /* Return conversion factor from mpcore timer ticks to qemu timer ticks. */ | |
60 | static inline uint32_t mpcore_timer_scale(mpcore_timer_state *s) | |
61 | { | |
62 | return (((s->control >> 8) & 0xff) + 1) * 10; | |
63 | } | |
64 | ||
65 | static void mpcore_timer_reload(mpcore_timer_state *s, int restart) | |
66 | { | |
67 | if (s->count == 0) | |
68 | return; | |
69 | if (restart) | |
70 | s->tick = qemu_get_clock(vm_clock); | |
71 | s->tick += (int64_t)s->count * mpcore_timer_scale(s); | |
72 | qemu_mod_timer(s->timer, s->tick); | |
73 | } | |
74 | ||
75 | static void mpcore_timer_tick(void *opaque) | |
76 | { | |
77 | mpcore_timer_state *s = (mpcore_timer_state *)opaque; | |
78 | s->status = 1; | |
79 | if (s->control & 2) { | |
80 | s->count = s->load; | |
81 | mpcore_timer_reload(s, 0); | |
82 | } else { | |
83 | s->count = 0; | |
84 | } | |
85 | mpcore_timer_update_irq(s); | |
86 | } | |
87 | ||
88 | static uint32_t mpcore_timer_read(mpcore_timer_state *s, int offset) | |
89 | { | |
90 | int64_t val; | |
91 | switch (offset) { | |
92 | case 0: /* Load */ | |
93 | return s->load; | |
94 | /* Fall through. */ | |
95 | case 4: /* Counter. */ | |
96 | if (((s->control & 1) == 0) || (s->count == 0)) | |
97 | return 0; | |
98 | /* Slow and ugly, but hopefully won't happen too often. */ | |
99 | val = s->tick - qemu_get_clock(vm_clock); | |
100 | val /= mpcore_timer_scale(s); | |
101 | if (val < 0) | |
102 | val = 0; | |
103 | return val; | |
104 | case 8: /* Control. */ | |
105 | return s->control; | |
106 | case 12: /* Interrupt status. */ | |
107 | return s->status; | |
108 | } | |
109 | } | |
110 | ||
111 | static void mpcore_timer_write(mpcore_timer_state *s, int offset, | |
112 | uint32_t value) | |
113 | { | |
114 | int64_t old; | |
115 | switch (offset) { | |
116 | case 0: /* Load */ | |
117 | s->load = value; | |
118 | /* Fall through. */ | |
119 | case 4: /* Counter. */ | |
120 | if ((s->control & 1) && s->count) { | |
121 | /* Cancel the previous timer. */ | |
122 | qemu_del_timer(s->timer); | |
123 | } | |
124 | s->count = value; | |
125 | if (s->control & 1) { | |
126 | mpcore_timer_reload(s, 1); | |
127 | } | |
128 | break; | |
129 | case 8: /* Control. */ | |
130 | old = s->control; | |
131 | s->control = value; | |
132 | if (((old & 1) == 0) && (value & 1)) { | |
133 | if (s->count == 0 && (s->control & 2)) | |
134 | s->count = s->load; | |
135 | mpcore_timer_reload(s, 1); | |
136 | } | |
137 | break; | |
138 | case 12: /* Interrupt status. */ | |
139 | s->status &= ~value; | |
140 | mpcore_timer_update_irq(s); | |
141 | break; | |
142 | } | |
143 | } | |
144 | ||
145 | static void mpcore_timer_init(mpcore_priv_state *mpcore, | |
146 | mpcore_timer_state *s, int id) | |
147 | { | |
148 | s->id = id; | |
149 | s->mpcore = mpcore; | |
150 | s->timer = qemu_new_timer(vm_clock, mpcore_timer_tick, s); | |
151 | } | |
152 | ||
153 | ||
154 | /* Per-CPU private memory mapped IO. */ | |
155 | ||
156 | static uint32_t mpcore_priv_read(void *opaque, target_phys_addr_t offset) | |
157 | { | |
158 | mpcore_priv_state *s = (mpcore_priv_state *)opaque; | |
159 | int id; | |
160 | offset &= 0xfff; | |
161 | if (offset < 0x100) { | |
162 | /* SCU */ | |
163 | switch (offset) { | |
164 | case 0x00: /* Control. */ | |
165 | return s->scu_control; | |
166 | case 0x04: /* Configuration. */ | |
167 | return 0xf3; | |
168 | case 0x08: /* CPU status. */ | |
169 | return 0; | |
170 | case 0x0c: /* Invalidate all. */ | |
171 | return 0; | |
172 | default: | |
173 | goto bad_reg; | |
174 | } | |
175 | } else if (offset < 0x600) { | |
176 | /* Interrupt controller. */ | |
177 | if (offset < 0x200) { | |
178 | id = gic_get_current_cpu(); | |
179 | } else { | |
180 | id = (offset - 0x200) >> 8; | |
181 | } | |
182 | return gic_cpu_read(s->gic, id, offset & 0xff); | |
183 | } else if (offset < 0xb00) { | |
184 | /* Timers. */ | |
185 | if (offset < 0x700) { | |
186 | id = gic_get_current_cpu(); | |
187 | } else { | |
188 | id = (offset - 0x700) >> 8; | |
189 | } | |
190 | id <<= 1; | |
191 | if (offset & 0x20) | |
192 | id++; | |
193 | return mpcore_timer_read(&s->timer[id], offset & 0xf); | |
194 | } | |
195 | bad_reg: | |
196 | cpu_abort(cpu_single_env, "mpcore_priv_read: Bad offset %x\n", | |
197 | (int)offset); | |
198 | return 0; | |
199 | } | |
200 | ||
201 | static void mpcore_priv_write(void *opaque, target_phys_addr_t offset, | |
202 | uint32_t value) | |
203 | { | |
204 | mpcore_priv_state *s = (mpcore_priv_state *)opaque; | |
205 | int id; | |
206 | offset &= 0xfff; | |
207 | if (offset < 0x100) { | |
208 | /* SCU */ | |
209 | switch (offset) { | |
210 | case 0: /* Control register. */ | |
211 | s->scu_control = value & 1; | |
212 | break; | |
213 | case 0x0c: /* Invalidate all. */ | |
214 | /* This is a no-op as cache is not emulated. */ | |
215 | break; | |
216 | default: | |
217 | goto bad_reg; | |
218 | } | |
219 | } else if (offset < 0x600) { | |
220 | /* Interrupt controller. */ | |
221 | if (offset < 0x200) { | |
222 | id = gic_get_current_cpu(); | |
223 | } else { | |
224 | id = (offset - 0x200) >> 8; | |
225 | } | |
226 | gic_cpu_write(s->gic, id, offset & 0xff, value); | |
227 | } else if (offset < 0xb00) { | |
228 | /* Timers. */ | |
229 | if (offset < 0x700) { | |
230 | id = gic_get_current_cpu(); | |
231 | } else { | |
232 | id = (offset - 0x700) >> 8; | |
233 | } | |
234 | id <<= 1; | |
235 | if (offset & 0x20) | |
236 | id++; | |
237 | mpcore_timer_write(&s->timer[id], offset & 0xf, value); | |
238 | return; | |
239 | } | |
240 | return; | |
241 | bad_reg: | |
242 | cpu_abort(cpu_single_env, "mpcore_priv_read: Bad offset %x\n", | |
243 | (int)offset); | |
244 | } | |
245 | ||
246 | static CPUReadMemoryFunc *mpcore_priv_readfn[] = { | |
247 | mpcore_priv_read, | |
248 | mpcore_priv_read, | |
249 | mpcore_priv_read | |
250 | }; | |
251 | ||
252 | static CPUWriteMemoryFunc *mpcore_priv_writefn[] = { | |
253 | mpcore_priv_write, | |
254 | mpcore_priv_write, | |
255 | mpcore_priv_write | |
256 | }; | |
257 | ||
258 | ||
259 | static qemu_irq *mpcore_priv_init(uint32_t base, qemu_irq *pic_irq) | |
260 | { | |
261 | mpcore_priv_state *s; | |
262 | int iomemtype; | |
263 | int i; | |
264 | ||
265 | s = (mpcore_priv_state *)qemu_mallocz(sizeof(mpcore_priv_state)); | |
266 | if (!s) | |
267 | return NULL; | |
268 | s->gic = gic_init(base, pic_irq); | |
269 | if (!s->gic) | |
270 | return NULL; | |
271 | iomemtype = cpu_register_io_memory(0, mpcore_priv_readfn, | |
272 | mpcore_priv_writefn, s); | |
273 | cpu_register_physical_memory(base, 0x00001000, iomemtype); | |
274 | for (i = 0; i < 8; i++) { | |
275 | mpcore_timer_init(s, &s->timer[i], i); | |
276 | } | |
277 | return s->gic->in; | |
278 | } | |
279 | ||
280 | /* Dummy PIC to route IRQ lines. The baseboard has 4 independent IRQ | |
281 | controllers. The output of these, plus some of the raw input lines | |
282 | are fed into a single SMP-aware interrupt controller on the CPU. */ | |
283 | typedef struct { | |
284 | qemu_irq *cpuic; | |
285 | qemu_irq *rvic[4]; | |
286 | } mpcore_rirq_state; | |
287 | ||
288 | /* Map baseboard IRQs onto CPU IRQ lines. */ | |
289 | static const int mpcore_irq_map[32] = { | |
290 | -1, -1, -1, -1, 1, 2, -1, -1, | |
291 | -1, -1, 6, -1, 4, 5, -1, -1, | |
292 | -1, 14, 15, 0, 7, 8, -1, -1, | |
293 | -1, -1, -1, -1, 9, 3, -1, -1, | |
294 | }; | |
295 | ||
296 | static void mpcore_rirq_set_irq(void *opaque, int irq, int level) | |
297 | { | |
298 | mpcore_rirq_state *s = (mpcore_rirq_state *)opaque; | |
299 | int i; | |
300 | ||
301 | for (i = 0; i < 4; i++) { | |
302 | qemu_set_irq(s->rvic[i][irq], level); | |
303 | } | |
304 | if (irq < 32) { | |
305 | irq = mpcore_irq_map[irq]; | |
306 | if (irq >= 0) { | |
307 | qemu_set_irq(s->cpuic[irq], level); | |
308 | } | |
309 | } | |
310 | } | |
311 | ||
312 | qemu_irq *mpcore_irq_init(qemu_irq *cpu_irq) | |
313 | { | |
314 | mpcore_rirq_state *s; | |
315 | int n; | |
316 | ||
317 | /* ??? IRQ routing is hardcoded to "normal" mode. */ | |
318 | s = qemu_mallocz(sizeof(mpcore_rirq_state)); | |
319 | s->cpuic = mpcore_priv_init(MPCORE_PRIV_BASE, cpu_irq); | |
320 | for (n = 0; n < 4; n++) { | |
321 | s->rvic[n] = realview_gic_init(0x10040000 + n * 0x10000, | |
322 | s->cpuic[10 + n]); | |
323 | } | |
324 | return qemu_allocate_irqs(mpcore_rirq_set_irq, s, 64); | |
325 | } |