/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts. Not supported at present.
 *
 * Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
 */
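
/*
 * Typical usage, as a sketch rather than code from this file: a
 * driver that has been told the port of an inter-domain event channel
 * binds it to a handler and later tears the binding down again.  The
 * names my_handler, "mydev" and mydev are hypothetical:
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler,
 *					0, "mydev", mydev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, mydev);
 */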

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Packed IRQ information: binding type, sub-type index, and event channel. */
struct packed_irq
{
	unsigned short evtchn;
	unsigned char index;
	unsigned char type;
};

static struct packed_irq irq_info[NR_IRQS];

/* Binding types. */
enum {
	IRQT_UNBOUND,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
static u8 cpu_evtchn[NR_EVENT_CHANNELS];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;

/* Constructor for packed IRQ information. */
static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	return (struct packed_irq) { evtchn, index, type };
}

/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int evtchn_from_irq(int irq)
{
	return irq_info[irq].evtchn;
}

static inline unsigned int index_from_irq(int irq)
{
	return irq_info[irq].index;
}

static inline unsigned int type_from_irq(int irq)
{
	return irq_info[irq].type;
}

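/*
 * Return the set of event channels in word 'idx' of the shared
 * pending bitmap that need servicing on 'cpu': they are pending, not
 * masked, and bound to this cpu.
 */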
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask[cpu][idx] &
		~sh->evtchn_mask[idx]);
}

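/*
 * Record that event channel 'chn' is now delivered to 'cpu': move its
 * bit from the old cpu's mask to the new one and update the reverse
 * map used by cpu_from_evtchn().
 */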
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
#endif

	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
	__set_bit(chn, cpu_evtchn_mask[cpu]);

	cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		if (!desc)
			continue;

		desc->affinity = cpumask_of_cpu(0);
	}
#endif

	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

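/*
 * Mask/unmask manipulate the per-channel mask bit in the shared info
 * page.  A masked channel still gets its pending bit set when an
 * event arrives; it just raises no upcall, which is why unmasking
 * must check for an event that arrived while masked (see below).
 */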
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

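/*
 * Find a free irq for a new binding: a linear scan of the reference
 * counts.  It panics rather than fail, so callers get a valid irq on
 * return.
 */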
static int find_unbound_irq(void)
{
	int irq;

	/* Only allocate from dynirq range */
	for (irq = 0; irq < nr_irqs; irq++)
		if (irq_bindcount[irq] == 0)
			break;

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

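/*
 * Bind a per-cpu IPI vector to an irq.  Each (cpu, ipi) pair gets its
 * own event channel, allocated via EVTCHNOP_bind_ipi and delivered to
 * that cpu only.
 */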
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];
	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}


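/*
 * Bind a VIRQ (e.g. a timer) on the given cpu to an irq.  VIRQs are
 * per-cpu, so the resulting event channel is bound to that vcpu.
 */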
static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

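/*
 * Dump the state of every event channel: per-vcpu mask/pending flags
 * and the global pending, mask and derived unmasked bitmaps.  Handy
 * when chasing lost events.
 */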
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk(" %d: event %d -> irq %d\n",
			       cpu_evtchn[i], i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}


/*
 * Search the CPUs pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	static DEFINE_PER_CPU(unsigned, nesting_count);
	unsigned count;

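	/*
	 * Re-entrant upcalls are folded into the outermost invocation:
	 * a nested upcall just bumps nesting_count and returns, and
	 * the outer loop notices the count changed and rescans, so no
	 * event is dropped.
	 */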
	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];

				if (irq != -1)
					xen_do_IRQ(irq, regs);
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(nesting_count);
		__get_cpu_var(nesting_count) = 0;
	} while (count != 1);

out:
	put_cpu();
}

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so the bindcount should be non-0 */
	BUG_ON(irq_bindcount[irq] == 0);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of_cpu(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);
}


static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);
	rebind_irq_to_cpu(irq, tcpu);
}

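/*
 * Re-inject an event: mark the channel pending and, if it was not
 * already masked, run the unmask path so the pending event actually
 * gets delivered.
 */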
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(irq_info[irq].type != IRQT_VIRQ);
		BUG_ON(irq_info[irq].index != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(irq_info[irq].type != IRQT_IPI);
		BUG_ON(irq_info[irq].index != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}

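/*
 * A typical poll loop over these helpers, as a sketch (the real
 * callers live elsewhere, e.g. the pv spinlock code);
 * condition_we_wait_for() is a hypothetical stand-in.  Clear the
 * pending state, re-check the condition, then block in xen_poll_irq()
 * until the remote end sends an event:
 *
 *	xen_clear_irq_pending(irq);
 *	while (!condition_we_wait_for())
 *		xen_poll_irq(irq);
 */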
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

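/*
 * The irq_chip backing all dynamically-bound event channels: mask and
 * unmask map onto the event-channel mask bit, ack clears the pending
 * bit (events are handled as level interrupts via handle_level_irq),
 * and retrigger re-marks the channel pending.
 */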
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,
	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

void __init xen_init_IRQ(void)
{
	int i;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* Dynamic IRQ space is currently unbound.  Zero the refcnts. */
	for (i = 0; i < nr_irqs; i++)
		irq_bindcount[i] = 0;

	irq_ctx_init(smp_processor_id());
}