Diffstat (limited to 'drivers/xen/events.c')
-rw-r--r--  drivers/xen/events.c | 657
1 file changed, 657 insertions(+), 0 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
new file mode 100644
index 000000000000..c50d499b1e6f
--- /dev/null
+++ b/drivers/xen/events.c
@@ -0,0 +1,657 @@
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts.  Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Packed IRQ information: binding type, sub-type index, and event channel. */
struct packed_irq {
        unsigned short evtchn;
        unsigned char index;
        unsigned char type;
};

static struct packed_irq irq_info[NR_IRQS];

/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
};

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1
};
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
static u8 cpu_evtchn[NR_EVENT_CHANNELS];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
        (void)HYPERVISOR_xen_version(0, NULL);
}
EXPORT_SYMBOL_GPL(force_evtchn_callback);

static struct irq_chip xen_dynamic_chip;

/* Constructor for packed IRQ information. */
static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
{
        return (struct packed_irq) { evtchn, index, type };
}

/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int evtchn_from_irq(int irq)
{
        return irq_info[irq].evtchn;
}

static inline unsigned int index_from_irq(int irq)
{
        return irq_info[irq].index;
}

static inline unsigned int type_from_irq(int irq)
{
        return irq_info[irq].type;
}
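
/*
 * Illustrative sketch (not part of the original patch): how the packed
 * representation round-trips.  Binding, say, VIRQ_TIMER on port 7
 * stores all three fields in one small struct, and the accessors above
 * recover them:
 *
 *	irq_info[irq] = mk_irq_info(IRQT_VIRQ, VIRQ_TIMER, 7);
 *	evtchn_from_irq(irq);	-- yields 7
 *	index_from_irq(irq);	-- yields VIRQ_TIMER
 *	type_from_irq(irq);	-- yields IRQT_VIRQ
 */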

static inline unsigned long active_evtchns(unsigned int cpu,
                                           struct shared_info *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                cpu_evtchn_mask[cpu][idx] &
                ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
#ifdef CONFIG_SMP
        irq_desc[irq].affinity = cpumask_of_cpu(cpu);
#endif

        __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
        __set_bit(chn, cpu_evtchn_mask[cpu]);

        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
        int i;
        /* By default all event channels notify CPU#0. */
        for (i = 0; i < NR_IRQS; i++)
                irq_desc[i].affinity = cpumask_of_cpu(0);
#endif

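        /*
         * All channels start out bound to CPU0, so CPU0's per-cpu mask
         * starts all-ones and every other CPU's mask all-zero; the
         * cpu_evtchn[] byte array likewise starts at zero (CPU0).
         */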
        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        return cpu_evtchn[evtchn];
}

static inline void clear_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();

        BUG_ON(!irqs_disabled());

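        /*
         * Only the CPU the port is bound to may safely modify its own
         * vcpu_info pending flags; unmasking a port bound to some other
         * CPU is therefore delegated to Xen via a hypercall.
         */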
        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
                struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

                sync_clear_bit(port, &s->evtchn_mask[0]);

                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
                if (sync_test_bit(port, &s->evtchn_pending[0]) &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
        }

        put_cpu();
}

static int find_unbound_irq(void)
{
        int irq;

        /* Only allocate from dynirq range */
        for (irq = 0; irq < NR_IRQS; irq++)
                if (irq_bindcount[irq] == 0)
                        break;

        if (irq == NR_IRQS)
                panic("No available IRQ to bind to: increase NR_IRQS!\n");

        return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        irq = evtchn_to_irq[evtchn];

        if (irq == -1) {
                irq = find_unbound_irq();

                dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "event");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
        }

        irq_bindcount[irq]++;

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(ipi_to_irq, cpu)[ipi];
        if (irq == -1) {
                irq = find_unbound_irq();
                if (irq < 0)
                        goto out;

                dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "ipi");

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}


static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(virq_to_irq, cpu)[virq];

        if (irq == -1) {
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                irq = find_unbound_irq();

                dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "virq");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;

                dynamic_irq_cleanup(irq);
        }

        spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_evtchn_to_irq(evtchn);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
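
/*
 * Example usage (illustrative only, not part of the original patch):
 * a front-end driver that has been handed an event-channel port --
 * here called 'port', e.g. read from xenstore -- would typically wire
 * it up like this.  'example_handler' and 'example_connect' are
 * hypothetical names.
 */
#if 0
static irqreturn_t example_handler(int irq, void *dev_id)
{
        /* consume the event; the chip's ->ack has already cleared it */
        return IRQ_HANDLED;
}

static int example_connect(unsigned int port)
{
        int irq = bind_evtchn_to_irqhandler(port, example_handler,
                                            0, "example-dev", NULL);
        if (irq < 0)
                return irq;
        /* ... on teardown: unbind_from_irqhandler(irq, NULL); */
        return 0;
}
#endif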

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_virq_to_irq(virq, cpu);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
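
/*
 * Illustrative sketch (not part of the original patch): per-cpu VIRQs
 * such as VIRQ_TIMER are bound on each CPU in turn.
 * 'example_bind_timer' and 'example_timer_interrupt' are hypothetical
 * names.
 */
#if 0
static int example_bind_timer(unsigned int cpu)
{
        int irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu,
                                          example_timer_interrupt,
                                          IRQF_DISABLED | IRQF_PERCPU,
                                          "timer", NULL);
        return irq < 0 ? irq : 0;
}
#endif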

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned int cpu,
                           irq_handler_t handler,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
        int irq = per_cpu(ipi_to_irq, cpu)[vector];
        BUG_ON(irq < 0);
        notify_remote_via_irq(irq);
}
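
/*
 * Illustrative sketch (not part of the original patch): SMP bringup
 * would bind each IPI vector on each CPU and later signal a CPU with
 * xen_send_IPI_one().  'XEN_RESCHEDULE_VECTOR' and 'example_resched'
 * are assumed names here.
 */
#if 0
static void example_setup_resched_ipi(unsigned int cpu)
{
        bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
                               example_resched, IRQF_DISABLED,
                               "resched", NULL);
}

static void example_kick(unsigned int cpu)
{
        /* raises the bound event channel on the target VCPU */
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
#endif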

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
        int i;
        unsigned long flags;
        static DEFINE_SPINLOCK(debug_lock);

        spin_lock_irqsave(&debug_lock, flags);

        printk("vcpu %d\n ", cpu);

        for_each_online_cpu(i) {
                struct vcpu_info *v = per_cpu(xen_vcpu, i);
                printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
                       (get_irq_regs() && i == cpu) ? !(get_irq_regs()->flags & X86_EFLAGS_IF) : v->evtchn_upcall_mask,
                       v->evtchn_upcall_pending,
                       v->evtchn_pending_sel);
        }
        printk("pending:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_pending[i],
                       i % 8 == 0 ? "\n " : " ");
        printk("\nmasks:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\nunmasked:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\npending list:\n");
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        printk("  %d: event %d -> irq %d\n",
                               cpu_evtchn[i], i,
                               evtchn_to_irq[i]);
                }
        }

        spin_unlock_irqrestore(&debug_lock, flags);

        return IRQ_HANDLED;
}


/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
        static DEFINE_PER_CPU(unsigned, nesting_count);
        unsigned count;

        do {
                unsigned long pending_words;

                vcpu_info->evtchn_upcall_pending = 0;

                if (__get_cpu_var(nesting_count)++)
                        goto out;

                /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
                while (pending_words != 0) {
                        unsigned long pending_bits;
                        int word_idx = __ffs(pending_words);
                        pending_words &= ~(1UL << word_idx);

                        while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
                                int bit_idx = __ffs(pending_bits);
                                int port = (word_idx * BITS_PER_LONG) + bit_idx;
                                int irq = evtchn_to_irq[port];

                                if (irq != -1) {
                                        regs->orig_ax = ~irq;
                                        do_IRQ(regs);
                                }
                        }
                }

                BUG_ON(!irqs_disabled());

                count = __get_cpu_var(nesting_count);
                __get_cpu_var(nesting_count) = 0;
        } while (count != 1);

out:
        put_cpu();
}
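
/*
 * Worked example (illustrative): with 32-bit longs, an event on port 70
 * sets bit 70 % 32 == 6 of evtchn_pending[2] and bit 2 of the vcpu's
 * evtchn_pending_sel.  The scan above pulls word_idx == 2 out of
 * pending_sel with __ffs(), finds bit_idx == 6 in that word via
 * active_evtchns(), and dispatches port == 2 * 32 + 6 == 70 through
 * evtchn_to_irq[].
 */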

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return;

        /* Send future instances of this interrupt to the target vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);
}


static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
        unsigned tcpu = first_cpu(dest);
        rebind_irq_to_cpu(irq, tcpu);
}

static void enable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);
        struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;

        if (VALID_EVTCHN(evtchn)) {
                int masked;

                masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
                sync_set_bit(evtchn, sh->evtchn_pending);
                if (!masked)
                        unmask_evtchn(evtchn);
                ret = 1;
        }

        return ret;
}
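
/*
 * Note on retrigger_dynirq() above: the port is masked before the
 * pending bit is set, so setting it cannot race with delivery; if the
 * port was not already masked, the subsequent unmask_evtchn() then
 * takes the resend path, which raises the upcall for the now-pending
 * event.
 */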

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name           = "xen-dyn",
        .mask           = disable_dynirq,
        .unmask         = enable_dynirq,
        .ack            = ack_dynirq,
        .set_affinity   = set_affinity_irq,
        .retrigger      = retrigger_dynirq,
};

void __init xen_init_IRQ(void)
{
        int i;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        for (i = 0; i < NR_IRQS; i++)
                irq_bindcount[i] = 0;

        irq_ctx_init(smp_processor_id());
}