about summary refs log tree commit diff stats
path: root/arch/i386/xen/events.c
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy@xensource.com>2007-07-17 21:37:06 -0400
committerJeremy Fitzhardinge <jeremy@goop.org>2007-07-18 11:47:44 -0400
commitf87e4cac4f4e940b328d3deb5b53e642e3881f43 (patch)
tree7409f86561e5f97459378abd2ae21e9a5c82bfea /arch/i386/xen/events.c
parentab55028886dd1dd54585f22bf19a00eb23869340 (diff)
xen: SMP guest support
This is a fairly straightforward Xen implementation of smp_ops. Xen has its own IPI mechanisms, and has no dependency on any APIC-based IPI. The smp_ops hooks and the flush_tlb_others pv_op allow a Xen guest to avoid all APIC code in arch/i386 (the only apic operation is a single apic_read for the apic version number).

One subtle point which needs to be addressed is unpinning pagetables when another cpu may have a lazy tlb reference to the pagetable. Xen will not allow an in-use pagetable to be unpinned, so we must find any other cpus with a reference to the pagetable and get them to shoot down their references.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch/i386/xen/events.c')
-rw-r--r--arch/i386/xen/events.c80
1 file changed, 79 insertions, 1 deletion
diff --git a/arch/i386/xen/events.c b/arch/i386/xen/events.c
index e7c5d00ab4fe..4103b8bf22fd 100644
--- a/arch/i386/xen/events.c
+++ b/arch/i386/xen/events.c
@@ -47,6 +47,9 @@ static DEFINE_SPINLOCK(irq_mapping_update_lock);
47/* IRQ <-> VIRQ mapping. */ 47/* IRQ <-> VIRQ mapping. */
48static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1}; 48static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
49 49
50/* IRQ <-> IPI mapping */
51static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
52
50/* Packed IRQ information: binding type, sub-type index, and event channel. */ 53/* Packed IRQ information: binding type, sub-type index, and event channel. */
51struct packed_irq 54struct packed_irq
52{ 55{
@@ -58,7 +61,13 @@ struct packed_irq
58static struct packed_irq irq_info[NR_IRQS]; 61static struct packed_irq irq_info[NR_IRQS];
59 62
60/* Binding types. */ 63/* Binding types. */
61enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN }; 64enum {
65 IRQT_UNBOUND,
66 IRQT_PIRQ,
67 IRQT_VIRQ,
68 IRQT_IPI,
69 IRQT_EVTCHN
70};
62 71
63/* Convenient shorthand for packed representation of an unbound IRQ. */ 72/* Convenient shorthand for packed representation of an unbound IRQ. */
64#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0) 73#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
@@ -261,6 +270,45 @@ static int bind_evtchn_to_irq(unsigned int evtchn)
261 return irq; 270 return irq;
262} 271}
263 272
273static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
274{
275 struct evtchn_bind_ipi bind_ipi;
276 int evtchn, irq;
277
278 spin_lock(&irq_mapping_update_lock);
279
280 irq = per_cpu(ipi_to_irq, cpu)[ipi];
281 if (irq == -1) {
282 irq = find_unbound_irq();
283 if (irq < 0)
284 goto out;
285
286 dynamic_irq_init(irq);
287 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
288 handle_level_irq, "ipi");
289
290 bind_ipi.vcpu = cpu;
291 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
292 &bind_ipi) != 0)
293 BUG();
294 evtchn = bind_ipi.port;
295
296 evtchn_to_irq[evtchn] = irq;
297 irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
298
299 per_cpu(ipi_to_irq, cpu)[ipi] = irq;
300
301 bind_evtchn_to_cpu(evtchn, cpu);
302 }
303
304 irq_bindcount[irq]++;
305
306 out:
307 spin_unlock(&irq_mapping_update_lock);
308 return irq;
309}
310
311
264static int bind_virq_to_irq(unsigned int virq, unsigned int cpu) 312static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
265{ 313{
266 struct evtchn_bind_virq bind_virq; 314 struct evtchn_bind_virq bind_virq;
@@ -369,6 +417,28 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
369} 417}
370EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); 418EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
371 419
420int bind_ipi_to_irqhandler(enum ipi_vector ipi,
421 unsigned int cpu,
422 irq_handler_t handler,
423 unsigned long irqflags,
424 const char *devname,
425 void *dev_id)
426{
427 int irq, retval;
428
429 irq = bind_ipi_to_irq(ipi, cpu);
430 if (irq < 0)
431 return irq;
432
433 retval = request_irq(irq, handler, irqflags, devname, dev_id);
434 if (retval != 0) {
435 unbind_from_irq(irq);
436 return retval;
437 }
438
439 return irq;
440}
441
372void unbind_from_irqhandler(unsigned int irq, void *dev_id) 442void unbind_from_irqhandler(unsigned int irq, void *dev_id)
373{ 443{
374 free_irq(irq, dev_id); 444 free_irq(irq, dev_id);
@@ -376,6 +446,14 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
376} 446}
377EXPORT_SYMBOL_GPL(unbind_from_irqhandler); 447EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
378 448
449void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
450{
451 int irq = per_cpu(ipi_to_irq, cpu)[vector];
452 BUG_ON(irq < 0);
453 notify_remote_via_irq(irq);
454}
455
456
379/* 457/*
380 * Search the CPUs pending events bitmasks. For each one found, map 458 * Search the CPUs pending events bitmasks. For each one found, map
381 * the event number to an irq, and feed it into do_IRQ() for 459 * the event number to an irq, and feed it into do_IRQ() for