diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-15 12:39:44 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-15 12:39:44 -0400 |
commit | ada3fa15057205b7d3f727bba5cd26b5912e350f (patch) | |
tree | 60962fc9e4021b92f484d1a58e72cd3906d4f3db /drivers/xen | |
parent | 2f82af08fcc7dc01a7e98a49a5995a77e32a2925 (diff) | |
parent | 5579fd7e6aed8860ea0c8e3f11897493153b10ad (diff) |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
powerpc64: convert to dynamic percpu allocator
sparc64: use embedding percpu first chunk allocator
percpu: kill lpage first chunk allocator
x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
percpu: update embedding first chunk allocator to handle sparse units
percpu: use group information to allocate vmap areas sparsely
vmalloc: implement pcpu_get_vm_areas()
vmalloc: separate out insert_vmalloc_vm()
percpu: add chunk->base_addr
percpu: add pcpu_unit_offsets[]
percpu: introduce pcpu_alloc_info and pcpu_group_info
percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
percpu: add @align to pcpu_fc_alloc_fn_t
percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
percpu: drop @static_size from first chunk allocators
percpu: generalize first chunk allocator selection
percpu: build first chunk allocators selectively
percpu: rename 4k first chunk allocator to page
percpu: improve boot messages
percpu: fix pcpu_reclaim() locking
...
Fix trivial conflict as by Tejun Heo in kernel/sched.c
Diffstat (limited to 'drivers/xen')
-rw-r--r-- | drivers/xen/events.c | 13 |
1 file changed, 7 insertions, 6 deletions
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index abad71b1632..2f57276e87a 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -47,10 +47,10 @@ | |||
47 | static DEFINE_SPINLOCK(irq_mapping_update_lock); | 47 | static DEFINE_SPINLOCK(irq_mapping_update_lock); |
48 | 48 | ||
49 | /* IRQ <-> VIRQ mapping. */ | 49 | /* IRQ <-> VIRQ mapping. */ |
50 | static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1}; | 50 | static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1}; |
51 | 51 | ||
52 | /* IRQ <-> IPI mapping */ | 52 | /* IRQ <-> IPI mapping */ |
53 | static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1}; | 53 | static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1}; |
54 | 54 | ||
55 | /* Interrupt types. */ | 55 | /* Interrupt types. */ |
56 | enum xen_irq_type { | 56 | enum xen_irq_type { |
@@ -602,6 +602,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | |||
602 | return IRQ_HANDLED; | 602 | return IRQ_HANDLED; |
603 | } | 603 | } |
604 | 604 | ||
605 | static DEFINE_PER_CPU(unsigned, xed_nesting_count); | ||
606 | |||
605 | /* | 607 | /* |
606 | * Search the CPUs pending events bitmasks. For each one found, map | 608 | * Search the CPUs pending events bitmasks. For each one found, map |
607 | * the event number to an irq, and feed it into do_IRQ() for | 609 | * the event number to an irq, and feed it into do_IRQ() for |
@@ -617,7 +619,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) | |||
617 | struct pt_regs *old_regs = set_irq_regs(regs); | 619 | struct pt_regs *old_regs = set_irq_regs(regs); |
618 | struct shared_info *s = HYPERVISOR_shared_info; | 620 | struct shared_info *s = HYPERVISOR_shared_info; |
619 | struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); | 621 | struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); |
620 | static DEFINE_PER_CPU(unsigned, nesting_count); | ||
621 | unsigned count; | 622 | unsigned count; |
622 | 623 | ||
623 | exit_idle(); | 624 | exit_idle(); |
@@ -628,7 +629,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) | |||
628 | 629 | ||
629 | vcpu_info->evtchn_upcall_pending = 0; | 630 | vcpu_info->evtchn_upcall_pending = 0; |
630 | 631 | ||
631 | if (__get_cpu_var(nesting_count)++) | 632 | if (__get_cpu_var(xed_nesting_count)++) |
632 | goto out; | 633 | goto out; |
633 | 634 | ||
634 | #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ | 635 | #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ |
@@ -653,8 +654,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) | |||
653 | 654 | ||
654 | BUG_ON(!irqs_disabled()); | 655 | BUG_ON(!irqs_disabled()); |
655 | 656 | ||
656 | count = __get_cpu_var(nesting_count); | 657 | count = __get_cpu_var(xed_nesting_count); |
657 | __get_cpu_var(nesting_count) = 0; | 658 | __get_cpu_var(xed_nesting_count) = 0; |
658 | } while(count != 1); | 659 | } while(count != 1); |
659 | 660 | ||
660 | out: | 661 | out: |