author     James Hogan <james.hogan@imgtec.com>   2013-02-12 11:04:53 -0500
committer  James Hogan <james.hogan@imgtec.com>   2013-03-02 15:11:14 -0500
commit     fa771d029af8f8a23089c97b6ab6a5745e98ad7e (patch)
tree       949fbbe9278e5da6711d2da055888e82772c2a62
parent     97c3ec63089fdcd2abf66619b913900909841dc0 (diff)
metag: move irq enable out of irqflags.h on SMP
The SMP version of arch_local_irq_enable() uses preempt_disable(), but
<asm/irqflags.h> doesn't include <linux/preempt.h>, causing the
following errors on SMP when pstore/ftrace is enabled (caught by
buildbot smp allyesconfig):
In file included from include/linux/irqflags.h:15,
from fs/pstore/ftrace.c:16:
arch/metag/include/asm/irqflags.h: In function 'arch_local_irq_enable':
arch/metag/include/asm/irqflags.h:84: error: implicit declaration of function 'preempt_disable'
arch/metag/include/asm/irqflags.h:86: error: implicit declaration of function 'preempt_enable_no_resched'
However <linux/preempt.h> cannot be easily included from
<asm/irqflags.h> as it can cause circular include dependencies in the
!SMP case, and potentially in the SMP case in the future. Therefore move
the SMP implementation of arch_local_irq_enable() into traps.c and use
an inline version of get_trigger_mask() which is also defined in traps.c
for SMP.
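
In short, after this change the header only declares the function in
the SMP case, and traps.c carries the out-of-line definition (condensed
from the diff below; the non-SMP branch is shown for contrast):

/* arch/metag/include/asm/irqflags.h, condensed */
#ifdef CONFIG_SMP
/* Avoid circular include dependencies through <linux/preempt.h> */
void arch_local_irq_enable(void);
#else
static inline void arch_local_irq_enable(void)
{
        arch_local_irq_restore(get_trigger_mask());
}
#endif

/* arch/metag/kernel/traps.c, SMP only, condensed */
void arch_local_irq_enable(void)
{
        preempt_disable();
        arch_local_irq_restore(_get_trigger_mask());
        preempt_enable_no_resched();
}
EXPORT_SYMBOL(arch_local_irq_enable);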
This adds an extra layer of function call / stack push when
preempt_disable needs to call other functions; however, in the
non-preemptive SMP case it should be about as fast, since it was
already making an out-of-line call to get_trigger_mask(), whose body is
now used inline.
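
Roughly, for a non-preemptive SMP kernel the number of out-of-line
calls per arch_local_irq_enable() stays the same (an illustrative
comparison, not part of the original commit):

/* before: arch_local_irq_enable() inlined at the call site */
caller -> get_trigger_mask()            /* one call into traps.c */

/* after: arch_local_irq_enable() defined in traps.c */
caller -> arch_local_irq_enable()       /* one call into traps.c;
                                           _get_trigger_mask() inlined inside */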
Signed-off-by: James Hogan <james.hogan@imgtec.com>
-rw-r--r--   arch/metag/include/asm/irqflags.h   11
-rw-r--r--   arch/metag/kernel/traps.c           16
2 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/arch/metag/include/asm/irqflags.h b/arch/metag/include/asm/irqflags.h
index cba5e135bc9a..339b16f062eb 100644
--- a/arch/metag/include/asm/irqflags.h
+++ b/arch/metag/include/asm/irqflags.h
@@ -78,16 +78,15 @@ static inline void arch_local_irq_disable(void)
 	asm volatile("MOV	TXMASKI,%0\n" : : "r" (flags) : "memory");
 }
 
-static inline void arch_local_irq_enable(void)
-{
 #ifdef CONFIG_SMP
-	preempt_disable();
-	arch_local_irq_restore(get_trigger_mask());
-	preempt_enable_no_resched();
+/* Avoid circular include dependencies through <linux/preempt.h> */
+void arch_local_irq_enable(void);
 #else
+static inline void arch_local_irq_enable(void)
+{
 	arch_local_irq_restore(get_trigger_mask());
-#endif
 }
+#endif
 
 #endif /* (__ASSEMBLY__) */
 
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index 1ad363ce1ee9..202be405771e 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/preempt.h>
 #include <linux/ptrace.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
@@ -776,17 +777,30 @@ int traps_restore_context(void)
 #endif
 
 #ifdef CONFIG_SMP
-unsigned int get_trigger_mask(void)
+static inline unsigned int _get_trigger_mask(void)
 {
 	unsigned long cpu = smp_processor_id();
 	return per_cpu(trigger_mask, cpu);
 }
 
+unsigned int get_trigger_mask(void)
+{
+	return _get_trigger_mask();
+}
+
 static void set_trigger_mask(unsigned int mask)
 {
 	unsigned long cpu = smp_processor_id();
 	per_cpu(trigger_mask, cpu) = mask;
 }
+
+void arch_local_irq_enable(void)
+{
+	preempt_disable();
+	arch_local_irq_restore(_get_trigger_mask());
+	preempt_enable_no_resched();
+}
+EXPORT_SYMBOL(arch_local_irq_enable);
 #else
 static void set_trigger_mask(unsigned int mask)
 {