author     Paul Mackerras <paulus@samba.org>    2009-03-16 06:00:00 -0400
committer  Ingo Molnar <mingo@elte.hu>          2009-04-06 03:30:14 -0400
commit     b6c5a71da1477d261bc36254fe1f20d32b57598d
tree       02815fa0a0016eacd6203b1f496109f07bbd7be5
parent     7bb497bd885eedd0f56dfe3cc1b5ff20710d33b9
perf_counter: abstract wakeup flag setting in core to fix powerpc build
Impact: build fix for powerpc
Commit bd753921015e7905 ("perf_counter: software counter event
infrastructure") introduced a use of TIF_PERF_COUNTERS into the core
perfcounter code. This breaks the build on powerpc because we use
a flag in a per-cpu area to signal wakeups on powerpc rather than
a thread_info flag, because the thread_info flags have to be
manipulated with atomic operations and are thus slower than per-cpu
flags.
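
To make that contrast concrete, here is a small user-space sketch (an editor's illustration, not kernel code): a flag that shares a word with other thread_info flags needs an atomic read-modify-write, while a per-cpu flag that only its owning CPU touches can be set with a plain store. All identifiers other than TIF_PERF_COUNTERS and perf_counter_pending are invented for the example.

/*
 * Editor's sketch, not kernel code: the two wakeup-signalling schemes
 * described above, modelled in ordinary user-space C.
 */
#include <stdatomic.h>
#include <stdio.h>

/* thread_info-flag style (x86): the flag shares a word with other flags,
 * so setting it requires an atomic read-modify-write. */
static atomic_ulong thread_info_flags;
#define TIF_PERF_COUNTERS 5                     /* illustrative bit number */

static void set_flag_atomically(void)
{
	atomic_fetch_or(&thread_info_flags, 1UL << TIF_PERF_COUNTERS);
}

/* per-cpu-flag style (powerpc): only the owning CPU ever touches the byte
 * (paca->perf_counter_pending in the real code), so a plain store suffices. */
static volatile unsigned char perf_counter_pending;

static void set_flag_plain_store(void)
{
	perf_counter_pending = 1;
}

int main(void)
{
	set_flag_atomically();
	set_flag_plain_store();
	printf("flags=%#lx pending=%u\n",
	       (unsigned long)atomic_load(&thread_info_flags),
	       (unsigned)perf_counter_pending);
	return 0;
}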
This fixes the build by changing the core to use an abstracted
set_perf_counter_pending() function, which is defined on x86 to set
the TIF_PERF_COUNTERS flag and on powerpc to set the per-cpu flag
(paca->perf_counter_pending). It changes the previous powerpc
definition of set_perf_counter_pending to not take an argument and
adds a clear_perf_counter_pending, so as to simplify the definition
on x86.
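
The shape of that abstraction, as a compilable user-space sketch (an editor's illustration; only the set/clear function names come from the patch, while the storage, perf_counter_is_pending() and the SKETCH_POWERPC switch are invented scaffolding):

/*
 * Editor's sketch of the interface shape this patch introduces.  Core code
 * calls only set_perf_counter_pending() / clear_perf_counter_pending();
 * each "architecture" decides how the pending state is stored.
 */
#include <stdio.h>

#ifdef SKETCH_POWERPC
/* per-cpu storage: a byte in a paca-like structure */
static struct { unsigned char perf_counter_pending; } paca;
static void set_perf_counter_pending(void)   { paca.perf_counter_pending = 1; }
static void clear_perf_counter_pending(void) { paca.perf_counter_pending = 0; }
static int  perf_counter_is_pending(void)    { return paca.perf_counter_pending; }
#else
/* thread-flag storage: one bit in a per-task flags word */
static unsigned long thread_flags;
enum { TIF_PERF_COUNTERS = 5 };               /* illustrative bit number */
static void set_perf_counter_pending(void)   { thread_flags |= 1UL << TIF_PERF_COUNTERS; }
static void clear_perf_counter_pending(void) { thread_flags &= ~(1UL << TIF_PERF_COUNTERS); }
static int  perf_counter_is_pending(void)    { return !!(thread_flags & (1UL << TIF_PERF_COUNTERS)); }
#endif

/* "Core" code: knows nothing about how the flag is stored. */
int main(void)
{
	set_perf_counter_pending();
	if (perf_counter_is_pending()) {
		clear_perf_counter_pending();
		puts("pending wakeup handled");
	}
	return 0;
}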
On x86, set_perf_counter_pending() is defined as a macro. Defining
it as a static inline in arch/x86/include/asm/perf_counter.h causes
compile failures because <asm/perf_counter.h> gets included early in
<linux/sched.h>, and the definitions of set_tsk_thread_flag etc. are
therefore not available in <asm/perf_counter.h>. (On powerpc this
problem is avoided by defining set_perf_counter_pending etc. in
<asm/hw_irq.h>.)
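
A minimal, self-contained illustration of that include-ordering point (editor's sketch with invented names; set_tsk_thread_flag_sketch stands in for set_tsk_thread_flag):

/*
 * Editor's sketch: a macro body is only expanded at its call sites, so the
 * helper it calls need not be declared at the point of the #define, whereas
 * a static inline body is compiled where it appears and needs the
 * declaration right there.
 */

/* --- stands in for <asm/perf_counter.h>, included very early --- */
#define set_perf_counter_pending() \
	set_tsk_thread_flag_sketch()          /* helper not declared yet: fine for a macro */

#if 0
/* This variant would typically fail to compile at this point, because
 * set_tsk_thread_flag_sketch() has not been declared yet. */
static inline void set_perf_counter_pending(void)
{
	set_tsk_thread_flag_sketch();
}
#endif

/* --- stands in for <linux/sched.h>, which provides the helper later --- */
static inline void set_tsk_thread_flag_sketch(void) { /* no-op for the sketch */ }

/* --- ordinary code, compiled after both "headers" --- */
int main(void)
{
	set_perf_counter_pending();           /* expands here, where the helper is visible */
	return 0;
}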
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h    | 14
-rw-r--r--  arch/powerpc/kernel/irq.c            | 11
-rw-r--r--  arch/powerpc/kernel/perf_counter.c   |  3
-rw-r--r--  arch/x86/include/asm/perf_counter.h  |  3
-rw-r--r--  kernel/perf_counter.c                |  2
5 files changed, 19 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index b43076ff92c9..cb32d571c9c7 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -142,10 +142,17 @@ static inline unsigned long get_perf_counter_pending(void)
 	return x;
 }
 
-static inline void set_perf_counter_pending(int x)
+static inline void set_perf_counter_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
-		"r" (x),
+		"r" (1),
+		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+}
+
+static inline void clear_perf_counter_pending(void)
+{
+	asm volatile("stb %0,%1(13)" : :
+		"r" (0),
 		"i" (offsetof(struct paca_struct, perf_counter_pending)));
 }
 
@@ -158,7 +165,8 @@ static inline unsigned long get_perf_counter_pending(void)
 	return 0;
 }
 
-static inline void set_perf_counter_pending(int x) {}
+static inline void set_perf_counter_pending(void) {}
+static inline void clear_perf_counter_pending(void) {}
 static inline void perf_counter_do_pending(void) {}
 #endif /* CONFIG_PERF_COUNTERS */
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 0d2e37c57738..469e9635ff04 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -104,13 +104,6 @@ static inline notrace void set_soft_enabled(unsigned long enable)
 	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
-#ifdef CONFIG_PERF_COUNTERS
-notrace void __weak perf_counter_do_pending(void)
-{
-	set_perf_counter_pending(0);
-}
-#endif
-
 notrace void raw_local_irq_restore(unsigned long en)
 {
 	/*
@@ -142,8 +135,10 @@ notrace void raw_local_irq_restore(unsigned long en)
 		iseries_handle_interrupts();
 	}
 
-	if (get_perf_counter_pending())
+	if (get_perf_counter_pending()) {
+		clear_perf_counter_pending();
 		perf_counter_do_pending();
+	}
 
 	/*
 	 * if (get_paca()->hard_enabled) return;
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 0e33d27cd464..5008762e8bf4 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -653,7 +653,6 @@ void perf_counter_do_pending(void)
 	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
 	struct perf_counter *counter;
 
-	set_perf_counter_pending(0);
 	for (i = 0; i < cpuhw->n_counters; ++i) {
 		counter = cpuhw->counter[i];
 		if (counter && counter->wakeup_pending) {
@@ -811,7 +810,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 			perf_counter_do_pending();
 			irq_exit();
 		} else {
-			set_perf_counter_pending(1);
+			set_perf_counter_pending();
 		}
 	}
 }
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index 2e08ed736647..1662043b340f 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -84,6 +84,9 @@ union cpuid10_edx {
 #define MSR_ARCH_PERFMON_FIXED_CTR2			0x30b
 #define X86_PMC_IDX_FIXED_BUS_CYCLES		(X86_PMC_IDX_FIXED + 2)
 
+#define set_perf_counter_pending() \
+	set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
+
 #ifdef CONFIG_PERF_COUNTERS
 extern void init_hw_perf_counters(void);
 extern void perf_counters_lapic_init(int nmi);
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0018c5e81249..b39456ad74a1 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1433,7 +1433,7 @@ static void perf_swcounter_interrupt(struct perf_counter *counter,
 
 	if (nmi) {
 		counter->wakeup_pending = 1;
-		set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
+		set_perf_counter_pending();
 	} else
 		wake_up(&counter->waitq);
 }