author    Stephen Rothwell <sfr@canb.auug.org.au>  2009-12-08 02:25:15 -0500
committer Ingo Molnar <mingo@elte.hu>              2009-12-08 03:34:43 -0500
commit    6ab8886326a1b9a3a8d164d8174e3c20703a03a2 (patch)
tree      dab951be5106d9e4350916af6a9281045e6b2f80 /kernel/hw_breakpoint.c
parent    2ff6cfd70720780234fdfea636218c2a62b31287 (diff)
perf: hw_breakpoints: Fix percpu namespace clash
Today's linux-next build failed with:

  kernel/hw_breakpoint.c:86: error: 'task_bp_pinned' redeclared as different kind of symbol
  ...

Caused by commit dd17c8f72993f9461e9c19250e3f155d6d99df22 ("percpu: remove
per_cpu__ prefix") from the percpu tree interacting with commit
56053170ea2a2c0dc17420e9b94aa3ca51d80408 ("hw-breakpoints: Fix task-bound
breakpoint slot allocation") from the tip tree.

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <20091208182515.bb6dda4a.sfr@canb.auug.org.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/hw_breakpoint.c')
-rw-r--r--  kernel/hw_breakpoint.c  |  14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
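Note: the clash is visible in the diff below, where the file defines both a
per-cpu array and a helper function named task_bp_pinned. Before the
percpu-tree commit, DEFINE_PER_CPU() prefixed the variable's symbol with
per_cpu__, so the two names never collided; once the prefix was removed, the
bare names clashed. A minimal standalone sketch of the mechanism follows
(the macros here are simplified stand-ins, not the real <linux/percpu.h>
definitions, and HBP_NUM = 4 is assumed):

/* Simplified model of the per_cpu__ prefix removal; illustrative only. */

#define HBP_NUM 4	/* assumed value for the sketch */

/* Old expansion: the symbol is per_cpu__<name>, so it cannot collide
 * with a plain identifier of the same name. */
#define DEFINE_PER_CPU_OLD(type, name) type per_cpu__##name

/* New expansion (after dd17c8f72993): the bare name is emitted. */
#define DEFINE_PER_CPU_NEW(type, name) type name

static DEFINE_PER_CPU_OLD(unsigned int, task_bp_pinned[HBP_NUM]);

/* kernel/hw_breakpoint.c also has a function of the same name; with the
 * old macro this compiles, because the array's real symbol is
 * per_cpu__task_bp_pinned. */
static unsigned int task_bp_pinned(void)
{
	return per_cpu__task_bp_pinned[0];
}

/* Swapping in DEFINE_PER_CPU_NEW above would expand the array to
 *     static unsigned int task_bp_pinned[HBP_NUM];
 * i.e. 'task_bp_pinned' redeclared as a different kind of symbol --
 * exactly the linux-next failure. */

int main(void)
{
	return (int)task_bp_pinned();
}

The patch below resolves this by renaming the array to nr_task_bp_pinned,
applied consistently across the definition, the two per_cpu() accessors,
and the documentation comments.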
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 02b492504a5a..03a0773ac2b2 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -52,7 +52,7 @@
 static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
 
 /* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
+static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
 
 /* Number of non-pinned cpu/task breakpoints in a cpu */
 static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
@@ -73,7 +73,7 @@ static DEFINE_MUTEX(nr_bp_mutex);
 static unsigned int max_task_bp_pinned(int cpu)
 {
 	int i;
-	unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);
+	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
 
 	for (i = HBP_NUM -1; i >= 0; i--) {
 		if (tsk_pinned[i] > 0)
@@ -162,7 +162,7 @@ static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
 
 	count = task_bp_pinned(tsk);
 
-	tsk_pinned = per_cpu(task_bp_pinned, cpu);
+	tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
 	if (enable) {
 		tsk_pinned[count]++;
 		if (count > 0)
@@ -209,7 +209,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * - If attached to a single cpu, check:
  *
  *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
- *            + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
  *
  * -> If there are already non-pinned counters in this cpu, it means
  *    there is already a free slot for them.
@@ -220,7 +220,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * - If attached to every cpus, check:
  *
  *      (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
- *           + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
+ *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
  *
  * -> This is roughly the same, except we check the number of per cpu
  *    bp for every cpu and we keep the max one. Same for the per tasks
@@ -232,7 +232,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * - If attached to a single cpu, check:
  *
  *     ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
- *          + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
+ *          + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
  *
  * -> Same checks as before. But now the nr_bp_flexible, if any, must keep
  *    one register at least (or they will never be fed).
@@ -240,7 +240,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * - If attached to every cpus, check:
  *
  *      ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
- *           + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
+ *           + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
  */
 int reserve_bp_slot(struct perf_event *bp)
 {
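For reference, the single-cpu pinned-slot rule quoted in the comment block
above can be modelled in a few lines of userspace C. The parenthesisation in
the comment is loose pseudocode; the sketch below reads it the way the
surrounding text explains it (an existing flexible counter already proves a
slot can be multiplexed, otherwise the pinned counters must leave a register
free), with HBP_NUM = 4 assumed as on x86. It is a hand-written illustration,
not the kernel's implementation:

#include <stdio.h>

#define HBP_NUM 4	/* assumed: number of x86 debug registers */

/* One reading of the single-cpu check for a pinned breakpoint: either a
 * flexible (non-pinned) counter already exists and can be time-shared,
 * or the cpu-bound pinned counters plus the busiest task's pinned
 * counters leave at least one register free. */
static int pinned_slot_available(unsigned int nr_bp_flexible,
				 unsigned int nr_cpu_bp_pinned,
				 unsigned int max_task_bp_pinned)
{
	return nr_bp_flexible ||
	       (nr_cpu_bp_pinned + max_task_bp_pinned) < HBP_NUM;
}

int main(void)
{
	/* 2 cpu-pinned + 1 task-pinned = 3 < 4: a slot is free. */
	printf("%d\n", pinned_slot_available(0, 2, 1));	/* prints 1 */
	/* 3 cpu-pinned + 1 task-pinned = 4: all registers taken. */
	printf("%d\n", pinned_slot_available(0, 3, 1));	/* prints 0 */
	return 0;
}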