author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-04-06 05:45:12 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-07 04:49:01 -0400
commit		849691a6cd40270ff5f4a8846d5f6bf8df663ffc (patch)
tree		b61157f375905d21bf0facae603e4247e1de9007
parent		a39d6f2556c4a19f58f538c6aa28bf8faca4fcb8 (diff)
perf_counter: remove rq->lock usage
Now that all the task runtime clock users are gone, remove the ugly
rq->lock usage from perf counters, which solves the nasty deadlock
seen when a software task clock counter was read from an NMI overflow
context.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.531137582@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
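For context, a minimal sketch of the locking change (illustrative only: example_read() is a hypothetical helper named for this sketch, not code from the patch; the real call sites are in the diff below). The removed helpers took the current CPU's rq->lock, so a counter read from an NMI that fired while the scheduler already held that lock would spin on it forever; the replacement uses only the per-context lock, or plain IRQ disabling where no cross-CPU serialization is needed.

	/* Sketch of the locking change; example_read() is hypothetical. */
	static void example_read(struct perf_counter *counter)
	{
		struct perf_counter_context *ctx = counter->ctx;
		unsigned long flags;

		/*
		 * Old scheme (removed): curr_rq_lock_irq_save(&flags) took
		 * rq->lock even on the counter read path, which deadlocks if
		 * an NMI reads a task clock counter while rq->lock is held.
		 */

		/* New scheme, cross-CPU callers: serialize on the context lock. */
		spin_lock_irqsave(&ctx->lock, flags);
		/* ... install/remove/enable/disable counters under ctx->lock ... */
		spin_unlock_irqrestore(&ctx->lock, flags);

		/* New scheme, current-task-only paths: just disable interrupts. */
		local_irq_save(flags);
		counter->hw_ops->read(counter);	/* no rq->lock anywhere */
		local_irq_restore(flags);
	}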
-rw-r--r--	include/linux/kernel_stat.h	 2
-rw-r--r--	kernel/perf_counter.c		42
-rw-r--r--	kernel/sched.c			20
3 files changed, 16 insertions(+), 48 deletions(-)
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index b6d2887a5d88..080d1fd461d7 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -85,8 +85,6 @@ static inline unsigned int kstat_irqs(unsigned int irq)
 /*
  * Lock/unlock the current runqueue - to extract task statistics:
  */
-extern void curr_rq_lock_irq_save(unsigned long *flags);
-extern void curr_rq_unlock_irq_restore(unsigned long *flags);
 extern unsigned long long __task_delta_exec(struct task_struct *tsk, int update);
 extern unsigned long long task_delta_exec(struct task_struct *);
 
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 56b7eb53d673..f4f7596f7841 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -172,8 +172,7 @@ static void __perf_counter_remove_from_context(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	curr_rq_lock_irq_save(&flags);
-	spin_lock(&ctx->lock);
+	spin_lock_irqsave(&ctx->lock, flags);
 
 	counter_sched_out(counter, cpuctx, ctx);
 
@@ -198,8 +197,7 @@ static void __perf_counter_remove_from_context(void *info)
 			    perf_max_counters - perf_reserved_percpu);
 	}
 
-	spin_unlock(&ctx->lock);
-	curr_rq_unlock_irq_restore(&flags);
+	spin_unlock_irqrestore(&ctx->lock, flags);
 }
 
 
@@ -319,8 +317,7 @@ static void __perf_counter_disable(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	curr_rq_lock_irq_save(&flags);
-	spin_lock(&ctx->lock);
+	spin_lock_irqsave(&ctx->lock, flags);
 
 	/*
 	 * If the counter is on, turn it off.
@@ -336,8 +333,7 @@ static void __perf_counter_disable(void *info)
 		counter->state = PERF_COUNTER_STATE_OFF;
 	}
 
-	spin_unlock(&ctx->lock);
-	curr_rq_unlock_irq_restore(&flags);
+	spin_unlock_irqrestore(&ctx->lock, flags);
 }
 
 /*
@@ -515,8 +511,7 @@ static void __perf_install_in_context(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	curr_rq_lock_irq_save(&flags);
-	spin_lock(&ctx->lock);
+	spin_lock_irqsave(&ctx->lock, flags);
 	update_context_time(ctx);
 
 	/*
@@ -565,8 +560,7 @@ static void __perf_install_in_context(void *info)
 unlock:
 	hw_perf_restore(perf_flags);
 
-	spin_unlock(&ctx->lock);
-	curr_rq_unlock_irq_restore(&flags);
+	spin_unlock_irqrestore(&ctx->lock, flags);
 }
 
 /*
@@ -641,8 +635,7 @@ static void __perf_counter_enable(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	curr_rq_lock_irq_save(&flags);
-	spin_lock(&ctx->lock);
+	spin_lock_irqsave(&ctx->lock, flags);
 	update_context_time(ctx);
 
 	counter->prev_state = counter->state;
@@ -678,8 +671,7 @@ static void __perf_counter_enable(void *info)
 	}
 
 unlock:
-	spin_unlock(&ctx->lock);
-	curr_rq_unlock_irq_restore(&flags);
+	spin_unlock_irqrestore(&ctx->lock, flags);
 }
 
 /*
@@ -971,7 +963,7 @@ int perf_counter_task_disable(void)
 	if (likely(!ctx->nr_counters))
 		return 0;
 
-	curr_rq_lock_irq_save(&flags);
+	local_irq_save(flags);
 	cpu = smp_processor_id();
 
 	perf_counter_task_sched_out(curr, cpu);
@@ -992,9 +984,7 @@ int perf_counter_task_disable(void)
 
 	hw_perf_restore(perf_flags);
 
-	spin_unlock(&ctx->lock);
-
-	curr_rq_unlock_irq_restore(&flags);
+	spin_unlock_irqrestore(&ctx->lock, flags);
 
 	return 0;
 }
@@ -1011,7 +1001,7 @@ int perf_counter_task_enable(void)
 	if (likely(!ctx->nr_counters))
 		return 0;
 
-	curr_rq_lock_irq_save(&flags);
+	local_irq_save(flags);
 	cpu = smp_processor_id();
 
 	perf_counter_task_sched_out(curr, cpu);
@@ -1037,7 +1027,7 @@ int perf_counter_task_enable(void)
 
 	perf_counter_task_sched_in(curr, cpu);
 
-	curr_rq_unlock_irq_restore(&flags);
+	local_irq_restore(flags);
 
 	return 0;
 }
@@ -1095,12 +1085,12 @@ static void __read(void *info)
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
-	curr_rq_lock_irq_save(&flags);
+	local_irq_save(flags);
 	if (ctx->is_active)
 		update_context_time(ctx);
 	counter->hw_ops->read(counter);
 	update_counter_times(counter);
-	curr_rq_unlock_irq_restore(&flags);
+	local_irq_restore(flags);
 }
 
 static u64 perf_counter_read(struct perf_counter *counter)
@@ -2890,7 +2880,7 @@ __perf_counter_exit_task(struct task_struct *child,
 	 * Be careful about zapping the list - IRQ/NMI context
 	 * could still be processing it:
 	 */
-	curr_rq_lock_irq_save(&flags);
+	local_irq_save(flags);
 	perf_flags = hw_perf_save_disable();
 
 	cpuctx = &__get_cpu_var(perf_cpu_context);
@@ -2903,7 +2893,7 @@ __perf_counter_exit_task(struct task_struct *child,
 		child_ctx->nr_counters--;
 
 		hw_perf_restore(perf_flags);
-		curr_rq_unlock_irq_restore(&flags);
+		local_irq_restore(flags);
 	}
 
 	parent_counter = child_counter->parent;
diff --git a/kernel/sched.c b/kernel/sched.c
index f76e3c0188a2..0de2f814fb18 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -997,26 +997,6 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
-void curr_rq_lock_irq_save(unsigned long *flags)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	local_irq_save(*flags);
-	rq = cpu_rq(smp_processor_id());
-	spin_lock(&rq->lock);
-}
-
-void curr_rq_unlock_irq_restore(unsigned long *flags)
-	__releases(rq->lock)
-{
-	struct rq *rq;
-
-	rq = cpu_rq(smp_processor_id());
-	spin_unlock(&rq->lock);
-	local_irq_restore(*flags);
-}
-
 void task_rq_unlock_wait(struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);