aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/perf_counter.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-12-11 07:45:51 -0500
committerIngo Molnar <mingo@elte.hu>2008-12-11 09:45:53 -0500
commit01b2838c4298c5e0d30b4993c195ac34dd9df61e (patch)
tree56a526fb72bd068eb4e06aa79a7569e6afb4138a /kernel/perf_counter.c
parent5c92d12411dfe5f0f3d1b1c1e2f756245e6f7249 (diff)
perf counters: consolidate hw_perf save/restore APIs
Impact: cleanup. Rename them to better match up the usual IRQ disable/enable APIs: hw_perf_disable_all() => hw_perf_save_disable(); hw_perf_restore_ctrl() => hw_perf_restore(). Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--kernel/perf_counter.c16
1 file changed, 8 insertions, 8 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 506286e5ba63..0e93fea17120 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -43,8 +43,8 @@ hw_perf_counter_init(struct perf_counter *counter)
 	return ERR_PTR(-EINVAL);
 }
 
-u64 __weak hw_perf_disable_all(void) { return 0; }
-void __weak hw_perf_restore_ctrl(u64 ctrl) { }
+u64 __weak hw_perf_save_disable(void) { return 0; }
+void __weak hw_perf_restore(u64 ctrl) { }
 void __weak hw_perf_counter_setup(void) { }
 
 #if BITS_PER_LONG == 64
@@ -180,9 +180,9 @@ static void __perf_counter_remove_from_context(void *info)
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level. NOP for non NMI based counters.
 	 */
-	perf_flags = hw_perf_disable_all();
+	perf_flags = hw_perf_save_disable();
 	list_del_counter(counter, ctx);
-	hw_perf_restore_ctrl(perf_flags);
+	hw_perf_restore(perf_flags);
 
 	if (!ctx->task) {
 		/*
@@ -273,9 +273,9 @@ static void __perf_install_in_context(void *info)
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level. NOP for non NMI based counters.
 	 */
-	perf_flags = hw_perf_disable_all();
+	perf_flags = hw_perf_save_disable();
 	list_add_counter(counter, ctx);
-	hw_perf_restore_ctrl(perf_flags);
+	hw_perf_restore(perf_flags);
 
 	ctx->nr_counters++;
 
@@ -495,13 +495,13 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 	/*
 	 * Rotate the first entry last (works just fine for group counters too):
 	 */
-	perf_flags = hw_perf_disable_all();
+	perf_flags = hw_perf_save_disable();
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
 		list_del(&counter->list_entry);
 		list_add_tail(&counter->list_entry, &ctx->counter_list);
 		break;
 	}
-	hw_perf_restore_ctrl(perf_flags);
+	hw_perf_restore(perf_flags);
 
 	spin_unlock(&ctx->lock);
 