author	Ingo Molnar <mingo@elte.hu>	2008-12-17 08:20:28 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-23 06:45:13 -0500
commit	7671581f1666ef4b54a1c1e598c51ac44c060a9b (patch)
tree	8b3e7536e89f2b01d232de0c53c3b297b85618ba
parent	862a1a5f346fe7e9181ea51eaae48cf2cd70f746 (diff)
perfcounters: hw ops rename
Impact: rename field names

Shorten them.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
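The rename shortens every indirect call through the ops table: a call site that previously read counter->hw_ops->hw_perf_counter_enable(counter) now reads counter->hw_ops->enable(counter). As a usage sketch, a backend fills in the renamed ops as below (my_enable/my_disable/my_read are hypothetical handlers, not part of this patch; the real instances follow in the diff):

static void my_enable(struct perf_counter *counter)	{ /* start counting */ }
static void my_disable(struct perf_counter *counter)	{ /* stop counting */ }
static void my_read(struct perf_counter *counter)	{ /* update counter->count */ }

static const struct hw_perf_counter_ops my_counter_ops = {
	.enable		= my_enable,
	.disable	= my_disable,
	.read		= my_read,
};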
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	6
-rw-r--r--	include/linux/perf_counter.h	6
-rw-r--r--	kernel/perf_counter.c	50
3 files changed, 31 insertions, 31 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 358af5266407..b67557121425 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -577,9 +577,9 @@ static void pmc_generic_read(struct perf_counter *counter)
 }
 
 static const struct hw_perf_counter_ops x86_perf_counter_ops = {
-	.hw_perf_counter_enable		= pmc_generic_enable,
-	.hw_perf_counter_disable	= pmc_generic_disable,
-	.hw_perf_counter_read		= pmc_generic_read,
+	.enable		= pmc_generic_enable,
+	.disable	= pmc_generic_disable,
+	.read		= pmc_generic_read,
 };
 
 const struct hw_perf_counter_ops *
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 984da540224b..48f76d2e54c2 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -128,9 +128,9 @@ struct perf_counter;
  * struct hw_perf_counter_ops - performance counter hw ops
  */
 struct hw_perf_counter_ops {
-	void (*hw_perf_counter_enable)	(struct perf_counter *counter);
-	void (*hw_perf_counter_disable)	(struct perf_counter *counter);
-	void (*hw_perf_counter_read)	(struct perf_counter *counter);
+	void (*enable)			(struct perf_counter *counter);
+	void (*disable)			(struct perf_counter *counter);
+	void (*read)			(struct perf_counter *counter);
 };
 
 /**
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f8a4d9a5d5d3..961d651aa574 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -109,7 +109,7 @@ static void __perf_counter_remove_from_context(void *info)
 	spin_lock_irqsave(&ctx->lock, flags);
 
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
-		counter->hw_ops->hw_perf_counter_disable(counter);
+		counter->hw_ops->disable(counter);
 		counter->state = PERF_COUNTER_STATE_INACTIVE;
 		ctx->nr_active--;
 		cpuctx->active_oncpu--;
@@ -226,7 +226,7 @@ static void __perf_install_in_context(void *info)
 		counter->oncpu = cpu;
 		ctx->nr_active++;
 		cpuctx->active_oncpu++;
-		counter->hw_ops->hw_perf_counter_enable(counter);
+		counter->hw_ops->enable(counter);
 	}
 
 	if (!ctx->task && cpuctx->max_pertask)
@@ -297,7 +297,7 @@ counter_sched_out(struct perf_counter *counter,
 	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
 		return;
 
-	counter->hw_ops->hw_perf_counter_disable(counter);
+	counter->hw_ops->disable(counter);
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
 	counter->oncpu = -1;
 
@@ -327,7 +327,7 @@ group_sched_out(struct perf_counter *group_counter,
  *
  * We stop each counter and update the counter value in counter->count.
  *
- * This does not protect us against NMI, but hw_perf_counter_disable()
+ * This does not protect us against NMI, but disable()
  * sets the disabled bit in the control field of counter _before_
  * accessing the counter control register. If a NMI hits, then it will
  * not restart the counter.
@@ -359,7 +359,7 @@ counter_sched_in(struct perf_counter *counter,
 	if (counter->state == PERF_COUNTER_STATE_OFF)
 		return;
 
-	counter->hw_ops->hw_perf_counter_enable(counter);
+	counter->hw_ops->enable(counter);
 	counter->state = PERF_COUNTER_STATE_ACTIVE;
 	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
 
@@ -395,7 +395,7 @@ group_sched_in(struct perf_counter *group_counter,
  *
  * We restore the counter value and then enable it.
  *
- * This does not protect us against NMI, but hw_perf_counter_enable()
+ * This does not protect us against NMI, but enable()
  * sets the enabled bit in the control field of counter _before_
  * accessing the counter control register. If a NMI hits, then it will
  * keep the counter running.
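The NMI argument in the two comments above turns on ordering inside disable()/enable(): the software-visible bit in the counter's control field is updated before the hardware control register is touched, so an NMI landing between the two steps already sees the new state. A minimal sketch of that ordering, with purely illustrative field, bit, and helper names (not the actual x86 implementation in this patch):

static void sketch_disable(struct perf_counter *counter)
{
	/* Step 1: clear the enable bit in the software control field. */
	counter->sketch_control &= ~SKETCH_ENABLE_BIT;	/* illustrative */

	/*
	 * Step 2: stop the hardware counter. An NMI arriving between
	 * steps 1 and 2 reads the cleared bit and does not restart it.
	 */
	sketch_write_control(counter);			/* illustrative */
}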
@@ -537,11 +537,11 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 /*
  * Cross CPU call to read the hardware counter
  */
-static void __hw_perf_counter_read(void *info)
+static void __read(void *info)
 {
 	struct perf_counter *counter = info;
 
-	counter->hw_ops->hw_perf_counter_read(counter);
+	counter->hw_ops->read(counter);
 }
 
 static u64 perf_counter_read(struct perf_counter *counter)
@@ -552,7 +552,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
 	 */
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
 		smp_call_function_single(counter->oncpu,
-					 __hw_perf_counter_read, counter, 1);
+					 __read, counter, 1);
 	}
 
 	return atomic64_read(&counter->count);
@@ -855,9 +855,9 @@ static void cpu_clock_perf_counter_read(struct perf_counter *counter)
 }
 
 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
-	.hw_perf_counter_enable		= cpu_clock_perf_counter_enable,
-	.hw_perf_counter_disable	= cpu_clock_perf_counter_disable,
-	.hw_perf_counter_read		= cpu_clock_perf_counter_read,
+	.enable		= cpu_clock_perf_counter_enable,
+	.disable	= cpu_clock_perf_counter_disable,
+	.read		= cpu_clock_perf_counter_read,
 };
 
 static void task_clock_perf_counter_update(struct perf_counter *counter)
@@ -891,9 +891,9 @@ static void task_clock_perf_counter_disable(struct perf_counter *counter)
 }
 
 static const struct hw_perf_counter_ops perf_ops_task_clock = {
-	.hw_perf_counter_enable		= task_clock_perf_counter_enable,
-	.hw_perf_counter_disable	= task_clock_perf_counter_disable,
-	.hw_perf_counter_read		= task_clock_perf_counter_read,
+	.enable		= task_clock_perf_counter_enable,
+	.disable	= task_clock_perf_counter_disable,
+	.read		= task_clock_perf_counter_read,
 };
 
 static u64 get_page_faults(void)
@@ -937,9 +937,9 @@ static void page_faults_perf_counter_disable(struct perf_counter *counter)
 }
 
 static const struct hw_perf_counter_ops perf_ops_page_faults = {
-	.hw_perf_counter_enable		= page_faults_perf_counter_enable,
-	.hw_perf_counter_disable	= page_faults_perf_counter_disable,
-	.hw_perf_counter_read		= page_faults_perf_counter_read,
+	.enable		= page_faults_perf_counter_enable,
+	.disable	= page_faults_perf_counter_disable,
+	.read		= page_faults_perf_counter_read,
 };
 
 static u64 get_context_switches(void)
@@ -983,9 +983,9 @@ static void context_switches_perf_counter_disable(struct perf_counter *counter)
 }
 
 static const struct hw_perf_counter_ops perf_ops_context_switches = {
-	.hw_perf_counter_enable		= context_switches_perf_counter_enable,
-	.hw_perf_counter_disable	= context_switches_perf_counter_disable,
-	.hw_perf_counter_read		= context_switches_perf_counter_read,
+	.enable		= context_switches_perf_counter_enable,
+	.disable	= context_switches_perf_counter_disable,
+	.read		= context_switches_perf_counter_read,
 };
 
 static inline u64 get_cpu_migrations(void)
@@ -1027,9 +1027,9 @@ static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
 }
 
 static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
-	.hw_perf_counter_enable		= cpu_migrations_perf_counter_enable,
-	.hw_perf_counter_disable	= cpu_migrations_perf_counter_disable,
-	.hw_perf_counter_read		= cpu_migrations_perf_counter_read,
+	.enable		= cpu_migrations_perf_counter_enable,
+	.disable	= cpu_migrations_perf_counter_disable,
+	.read		= cpu_migrations_perf_counter_read,
 };
 
 static const struct hw_perf_counter_ops *
@@ -1283,7 +1283,7 @@ __perf_counter_exit_task(struct task_struct *child,
 
 	cpuctx = &__get_cpu_var(perf_cpu_context);
 
-	child_counter->hw_ops->hw_perf_counter_disable(child_counter);
+	child_counter->hw_ops->disable(child_counter);
 	child_counter->state = PERF_COUNTER_STATE_INACTIVE;
 	child_counter->oncpu = -1;
 