 arch/x86/kernel/cpu/perf_counter.c |  8 ++++----
 drivers/acpi/processor_idle.c      | 10 +++++-----
 include/linux/perf_counter.h       | 10 +++++-----
 kernel/perf_counter.c              | 16 ++++++++--------
 4 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 43c8e9a38b4e..3e1dbebe22b9 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -118,13 +118,13 @@ void hw_perf_enable_all(void)
         wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
 }
 
-void hw_perf_restore_ctrl(u64 ctrl)
+void hw_perf_restore(u64 ctrl)
 {
         wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
 }
-EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl);
+EXPORT_SYMBOL_GPL(hw_perf_restore);
 
-u64 hw_perf_disable_all(void)
+u64 hw_perf_save_disable(void)
 {
         u64 ctrl;
 
@@ -132,7 +132,7 @@ u64 hw_perf_disable_all(void)
         wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
         return ctrl;
 }
-EXPORT_SYMBOL_GPL(hw_perf_disable_all);
+EXPORT_SYMBOL_GPL(hw_perf_save_disable);
 
 static inline void
 __x86_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
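The rename makes the save/restore pairing explicit: hw_perf_save_disable() saves the current MSR_CORE_PERF_GLOBAL_CTRL value (the read sits on the line elided between the two hunks) and then clears the MSR, disabling all counters; hw_perf_restore() writes the saved value back. A minimal sketch of the calling convention, with the surrounding code purely illustrative:

    u64 perf_flags;

    perf_flags = hw_perf_save_disable();    /* save counter state, disable all counters */
    /* ... work that must not race with counter NMIs ... */
    hw_perf_restore(perf_flags);            /* re-enable counters with the saved state */
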
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index cca804e6f1dd..a3e66a33b7a2 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -270,11 +270,11 @@ static atomic_t c3_cpu_count;
 /* Common C-state entry for C2, C3, .. */
 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 {
-        u64 pctrl;
+        u64 perf_flags;
 
         /* Don't trace irqs off for idle */
         stop_critical_timings();
-        pctrl = hw_perf_disable_all();
+        perf_flags = hw_perf_save_disable();
         if (cstate->entry_method == ACPI_CSTATE_FFH) {
                 /* Call into architectural FFH based C-state */
                 acpi_processor_ffh_cstate_enter(cstate);
@@ -287,7 +287,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
                    gets asserted in time to freeze execution properly. */
                 unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
         }
-        hw_perf_restore_ctrl(pctrl);
+        hw_perf_restore(perf_flags);
         start_critical_timings();
 }
 #endif /* !CONFIG_CPU_IDLE */
@@ -1433,7 +1433,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 
         /* Don't trace irqs off for idle */
         stop_critical_timings();
-        pctrl = hw_perf_disable_all();
+        pctrl = hw_perf_save_disable();
         if (cx->entry_method == ACPI_CSTATE_FFH) {
                 /* Call into architectural FFH based C-state */
                 acpi_processor_ffh_cstate_enter(cx);
@@ -1448,7 +1448,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
                    gets asserted in time to freeze execution properly. */
                 unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
         }
-        hw_perf_restore_ctrl(pctrl);
+        hw_perf_restore(pctrl);
         start_critical_timings();
 }
 
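Reassembled from the first two hunks above, acpi_cstate_enter() after this patch has the following shape; the counter disable/restore pair sits just inside the stop_critical_timings()/start_critical_timings() window, and the I/O-port entry path between the hunks is elided here as in the diff:

    static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
    {
            u64 perf_flags;

            /* Don't trace irqs off for idle */
            stop_critical_timings();
            perf_flags = hw_perf_save_disable();
            if (cstate->entry_method == ACPI_CSTATE_FFH) {
                    /* Call into architectural FFH based C-state */
                    acpi_processor_ffh_cstate_enter(cstate);
            } else {
                    /* ... I/O-port based entry, elided between the hunks ... */
            }
            hw_perf_restore(perf_flags);
            start_critical_timings();
    }

Note that acpi_idle_do_entry() keeps its pctrl local name; only acpi_cstate_enter() renames it to perf_flags.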
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 9a1713a1be27..68f6e3ad531f 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -67,7 +67,7 @@ enum perf_counter_record_type {
  * Hardware event to monitor via a performance monitoring counter:
  */
 struct perf_counter_hw_event {
-        u64                     type;
+        s64                     type;
 
         u64                     irq_period;
         u32                     record_type;
@@ -206,8 +206,8 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu);
 extern void perf_counter_init_task(struct task_struct *task);
 extern void perf_counter_notify(struct pt_regs *regs);
 extern void perf_counter_print_debug(void);
-extern void hw_perf_restore_ctrl(u64 ctrl);
-extern u64 hw_perf_disable_all(void);
+extern u64 hw_perf_save_disable(void);
+extern void hw_perf_restore(u64 ctrl);
 extern void atomic64_counter_set(struct perf_counter *counter, u64 val64);
 extern u64 atomic64_counter_read(struct perf_counter *counter);
 
@@ -221,8 +221,8 @@ perf_counter_task_tick(struct task_struct *task, int cpu) { }
 static inline void perf_counter_init_task(struct task_struct *task) { }
 static inline void perf_counter_notify(struct pt_regs *regs) { }
 static inline void perf_counter_print_debug(void) { }
-static inline void hw_perf_restore_ctrl(u64 ctrl) { }
-static inline u64 hw_perf_disable_all(void) { return 0; }
+static inline void hw_perf_restore(u64 ctrl) { }
+static inline u64 hw_perf_save_disable(void) { return 0; }
 #endif
 
 #endif /* _LINUX_PERF_COUNTER_H */
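The reordered declarations now list hw_perf_save_disable() before hw_perf_restore(), matching the order in which callers invoke them, and the !CONFIG_PERF_COUNTERS stubs are renamed to match. The stubs are what let callers use the API unconditionally; a hypothetical caller (the function name and body are illustrative only, not part of this patch):

    static void example_update(void)
    {
            u64 perf_flags = hw_perf_save_disable();  /* stub returns 0 when perf counters are configured out */

            /* ... touch data that counter NMIs could also touch ... */

            hw_perf_restore(perf_flags);              /* empty inline stub in that configuration */
    }
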
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 506286e5ba63..0e93fea17120 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -43,8 +43,8 @@ hw_perf_counter_init(struct perf_counter *counter)
         return ERR_PTR(-EINVAL);
 }
 
-u64 __weak hw_perf_disable_all(void) { return 0; }
-void __weak hw_perf_restore_ctrl(u64 ctrl) { }
+u64 __weak hw_perf_save_disable(void) { return 0; }
+void __weak hw_perf_restore(u64 ctrl) { }
 void __weak hw_perf_counter_setup(void) { }
 
 #if BITS_PER_LONG == 64
@@ -180,9 +180,9 @@ static void __perf_counter_remove_from_context(void *info)
          * Protect the list operation against NMI by disabling the
          * counters on a global level. NOP for non NMI based counters.
          */
-        perf_flags = hw_perf_disable_all();
+        perf_flags = hw_perf_save_disable();
         list_del_counter(counter, ctx);
-        hw_perf_restore_ctrl(perf_flags);
+        hw_perf_restore(perf_flags);
 
         if (!ctx->task) {
                 /*
@@ -273,9 +273,9 @@ static void __perf_install_in_context(void *info)
          * Protect the list operation against NMI by disabling the
          * counters on a global level. NOP for non NMI based counters.
          */
-        perf_flags = hw_perf_disable_all();
+        perf_flags = hw_perf_save_disable();
         list_add_counter(counter, ctx);
-        hw_perf_restore_ctrl(perf_flags);
+        hw_perf_restore(perf_flags);
 
         ctx->nr_counters++;
 
@@ -495,13 +495,13 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
         /*
          * Rotate the first entry last (works just fine for group counters too):
          */
-        perf_flags = hw_perf_disable_all();
+        perf_flags = hw_perf_save_disable();
         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                 list_del(&counter->list_entry);
                 list_add_tail(&counter->list_entry, &ctx->counter_list);
                 break;
         }
-        hw_perf_restore_ctrl(perf_flags);
+        hw_perf_restore(perf_flags);
 
         spin_unlock(&ctx->lock);
 
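One mechanism worth spelling out from the first hunk in this file: the __weak bodies are link-time defaults. An architecture opts in by providing ordinary strong definitions of the same symbols, as arch/x86/kernel/cpu/perf_counter.c does above; on architectures without counter support the weak no-ops are linked in and every hw_perf_save_disable()/hw_perf_restore() pair collapses to nothing. A minimal sketch of the override mechanism, with hypothetical names unrelated to this patch:

    /* generic.c: weak fallback, used only when no strong definition exists */
    int __weak arch_counter_hook(void)
    {
            return 0;
    }

    /* arch.c: a strong definition of the same symbol wins at link time */
    int arch_counter_hook(void)
    {
            return 1;
    }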