Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |  7
-rw-r--r--  arch/x86/kernel/process.c                  | 28
2 files changed, 11 insertions, 24 deletions
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 4109679863c1..479cc8c418c1 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -33,7 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
 #include <linux/dmi.h>
-#include <trace/power.h>
+#include <trace/events/power.h>
 
 #include <linux/acpi.h>
 #include <linux/io.h>
@@ -72,8 +72,6 @@ static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
 static DEFINE_PER_CPU(struct aperfmperf, old_perf);
 
-DEFINE_TRACE(power_mark);
-
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
 
@@ -332,7 +330,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
         unsigned int next_perf_state = 0; /* Index into perf table */
         unsigned int i;
         int result = 0;
-        struct power_trace it;
 
         dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
 
@@ -364,7 +361,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                 }
         }
 
-        trace_power_mark(&it, POWER_PSTATE, next_perf_state);
+        trace_power_frequency(POWER_PSTATE, data->freq_table[next_state].frequency);
 
         switch (data->cpu_feature) {
         case SYSTEM_INTEL_MSR_CAPABLE:
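In this file the driver-local power_mark tracepoint (a DEFINE_TRACE instance plus a caller-allocated struct power_trace cookie) is replaced by the event-based trace_power_frequency() call, whose declaration comes from trace/events/power.h instead of being instantiated per driver. As a rough orientation only, an event of that kind is declared with the standard TRACE_EVENT() macro; the sketch below is an assumption for illustration, since include/trace/events/power.h itself is not part of this diff, and the field names and printk format are not copied from it.

/*
 * Sketch only: the real declaration lives in include/trace/events/power.h,
 * which is not shown in this patch. Field names and the format string are
 * illustrative assumptions.
 */
TRACE_EVENT(power_frequency,

        TP_PROTO(u64 type, u64 state),

        TP_ARGS(type, state),

        TP_STRUCT__entry(
                __field(u64, type)
                __field(u64, state)
        ),

        TP_fast_assign(
                __entry->type  = type;
                __entry->state = state;
        ),

        TP_printk("type=%llu state=%llu", __entry->type, __entry->state)
);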
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 071166a4ba83..7b60e3906889 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -9,7 +9,7 @@
 #include <linux/pm.h>
 #include <linux/clockchips.h>
 #include <linux/random.h>
-#include <trace/power.h>
+#include <trace/events/power.h>
 #include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
@@ -25,9 +25,6 @@ EXPORT_SYMBOL(idle_nomwait);
 
 struct kmem_cache *task_xstate_cachep;
 
-DEFINE_TRACE(power_start);
-DEFINE_TRACE(power_end);
-
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
         *dst = *src;
@@ -299,9 +296,7 @@ static inline int hlt_use_halt(void)
 void default_idle(void)
 {
         if (hlt_use_halt()) {
-                struct power_trace it;
-
-                trace_power_start(&it, POWER_CSTATE, 1);
+                trace_power_start(POWER_CSTATE, 1);
                 current_thread_info()->status &= ~TS_POLLING;
                 /*
                  * TS_POLLING-cleared state must be visible before we
@@ -314,7 +309,7 @@ void default_idle(void)
                 else
                         local_irq_enable();
                 current_thread_info()->status |= TS_POLLING;
-                trace_power_end(&it);
+                trace_power_end(0);
         } else {
                 local_irq_enable();
                 /* loop is done by the caller */
@@ -372,9 +367,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
-        struct power_trace it;
-
-        trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
+        trace_power_start(POWER_CSTATE, (ax>>4)+1);
         if (!need_resched()) {
                 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                         clflush((void *)&current_thread_info()->flags);
@@ -384,15 +377,14 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
                 if (!need_resched())
                         __mwait(ax, cx);
         }
-        trace_power_end(&it);
+        trace_power_end(0);
 }
 
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
 static void mwait_idle(void)
 {
-        struct power_trace it;
         if (!need_resched()) {
-                trace_power_start(&it, POWER_CSTATE, 1);
+                trace_power_start(POWER_CSTATE, 1);
                 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                         clflush((void *)&current_thread_info()->flags);
 
@@ -402,7 +394,7 @@ static void mwait_idle(void)
                         __sti_mwait(0, 0);
                 else
                         local_irq_enable();
-                trace_power_end(&it);
+                trace_power_end(0);
         } else
                 local_irq_enable();
 }
@@ -414,13 +406,11 @@ static void mwait_idle(void)
  */
 static void poll_idle(void)
 {
-        struct power_trace it;
-
-        trace_power_start(&it, POWER_CSTATE, 0);
+        trace_power_start(POWER_CSTATE, 0);
         local_irq_enable();
         while (!need_resched())
                 cpu_relax();
-        trace_power_end(&it);
+        trace_power_end(0);
 }
 
 /*
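Taken together, the caller-side pattern after this patch is stateless: the struct power_trace cookie that the old trace/power.h API threaded through every idle routine is gone, and the start/end events take their arguments directly. A minimal sketch of the resulting call pattern in a hypothetical C1 idle routine (the function name and body are illustrative, not part of the patch):

#include <trace/events/power.h>        /* event-based power tracepoints */

/* Hypothetical illustration only -- not a function added by this patch. */
static void example_c1_idle(void)
{
        trace_power_start(POWER_CSTATE, 1);     /* entering C-state 1 */
        safe_halt();                            /* idle until the next interrupt */
        trace_power_end(0);                     /* leaving the idle state */
}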