author	Len Brown <len.brown@intel.com>	2011-01-12 18:06:19 -0500
committer	Len Brown <len.brown@intel.com>	2011-01-12 18:06:19 -0500
commit	43952886f0b8b3c344c3392b88de067d5fa5419a (patch)
tree	c1a738f11a479246c09976902be5b73aaf731722 /arch/x86/kernel
parent	56dbed129df3fdd4caf9018b6e7599ee258a5420 (diff)
parent	f77cfe4ea21760268c0277fa3e4b02dfd2a2c2f4 (diff)
Merge branch 'cpuidle-perf-events' into idle-test
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/process.c	6
-rw-r--r--	arch/x86/kernel/process_32.c	4
-rw-r--r--	arch/x86/kernel/process_64.c	6
3 files changed, 4 insertions, 12 deletions
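
Taken together, the hunks below move the idle exit tracepoints, trace_power_end() and trace_cpu_idle(PWR_EVENT_EXIT, ...), out of the cpu_idle() loops in process_32.c/process_64.c and into the idle routines in process.c, and drop the entry tracepoints from mwait_idle_with_hints(). As a rough orientation aid (not the actual merged code), here is a simplified sketch of the resulting shape of default_idle(); kernel context is assumed, and the real function carries additional TS_POLLING bookkeeping that is omitted here:

/* Sketch only: simplified from the post-merge arch/x86/kernel/process.c;
 * kernel context assumed, not a standalone buildable unit. */
#include <linux/smp.h>
#include <linux/sched.h>
#include <trace/events/power.h>

void default_idle(void)
{
	if (!need_resched()) {
		/* entry events, emitted before the CPU halts */
		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle(1, smp_processor_id());

		safe_halt();	/* enable interrupts and halt until the next one */

		/* exit events now fire here, in the idle routine itself,
		 * rather than in cpu_idle() in process_32.c/process_64.c */
		trace_power_end(smp_processor_id());
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
}

Emitting the end events right where the halt or mwait returns keeps one matching exit event per idle entry regardless of which arch loop called the routine, which is presumably the point of pulling them out of the two cpu_idle() copies.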
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 7c23a0cd3eb9..d8286ed54ffa 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -381,6 +381,8 @@ void default_idle(void)
 		else
 			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */
@@ -438,8 +440,6 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
-	trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
-	trace_cpu_idle((ax>>4)+1, smp_processor_id());
 	if (!need_resched()) {
 		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
@@ -466,6 +466,8 @@ static void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else
 		local_irq_enable();
 }
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 4b9befa0e347..8d128783af47 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -57,8 +57,6 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 
-#include <trace/events/power.h>
-
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 /*
@@ -113,8 +111,6 @@ void cpu_idle(void)
 			stop_critical_timings();
 			pm_idle();
 			start_critical_timings();
-			trace_power_end(smp_processor_id());
-			trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 		}
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 4c818a738396..bd387e8f73b4 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -51,8 +51,6 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 
-#include <trace/events/power.h>
-
 asmlinkage extern void ret_from_fork(void);
 
 DEFINE_PER_CPU(unsigned long, old_rsp);
@@ -141,10 +139,6 @@ void cpu_idle(void)
 			pm_idle();
 			start_critical_timings();
 
-			trace_power_end(smp_processor_id());
-			trace_cpu_idle(PWR_EVENT_EXIT,
-				       smp_processor_id());
-
 			/* In many cases the interrupt that ended idle
 			   has already called exit_idle. But some idle
 			   loops can be woken up without interrupt. */