aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/process.c
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2009-10-17 19:09:09 -0400
committerFrederic Weisbecker <fweisbec@gmail.com>2009-10-17 19:12:33 -0400
commit0f8f86c7bdd1c954fbe153af437a0d91a6c5721a (patch)
tree94a8d419a470a4f9852ca397bb9bbe48db92ff5c /arch/x86/kernel/process.c
parentdca2d6ac09d9ef59ff46820d4f0c94b08a671202 (diff)
parentf39cdf25bf77219676ec5360980ac40b1a7e144a (diff)
Merge commit 'perf/core' into perf/hw-breakpoint
Conflicts: kernel/Makefile, kernel/trace/Makefile, kernel/trace/trace.h, samples/Makefile. Merge reason: We need to be up to date with the perf events development branch because we plan to rewrite the breakpoints API on top of perf events.
Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--arch/x86/kernel/process.c31
1 file changed, 8 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 1092a1a2fbe6..2275ce5776de 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -9,7 +9,7 @@
9#include <linux/pm.h> 9#include <linux/pm.h>
10#include <linux/clockchips.h> 10#include <linux/clockchips.h>
11#include <linux/random.h> 11#include <linux/random.h>
12#include <trace/power.h> 12#include <trace/events/power.h>
13#include <asm/system.h> 13#include <asm/system.h>
14#include <asm/apic.h> 14#include <asm/apic.h>
15#include <asm/syscalls.h> 15#include <asm/syscalls.h>
@@ -27,9 +27,6 @@ EXPORT_SYMBOL(idle_nomwait);
27 27
28struct kmem_cache *task_xstate_cachep; 28struct kmem_cache *task_xstate_cachep;
29 29
30DEFINE_TRACE(power_start);
31DEFINE_TRACE(power_end);
32
33int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 30int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
34{ 31{
35 *dst = *src; 32 *dst = *src;
@@ -289,9 +286,7 @@ static inline int hlt_use_halt(void)
289void default_idle(void) 286void default_idle(void)
290{ 287{
291 if (hlt_use_halt()) { 288 if (hlt_use_halt()) {
292 struct power_trace it; 289 trace_power_start(POWER_CSTATE, 1);
293
294 trace_power_start(&it, POWER_CSTATE, 1);
295 current_thread_info()->status &= ~TS_POLLING; 290 current_thread_info()->status &= ~TS_POLLING;
296 /* 291 /*
297 * TS_POLLING-cleared state must be visible before we 292 * TS_POLLING-cleared state must be visible before we
@@ -304,7 +299,6 @@ void default_idle(void)
304 else 299 else
305 local_irq_enable(); 300 local_irq_enable();
306 current_thread_info()->status |= TS_POLLING; 301 current_thread_info()->status |= TS_POLLING;
307 trace_power_end(&it);
308 } else { 302 } else {
309 local_irq_enable(); 303 local_irq_enable();
310 /* loop is done by the caller */ 304 /* loop is done by the caller */
@@ -362,9 +356,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
362 */ 356 */
363void mwait_idle_with_hints(unsigned long ax, unsigned long cx) 357void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
364{ 358{
365 struct power_trace it; 359 trace_power_start(POWER_CSTATE, (ax>>4)+1);
366
367 trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
368 if (!need_resched()) { 360 if (!need_resched()) {
369 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) 361 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
370 clflush((void *)&current_thread_info()->flags); 362 clflush((void *)&current_thread_info()->flags);
@@ -374,15 +366,13 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
374 if (!need_resched()) 366 if (!need_resched())
375 __mwait(ax, cx); 367 __mwait(ax, cx);
376 } 368 }
377 trace_power_end(&it);
378} 369}
379 370
380/* Default MONITOR/MWAIT with no hints, used for default C1 state */ 371/* Default MONITOR/MWAIT with no hints, used for default C1 state */
381static void mwait_idle(void) 372static void mwait_idle(void)
382{ 373{
383 struct power_trace it;
384 if (!need_resched()) { 374 if (!need_resched()) {
385 trace_power_start(&it, POWER_CSTATE, 1); 375 trace_power_start(POWER_CSTATE, 1);
386 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) 376 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
387 clflush((void *)&current_thread_info()->flags); 377 clflush((void *)&current_thread_info()->flags);
388 378
@@ -392,7 +382,6 @@ static void mwait_idle(void)
392 __sti_mwait(0, 0); 382 __sti_mwait(0, 0);
393 else 383 else
394 local_irq_enable(); 384 local_irq_enable();
395 trace_power_end(&it);
396 } else 385 } else
397 local_irq_enable(); 386 local_irq_enable();
398} 387}
@@ -404,13 +393,11 @@ static void mwait_idle(void)
404 */ 393 */
405static void poll_idle(void) 394static void poll_idle(void)
406{ 395{
407 struct power_trace it; 396 trace_power_start(POWER_CSTATE, 0);
408
409 trace_power_start(&it, POWER_CSTATE, 0);
410 local_irq_enable(); 397 local_irq_enable();
411 while (!need_resched()) 398 while (!need_resched())
412 cpu_relax(); 399 cpu_relax();
413 trace_power_end(&it); 400 trace_power_end(0);
414} 401}
415 402
416/* 403/*
@@ -558,10 +545,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
558void __init init_c1e_mask(void) 545void __init init_c1e_mask(void)
559{ 546{
560 /* If we're using c1e_idle, we need to allocate c1e_mask. */ 547 /* If we're using c1e_idle, we need to allocate c1e_mask. */
561 if (pm_idle == c1e_idle) { 548 if (pm_idle == c1e_idle)
562 alloc_cpumask_var(&c1e_mask, GFP_KERNEL); 549 zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
563 cpumask_clear(c1e_mask);
564 }
565} 550}
566 551
567static int __init idle_setup(char *str) 552static int __init idle_setup(char *str)