author     Len Brown <len.brown@intel.com>   2013-02-09 21:45:03 -0500
committer  Len Brown <len.brown@intel.com>   2013-02-17 23:34:58 -0500
commit     a476bda30baf7efa7f305793a340aae07b6e5780
tree       149069bd724145e6dd9af3da00918db70b271f65
parent     dd8af076262cc1ff85a8d5e0c5b1a4716d19fe25
x86 idle: rename global pm_idle to static x86_idle
(pm_idle)() is being removed from linux/pm.h
because Linux does not have such a cross-architecture concept.
x86 uses an idle function pointer in its architecture-specific
code as a backup to cpuidle, so we rename x86's use of pm_idle
to x86_idle and make it static to x86.
Signed-off-by: Len Brown <len.brown@intel.com>
Cc: x86@kernel.org
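
The mechanism the diff touches is a two-level dispatch: the generic idle
loop asks cpuidle first, and only when cpuidle_idle_call() returns
non-zero does it fall through to the architecture's private function
pointer. Below is a minimal, self-contained user-space C sketch of that
pattern; the names mirror the kernel's, but cpuidle_idle_call() and
default_idle() here are hypothetical stand-ins, not kernel code:

	#include <stdio.h>

	/* Stand-in for the fallback pointer; "static" limits it to this
	 * file, just as the commit limits x86_idle to
	 * arch/x86/kernel/process.c. */
	static void (*x86_idle)(void);

	/* Hypothetical stand-in for the kernel's default_idle(). */
	static void default_idle(void)
	{
		puts("default_idle: halt until the next interrupt");
	}

	/* Hypothetical stand-in for cpuidle_idle_call(): a non-zero
	 * return means cpuidle could not idle the CPU and the caller
	 * must fall back to the architecture idle routine. */
	static int cpuidle_idle_call(void)
	{
		return 1;	/* pretend no cpuidle driver is registered */
	}

	/* One pass of the dispatch that cpu_idle() performs. */
	static void idle_loop_iteration(void)
	{
		if (cpuidle_idle_call())
			x86_idle();	/* architecture fallback */
	}

	int main(void)
	{
		x86_idle = default_idle;	/* default_idle is what
						 * select_idle_routine()
						 * falls back to */
		idle_loop_iteration();
		return 0;
	}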
 arch/x86/kernel/process.c | 28 ++++++++++++----------------
 1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index f571a6e08710..ceb05db59be1 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -268,10 +268,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
+static void (*x86_idle)(void);
 
 #ifndef CONFIG_SMP
 static inline void play_dead(void)
@@ -348,7 +345,7 @@ void cpu_idle(void)
 			rcu_idle_enter();
 
 			if (cpuidle_idle_call())
-				pm_idle();
+				x86_idle();
 
 			rcu_idle_exit();
 			start_critical_timings();
@@ -395,9 +392,9 @@ EXPORT_SYMBOL(default_idle);
 
 bool set_pm_idle_to_default(void)
 {
-	bool ret = !!pm_idle;
+	bool ret = !!x86_idle;
 
-	pm_idle = default_idle;
+	x86_idle = default_idle;
 
 	return ret;
 }
@@ -564,11 +561,10 @@ static void amd_e400_idle(void)
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	if (pm_idle == poll_idle && smp_num_siblings > 1) {
+	if (x86_idle == poll_idle && smp_num_siblings > 1)
 		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
-	}
 #endif
-	if (pm_idle)
+	if (x86_idle)
 		return;
 
 	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
@@ -576,19 +572,19 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 		 * One CPU supports mwait => All CPUs supports mwait
 		 */
 		pr_info("using mwait in idle threads\n");
-		pm_idle = mwait_idle;
+		x86_idle = mwait_idle;
 	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
 		/* E400: APIC timer interrupt does not wake up CPU from C1e */
 		pr_info("using AMD E400 aware idle routine\n");
-		pm_idle = amd_e400_idle;
+		x86_idle = amd_e400_idle;
 	} else
-		pm_idle = default_idle;
+		x86_idle = default_idle;
 }
 
 void __init init_amd_e400_c1e_mask(void)
 {
 	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
-	if (pm_idle == amd_e400_idle)
+	if (x86_idle == amd_e400_idle)
 		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
 }
 
@@ -599,7 +595,7 @@ static int __init idle_setup(char *str)
 
 	if (!strcmp(str, "poll")) {
 		pr_info("using polling idle threads\n");
-		pm_idle = poll_idle;
+		x86_idle = poll_idle;
 		boot_option_idle_override = IDLE_POLL;
 	} else if (!strcmp(str, "mwait")) {
 		boot_option_idle_override = IDLE_FORCE_MWAIT;
@@ -612,7 +608,7 @@ static int __init idle_setup(char *str)
 		 * To continue to load the CPU idle driver, don't touch
 		 * the boot_option_idle_override.
 		 */
-		pm_idle = default_idle;
+		x86_idle = default_idle;
 		boot_option_idle_override = IDLE_HALT;
 	} else if (!strcmp(str, "nomwait")) {
 		/*