diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-09 22:03:16 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-09 22:03:16 -0500 |
| commit | 3a43aaa31790c36b69ebf8a6396f37fade86b531 (patch) | |
| tree | 7c7f8da6219d546f2b44534cb7be1fb5591d6ac4 /arch/sh/kernel/idle.c | |
| parent | aed886ce777590eac87f7ce2897d9f8357754331 (diff) | |
| parent | 6a5a0b9139b19dd1a107870269a35bc9cf18d2dc (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (137 commits)
sh: include empty zero page in romImage
sh: Make associative cache writes fatal on all SH-4A parts.
sh: Drop associative writes for SH-4 cache flushes.
sh: Partial revert of copy/clear_user_highpage() optimizations.
sh: Add default uImage rule for se7724, ap325rxa, and migor.
sh: allow runtime pm without suspend/resume callbacks
sh: mach-ecovec24: Remove un-defined settings for VPU
sh: mach-ecovec24: LCDC drive ability become high
sh: fix sh7724 VEU3F resource size
serial: sh-sci: Fix too early port disabling.
sh: pfc: pr_info() -> pr_debug() cleanups.
sh: pfc: Convert from ctrl_xxx() to __raw_xxx() I/O routines.
sh: Improve kfr2r09 serial port setup code
sh: Break out SuperH PFC code
sh: Move KEYSC header file
sh: convert /proc/cpu/aligmnent, /proc/cpu/kernel_alignment to seq_file
sh: Add CPG save/restore code for sh7724 R-standby
sh: Add SDHI power control support to Ecovec
mfd: Add power control platform data to SDHI driver
sh: mach-ecovec24: modify address map
...
Diffstat (limited to 'arch/sh/kernel/idle.c')
| -rw-r--r-- | arch/sh/kernel/idle.c | 78 |
1 file changed, 61 insertions, 17 deletions
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 27ff2dc093c7..aaff0037fcd7 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c | |||
| @@ -21,7 +21,7 @@ | |||
| 21 | #include <asm/atomic.h> | 21 | #include <asm/atomic.h> |
| 22 | 22 | ||
| 23 | static int hlt_counter; | 23 | static int hlt_counter; |
| 24 | void (*pm_idle)(void); | 24 | void (*pm_idle)(void) = NULL; |
| 25 | void (*pm_power_off)(void); | 25 | void (*pm_power_off)(void); |
| 26 | EXPORT_SYMBOL(pm_power_off); | 26 | EXPORT_SYMBOL(pm_power_off); |
| 27 | 27 | ||
| @@ -39,48 +39,92 @@ static int __init hlt_setup(char *__unused) | |||
| 39 | } | 39 | } |
| 40 | __setup("hlt", hlt_setup); | 40 | __setup("hlt", hlt_setup); |
| 41 | 41 | ||
| 42 | static inline int hlt_works(void) | ||
| 43 | { | ||
| 44 | return !hlt_counter; | ||
| 45 | } | ||
| 46 | |||
| 47 | /* | ||
| 48 | * On SMP it's slightly faster (but much more power-consuming!) | ||
| 49 | * to poll the ->work.need_resched flag instead of waiting for the | ||
| 50 | * cross-CPU IPI to arrive. Use this option with caution. | ||
| 51 | */ | ||
| 52 | static void poll_idle(void) | ||
| 53 | { | ||
| 54 | local_irq_enable(); | ||
| 55 | while (!need_resched()) | ||
| 56 | cpu_relax(); | ||
| 57 | } | ||
| 58 | |||
| 42 | void default_idle(void) | 59 | void default_idle(void) |
| 43 | { | 60 | { |
| 44 | if (!hlt_counter) { | 61 | if (hlt_works()) { |
| 45 | clear_thread_flag(TIF_POLLING_NRFLAG); | 62 | clear_thread_flag(TIF_POLLING_NRFLAG); |
| 46 | smp_mb__after_clear_bit(); | 63 | smp_mb__after_clear_bit(); |
| 47 | set_bl_bit(); | ||
| 48 | stop_critical_timings(); | ||
| 49 | 64 | ||
| 50 | while (!need_resched()) | 65 | if (!need_resched()) { |
| 66 | local_irq_enable(); | ||
| 51 | cpu_sleep(); | 67 | cpu_sleep(); |
| 68 | } else | ||
| 69 | local_irq_enable(); | ||
| 52 | 70 | ||
| 53 | start_critical_timings(); | ||
| 54 | clear_bl_bit(); | ||
| 55 | set_thread_flag(TIF_POLLING_NRFLAG); | 71 | set_thread_flag(TIF_POLLING_NRFLAG); |
| 56 | } else | 72 | } else |
| 57 | while (!need_resched()) | 73 | poll_idle(); |
| 58 | cpu_relax(); | ||
| 59 | } | 74 | } |
| 60 | 75 | ||
| 76 | /* | ||
| 77 | * The idle thread. There's no useful work to be done, so just try to conserve | ||
| 78 | * power and have a low exit latency (ie sit in a loop waiting for somebody to | ||
| 79 | * say that they'd like to reschedule) | ||
| 80 | */ | ||
| 61 | void cpu_idle(void) | 81 | void cpu_idle(void) |
| 62 | { | 82 | { |
| 83 | unsigned int cpu = smp_processor_id(); | ||
| 84 | |||
| 63 | set_thread_flag(TIF_POLLING_NRFLAG); | 85 | set_thread_flag(TIF_POLLING_NRFLAG); |
| 64 | 86 | ||
| 65 | /* endless idle loop with no priority at all */ | 87 | /* endless idle loop with no priority at all */ |
| 66 | while (1) { | 88 | while (1) { |
| 67 | void (*idle)(void) = pm_idle; | 89 | tick_nohz_stop_sched_tick(1); |
| 68 | 90 | ||
| 69 | if (!idle) | 91 | while (!need_resched() && cpu_online(cpu)) { |
| 70 | idle = default_idle; | 92 | check_pgt_cache(); |
| 93 | rmb(); | ||
| 71 | 94 | ||
| 72 | tick_nohz_stop_sched_tick(1); | 95 | local_irq_disable(); |
| 73 | while (!need_resched()) | 96 | /* Don't trace irqs off for idle */ |
| 74 | idle(); | 97 | stop_critical_timings(); |
| 75 | tick_nohz_restart_sched_tick(); | 98 | pm_idle(); |
| 99 | /* | ||
| 100 | * Sanity check to ensure that pm_idle() returns | ||
| 101 | * with IRQs enabled | ||
| 102 | */ | ||
| 103 | WARN_ON(irqs_disabled()); | ||
| 104 | start_critical_timings(); | ||
| 105 | } | ||
| 76 | 106 | ||
| 107 | tick_nohz_restart_sched_tick(); | ||
| 77 | preempt_enable_no_resched(); | 108 | preempt_enable_no_resched(); |
| 78 | schedule(); | 109 | schedule(); |
| 79 | preempt_disable(); | 110 | preempt_disable(); |
| 80 | check_pgt_cache(); | ||
| 81 | } | 111 | } |
| 82 | } | 112 | } |
| 83 | 113 | ||
| 114 | void __cpuinit select_idle_routine(void) | ||
| 115 | { | ||
| 116 | /* | ||
| 117 | * If a platform has set its own idle routine, leave it alone. | ||
| 118 | */ | ||
| 119 | if (pm_idle) | ||
| 120 | return; | ||
| 121 | |||
| 122 | if (hlt_works()) | ||
| 123 | pm_idle = default_idle; | ||
| 124 | else | ||
| 125 | pm_idle = poll_idle; | ||
| 126 | } | ||
| 127 | |||
| 84 | static void do_nothing(void *unused) | 128 | static void do_nothing(void *unused) |
| 85 | { | 129 | { |
| 86 | } | 130 | } |
