about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	arnd@arndb.de <arnd@arndb.de>	2006-10-24 12:31:26 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-10-25 00:20:22 -0400
commit	302eca184fb844670fb128c69e22a8a28bbce48a (patch)
tree	6d61e03b85e2d11c58d527f6125bd33cbd27c5ae
parent	b3d7dc1967d1303d4897ff9537d29f6e077de147 (diff)
[POWERPC] cell: use ppc_md->power_save instead of cbe_idle_loop
This moves the cell idle function to use the default cpu_idle with a
special power_save callback, like all other platforms except iSeries
already do.

It also makes it possible to disable this power_save function with a
new powerpc-specific boot option "powersave=off".

Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--	arch/powerpc/kernel/idle.c	7
-rw-r--r--	arch/powerpc/platforms/cell/pervasive.c	96
2 files changed, 34 insertions(+), 69 deletions(-)
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 4180c3998b39..8994af327b47 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -39,6 +39,13 @@
 #define cpu_should_die()	0
 #endif
 
+static int __init powersave_off(char *arg)
+{
+	ppc_md.power_save = NULL;
+	return 0;
+}
+__setup("powersave=off", powersave_off);
+
 /*
  * The body of the idle task.
  */
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index fdcd89e99f1b..c68fabdc7874 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -38,32 +38,16 @@
 #include "pervasive.h"
 #include "cbe_regs.h"
 
-static DEFINE_SPINLOCK(cbe_pervasive_lock);
-
-static void __init cbe_enable_pause_zero(void)
+static void cbe_power_save(void)
 {
-	unsigned long thread_switch_control;
-	unsigned long temp_register;
-	struct cbe_pmd_regs __iomem *pregs;
-
-	spin_lock_irq(&cbe_pervasive_lock);
-	pregs = cbe_get_cpu_pmd_regs(smp_processor_id());
-	if (pregs == NULL)
-		goto out;
-
-	pr_debug("Power Management: CPU %d\n", smp_processor_id());
-
-	/* Enable Pause(0) control bit */
-	temp_register = in_be64(&pregs->pmcr);
-
-	out_be64(&pregs->pmcr,
-		 temp_register | CBE_PMD_PAUSE_ZERO_CONTROL);
+	unsigned long ctrl, thread_switch_control;
+	ctrl = mfspr(SPRN_CTRLF);
 
 	/* Enable DEC and EE interrupt request */
 	thread_switch_control = mfspr(SPRN_TSC_CELL);
 	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;
 
-	switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
+	switch (ctrl & CTRL_CT) {
 	case CTRL_CT0:
 		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
 		break;
@@ -75,58 +59,21 @@ static void __init cbe_enable_pause_zero(void)
 			__FUNCTION__);
 		break;
 	}
-
 	mtspr(SPRN_TSC_CELL, thread_switch_control);
 
-out:
-	spin_unlock_irq(&cbe_pervasive_lock);
-}
-
-static void cbe_idle(void)
-{
-	unsigned long ctrl;
-
-	/* Why do we do that on every idle ? Couldn't that be done once for
-	 * all or do we lose the state some way ? Also, the pmcr
-	 * register setting, that can't be set once at boot ? We really want
-	 * to move that away in order to implement a simple powersave
+	/*
+	 * go into low thread priority, medium priority will be
+	 * restored for us after wake-up.
 	 */
-	cbe_enable_pause_zero();
-
-	while (1) {
-		if (!need_resched()) {
-			local_irq_disable();
-			while (!need_resched()) {
-				/* go into low thread priority */
-				HMT_low();
-
-				/*
-				 * atomically disable thread execution
-				 * and runlatch.
-				 * External and Decrementer exceptions
-				 * are still handled when the thread
-				 * is disabled but now enter in
-				 * cbe_system_reset_exception()
-				 */
-				ctrl = mfspr(SPRN_CTRLF);
-				ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
-				mtspr(SPRN_CTRLT, ctrl);
-			}
-			/* restore thread prio */
-			HMT_medium();
-			local_irq_enable();
-		}
+	HMT_low();
 
-		/*
-		 * turn runlatch on again before scheduling the
-		 * process we just woke up
-		 */
-		ppc64_runlatch_on();
-
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
-}
+	/*
+	 * atomically disable thread execution and runlatch.
+	 * External and Decrementer exceptions are still handled when the
+	 * thread is disabled but now enter in cbe_system_reset_exception()
+	 */
+	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
+	mtspr(SPRN_CTRLT, ctrl);
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
@@ -158,9 +105,20 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
 
 void __init cbe_pervasive_init(void)
 {
+	int cpu;
 	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
 		return;
 
-	ppc_md.idle_loop = cbe_idle;
+	for_each_possible_cpu(cpu) {
+		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
+		if (!regs)
+			continue;
+
+		/* Enable Pause(0) control bit */
+		out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
+			 CBE_PMD_PAUSE_ZERO_CONTROL);
+	}
+
+	ppc_md.power_save = cbe_power_save;
 	ppc_md.system_reset_exception = cbe_system_reset_exception;
 }