author | Michael Ellerman <michael@ellerman.id.au> | 2005-07-07 20:56:29 -0400
---|---|---
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-07-07 21:23:41 -0400
commit | c66d5dd6b5b62e1435b95c0fb42f6bcddeb395ea (patch) |
tree | bb7a7241f07b1f7c75101469f87e2485f486e592 /arch/ppc64/kernel/idle.c |
parent | d200903e11f6867b91dffa81b2038e55be599f49 (diff) |
[PATCH] ppc64: Move pSeries idle functions into pSeries_setup.c
dedicated_idle() and shared_idle() are only used by pSeries, so move them into
pSeries_setup.c
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/ppc64/kernel/idle.c')
-rw-r--r-- | arch/ppc64/kernel/idle.c | 131
1 files changed, 0 insertions, 131 deletions
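
For context, the function bodies removed from idle.c in the diff below are added to arch/ppc64/kernel/pSeries_setup.c by the corresponding part of this patch. A minimal sketch of the receiving side is shown here; the two function names and the move itself come from this commit, but the `static` qualifiers and the note about how a caller chooses between the loops are illustrative assumptions, since the pSeries_setup.c half of the change is not shown on this page.

```c
/*
 * Sketch of the receiving side in arch/ppc64/kernel/pSeries_setup.c.
 * The bodies are the ones removed from idle.c in the diff below; only
 * the outline is reproduced here. Marking both functions static is an
 * assumption based on the commit message (they are only used by
 * pSeries code). How pSeries setup selects between the dedicated and
 * shared loops is not shown in this diff.
 */

/* Dedicated-processor LPAR: snooze at low thread priority, then cede
 * to the hypervisor if the partner SMT thread is still busy. */
static int dedicated_idle(void)
{
	/* ... body moved unchanged from idle.c (see removed lines below) ... */
	return 0;
}

/* Shared-processor LPAR: cede the virtual processor whenever there is
 * nothing to run, re-checking need_resched() with interrupts off. */
static int shared_idle(void)
{
	/* ... body moved unchanged from idle.c (see removed lines below) ... */
	return 0;
}
```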
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 226152467791..69b7c22bad54 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -74,137 +74,6 @@ int default_idle(void)
 	return 0;
 }
 
-#ifdef CONFIG_PPC_PSERIES
-
-DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
-
-int dedicated_idle(void)
-{
-	long oldval;
-	struct paca_struct *lpaca = get_paca(), *ppaca;
-	unsigned long start_snooze;
-	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
-	unsigned int cpu = smp_processor_id();
-
-	ppaca = &paca[cpu ^ 1];
-
-	while (1) {
-		/*
-		 * Indicate to the HV that we are idle. Now would be
-		 * a good time to find other work to dispatch.
-		 */
-		lpaca->lppaca.idle = 1;
-
-		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-		if (!oldval) {
-			set_thread_flag(TIF_POLLING_NRFLAG);
-			start_snooze = __get_tb() +
-				*smt_snooze_delay * tb_ticks_per_usec;
-			while (!need_resched() && !cpu_is_offline(cpu)) {
-				/*
-				 * Go into low thread priority and possibly
-				 * low power mode.
-				 */
-				HMT_low();
-				HMT_very_low();
-
-				if (*smt_snooze_delay == 0 ||
-				    __get_tb() < start_snooze)
-					continue;
-
-				HMT_medium();
-
-				if (!(ppaca->lppaca.idle)) {
-					local_irq_disable();
-
-					/*
-					 * We are about to sleep the thread
-					 * and so wont be polling any
-					 * more.
-					 */
-					clear_thread_flag(TIF_POLLING_NRFLAG);
-
-					/*
-					 * SMT dynamic mode. Cede will result
-					 * in this thread going dormant, if the
-					 * partner thread is still doing work.
-					 * Thread wakes up if partner goes idle,
-					 * an interrupt is presented, or a prod
-					 * occurs.  Returning from the cede
-					 * enables external interrupts.
-					 */
-					if (!need_resched())
-						cede_processor();
-					else
-						local_irq_enable();
-				} else {
-					/*
-					 * Give the HV an opportunity at the
-					 * processor, since we are not doing
-					 * any work.
-					 */
-					poll_pending();
-				}
-			}
-
-			clear_thread_flag(TIF_POLLING_NRFLAG);
-		} else {
-			set_need_resched();
-		}
-
-		HMT_medium();
-		lpaca->lppaca.idle = 0;
-		schedule();
-		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
-			cpu_die();
-	}
-	return 0;
-}
-
-static int shared_idle(void)
-{
-	struct paca_struct *lpaca = get_paca();
-	unsigned int cpu = smp_processor_id();
-
-	while (1) {
-		/*
-		 * Indicate to the HV that we are idle. Now would be
-		 * a good time to find other work to dispatch.
-		 */
-		lpaca->lppaca.idle = 1;
-
-		while (!need_resched() && !cpu_is_offline(cpu)) {
-			local_irq_disable();
-
-			/*
-			 * Yield the processor to the hypervisor.  We return if
-			 * an external interrupt occurs (which are driven prior
-			 * to returning here) or if a prod occurs from another
-			 * processor. When returning here, external interrupts
-			 * are enabled.
-			 *
-			 * Check need_resched() again with interrupts disabled
-			 * to avoid a race.
-			 */
-			if (!need_resched())
-				cede_processor();
-			else
-				local_irq_enable();
-		}
-
-		HMT_medium();
-		lpaca->lppaca.idle = 0;
-		schedule();
-		if (cpu_is_offline(smp_processor_id()) &&
-		    system_state == SYSTEM_RUNNING)
-			cpu_die();
-	}
-
-	return 0;
-}
-
-#endif /* CONFIG_PPC_PSERIES */
-
 int native_idle(void)
 {
 	while(1) {