author     Nick Piggin <nickpiggin@yahoo.com.au>    2005-11-09 00:39:04 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-11-09 10:56:33 -0500
commit     64c7c8f88559624abdbe12b5da6502e8879f8d28
tree       02f85a35ddd0f24dec70e5d6ecd61073578fd8d6 /drivers/acpi/processor_idle.c
parent     5bfb5d690f36d316a5f3b4f7775fda996faa6b12
[PATCH] sched: resched and cpu_idle rework
Make some changes to NEED_RESCHED and POLLING_NRFLAG to reduce confusion,
and make their semantics rigid. This improves the efficiency of
resched_task and of some cpu_idle routines.
* In resched_task:
- TIF_NEED_RESCHED is only cleared with the task's runqueue lock held,
and since we hold it during resched_task, there is no need for an
atomic test-and-set there. The only other place this should be set is
in the timer interrupt when the task's quantum expires - that case is
protected against because the rq lock is irq-safe.
- If TIF_NEED_RESCHED is already set, then we don't need to do anything
further. It won't get unset until the task gets schedule()d off.
- If we are running on the same CPU as the task we resched, then set
TIF_NEED_RESCHED and no further action is required.
- If we are running on another CPU, and TIF_POLLING_NRFLAG is *not* set
after TIF_NEED_RESCHED has been set, then we need to send an IPI.
Using these rules, we are able to remove the test and set operation in
resched_task, and make clear the previously vague semantics of
POLLING_NRFLAG.
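To make these rules concrete, here is a rough sketch of a resched_task()
that follows them. This is an illustration only, not the literal scheduler
patch; it assumes the usual thread-flag helpers (test_tsk_thread_flag(),
set_tsk_thread_flag()) and the task_rq() / smp_send_reschedule() primitives
from kernel/sched.c:

/* Sketch only - caller must hold the task's runqueue lock. */
static void resched_task(struct task_struct *p)
{
        int cpu;

        assert_spin_locked(&task_rq(p)->lock);

        /* Already marked; it stays set until the task is schedule()d off. */
        if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
                return;

        set_tsk_thread_flag(p, TIF_NEED_RESCHED);

        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
                return;         /* same CPU: setting the flag is enough */

        /* Make NEED_RESCHED visible before testing POLLING_NRFLAG. */
        smp_mb();
        if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
                smp_send_reschedule(cpu);       /* remote CPU not polling: IPI */
}

Note there is no atomic test-and-set anywhere: the runqueue lock serializes
writers of TIF_NEED_RESCHED, and the polling flag decides whether an IPI is
needed at all.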
* In idle routines:
- Enter cpu_idle with preempt disabled. When the need_resched() condition
becomes true, explicitly call schedule(). This makes things a bit clearer
(IMO), though not all architectures have been updated yet.
- Many idle routines do a test-and-clear of TIF_NEED_RESCHED for some
reason. According to the resched_task rules, this isn't needed (and it
actually breaks the assumption that TIF_NEED_RESCHED is only cleared with
the runqueue lock held), so remove it. This generally saves one locked
memory operation when switching to the idle thread.
- Many idle routines clear TIF_POLLING_NRFLAG, and only set it in their
innermost polling loops. The resched_task semantics above allow it to
remain set right up to the last need_resched() check before entering a
halt that requires an interrupt to wake up.
Many idle routines never enter such a halt at all, so POLLING_NRFLAG can
simply be left set the whole time, completely eliminating resched IPIs
when rescheduling the idle task.
Even where a halt is used, the window in which POLLING_NRFLAG is set can
be widened, reducing the chance of resched IPIs (see the sketch after
this list).
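As a rough illustration of the new convention (a hedged sketch, not any
particular architecture's code), an idle loop that never halts can keep
TIF_POLLING_NRFLAG set permanently and simply spin until need_resched()
becomes true:

/*
 * Illustrative idle loop only: entered with preemption disabled,
 * TIF_POLLING_NRFLAG left set while polling, schedule() called
 * explicitly once need_resched() becomes true.
 */
void cpu_idle(void)
{
        set_thread_flag(TIF_POLLING_NRFLAG);

        while (1) {
                while (!need_resched())
                        cpu_relax();    /* polling: resched_task() skips the IPI */

                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

An idle routine that does halt instead clears TIF_POLLING_NRFLAG, issues a
memory barrier, re-checks need_resched(), and only then halts - which is
exactly the pattern the new acpi_safe_halt() follows in the diff below.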
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Con Kolivas <kernel@kolivas.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/acpi/processor_idle.c')
 -rw-r--r--  drivers/acpi/processor_idle.c | 37
 1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 161db4acfb91..573b6a97bb1f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -167,6 +167,19 @@ acpi_processor_power_activate(struct acpi_processor *pr,
         return;
 }
 
+static void acpi_safe_halt(void)
+{
+        int polling = test_thread_flag(TIF_POLLING_NRFLAG);
+        if (polling) {
+                clear_thread_flag(TIF_POLLING_NRFLAG);
+                smp_mb__after_clear_bit();
+        }
+        if (!need_resched())
+                safe_halt();
+        if (polling)
+                set_thread_flag(TIF_POLLING_NRFLAG);
+}
+
 static atomic_t c3_cpu_count;
 
 static void acpi_processor_idle(void)
@@ -177,7 +190,7 @@ static void acpi_processor_idle(void)
         int sleep_ticks = 0;
         u32 t1, t2 = 0;
 
-        pr = processors[raw_smp_processor_id()];
+        pr = processors[smp_processor_id()];
         if (!pr)
                 return;
 
@@ -197,8 +210,13 @@ static void acpi_processor_idle(void)
         }
 
         cx = pr->power.state;
-        if (!cx)
-                goto easy_out;
+        if (!cx) {
+                if (pm_idle_save)
+                        pm_idle_save();
+                else
+                        acpi_safe_halt();
+                return;
+        }
 
         /*
          * Check BM Activity
@@ -278,7 +296,8 @@ static void acpi_processor_idle(void)
                 if (pm_idle_save)
                         pm_idle_save();
                 else
-                        safe_halt();
+                        acpi_safe_halt();
+
                 /*
                  * TBD: Can't get time duration while in C1, as resumes
                  * go to an ISR rather than here.  Need to instrument
@@ -414,16 +433,6 @@ static void acpi_processor_idle(void)
          */
         if (next_state != pr->power.state)
                 acpi_processor_power_activate(pr, next_state);
-
-        return;
-
-      easy_out:
-        /* do C1 instead of busy loop */
-        if (pm_idle_save)
-                pm_idle_save();
-        else
-                safe_halt();
-        return;
 }
 
 static int acpi_processor_set_power_policy(struct acpi_processor *pr)