author		Nick Piggin <nickpiggin@yahoo.com.au>	2005-11-09 00:39:04 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-09 10:56:33 -0500
commit		64c7c8f88559624abdbe12b5da6502e8879f8d28 (patch)
tree		02f85a35ddd0f24dec70e5d6ecd61073578fd8d6 /arch/i386
parent		5bfb5d690f36d316a5f3b4f7775fda996faa6b12 (diff)
[PATCH] sched: resched and cpu_idle rework
Make some changes to the NEED_RESCHED and POLLING_NRFLAG handling to reduce
confusion and make their semantics rigid. This improves the efficiency of
resched_task and of some cpu_idle routines.
* In resched_task:
- TIF_NEED_RESCHED is only cleared with the task's runqueue lock held, and
  since we hold that lock throughout resched_task, there is no need for an
  atomic test-and-set there. The only other place the flag gets set is the
  timer interrupt, when the task's quantum expires - and that case is safe
  because the rq lock is irq-safe.
- If TIF_NEED_RESCHED is already set, we don't need to do anything. It
  won't be cleared until the task gets schedule()d off.
- If we are running on the same CPU as the task we resched, then set
TIF_NEED_RESCHED and no further action is required.
- If we are running on another CPU, and TIF_POLLING_NRFLAG is *not* set
after TIF_NEED_RESCHED has been set, then we need to send an IPI.
Using these rules we can remove the atomic test-and-set operation in
resched_task, and the previously vague semantics of POLLING_NRFLAG become
explicit (a rough sketch of the resulting logic follows below).
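
A minimal, non-authoritative sketch of what these rules amount to, assuming the
usual scheduler helpers (task_rq, assert_spin_locked, smp_send_reschedule, the
*_tsk_thread_flag accessors); the real change is in kernel/sched.c, which is
outside the arch/i386 diffstat shown on this page:

static void resched_task(task_t *p)
{
	int cpu;

	/* rule 1: callers already hold the task's runqueue lock */
	assert_spin_locked(&task_rq(p)->lock);

	/* rule 2: already marked - it stays set until the task is schedule()d off */
	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
		return;

	/* no atomic test-and-set needed: the rq lock serialises against the
	 * only other setter (the timer tick) */
	set_tsk_thread_flag(p, TIF_NEED_RESCHED);

	/* rule 3: same CPU - the local task will notice the flag on its own */
	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* rule 4: NEED_RESCHED must be visible before we test POLLING_NRFLAG */
	smp_mb();
	if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
		smp_send_reschedule(cpu);	/* only now is an IPI needed */
}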
* In idle routines:
- Enter cpu_idle with preempt disabled. When the need_resched() condition
  becomes true, explicitly call schedule(). This makes things a bit clearer
  (IMO), but I haven't updated all architectures yet.
- Many idle routines do a test-and-clear of TIF_NEED_RESCHED for some reason.
  According to the resched_task rules this isn't needed (and it actually breaks
  the assumption that TIF_NEED_RESCHED is only cleared with the runqueue lock
  held), so remove it. That is generally one less locked memory op when
  switching to the idle thread.
- Many idle routines clear TIF_POLLING_NRFLAG and only set it in the innermost
  polling idle loops. The resched_task semantics above allow the flag to stay
  set right up until the last need_resched() check before entering a halt that
  requires an interrupt wakeup.
Many idle routines simply never enter such a halt, so POLLING_NRFLAG can always
be left set, completely eliminating resched IPIs when rescheduling the idle
task. More generally, the window in which POLLING_NRFLAG is set can be widened,
reducing the chance of resched IPIs (see the idle-loop sketch below).
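
To make the intended ordering concrete, here is a minimal sketch of the two
idle-loop shapes this implies, closely following the default_idle change in the
diff below; the names polling_idle_loop/halting_idle_loop are illustrative
only, not functions introduced by this patch:

/* Pure polling idle: TIF_POLLING_NRFLAG (set once in cpu_idle) stays set the
 * whole time, so resched_task never needs to send an IPI to wake this CPU. */
static void polling_idle_loop(void)
{
	while (!need_resched())
		cpu_relax();
}

/* Halt-based idle: clear the polling flag only around the halt itself, and
 * re-check need_resched() after the clear is globally visible. */
static void halting_idle_loop(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb__after_clear_bit();	/* clear must be visible before the test */
	while (!need_resched()) {
		local_irq_disable();
		if (!need_resched())
			safe_halt();	/* sti; hlt - re-enables interrupts and halts */
		else
			local_irq_enable();
	}
	set_thread_flag(TIF_POLLING_NRFLAG);
}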
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Con Kolivas <kernel@kolivas.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/i386')
-rw-r--r--	arch/i386/kernel/apm.c		20
-rw-r--r--	arch/i386/kernel/process.c	64
2 files changed, 47 insertions, 37 deletions
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 86e80c551478..003548b8735f 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -769,8 +769,26 @@ static int set_system_power_state(u_short state)
 static int apm_do_idle(void)
 {
 	u32 eax;
+	u8 ret = 0;
+	int idled = 0;
+	int polling;
+
+	polling = test_thread_flag(TIF_POLLING_NRFLAG);
+	if (polling) {
+		clear_thread_flag(TIF_POLLING_NRFLAG);
+		smp_mb__after_clear_bit();
+	}
+	if (!need_resched()) {
+		idled = 1;
+		ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax);
+	}
+	if (polling)
+		set_thread_flag(TIF_POLLING_NRFLAG);
+
+	if (!idled)
+		return 0;
 
-	if (apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax)) {
+	if (ret) {
 		static unsigned long t;
 
 		/* This always fails on some SMP boards running UP kernels.
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 5296e284ea36..1cb261f225d5 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -99,14 +99,22 @@ EXPORT_SYMBOL(enable_hlt);
  */
 void default_idle(void)
 {
+	local_irq_enable();
+
 	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
-		local_irq_disable();
-		if (!need_resched())
-			safe_halt();
-		else
-			local_irq_enable();
+		clear_thread_flag(TIF_POLLING_NRFLAG);
+		smp_mb__after_clear_bit();
+		while (!need_resched()) {
+			local_irq_disable();
+			if (!need_resched())
+				safe_halt();
+			else
+				local_irq_enable();
+		}
+		set_thread_flag(TIF_POLLING_NRFLAG);
 	} else {
-		cpu_relax();
+		while (!need_resched())
+			cpu_relax();
 	}
 }
 #ifdef CONFIG_APM_MODULE
@@ -120,29 +128,14 @@ EXPORT_SYMBOL(default_idle);
  */
 static void poll_idle (void)
 {
-	int oldval;
-
 	local_irq_enable();
 
-	/*
-	 * Deal with another CPU just having chosen a thread to
-	 * run here:
-	 */
-	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-
-	if (!oldval) {
-		set_thread_flag(TIF_POLLING_NRFLAG);
-		asm volatile(
-			"2:"
-			"testl %0, %1;"
-			"rep; nop;"
-			"je 2b;"
-			: : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
-
-		clear_thread_flag(TIF_POLLING_NRFLAG);
-	} else {
-		set_need_resched();
-	}
+	asm volatile(
+		"2:"
+		"testl %0, %1;"
+		"rep; nop;"
+		"je 2b;"
+		: : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
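
(Editorial note, not part of the patch: the asm loop added above is roughly the
C loop below, assuming the usual _TIF_NEED_RESCHED and cpu_relax() definitions;
the asm volatile form is used so the flags word is genuinely re-read from
memory on every pass.)

	/* approximate C equivalent of the testl / rep;nop / je loop in poll_idle() */
	while (!(current_thread_info()->flags & _TIF_NEED_RESCHED))
		cpu_relax();	/* "rep; nop" is the x86 pause hint, i.e. cpu_relax() */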
@@ -181,6 +174,8 @@ void cpu_idle(void)
 {
 	int cpu = smp_processor_id();
 
+	set_thread_flag(TIF_POLLING_NRFLAG);
+
 	/* endless idle loop with no priority at all */
 	while (1) {
 		while (!need_resched()) {
@@ -246,15 +241,12 @@ static void mwait_idle(void)
 {
 	local_irq_enable();
 
-	if (!need_resched()) {
-		set_thread_flag(TIF_POLLING_NRFLAG);
-		do {
-			__monitor((void *)&current_thread_info()->flags, 0, 0);
-			if (need_resched())
-				break;
-			__mwait(0, 0);
-		} while (!need_resched());
-		clear_thread_flag(TIF_POLLING_NRFLAG);
+	while (!need_resched()) {
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		smp_mb();
+		if (need_resched())
+			break;
+		__mwait(0, 0);
 	}
 }
 