author     Nick Piggin <nickpiggin@yahoo.com.au>    2005-11-09 00:39:04 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>   2005-11-09 10:56:33 -0500
commit     64c7c8f88559624abdbe12b5da6502e8879f8d28 (patch)
tree       02f85a35ddd0f24dec70e5d6ecd61073578fd8d6 /arch/x86_64/kernel
parent     5bfb5d690f36d316a5f3b4f7775fda996faa6b12 (diff)
[PATCH] sched: resched and cpu_idle rework
Make some changes to the NEED_RESCHED and POLLING_NRFLAG handling to reduce
confusion, and to make their semantics rigid. This improves the efficiency of
resched_task and some cpu_idle routines.
* In resched_task:
- TIF_NEED_RESCHED is only cleared with the task's runqueue lock held,
and since we hold that lock during resched_task, there is no need for an
atomic test-and-set there. The only other time this flag is set is
when the task's quantum expires, in the timer interrupt; that case is
protected against because the rq lock is irq-safe.
- If TIF_NEED_RESCHED is already set, then we don't need to do anything. It
won't get unset until the task gets schedule()d off.
- If we are running on the same CPU as the task we resched, then set
TIF_NEED_RESCHED and no further action is required.
- If we are running on another CPU, and TIF_POLLING_NRFLAG is *not* set
after TIF_NEED_RESCHED has been set, then we need to send an IPI.
Using these rules, we are able to remove the atomic test-and-set operation in
resched_task, and make the previously vague semantics of POLLING_NRFLAG
explicit; the resulting logic is sketched below.
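For reference, a sketch of what resched_task looks like under these rules
(this is the kernel/sched.c side of the patch, outside the arch/x86_64
diffstat shown below; treat it as an approximation, not the verbatim change):

static void resched_task(struct task_struct *p)
{
        int cpu;

        /* Rule 1: callers hold the task's runqueue lock. */
        assert_spin_locked(&task_rq(p)->lock);

        /* Rule 2: already marked, nothing more to do until it schedules off. */
        if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
                return;

        set_tsk_thread_flag(p, TIF_NEED_RESCHED);

        /* Rule 3: same CPU, setting the flag alone is enough. */
        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
                return;

        /*
         * Rule 4: NEED_RESCHED must be visible before we test
         * POLLING_NRFLAG, hence the full barrier; only send the
         * IPI if the target CPU is not polling the flag.
         */
        smp_mb();
        if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
                smp_send_reschedule(cpu);
}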
* In idle routines:
- Enter cpu_idle with preemption disabled. When the need_resched() condition
becomes true, explicitly call schedule(). This makes things a bit clearer
(IMO), though not all architectures have been updated yet.
- Many idle routines do a test-and-clear of TIF_NEED_RESCHED for some reason.
According to the resched_task rules, this isn't needed (and it actually
breaks the assumption that TIF_NEED_RESCHED is only cleared with the
runqueue lock held), so remove it. This generally saves one locked memory
operation when switching to the idle thread.
- Many idle routines clear TIF_POLLING_NRFLAG and only set it in the
innermost polling idle loops. The resched_task semantics above allow the
flag to remain set right up to the last need_resched() check before entering
a halt that requires an interrupt wakeup.
Many idle routines never enter such a halt at all, so POLLING_NRFLAG
can simply be left set at all times, completely eliminating resched IPIs when
rescheduling the idle task.
The window in which POLLING_NRFLAG is set can thus be widened, reducing the
chance of resched IPIs; the resulting idle-loop shape is sketched below.
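To make the idle-loop rules concrete, here is a minimal sketch of the shape
they imply (idle() stands in for whichever low-level routine the architecture
selects, e.g. default_idle or mwait_idle below; this is an illustration, not
any single architecture's verbatim code):

void cpu_idle(void)
{
        /* The idle task advertises up front that it polls for NEED_RESCHED. */
        set_thread_flag(TIF_POLLING_NRFLAG);

        /* Entered with preemption disabled, per the first rule above. */
        while (1) {
                while (!need_resched())
                        idle();  /* may drop POLLING_NRFLAG around a real halt */
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}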
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Con Kolivas <kernel@kolivas.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/process.c  67
1 file changed, 31 insertions(+), 36 deletions(-)
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 571f9fe490ce..59be85d9a4bc 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -86,12 +86,22 @@ EXPORT_SYMBOL(enable_hlt);
  */
 void default_idle(void)
 {
+        local_irq_enable();
+
         if (!atomic_read(&hlt_counter)) {
-                local_irq_disable();
-                if (!need_resched())
-                        safe_halt();
-                else
-                        local_irq_enable();
+                clear_thread_flag(TIF_POLLING_NRFLAG);
+                smp_mb__after_clear_bit();
+                while (!need_resched()) {
+                        local_irq_disable();
+                        if (!need_resched())
+                                safe_halt();
+                        else
+                                local_irq_enable();
+                }
+                set_thread_flag(TIF_POLLING_NRFLAG);
+        } else {
+                while (!need_resched())
+                        cpu_relax();
         }
 }
 
@@ -102,30 +112,16 @@ void default_idle(void)
  */
 static void poll_idle (void)
 {
-        int oldval;
-
         local_irq_enable();
 
-        /*
-         * Deal with another CPU just having chosen a thread to
-         * run here:
-         */
-        oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-
-        if (!oldval) {
-                set_thread_flag(TIF_POLLING_NRFLAG);
-                asm volatile(
-                        "2:"
-                        "testl %0,%1;"
-                        "rep; nop;"
-                        "je 2b;"
-                        : :
-                        "i" (_TIF_NEED_RESCHED),
-                        "m" (current_thread_info()->flags));
-                clear_thread_flag(TIF_POLLING_NRFLAG);
-        } else {
-                set_need_resched();
-        }
+        asm volatile(
+                "2:"
+                "testl %0,%1;"
+                "rep; nop;"
+                "je 2b;"
+                : :
+                "i" (_TIF_NEED_RESCHED),
+                "m" (current_thread_info()->flags));
 }
 
 void cpu_idle_wait(void)
@@ -187,6 +183,8 @@ static inline void play_dead(void)
  */
 void cpu_idle (void)
 {
+        set_thread_flag(TIF_POLLING_NRFLAG);
+
         /* endless idle loop with no priority at all */
         while (1) {
                 while (!need_resched()) {
@@ -221,15 +219,12 @@ static void mwait_idle(void)
 {
         local_irq_enable();
 
-        if (!need_resched()) {
-                set_thread_flag(TIF_POLLING_NRFLAG);
-                do {
-                        __monitor((void *)&current_thread_info()->flags, 0, 0);
-                        if (need_resched())
-                                break;
-                        __mwait(0, 0);
-                } while (!need_resched());
-                clear_thread_flag(TIF_POLLING_NRFLAG);
-        }
+        while (!need_resched()) {
+                __monitor((void *)&current_thread_info()->flags, 0, 0);
+                smp_mb();
+                if (need_resched())
+                        break;
+                __mwait(0, 0);
+        }
 }
 