author		Nick Piggin <nickpiggin@yahoo.com.au>	2005-06-25 17:57:23 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-25 19:24:43 -0400
commit		4866cde064afbb6c2a488c265e696879de616daa (patch)
tree		6effad1ab6271129fc607b98273086409876563a /include/asm-arm/system.h
parent		48c08d3f8ff94fa118187e4d8d4a5707bb85e59d (diff)
[PATCH] sched: cleanup context switch locking
Instead of requiring architecture code to interact with the scheduler's locking implementation, provide a couple of defines that can be used by the architecture to request runqueue unlocked context switches, and ask for interrupts to be enabled over the context switch.

Also replaces the "switch_lock" used by these architectures with an oncpu flag (note, not a potentially slow bitflag). This eliminates one bus locked memory operation when context switching, and simplifies the task_running function.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
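For reference, the scheduler-core side of this scheme works roughly as sketched below: the core keeps default lock and IRQ handling around the context switch, and an architecture opts into different behaviour by defining the new request macros (the ARM hunk in this patch defines __ARCH_WANT_INTERRUPTS_ON_CTXSW). The helper names prepare_lock_switch()/finish_lock_switch(), the runqueue_t/task_t typedefs, the __ARCH_WANT_UNLOCKED_CTXSW spelling of the "unlocked context switch" request, and the #ifdef layout are reconstructed approximations rather than hunks from this page; the CONFIG_SMP and lock-debugging details of the real code are omitted. Treat it as an illustrative sketch, not the literal patched kernel/sched.c.

/*
 * Illustrative sketch only: names, typedefs and #ifdef nesting are
 * approximations of the 2.6-era scheduler core, not code from this patch.
 */
static inline int task_running(runqueue_t *rq, task_t *p)
{
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	/*
	 * rq->lock is dropped across the switch, so rq->curr alone is not
	 * enough; a plain per-task oncpu flag replaces the old switch_lock.
	 */
	return p->oncpu;
#else
	return rq->curr == p;
#endif
}

static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
{
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	next->oncpu = 1;		/* plain store, not a locked bitop */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	spin_unlock_irq(&rq->lock);	/* IRQs on over switch_mm()'s cache flush */
#else
	spin_unlock(&rq->lock);
#endif
#endif
	/* default: keep holding rq->lock (IRQs off) across the switch */
}

static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
{
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	smp_wmb();			/* switch fully done before oncpu clears */
	prev->oncpu = 0;
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
#else
	spin_unlock_irq(&rq->lock);	/* released by the incoming task */
#endif
}

The changelog's claims map onto finish_lock_switch(): clearing prev->oncpu is a plain store ordered by smp_wmb() rather than an unlock of a per-task spinlock, which removes one bus-locked memory operation per context switch, and task_running() no longer needs spin_is_locked(). An interrupts-on switch only makes sense when the runqueue lock is not held across it (otherwise an interrupt handler could deadlock on rq->lock), so in practice the interrupts-on request goes hand in hand with the unlocked-switch behaviour; how the real scheduler ties the two defines together is glossed over in this sketch.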
Diffstat (limited to 'include/asm-arm/system.h')
-rw-r--r--  include/asm-arm/system.h | 30
1 file changed, 4 insertions(+), 26 deletions(-)
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 39dd7008013c..3d0d2860b6db 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -145,34 +145,12 @@ extern unsigned int user_debug;
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
-#ifdef CONFIG_SMP
 /*
- * Define our own context switch locking. This allows us to enable
- * interrupts over the context switch, otherwise we end up with high
- * interrupt latency. The real problem area is switch_mm() which may
- * do a full cache flush.
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
  */
-#define prepare_arch_switch(rq,next)				\
-do {								\
-	spin_lock(&(next)->switch_lock);			\
-	spin_unlock_irq(&(rq)->lock);				\
-} while (0)
-
-#define finish_arch_switch(rq,prev)				\
-	spin_unlock(&(prev)->switch_lock)
-
-#define task_running(rq,p)					\
-	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-#else
-/*
- * Our UP-case is more simple, but we assume knowledge of how
- * spin_unlock_irq() and friends are implemented. This avoids
- * us needlessly decrementing and incrementing the preempt count.
- */
-#define prepare_arch_switch(rq,next)	local_irq_enable()
-#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
-#define task_running(rq,p)		((rq)->curr == (p))
-#endif
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
 
 /*
  * switch_to(prev, next) should switch from task `prev' to `next'