aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/sched.h
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2008-01-30 07:31:20 -0500
committerIngo Molnar <mingo@elte.hu>2008-01-30 07:31:20 -0500
commit95c354fe9f7d6decc08a92aa26eb233ecc2155bf (patch)
treeec9267032ea875e84216cfb20acb2cfc7c62149f /include/linux/sched.h
parenta95d67f87e1a5f1b4429be3ba3bf7b4051657908 (diff)
spinlock: lockbreak cleanup
The break_lock data structure and code for spinlocks is quite nasty. Not only does it double the size of a spinlock but it changes locking to a potentially less optimal trylock. Put all of that under CONFIG_GENERIC_LOCKBREAK, and introduce a __raw_spin_is_contended that uses the lock data itself to determine whether there are waiters on the lock, to be used if CONFIG_GENERIC_LOCKBREAK is not set. Rename need_lockbreak to spin_needbreak, make it use spin_is_contended to decouple it from the spinlock implementation, and make it typesafe (rwlocks do not have any need_lockbreak sites -- why do they even get bloated up with that break_lock then?). Signed-off-by: Nick Piggin <npiggin@suse.de> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--include/linux/sched.h21
1 file changed, 7 insertions, 14 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2d0546e884ea..9d4797609aa5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1922,23 +1922,16 @@ extern int cond_resched_softirq(void);
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?:
+ * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * but a general need for low latency)
  */
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
-# define need_lockbreak(lock) ((lock)->break_lock)
-#else
-# define need_lockbreak(lock) 0
-#endif
-
-/*
- * Does a critical section need to be broken due to another
- * task waiting or preemption being signalled:
- */
-static inline int lock_need_resched(spinlock_t *lock)
+static inline int spin_needbreak(spinlock_t *lock)
 {
-	if (need_lockbreak(lock) || need_resched())
-		return 1;
+#ifdef CONFIG_PREEMPT
+	return spin_is_contended(lock);
+#else
 	return 0;
+#endif
 }
 
 /*