path: root/include/linux
author    Nick Piggin <npiggin@suse.de>    2008-01-30 07:31:20 -0500
committer Ingo Molnar <mingo@elte.hu>      2008-01-30 07:31:20 -0500
commit    95c354fe9f7d6decc08a92aa26eb233ecc2155bf (patch)
tree      ec9267032ea875e84216cfb20acb2cfc7c62149f /include/linux
parent    a95d67f87e1a5f1b4429be3ba3bf7b4051657908 (diff)
spinlock: lockbreak cleanup
The break_lock data structure and code for spinlocks is quite nasty. Not only does it double the size of a spinlock but it changes locking to a potentially less optimal trylock.

Put all of that under CONFIG_GENERIC_LOCKBREAK, and introduce a __raw_spin_is_contended that uses the lock data itself to determine whether there are waiters on the lock, to be used if CONFIG_GENERIC_LOCKBREAK is not set.

Rename need_lockbreak to spin_needbreak, make it use spin_is_contended to decouple it from the spinlock implementation, and make it typesafe (rwlocks do not have any need_lockbreak sites -- why do they even get bloated up with that break_lock then?).

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
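For context, the break_lock scheme that CONFIG_GENERIC_LOCKBREAK now gates works roughly as sketched below: the generic slow path acquires the lock with repeated trylocks so that a waiter can advertise itself through break_lock, which is the "potentially less optimal trylock" the message refers to. This is a simplified sketch of that generic path, not the verbatim kernel code.

	/*
	 * Simplified sketch of the generic break_lock slow path
	 * (assumes <linux/spinlock.h> context; not verbatim kernel code).
	 */
	void _spin_lock(spinlock_t *lock)
	{
		for (;;) {
			preempt_disable();
			if (likely(__raw_spin_trylock(&lock->raw_lock)))
				break;			/* got the lock */
			preempt_enable();

			lock->break_lock = 1;		/* tell the holder someone is waiting */
			while (__raw_spin_is_locked(&lock->raw_lock) && lock->break_lock)
				cpu_relax();		/* spin outside the preempt-off region */
		}
		lock->break_lock = 0;
	}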
Diffstat (limited to 'include/linux')
-rw-r--r--    include/linux/sched.h            21
-rw-r--r--    include/linux/spinlock.h          6
-rw-r--r--    include/linux/spinlock_types.h    4
-rw-r--r--    include/linux/spinlock_up.h       2
4 files changed, 17 insertions, 16 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2d0546e884ea..9d4797609aa5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1922,23 +1922,16 @@ extern int cond_resched_softirq(void);
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?:
+ * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * but a general need for low latency)
  */
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
-# define need_lockbreak(lock) ((lock)->break_lock)
-#else
-# define need_lockbreak(lock) 0
-#endif
-
-/*
- * Does a critical section need to be broken due to another
- * task waiting or preemption being signalled:
- */
-static inline int lock_need_resched(spinlock_t *lock)
+static inline int spin_needbreak(spinlock_t *lock)
 {
-	if (need_lockbreak(lock) || need_resched())
-		return 1;
+#ifdef CONFIG_PREEMPT
+	return spin_is_contended(lock);
+#else
 	return 0;
+#endif
 }
 
 /*
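The natural consumer of the new inline lives outside include/linux (and therefore outside this diffstat): a cond_resched_lock()-style helper that drops a held lock when another task is spinning on it or a reschedule is pending. Roughly, and only as a sketch of the intended caller pattern rather than the exact kernel implementation:

	/* Sketch of the intended usage of spin_needbreak(). */
	int cond_resched_lock(spinlock_t *lock)
	{
		int ret = 0;

		if (spin_needbreak(lock) || need_resched()) {
			spin_unlock(lock);	/* let the waiter in, or reschedule */
			cond_resched();
			ret = 1;
			spin_lock(lock);
		}
		return ret;
	}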
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index c376f3b36c89..124449733c55 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -120,6 +120,12 @@ do { \
 
 #define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
 
+#ifdef CONFIG_GENERIC_LOCKBREAK
+#define spin_is_contended(lock) ((lock)->break_lock)
+#else
+#define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
+#endif
+
 /**
  * spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
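__raw_spin_is_contended() is supplied by each architecture's raw spinlock implementation; only the uniprocessor stub appears in this diff. As a purely hypothetical illustration, an SMP architecture using a ticket-style lock word with a "now serving" and a "next ticket" counter could report contention like this (the head/tail field names are illustrative and not from this patch):

	/*
	 * Hypothetical arch helper (illustrative only): a ticket lock is
	 * contended when more than one ticket is outstanding, i.e. someone
	 * beyond the current holder is queued on the lock word.
	 */
	static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
	{
		return (lock->tail - lock->head) > 1;
	}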
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index f6a3a951b79e..68d88f71f1a2 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -19,7 +19,7 @@
 
 typedef struct {
 	raw_spinlock_t raw_lock;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+#ifdef CONFIG_GENERIC_LOCKBREAK
 	unsigned int break_lock;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -35,7 +35,7 @@ typedef struct {
 
 typedef struct {
 	raw_rwlock_t raw_lock;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+#ifdef CONFIG_GENERIC_LOCKBREAK
 	unsigned int break_lock;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index ea54c4c9a4ec..938234c4a996 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -64,6 +64,8 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 # define __raw_spin_trylock(lock)	({ (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
 
+#define __raw_spin_is_contended(lock)	(((void)(lock), 0))
+
 #define __raw_read_can_lock(lock)	(((void)(lock), 1))
 #define __raw_write_can_lock(lock)	(((void)(lock), 1))
 