author     Nick Piggin <npiggin@suse.de>    2008-01-30 07:31:20 -0500
committer  Ingo Molnar <mingo@elte.hu>      2008-01-30 07:31:20 -0500
commit     95c354fe9f7d6decc08a92aa26eb233ecc2155bf (patch)
tree       ec9267032ea875e84216cfb20acb2cfc7c62149f /kernel
parent     a95d67f87e1a5f1b4429be3ba3bf7b4051657908 (diff)
spinlock: lockbreak cleanup
The break_lock data structure and code for spinlocks are quite nasty.
Not only do they double the size of a spinlock, they also change locking to
a potentially less optimal trylock.
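
[Editor's illustration, not part of the patch: a minimal userspace sketch of
the lockbreak scheme being removed, assuming a simple test-and-set lock; the
demo_* names are hypothetical and this is not the kernel's actual
BUILD_LOCK_OPS code. The extra break_lock field is what doubles the size of
every lock, and the contended path becomes a trylock loop rather than the
architecture's native lock sequence.]

```c
#include <stdatomic.h>

struct lockbreak_spinlock {
	atomic_int slock;	/* 0 = unlocked, 1 = locked */
	atomic_int break_lock;	/* nonzero while a waiter is spinning */
};

static int demo_trylock(struct lockbreak_spinlock *l)
{
	int expected = 0;

	return atomic_compare_exchange_strong_explicit(&l->slock, &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void demo_lock(struct lockbreak_spinlock *l)
{
	for (;;) {
		if (demo_trylock(l))
			break;
		/* Advertise a waiter so the holder can drop the lock early
		 * ("break" the lock), then spin until it looks free. */
		atomic_store_explicit(&l->break_lock, 1, memory_order_relaxed);
		while (atomic_load_explicit(&l->slock, memory_order_relaxed))
			;
	}
	atomic_store_explicit(&l->break_lock, 0, memory_order_relaxed);
}

static void demo_unlock(struct lockbreak_spinlock *l)
{
	atomic_store_explicit(&l->slock, 0, memory_order_release);
}
```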
Put all of that under CONFIG_GENERIC_LOCKBREAK, and introduce a
__raw_spin_is_contended that uses the lock data itself to determine whether
there are waiters on the lock, to be used if CONFIG_GENERIC_LOCKBREAK is
not set.
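
[Editor's illustration, not part of the patch: a hypothetical ticket-lock
sketch in userspace C, not the kernel's actual per-architecture
implementation. It shows the kind of check __raw_spin_is_contended can make:
whether anyone is waiting is visible from the lock data alone, so no separate
break_lock field is needed.]

```c
#include <stdatomic.h>

struct ticket_spinlock {
	atomic_uint next;	/* next ticket to hand out */
	atomic_uint owner;	/* ticket currently being served */
};

static void ticket_lock(struct ticket_spinlock *l)
{
	unsigned int me = atomic_fetch_add_explicit(&l->next, 1,
						    memory_order_relaxed);

	while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
		;	/* spin until our ticket comes up */
}

static void ticket_unlock(struct ticket_spinlock *l)
{
	atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

/* Contended: more than one ticket outstanding, i.e. someone besides the
 * current holder is queued on the lock. */
static int ticket_is_contended(struct ticket_spinlock *l)
{
	unsigned int next  = atomic_load_explicit(&l->next, memory_order_relaxed);
	unsigned int owner = atomic_load_explicit(&l->owner, memory_order_relaxed);

	return (next - owner) > 1;
}
```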
Rename need_lockbreak to spin_needbreak, make it use spin_is_contended to
decouple it from the spinlock implementation, and make it typesafe (rwlocks
do not have any need_lockbreak sites -- why do they even get bloated up
with that break_lock then?).
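
[Editor's note: the new helper itself is added outside the 'kernel' subtree
shown below. As a rough, non-verbatim sketch of what spin_needbreak() reduces
to, it only reports contention on preemptible kernels and goes through
spin_is_contended() rather than poking at the lock internals directly:]

```c
/* Approximate sketch, not necessarily the exact in-tree definition. */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}
```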
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c    | 16
-rw-r--r--  kernel/spinlock.c |  3
2 files changed, 7 insertions, 12 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 524285e46fa7..ba4c88088f62 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4945,19 +4945,15 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int cond_resched_lock(spinlock_t *lock)
 {
+	int resched = need_resched() && system_state == SYSTEM_RUNNING;
 	int ret = 0;
 
-	if (need_lockbreak(lock)) {
+	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
-		cpu_relax();
-		ret = 1;
-		spin_lock(lock);
-	}
-	if (need_resched() && system_state == SYSTEM_RUNNING) {
-		spin_release(&lock->dep_map, 1, _THIS_IP_);
-		_raw_spin_unlock(lock);
-		preempt_enable_no_resched();
-		__cond_resched();
+		if (resched && need_resched())
+			__cond_resched();
+		else
+			cpu_relax();
 		ret = 1;
 		spin_lock(lock);
 	}
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index cd72424c2662..ae28c8245123 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -65,8 +65,7 @@ EXPORT_SYMBOL(_write_trylock);
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
-	defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {