author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-03 15:57:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-03 15:57:53 -0400
commit		776edb59317ada867dfcddde40b55648beeb0078 (patch)
tree		f6a6136374642323cfefd7d6399ea429f9018ade /kernel
parent		59a3d4c3631e553357b7305dc09db1990aa6757c (diff)
parent		3cf2f34e1a3d4d5ff209d087925cf950e52f4805 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - reduced/streamlined smp_mb__*() interface that allows more usecases
     and makes the existing ones less buggy, especially in rarer
     architectures

   - add rwsem implementation comments

   - bump up lockdep limits"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  rwsem: Add comments to explain the meaning of the rwsem's count field
  lockdep: Increase static allocations
  arch: Mass conversion of smp_mb__*()
  arch,doc: Convert smp_mb__*()
  arch,xtensa: Convert smp_mb__*()
  arch,x86: Convert smp_mb__*()
  arch,tile: Convert smp_mb__*()
  arch,sparc: Convert smp_mb__*()
  arch,sh: Convert smp_mb__*()
  arch,score: Convert smp_mb__*()
  arch,s390: Convert smp_mb__*()
  arch,powerpc: Convert smp_mb__*()
  arch,parisc: Convert smp_mb__*()
  arch,openrisc: Convert smp_mb__*()
  arch,mn10300: Convert smp_mb__*()
  arch,mips: Convert smp_mb__*()
  arch,metag: Convert smp_mb__*()
  arch,m68k: Convert smp_mb__*()
  arch,m32r: Convert smp_mb__*()
  arch,ia64: Convert smp_mb__*()
  ...
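The conversion is mechanical: each per-operation barrier name
(smp_mb__before_atomic_dec(), smp_mb__after_atomic_inc(),
smp_mb__after_clear_bit(), and so on) collapses into a single generic pair
usable around any non-value-returning atomic RMW or bitop. The before/after
shape, taken from the debug_core.c hunk below:

	/* Before: one barrier flavour per atomic operation. */
	smp_mb__before_atomic_dec();
	atomic_dec(&slaves_in_kgdb);

	/* After: one generic pair covers atomic_*() and the bitops. */
	smp_mb__before_atomic();
	atomic_dec(&slaves_in_kgdb);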
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/debug/debug_core.c		 4
-rw-r--r--	kernel/futex.c				 4
-rw-r--r--	kernel/kmod.c				 2
-rw-r--r--	kernel/locking/lockdep_internals.h	 6
-rw-r--r--	kernel/locking/rwsem-xadd.c		49
-rw-r--r--	kernel/rcu/tree.c			22
-rw-r--r--	kernel/rcu/tree_plugin.h		 8
-rw-r--r--	kernel/sched/core.c			16
-rw-r--r--	kernel/sched/cpupri.c			 6
-rw-r--r--	kernel/sched/wait.c			 2
10 files changed, 92 insertions, 27 deletions
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 2956c8da1605..1adf62b39b96 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -534,7 +534,7 @@ return_normal:
 		kgdb_info[cpu].exception_state &=
 			~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
 		kgdb_info[cpu].enter_kgdb--;
-		smp_mb__before_atomic_dec();
+		smp_mb__before_atomic();
 		atomic_dec(&slaves_in_kgdb);
 		dbg_touch_watchdogs();
 		local_irq_restore(flags);
@@ -662,7 +662,7 @@ kgdb_restore:
 		kgdb_info[cpu].exception_state &=
 			~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
 		kgdb_info[cpu].enter_kgdb--;
-		smp_mb__before_atomic_dec();
+		smp_mb__before_atomic();
 		atomic_dec(&masters_in_kgdb);
 		/* Free kgdb_active */
 		atomic_set(&kgdb_active, -1);
diff --git a/kernel/futex.c b/kernel/futex.c
index 81dbe773ce4c..89bc9d59ac65 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -267,7 +267,7 @@ static inline void futex_get_mm(union futex_key *key)
 	 * get_futex_key() implies a full barrier. This is relied upon
 	 * as full barrier (B), see the ordering comment above.
 	 */
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 }
 
 /*
@@ -280,7 +280,7 @@ static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
 	/*
 	 * Full barrier (A), see the ordering comment above.
 	 */
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 #endif
 }
 
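Barrier (A) added by hb_waiters_inc() is what lets the wake side elide the
hash-bucket lock when there are no waiters: a plain atomic_read() of the
counter is only trustworthy because the increment is fully ordered against the
waiter's subsequent futex-word read. A sketch of that read side, modeled on
hb_waiters_pending() in this same file (simplified):

	/* Wake side: a zero count means no waiter can be queued, so the
	 * hb->lock acquisition can be skipped entirely. */
	static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
	{
	#ifdef CONFIG_SMP
		return atomic_read(&hb->waiters);	/* ordered by barrier (A) */
	#else
		return 1;				/* UP: always check the queue */
	#endif
	}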
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 6b375af4958d..0ac67a5861c5 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -498,7 +498,7 @@ int __usermodehelper_disable(enum umh_disable_depth depth)
 static void helper_lock(void)
 {
 	atomic_inc(&running_helpers);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 }
 
 static void helper_unlock(void)
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 4f560cfedc8f..51c4b24b6328 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -54,9 +54,9 @@ enum {
  * table (if it's not there yet), and we check it for lock order
  * conflicts and deadlocks.
  */
-#define MAX_LOCKDEP_ENTRIES	16384UL
+#define MAX_LOCKDEP_ENTRIES	32768UL
 
-#define MAX_LOCKDEP_CHAINS_BITS	15
+#define MAX_LOCKDEP_CHAINS_BITS	16
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
 
 #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
@@ -65,7 +65,7 @@ enum {
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
  */
-#define MAX_STACK_TRACE_ENTRIES	262144UL
+#define MAX_STACK_TRACE_ENTRIES	524288UL
 
 extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 1d66e08e897d..b4219ff87b8c 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -12,6 +12,55 @@
 #include <linux/export.h>
 
 /*
+ * Guide to the rw_semaphore's count field for common values.
+ * (32-bit case illustrated, similar for 64-bit)
+ *
+ * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
+ *		    X = #active_readers + #readers attempting to lock
+ *		    (X*ACTIVE_BIAS)
+ *
+ * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
+ *		attempting to read lock or write lock.
+ *
+ * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
+ *		    X = #active readers + # readers attempting lock
+ *		    (X*ACTIVE_BIAS + WAITING_BIAS)
+ *		(2) 1 writer attempting lock, no waiters for lock
+ *		    X-1 = #active readers + #readers attempting lock
+ *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
+ *		(3) 1 writer active, no waiters for lock
+ *		    X-1 = #active readers + #readers attempting lock
+ *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
+ *
+ * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
+ *		    (WAITING_BIAS + ACTIVE_BIAS)
+ *		(2) 1 writer active or attempting lock, no waiters for lock
+ *		    (ACTIVE_WRITE_BIAS)
+ *
+ * 0xffff0000	(1) There are writers or readers queued but none active
+ *		    or in the process of attempting lock.
+ *		    (WAITING_BIAS)
+ *		Note: writer can attempt to steal lock for this count by adding
+ *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
+ *
+ * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
+ *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
+ *
+ * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
+ *	 the count becomes more than 0 for successful lock acquisition,
+ *	 i.e. the case where there are only readers or nobody has lock.
+ *	 (1st and 2nd case above).
+ *
+ *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
+ *	 checking the count becomes ACTIVE_WRITE_BIAS for successful lock
+ *	 acquisition (i.e. nobody else has lock or attempts lock). If
+ *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
+ *	 are only waiters but none active (5th case above), and attempt to
+ *	 steal the lock.
+ *
+ */
+
+/*
  * Initialize an rwsem:
  */
 void __init_rwsem(struct rw_semaphore *sem, const char *name,
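As a worked example of the arithmetic in the new comment (32-bit constants; a
sketch only, the real fast paths live in the per-arch asm/rwsem.h headers):
with ACTIVE_BIAS = 0x00000001, WAITING_BIAS = 0xffff0000 and
ACTIVE_WRITE_BIAS = WAITING_BIAS + ACTIVE_BIAS = 0xffff0001, two active
readers give 0x00000002, one active writer gives 0xffff0001, and a writer plus
queued waiters gives 0xffff0001 + 0xffff0000 = 0xfffe0001, matching the last
case in the table. The reader path the note describes then reduces to a
signed test:

	/* Hedged sketch of the reader fast path (cf. asm-generic/rwsem.h):
	 * adding ACTIVE_BIAS must leave the count positive, meaning only
	 * readers hold the lock; any writer or waiter bias drives the sum
	 * negative and forces the slow path. */
	static inline void down_read_sketch(struct rw_semaphore *sem)
	{
		if (atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0)
			rwsem_down_read_failed(sem);	/* writer active or waiters queued */
	}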
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3e3f13e8b429..f1ba77363fbb 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -458,9 +458,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 	}
 	rcu_prepare_for_idle(smp_processor_id());
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-	smp_mb__before_atomic_inc();  /* See above. */
+	smp_mb__before_atomic();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
-	smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
+	smp_mb__after_atomic();  /* Force ordering with next sojourn. */
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 
 	/*
@@ -578,10 +578,10 @@ void rcu_irq_exit(void)
 static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 			       int user)
 {
-	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
+	smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-	smp_mb__after_atomic_inc();  /* See above. */
+	smp_mb__after_atomic();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
@@ -706,10 +706,10 @@ void rcu_nmi_enter(void)
 	    (atomic_read(&rdtp->dynticks) & 0x1))
 		return;
 	rdtp->dynticks_nmi_nesting++;
-	smp_mb__before_atomic_inc();  /* Force delay from prior write. */
+	smp_mb__before_atomic();  /* Force delay from prior write. */
 	atomic_inc(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-	smp_mb__after_atomic_inc();  /* See above. */
+	smp_mb__after_atomic();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 }
 
@@ -728,9 +728,9 @@ void rcu_nmi_exit(void)
 	    --rdtp->dynticks_nmi_nesting != 0)
 		return;
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-	smp_mb__before_atomic_inc();  /* See above. */
+	smp_mb__before_atomic();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
-	smp_mb__after_atomic_inc();  /* Force delay to next write. */
+	smp_mb__after_atomic();  /* Force delay to next write. */
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
 
@@ -2918,7 +2918,7 @@ void synchronize_sched_expedited(void)
 	s = atomic_long_read(&rsp->expedited_done);
 	if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 		/* ensure test happens before caller kfree */
-		smp_mb__before_atomic_inc(); /* ^^^ */
+		smp_mb__before_atomic(); /* ^^^ */
 		atomic_long_inc(&rsp->expedited_workdone1);
 		return;
 	}
@@ -2936,7 +2936,7 @@ void synchronize_sched_expedited(void)
 	s = atomic_long_read(&rsp->expedited_done);
 	if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 		/* ensure test happens before caller kfree */
-		smp_mb__before_atomic_inc(); /* ^^^ */
+		smp_mb__before_atomic(); /* ^^^ */
 		atomic_long_inc(&rsp->expedited_workdone2);
 		return;
 	}
@@ -2965,7 +2965,7 @@ void synchronize_sched_expedited(void)
 	s = atomic_long_read(&rsp->expedited_done);
 	if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
 		/* ensure test happens before caller kfree */
-		smp_mb__before_atomic_inc(); /* ^^^ */
+		smp_mb__before_atomic(); /* ^^^ */
 		atomic_long_inc(&rsp->expedited_done_lost);
 		break;
 	}
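Every increment above flips the parity of ->dynticks: the counter is even
while the CPU is in an extended quiescent state and odd while it is not, and
the barrier pair guarantees that whatever the CPU did before the flip is
visible to whoever observes the new value. A sketch of the remote sampling
this enables, modeled on dyntick_save_progress_counter() in this file
(simplified):

	/* Grace-period machinery: take a full-barrier snapshot of the
	 * remote CPU's dynticks counter; an even value means that CPU
	 * is idle and therefore already in a quiescent state. */
	snap = atomic_add_return(0, &rdp->dynticks->dynticks);
	if ((snap & 0x1) == 0)
		return 1;	/* no need to interrupt an idle CPU */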
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 29977ae84e7e..cbc2c45265e2 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2462,9 +2462,9 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
 	/* Record start of fully idle period. */
 	j = jiffies;
 	ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_inc(&rdtp->dynticks_idle);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
 }
 
@@ -2529,9 +2529,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
 	}
 
 	/* Record end of idle period. */
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_inc(&rdtp->dynticks_idle);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
 
 	/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 71d9a9c93954..a62a7dec3986 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -90,6 +90,22 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
+#ifdef smp_mb__before_atomic
+void __smp_mb__before_atomic(void)
+{
+	smp_mb__before_atomic();
+}
+EXPORT_SYMBOL(__smp_mb__before_atomic);
+#endif
+
+#ifdef smp_mb__after_atomic
+void __smp_mb__after_atomic(void)
+{
+	smp_mb__after_atomic();
+}
+EXPORT_SYMBOL(__smp_mb__after_atomic);
+#endif
+
 void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
 {
 	unsigned long delta;
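These out-of-line copies support the transition period: the old names
(smp_mb__before_atomic_dec() and friends) survive as deprecated inline
wrappers, and those wrappers call exported functions rather than the new
macros directly so that barrier definitions need not be pulled into already
tangled headers. A sketch of the shim's shape (paraphrased from the
transition commit, not verbatim):

	/* Compatibility shim: the deprecated old name forwards to the
	 * exported out-of-line copy of the new generic barrier. */
	#ifndef smp_mb__before_atomic_inc
	static inline void __deprecated smp_mb__before_atomic_inc(void)
	{
		__smp_mb__before_atomic();
	}
	#endif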
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 8834243abee2..981fcd7dc394 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -165,7 +165,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 		 * do a write memory barrier, and then update the count, to
 		 * make sure the vector is visible when count is set.
 		 */
-		smp_mb__before_atomic_inc();
+		smp_mb__before_atomic();
 		atomic_inc(&(vec)->count);
 		do_mb = 1;
 	}
@@ -185,14 +185,14 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 		 * the new priority vec.
 		 */
 		if (do_mb)
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 
 		/*
 		 * When removing from the vector, we decrement the counter first
 		 * do a memory barrier and then clear the mask.
 		 */
 		atomic_dec(&(vec)->count);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		cpumask_clear_cpu(cpu, vec->mask);
 	}
 
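The update-side ordering above pairs with the search side in cpupri_find(): a
searcher must not consult a vector's cpumask until the count that advertises
it is set, and must not trust a stale mask bit after the count has dropped. A
sketch of that read side, modeled on cpupri_find() in this file (simplified):

	/* Search side: count first, read barrier, then the mask. */
	if (!atomic_read(&(vec)->count))
		continue;		/* no CPU at this priority */
	smp_rmb();			/* pairs with the barriers above */
	if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
		continue;		/* raced with an update; try next */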
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 7d50f794e248..0ffa20ae657b 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -394,7 +394,7 @@ EXPORT_SYMBOL(__wake_up_bit);
  *
  * In order for this to function properly, as it uses waitqueue_active()
  * internally, some kind of memory barrier must be done prior to calling
- * this. Typically, this will be smp_mb__after_clear_bit(), but in some
+ * this. Typically, this will be smp_mb__after_atomic(), but in some
  * cases where bitflags are manipulated non-atomically under a lock, one
  * may need to use a less regular barrier, such fs/inode.c's smp_mb(),
  * because spin_unlock() does not guarantee a memory barrier.
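The pattern this comment guards is the classic flag-then-wake sequence: clear
a bit, force ordering, then let the wakeup helper decide whether any waiter
needs waking. A sketch of a typical caller (MY_FLAG_BUSY and my_dev are
illustrative names, not from this file):

	/* Waker: the store clearing the bit must be ordered before the
	 * waitqueue_active() check inside __wake_up_bit(). */
	clear_bit(MY_FLAG_BUSY, &my_dev->flags);
	smp_mb__after_atomic();		/* the barrier the comment now names */
	wake_up_bit(&my_dev->flags, MY_FLAG_BUSY);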