author		Linus Torvalds <torvalds@linux-foundation.org>	2017-11-13 15:38:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-13 15:38:26 -0500
commit		8e9a2dba8686187d8c8179e5b86640e653963889 (patch)
tree		a4ba543649219cbb28d91aab65b785d763f5d069 /kernel/rcu/tree.c
parent		6098850e7e6978f95a958f79a645a653228d0002 (diff)
parent		450cbdd0125cfa5d7bbf9e2a6b6961cc48d29730 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle are:

   - Another attempt at enabling cross-release lockdep dependency
     tracking (automatically part of CONFIG_PROVE_LOCKING=y), this time
     with better performance and fewer false positives. (Byungchul Park)

   - Introduce lockdep_assert_irqs_enabled()/disabled() and convert
     open-coded equivalents to lockdep variants. (Frederic Weisbecker)

   - Add down_read_killable() and use it in the VFS's iterate_dir()
     method. (Kirill Tkhai)

   - Convert remaining uses of ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE().
     Most of the conversion was Coccinelle driven. (Mark Rutland,
     Paul E. McKenney)

   - Get rid of lockless_dereference(), by strengthening Alpha atomics,
     strengthening READ_ONCE() with smp_read_barrier_depends() and thus
     being able to convert users of lockless_dereference() to
     READ_ONCE(). (Will Deacon)

   - Various micro-optimizations:

      - better PV qspinlocks (Waiman Long)
      - better x86 barriers (Michael S. Tsirkin)
      - better x86 refcounts (Kees Cook)

   - ... plus other fixes and enhancements. (Borislav Petkov,
     Juergen Gross, Miguel Bernal Marin)"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  locking/x86: Use LOCK ADD for smp_mb() instead of MFENCE
  rcu: Use lockdep to assert IRQs are disabled/enabled
  netpoll: Use lockdep to assert IRQs are disabled/enabled
  timers/posix-cpu-timers: Use lockdep to assert IRQs are disabled/enabled
  sched/clock, sched/cputime: Use lockdep to assert IRQs are disabled/enabled
  irq_work: Use lockdep to assert IRQs are disabled/enabled
  irq/timings: Use lockdep to assert IRQs are disabled/enabled
  perf/core: Use lockdep to assert IRQs are disabled/enabled
  x86: Use lockdep to assert IRQs are disabled/enabled
  smp/core: Use lockdep to assert IRQs are disabled/enabled
  timers/hrtimer: Use lockdep to assert IRQs are disabled/enabled
  timers/nohz: Use lockdep to assert IRQs are disabled/enabled
  workqueue: Use lockdep to assert IRQs are disabled/enabled
  irq/softirqs: Use lockdep to assert IRQs are disabled/enabled
  locking/lockdep: Add IRQs disabled/enabled assertion APIs: lockdep_assert_irqs_enabled()/disabled()
  locking/pvqspinlock: Implement hybrid PV queued/unfair locks
  locking/rwlocks: Fix comments
  x86/paravirt: Set up the virt_spin_lock_key after static keys get initialized
  block, locking/lockdep: Assign a lock_class per gendisk used for wait_for_completion()
  workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes
  ...
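For reference, the lockdep_assert_irqs_enabled()/disabled() assertions this series introduces are thin lockdep-aware wrappers around WARN_ONCE(). The following is a sketch of their include/linux/lockdep.h definitions as added in this cycle; the exact guard conditions shown are an approximation, not authoritative:

/* Sketch only: approximate form of the assertions added by this series. */
#define lockdep_assert_irqs_enabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirqs_enabled,			\
			  "IRQs not enabled as expected\n");		\
	} while (0)

#define lockdep_assert_irqs_disabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  current->hardirqs_enabled,			\
			  "IRQs not disabled as expected\n");		\
	} while (0)

Compared with the open-coded RCU_LOCKDEP_WARN(!irqs_disabled(), "...") calls replaced in the diff below, these consult lockdep's own view of the hard-IRQ state, stay silent once debug_locks is cleared after the first splat, and drop the per-caller message strings.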
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e4fe06d42385..f9c0ca2ccf0c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -734,7 +734,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
 	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
 	int *fp = &rnp->need_future_gp[idx];
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_future_needs_gp() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	return READ_ONCE(*fp);
 }
 
@@ -746,7 +746,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
 static bool
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "cpu_needs_another_gp() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	if (rcu_gp_in_progress(rsp))
 		return false; /* No, a grace period is already in progress. */
 	if (rcu_future_needs_gp(rsp))
@@ -773,7 +773,7 @@ static void rcu_eqs_enter_common(bool user)
 	struct rcu_data *rdp;
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_enter_common() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 	    !user && !is_idle_task(current)) {
@@ -843,7 +843,7 @@ static void rcu_eqs_enter(bool user)
  */
 void rcu_idle_enter(void)
 {
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_idle_enter() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rcu_eqs_enter(false);
 }
 
@@ -861,7 +861,7 @@ void rcu_idle_enter(void)
  */
 void rcu_user_enter(void)
 {
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_user_enter() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rcu_eqs_enter(true);
 }
 #endif /* CONFIG_NO_HZ_FULL */
@@ -889,7 +889,7 @@ void rcu_irq_exit(void)
 {
 	struct rcu_dynticks *rdtp;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	/* Page faults can happen in NMI handlers, so check... */
@@ -959,7 +959,7 @@ static void rcu_eqs_exit(bool user)
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_exit() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
@@ -1039,7 +1039,7 @@ void rcu_irq_enter(void)
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	/* Page faults can happen in NMI handlers, so check... */