author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2013-10-15 15:49:59 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2013-10-15 15:49:59 -0400
commit		252997330908cb8ee3d5714539ed967b977c2eae
tree		627395514c622dc6eb51ae3a2a5bbebddc788299
parent		25e03a74e4a14e0d52a66fb56c728f049a6a26d3
parent		5c173eb8bcb9c1aa888bd6d14a4cb746f3dd2420
Merge branch 'idle.2013.09.25a' into HEAD
idle.2013.09.25a: Topic branch for idle entry-/exit-related changes.
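
The substance of the merge below is an API rename with inverted polarity: the old
rcu_is_cpu_idle() ("is this CPU idle?") becomes rcu_is_watching() ("is RCU watching
this CPU?"), and a lock-free __rcu_is_watching() is added for callers that have
already disabled preemption. As a rough sketch of how a caller's guard inverts
under the rename (the hook function itself is hypothetical, not part of this patch):

	/* Hypothetical entry hook; only the rcu_is_watching() test is from this merge. */
	static void example_entry_hook(void)
	{
		if (!rcu_is_watching())
			return;	/* idle/EQS: RCU read-side critical sections unsafe */
		rcu_read_lock();
		/* ... safely dereference RCU-protected data here ... */
		rcu_read_unlock();
	}

Before this merge the same guard would have read "if (rcu_is_cpu_idle()) return;".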
-rw-r--r--	include/linux/rcupdate.h	24
-rw-r--r--	include/linux/rcutiny.h		17
-rw-r--r--	include/linux/rcutree.h		2
-rw-r--r--	kernel/lockdep.c		4
-rw-r--r--	kernel/rcupdate.c		2
-rw-r--r--	kernel/rcutiny.c		10
-rw-r--r--	kernel/rcutree.c		23
-rw-r--r--	kernel/rcutree.h		2
-rw-r--r--	kernel/rcutree_plugin.h		24
9 files changed, 72 insertions, 36 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f1f1bc39346b..39cbb889e20d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -261,6 +261,10 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 		rcu_irq_exit(); \
 	} while (0)
 
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
+extern bool __rcu_is_watching(void);
+#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
+
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -297,10 +301,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
-extern int rcu_is_cpu_idle(void);
-#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
-
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
 bool rcu_lockdep_current_cpu_online(void);
 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
@@ -351,7 +351,7 @@ static inline int rcu_read_lock_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
@@ -402,7 +402,7 @@ static inline int rcu_read_lock_sched_held(void)
 
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
@@ -771,7 +771,7 @@ static inline void rcu_read_lock(void)
 	__rcu_read_lock();
 	__acquire(RCU);
 	rcu_lock_acquire(&rcu_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock() used illegally while idle");
 }
 
@@ -792,7 +792,7 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock() used illegally while idle");
 	rcu_lock_release(&rcu_lock_map);
 	__release(RCU);
@@ -821,7 +821,7 @@ static inline void rcu_read_lock_bh(void)
 	local_bh_disable();
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock_bh() used illegally while idle");
 }
 
@@ -832,7 +832,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
@@ -857,7 +857,7 @@ static inline void rcu_read_lock_sched(void)
 	preempt_disable();
 	__acquire(RCU_SCHED);
 	rcu_lock_acquire(&rcu_sched_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock_sched() used illegally while idle");
 }
 
@@ -875,7 +875,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock_sched() used illegally while idle");
 	rcu_lock_release(&rcu_sched_lock_map);
 	__release(RCU_SCHED);
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e31005ee339e..09ebcbe9fd78 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -132,4 +132,21 @@ static inline void rcu_scheduler_starting(void)
 }
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
+
+static inline bool rcu_is_watching(void)
+{
+	return __rcu_is_watching();
+}
+
+#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
+static inline bool rcu_is_watching(void)
+{
+	return true;
+}
+
+
+#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 226169d1bd2b..4b9c81548742 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -90,4 +90,6 @@ extern void exit_rcu(void);
 extern void rcu_scheduler_starting(void);
 extern int rcu_scheduler_active __read_mostly;
 
+extern bool rcu_is_watching(void);
+
 #endif /* __LINUX_RCUTREE_H */
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e16c45b9ee77..4e8e14c34e42 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4224,7 +4224,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
 	       !rcu_lockdep_current_cpu_online()
 	       ? "RCU used illegally from offline CPU!\n"
-	       : rcu_is_cpu_idle()
+	       : !rcu_is_watching()
 	       ? "RCU used illegally from idle CPU!\n"
 	       : "",
 	       rcu_scheduler_active, debug_locks);
@@ -4247,7 +4247,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	 * So complain bitterly if someone does call rcu_read_lock(),
 	 * rcu_read_lock_bh() and so on from extended quiescent states.
 	 */
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		printk("RCU used illegally from extended quiescent state!\n");
 
 	lockdep_print_held_locks(curr);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 3260a1074b48..c07af1c4e1bb 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -148,7 +148,7 @@ int rcu_read_lock_bh_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 7e3b0d6fc6e2..312e9709713f 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -176,18 +176,18 @@ void rcu_irq_enter(void)
 }
 EXPORT_SYMBOL_GPL(rcu_irq_enter);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
 
 /*
  * Test whether RCU thinks that the current CPU is idle.
  */
-int rcu_is_cpu_idle(void)
+bool __rcu_is_watching(void)
 {
-	return !rcu_dynticks_nesting;
+	return rcu_dynticks_nesting;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL(__rcu_is_watching);
 
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
 /*
  * Test whether the current CPU was interrupted from idle. Nested
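
Note the polarity flip in the TINY_RCU implementation above: the old rcu_is_cpu_idle()
returned !rcu_dynticks_nesting (nonzero nesting meaning "not idle"), while the new
__rcu_is_watching() returns rcu_dynticks_nesting itself, coerced to bool, so the two
predicates are exact logical complements.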
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index a06d172c75e0..240604aa3f70 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -650,21 +650,34 @@ void rcu_nmi_exit(void)
 }
 
 /**
- * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
+ * __rcu_is_watching - are RCU read-side critical sections safe?
+ *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections.  Unlike
+ * rcu_is_watching(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+}
+
+/**
+ * rcu_is_watching - see if RCU thinks that the current CPU is idle
  *
  * If the current CPU is in its idle loop and is neither in an interrupt
  * or NMI handler, return true.
  */
-int rcu_is_cpu_idle(void)
+bool rcu_is_watching(void)
 {
 	int ret;
 
 	preempt_disable();
-	ret = (atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1) == 0;
+	ret = __rcu_is_watching();
 	preempt_enable();
 	return ret;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL_GPL(rcu_is_watching);
 
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
@@ -2321,7 +2334,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
 	 */
-	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
 		invoke_rcu_core();
 
 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
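
The split in kernel/rcutree.c above follows a common kernel pattern: a double-underscore
fast path with a stricter calling contract (preemption already disabled), wrapped by a
convenience function that establishes that contract itself. The low bit of the per-CPU
dynticks counter carries the state: odd means RCU is watching, even means the CPU is in
an extended quiescent state. A minimal sketch of the resulting usage contract, with
hypothetical caller names:

	/* Caller already pinned to a CPU (or pinning briefly): raw check is legal. */
	static bool check_from_pinned_context(void)
	{
		bool watching;

		preempt_disable();
		watching = __rcu_is_watching();	/* reads this CPU's dynticks low bit */
		preempt_enable();
		return watching;
	}

	/* Caller in arbitrary context: the wrapper pins the CPU internally. */
	static bool check_from_anywhere(void)
	{
		return rcu_is_watching();
	}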
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 5f97eab602cd..52be957c9fe2 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -104,6 +104,8 @@ struct rcu_dynticks {
 					    /* idle-period nonlazy_posted snapshot. */
 	unsigned long last_accelerate;
 					    /* Last jiffy CBs were accelerated. */
+	unsigned long last_advance_all;
+					    /* Last jiffy CBs were all advanced. */
 	int tick_nohz_enabled_snap;	    /* Previously seen value from sysfs. */
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 };
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index cd95efa1da48..8d85a5ce093a 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1635,17 +1635,23 @@ module_param(rcu_idle_lazy_gp_delay, int, 0644);
 extern int tick_nohz_enabled;
 
 /*
- * Try to advance callbacks for all flavors of RCU on the current CPU.
- * Afterwards, if there are any callbacks ready for immediate invocation,
- * return true.
+ * Try to advance callbacks for all flavors of RCU on the current CPU, but
+ * only if it has been awhile since the last time we did so.  Afterwards,
+ * if there are any callbacks ready for immediate invocation, return true.
  */
 static bool rcu_try_advance_all_cbs(void)
 {
 	bool cbs_ready = false;
 	struct rcu_data *rdp;
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	struct rcu_node *rnp;
 	struct rcu_state *rsp;
 
+	/* Exit early if we advanced recently. */
+	if (jiffies == rdtp->last_advance_all)
+		return 0;
+	rdtp->last_advance_all = jiffies;
+
 	for_each_rcu_flavor(rsp) {
 		rdp = this_cpu_ptr(rsp->rda);
 		rnp = rdp->mynode;
@@ -1744,6 +1750,8 @@ static void rcu_prepare_for_idle(int cpu)
 	 */
 	if (rdtp->all_lazy &&
 	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
+		rdtp->all_lazy = false;
+		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
 		invoke_rcu_core();
 		return;
 	}
@@ -1773,17 +1781,11 @@ static void rcu_prepare_for_idle(int cpu)
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-	struct rcu_data *rdp;
-	struct rcu_state *rsp;
 
 	if (rcu_is_nocb_cpu(cpu))
 		return;
-	rcu_try_advance_all_cbs();
-	for_each_rcu_flavor(rsp) {
-		rdp = per_cpu_ptr(rsp->rda, cpu);
-		if (cpu_has_callbacks_ready_to_invoke(rdp))
-			invoke_rcu_core();
-	}
+	if (rcu_try_advance_all_cbs())
+		invoke_rcu_core();
 }
 
 /*
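
The rcutree_plugin.h changes above do two related things: rcu_try_advance_all_cbs() gains
a once-per-jiffy rate limit keyed on the new per-CPU last_advance_all timestamp, and
rcu_cleanup_after_idle() now trusts that function's return value instead of rescanning
every flavor's callback lists itself. The rate-limit idiom in isolation, as a minimal
sketch (the struct and function names here are illustrative, not from the patch):

	struct my_state {
		unsigned long last_run;		/* jiffy of the most recent scan */
	};

	/* Run the expensive body at most once per jiffy. */
	static bool expensive_scan(struct my_state *sp)
	{
		if (jiffies == sp->last_run)
			return false;		/* already scanned during this jiffy */
		sp->last_run = jiffies;		/* record before doing the work */
		/* ... expensive per-CPU callback scan would go here ... */
		return true;
	}

Since the function runs at most a handful of times per jiffy, a simple equality test
against the saved timestamp is enough here.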