author    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 18:55:08 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 18:55:08 -0400
commit    5bda4f638f36ef4c4e3b1397b02affc3db94356e (patch)
tree      d1bde148cde9981c31941382b2076084c7f5796c /include/linux
parent    a45c657f28f82b056173d1afc2e7ed1f1f68829f (diff)
parent    01c9db827146ce321562a992a5dbc1a49b1a99ce (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU changes from Ingo Molnar:
 "The main changes:

  - torture-test updates
  - callback-offloading changes
  - maintainership changes
  - update RCU documentation
  - miscellaneous fixes"

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (32 commits)
  rcu: Allow for NULL tick_nohz_full_mask when nohz_full= missing
  rcu: Fix a sparse warning in rcu_report_unblock_qs_rnp()
  rcu: Fix a sparse warning in rcu_initiate_boost()
  rcu: Fix __rcu_reclaim() to use true/false for bool
  rcu: Remove CONFIG_PROVE_RCU_DELAY
  rcu: Use __this_cpu_read() instead of per_cpu_ptr()
  rcu: Don't use NMIs to dump other CPUs' stacks
  rcu: Bind grace-period kthreads to non-NO_HZ_FULL CPUs
  rcu: Simplify priority boosting by putting rt_mutex in rcu_node
  rcu: Check both root and current rcu_node when setting up future grace period
  rcu: Allow post-unlock reference for rt_mutex
  rcu: Loosen __call_rcu()'s rcu_head alignment constraint
  rcu: Eliminate read-modify-write ACCESS_ONCE() calls
  rcu: Remove redundant ACCESS_ONCE() from tick_do_timer_cpu
  rcu: Make rcu node arrays static const char * const
  signal: Explain local_irq_save() call
  rcu: Handle obsolete references to TINY_PREEMPT_RCU
  rcu: Document deadlock-avoidance information for rcu_read_unlock()
  scripts: Teach get_maintainer.pl about the new "R:" tag
  rcu: Update rcu torture maintainership filename patterns
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/init_task.h |  9
-rw-r--r--  include/linux/rcupdate.h  | 45
-rw-r--r--  include/linux/sched.h     |  6
-rw-r--r--  include/linux/tick.h      | 20
4 files changed, 57 insertions(+), 23 deletions(-)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 6df7f9fe0d01..2bb4c4f3531a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -102,12 +102,6 @@ extern struct group_info init_groups;
 #define INIT_IDS
 #endif
 
-#ifdef CONFIG_RCU_BOOST
-#define INIT_TASK_RCU_BOOST() \
-	.rcu_boost_mutex = NULL,
-#else
-#define INIT_TASK_RCU_BOOST()
-#endif
 #ifdef CONFIG_TREE_PREEMPT_RCU
 #define INIT_TASK_RCU_TREE_PREEMPT() \
 	.rcu_blocked_node = NULL,
@@ -119,8 +113,7 @@ extern struct group_info init_groups;
 	.rcu_read_lock_nesting = 0, \
 	.rcu_read_unlock_special = 0, \
 	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
-	INIT_TASK_RCU_TREE_PREEMPT() \
-	INIT_TASK_RCU_BOOST()
+	INIT_TASK_RCU_TREE_PREEMPT()
 #else
 #define INIT_TASK_RCU_PREEMPT(tsk)
 #endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 6a94cc8b1ca0..d231aa17b1d7 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -826,15 +826,14 @@ static inline void rcu_preempt_sleep_check(void)
  * read-side critical section that would block in a !PREEMPT kernel.
  * But if you want the full story, read on!
  *
- * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
- * is illegal to block while in an RCU read-side critical section.  In
- * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
- * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
- * be preempted, but explicit blocking is illegal.  Finally, in preemptible
- * RCU implementations in real-time (with -rt patchset) kernel builds,
- * RCU read-side critical sections may be preempted and they may also
- * block, but only when acquiring spinlocks that are subject to priority
- * inheritance.
+ * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
+ * it is illegal to block while in an RCU read-side critical section.
+ * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT
+ * kernel builds, RCU read-side critical sections may be preempted,
+ * but explicit blocking is illegal.  Finally, in preemptible RCU
+ * implementations in real-time (with -rt patchset) kernel builds, RCU
+ * read-side critical sections may be preempted and they may also block, but
+ * only when acquiring spinlocks that are subject to priority inheritance.
  */
 static inline void rcu_read_lock(void)
 {
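
To make the rewrapped rules concrete, here is a minimal reader sketch (the struct foo type and gp pointer are hypothetical, not part of this patch). Under TREE_PREEMPT_RCU the section below may be preempted, but it must never block explicitly:

    #include <linux/rcupdate.h>

    /* Hypothetical RCU-protected structure and global pointer. */
    struct foo {
            int val;
    };
    static struct foo __rcu *gp;

    static int read_val(void)
    {
            struct foo *p;
            int val = -1;

            rcu_read_lock();                /* begin read-side critical section */
            p = rcu_dereference(gp);        /* fetch RCU-protected pointer */
            if (p)
                    val = p->val;           /* no explicit blocking in here */
            rcu_read_unlock();              /* end read-side critical section */
            return val;
    }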
@@ -858,6 +857,34 @@ static inline void rcu_read_lock(void)
 /**
  * rcu_read_unlock() - marks the end of an RCU read-side critical section.
  *
+ * In most situations, rcu_read_unlock() is immune from deadlock.
+ * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
+ * is responsible for deboosting, which it does via rt_mutex_unlock().
+ * Unfortunately, this function acquires the scheduler's runqueue and
+ * priority-inheritance spinlocks.  This means that deadlock could result
+ * if the caller of rcu_read_unlock() already holds one of these locks or
+ * any lock that is ever acquired while holding them.
+ *
+ * That said, RCU readers are never priority boosted unless they were
+ * preempted.  Therefore, one way to avoid deadlock is to make sure
+ * that preemption never happens within any RCU read-side critical
+ * section whose outermost rcu_read_unlock() is called with one of
+ * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
+ * a number of ways, for example, by invoking preempt_disable() before
+ * the critical section's outermost rcu_read_lock().
+ *
+ * Given that the set of locks acquired by rt_mutex_unlock() might change
+ * at any time, a somewhat more future-proofed approach is to make sure
+ * that preemption never happens within any RCU read-side critical
+ * section whose outermost rcu_read_unlock() is called with irqs disabled.
+ * This approach relies on the fact that rt_mutex_unlock() currently only
+ * acquires irq-disabled locks.
+ *
+ * The second of these two approaches is best in most situations,
+ * however, the first approach can also be useful, at least to those
+ * developers willing to keep abreast of the set of locks acquired by
+ * rt_mutex_unlock().
+ *
  * See rcu_read_lock() for more information.
  */
 static inline void rcu_read_unlock(void)
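
The two avoidance strategies the new comment describes can be sketched as follows (both reader functions are hypothetical, not part of this patch). The first prevents preemption outright, so the reader is never boosted and the outermost rcu_read_unlock() never needs rt_mutex_unlock(); the second disables irqs, which likewise rules out preemption and matches the fact that rt_mutex_unlock() currently acquires only irq-disabled locks:

    #include <linux/rcupdate.h>
    #include <linux/preempt.h>
    #include <linux/irqflags.h>

    /* Approach 1: no preemption, so no boosting, so no deboost deadlock. */
    static void reader_preempt_disabled(void)
    {
            preempt_disable();
            rcu_read_lock();
            /* ... access RCU-protected data ... */
            rcu_read_unlock();      /* never calls rt_mutex_unlock() here */
            preempt_enable();
    }

    /* Approach 2: irqs disabled across the whole critical section,
     * so the reader can never be preempted, hence never boosted. */
    static void reader_irqs_disabled(void)
    {
            unsigned long flags;

            local_irq_save(flags);
            rcu_read_lock();
            /* ... access RCU-protected data ... */
            rcu_read_unlock();      /* safe: reader was never preempted */
            local_irq_restore(flags);
    }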
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0376b054a0d0..b39a671cfd59 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1270,9 +1270,6 @@ struct task_struct {
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
-	struct rt_mutex *rcu_boost_mutex;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -2009,9 +2006,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	p->rcu_blocked_node = NULL;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
-	p->rcu_boost_mutex = NULL;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b84773cb9f4c..06cc093ab7ad 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -12,6 +12,7 @@
 #include <linux/hrtimer.h>
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
+#include <linux/sched.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
@@ -162,6 +163,7 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
 #ifdef CONFIG_NO_HZ_FULL
 extern bool tick_nohz_full_running;
 extern cpumask_var_t tick_nohz_full_mask;
+extern cpumask_var_t housekeeping_mask;
 
 static inline bool tick_nohz_full_enabled(void)
 {
@@ -194,6 +196,24 @@ static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
 
+static inline bool is_housekeeping_cpu(int cpu)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	if (tick_nohz_full_enabled())
+		return cpumask_test_cpu(cpu, housekeeping_mask);
+#endif
+	return true;
+}
+
+static inline void housekeeping_affine(struct task_struct *t)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	if (tick_nohz_full_enabled())
+		set_cpus_allowed_ptr(t, housekeeping_mask);
+
+#endif
+}
+
 static inline void tick_nohz_full_check(void)
 {
 	if (tick_nohz_full_enabled())
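
As a usage sketch for the two new helpers (the worker function is hypothetical, not from this series), a timer-heavy kthread can pin itself to housekeeping CPUs the way the RCU grace-period kthreads now do, while is_housekeeping_cpu() can guard per-CPU work; when CONFIG_NO_HZ_FULL is off, or nohz_full= was not given, every CPU counts as a housekeeping CPU:

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/tick.h>

    /* Hypothetical periodic worker that must stay off nohz_full CPUs. */
    static int my_housekeeping_worker(void *unused)
    {
            housekeeping_affine(current);   /* restrict to housekeeping_mask */

            while (!kthread_should_stop()) {
                    /* ... tick-heavy work, now kept off nohz_full CPUs ... */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }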