author    Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 09:44:12 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 09:44:12 -0400
commit    d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (patch)
tree      53e36ac30a3d0fdca3834f4e1eb36ddc67b512ce /include/linux
parent    5ff0b9e1a1da58b584aa4b8ea234be20b5a1164b (diff)
parent    fd19bda491207f66d39aeba93487197a087bc00b (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU updates from Ingo Molnar:
 "The main changes in this cycle were:

   - changes related to No-CBs CPUs and NO_HZ_FULL
   - RCU-tasks implementation
   - torture-test updates
   - miscellaneous fixes
   - locktorture updates
   - RCU documentation updates"

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (81 commits)
  workqueue: Use cond_resched_rcu_qs macro
  workqueue: Add quiescent state between work items
  locktorture: Cleanup header usage
  locktorture: Cannot hold read and write lock
  locktorture: Fix __acquire annotation for spinlock irq
  locktorture: Support rwlocks
  rcu: Eliminate deadlock between CPU hotplug and expedited grace periods
  locktorture: Document boot/module parameters
  rcutorture: Rename rcutorture_runnable parameter
  locktorture: Add test scenario for rwsem_lock
  locktorture: Add test scenario for mutex_lock
  locktorture: Make torture scripting account for new _runnable name
  locktorture: Introduce torture context
  locktorture: Support rwsems
  locktorture: Add infrastructure for torturing read locks
  torture: Address race in module cleanup
  locktorture: Make statistics generic
  locktorture: Teach about lock debugging
  locktorture: Support mutexes
  locktorture: Add documentation
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/cpu.h          2
-rw-r--r--  include/linux/init_task.h   12
-rw-r--r--  include/linux/lockdep.h      1
-rw-r--r--  include/linux/rcupdate.h   106
-rw-r--r--  include/linux/rcutiny.h      2
-rw-r--r--  include/linux/sched.h       39
-rw-r--r--  include/linux/torture.h      5
7 files changed, 105 insertions(+), 62 deletions(-)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 95978ad7fcdd..b2d9a43012b2 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -213,6 +213,7 @@ extern struct bus_type cpu_subsys;
 extern void cpu_hotplug_begin(void);
 extern void cpu_hotplug_done(void);
 extern void get_online_cpus(void);
+extern bool try_get_online_cpus(void);
 extern void put_online_cpus(void);
 extern void cpu_hotplug_disable(void);
 extern void cpu_hotplug_enable(void);
@@ -230,6 +231,7 @@ int cpu_down(unsigned int cpu);
 static inline void cpu_hotplug_begin(void) {}
 static inline void cpu_hotplug_done(void) {}
 #define get_online_cpus()	do { } while (0)
+#define try_get_online_cpus()	true
 #define put_online_cpus()	do { } while (0)
 #define cpu_hotplug_disable()	do { } while (0)
 #define cpu_hotplug_enable()	do { } while (0)
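The new try_get_online_cpus() gives callers a non-blocking way to exclude CPU hotplug; the expedited-grace-period deadlock fix in this series relies on being able to fall back when hotplug is already in flight. A minimal sketch of the intended calling pattern (the helper names are hypothetical, not part of this diff):

	#include <linux/cpu.h>

	static void sample_expedited_pass(void)
	{
		if (!try_get_online_cpus()) {
			sample_fall_back_to_normal_gp();	/* hypothetical fallback */
			return;
		}
		/* CPU hotplug is excluded until put_online_cpus(). */
		sample_ipi_all_online_cpus();			/* hypothetical work */
		put_online_cpus();
	}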
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2bb4c4f3531a..77fc43f8fb72 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -111,12 +111,21 @@ extern struct group_info init_groups;
 #ifdef CONFIG_PREEMPT_RCU
 #define INIT_TASK_RCU_PREEMPT(tsk)					\
 	.rcu_read_lock_nesting = 0,					\
-	.rcu_read_unlock_special = 0,					\
+	.rcu_read_unlock_special.s = 0,					\
 	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),		\
 	INIT_TASK_RCU_TREE_PREEMPT()
 #else
 #define INIT_TASK_RCU_PREEMPT(tsk)
 #endif
+#ifdef CONFIG_TASKS_RCU
+#define INIT_TASK_RCU_TASKS(tsk)					\
+	.rcu_tasks_holdout = false,					\
+	.rcu_tasks_holdout_list =					\
+		LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list),		\
+	.rcu_tasks_idle_cpu = -1,
+#else
+#define INIT_TASK_RCU_TASKS(tsk)
+#endif
 
 extern struct cred init_cred;
 
@@ -224,6 +233,7 @@ extern struct task_group root_task_group;
 	INIT_FTRACE_GRAPH						\
 	INIT_TRACE_RECURSION						\
 	INIT_TASK_RCU_PREEMPT(tsk)					\
+	INIT_TASK_RCU_TASKS(tsk)					\
 	INIT_CPUSET_SEQ(tsk)						\
 	INIT_RT_MUTEXES(tsk)						\
 	INIT_VTIME(tsk)							\
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 64c7425afbce..72d6dea7fac1 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -510,6 +510,7 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
 #define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
 
 #ifdef CONFIG_PROVE_LOCKING
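lock_map_acquire_tryread() differs from lock_map_acquire_read() only in the trylock argument, so lockdep records the read-side acquisition without treating it as a potentially blocking acquire. A rough sketch of the pattern a non-blocking reader such as try_get_online_cpus() can now annotate (simplified and hypothetical, not the actual kernel/cpu.c body):

	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	/* Hypothetical refcounted resource guarded by a mutex; the lockdep
	 * map is only present when lockdep is configured in. */
	static struct {
		struct mutex lock;
		int refcount;
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
		struct lockdep_map dep_map;
	#endif
	} sample_res = {
		.lock = __MUTEX_INITIALIZER(sample_res.lock),
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
		.dep_map = { .name = "sample_res.lock" },
	#endif
	};

	/* Opportunistically become a reader; never sleeps waiting for a writer. */
	static bool try_get_sample_res(void)
	{
		if (!mutex_trylock(&sample_res.lock))
			return false;
		/* Annotate as a shared try-acquisition rather than a blocking read. */
		lock_map_acquire_tryread(&sample_res.dep_map);
		sample_res.refcount++;
		mutex_unlock(&sample_res.lock);
		return true;
	}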
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d231aa17b1d7..a4a819ffb2d1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -47,14 +47,12 @@
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
-#ifdef CONFIG_RCU_TORTURE_TEST
-extern int rcutorture_runnable; /* for sysctl */
-#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
 
 enum rcutorture_type {
 	RCU_FLAVOR,
 	RCU_BH_FLAVOR,
 	RCU_SCHED_FLAVOR,
+	RCU_TASKS_FLAVOR,
 	SRCU_FLAVOR,
 	INVALID_RCU_FLAVOR
 };
@@ -197,6 +195,28 @@ void call_rcu_sched(struct rcu_head *head,
 
 void synchronize_sched(void);
 
+/**
+ * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_tasks() assumes
+ * that the read-side critical sections end at a voluntary context
+ * switch (not a preemption!), entry into idle, or transition to usermode
+ * execution. As such, there are no read-side primitives analogous to
+ * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
+ * to determine that all tasks have passed through a safe state, not so
+ * much for data-structure synchronization.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
+void synchronize_rcu_tasks(void);
+void rcu_barrier_tasks(void);
+
 #ifdef CONFIG_PREEMPT_RCU
 
 void __rcu_read_lock(void);
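call_rcu_tasks() is used like call_rcu(): embed an rcu_head in the object and pass a callback that runs once every task has passed through a voluntary context switch, idle, or usermode execution. A minimal sketch under that assumption (the struct and helper names are hypothetical):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	/* Hypothetical object whose code may still be executed by preempted tasks. */
	struct sample_trampoline {
		void		*insns;
		struct rcu_head	rh;
	};

	static void sample_trampoline_free(struct rcu_head *rh)
	{
		struct sample_trampoline *tp =
			container_of(rh, struct sample_trampoline, rh);

		kfree(tp);	/* safe: no task can still be executing tp->insns */
	}

	static void sample_trampoline_retire(struct sample_trampoline *tp)
	{
		/* Free only after an RCU-tasks grace period has elapsed. */
		call_rcu_tasks(&tp->rh, sample_trampoline_free);
	}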
@@ -238,8 +258,8 @@ static inline int rcu_preempt_depth(void)
 
 /* Internal to kernel */
 void rcu_init(void);
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
+void rcu_sched_qs(void);
+void rcu_bh_qs(void);
 void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 void rcu_idle_enter(void);
@@ -269,6 +289,14 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 					  struct task_struct *next) { }
 #endif /* CONFIG_RCU_USER_QS */
 
+#ifdef CONFIG_RCU_NOCB_CPU
+void rcu_init_nohz(void);
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+static inline void rcu_init_nohz(void)
+{
+}
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
 /**
  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
  * @a: Code that RCU needs to pay attention to.
@@ -294,6 +322,36 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 		rcu_irq_exit(); \
 	} while (0)
 
+/*
+ * Note a voluntary context switch for RCU-tasks benefit.  This is a
+ * macro rather than an inline function to avoid #include hell.
+ */
+#ifdef CONFIG_TASKS_RCU
+#define TASKS_RCU(x) x
+extern struct srcu_struct tasks_rcu_exit_srcu;
+#define rcu_note_voluntary_context_switch(t) \
+	do { \
+		if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
+			ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+	} while (0)
+#else /* #ifdef CONFIG_TASKS_RCU */
+#define TASKS_RCU(x) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#endif /* #else #ifdef CONFIG_TASKS_RCU */
+
+/**
+ * cond_resched_rcu_qs - Report potential quiescent states to RCU
+ *
+ * This macro resembles cond_resched(), except that it is defined to
+ * report potential quiescent states to RCU-tasks even if the cond_resched()
+ * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ */
+#define cond_resched_rcu_qs() \
+do { \
+	rcu_note_voluntary_context_switch(current); \
+	cond_resched(); \
+} while (0)
+
 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
 bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
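cond_resched_rcu_qs() is intended for long-running kernel loops: it behaves like cond_resched() but also clears the task's RCU-tasks holdout flag, so the loop reports a quiescent state even on kernels where cond_resched() itself is a no-op. A minimal sketch of such a loop (the kthread and its work function are hypothetical):

	#include <linux/kthread.h>
	#include <linux/rcupdate.h>

	static int sample_scan_thread(void *arg)
	{
		while (!kthread_should_stop()) {
			sample_process_one_item();	/* hypothetical unit of work */
			cond_resched_rcu_qs();		/* quiescent state + maybe reschedule */
		}
		return 0;
	}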
@@ -349,7 +407,7 @@ bool rcu_lockdep_current_cpu_online(void);
 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
 static inline bool rcu_lockdep_current_cpu_online(void)
 {
-	return 1;
+	return true;
 }
 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
 
@@ -371,41 +429,7 @@ extern struct lockdep_map rcu_sched_lock_map;
 extern struct lockdep_map rcu_callback_map;
 int debug_lockdep_rcu_enabled(void);
 
-/**
- * rcu_read_lock_held() - might we be in RCU read-side critical section?
- *
- * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
- * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
- * this assumes we are in an RCU read-side critical section unless it can
- * prove otherwise.  This is useful for debug checks in functions that
- * require that they be called within an RCU read-side critical section.
- *
- * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
- * and while lockdep is disabled.
- *
- * Note that rcu_read_lock() and the matching rcu_read_unlock() must
- * occur in the same context, for example, it is illegal to invoke
- * rcu_read_unlock() in process context if the matching rcu_read_lock()
- * was invoked from within an irq handler.
- *
- * Note that rcu_read_lock() is disallowed if the CPU is either idle or
- * offline from an RCU perspective, so check for those as well.
- */
-static inline int rcu_read_lock_held(void)
-{
-	if (!debug_lockdep_rcu_enabled())
-		return 1;
-	if (!rcu_is_watching())
-		return 0;
-	if (!rcu_lockdep_current_cpu_online())
-		return 0;
-	return lock_is_held(&rcu_lock_map);
-}
-
-/*
- * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
- * hell.
- */
+int rcu_read_lock_held(void);
 int rcu_read_lock_bh_held(void);
 
 /**
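Although rcu_read_lock_held() is now declared out of line, its use is unchanged: it backs lockdep-style assertions in functions that must run inside an RCU read-side critical section. A small hypothetical example of that pattern:

	#include <linux/bug.h>
	#include <linux/rcupdate.h>

	struct sample_node;					/* hypothetical type */
	static struct sample_node __rcu *sample_table[16];	/* hypothetical table */

	static struct sample_node *sample_lookup(unsigned int key)
	{
		/* Complain (under lockdep) if the caller forgot rcu_read_lock(). */
		WARN_ON_ONCE(!rcu_read_lock_held());
		return rcu_dereference(sample_table[key & 15]);
	}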
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index d40a6a451330..38cc5b1e252d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -80,7 +80,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 
 static inline void rcu_note_context_switch(int cpu)
 {
-	rcu_sched_qs(cpu);
+	rcu_sched_qs();
 }
 
 /*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5e63ba59258c..05a8c00e8339 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1213,6 +1213,13 @@ struct sched_dl_entity {
 	struct hrtimer dl_timer;
 };
 
+union rcu_special {
+	struct {
+		bool blocked;
+		bool need_qs;
+	} b;
+	short s;
+};
 struct rcu_node;
 
 enum perf_event_task_context {
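The point of union rcu_special is that the two per-task flags can be set individually by name yet tested or cleared together with a single load or store of the ->s member. A sketch of that intent (illustrative only, assumes CONFIG_PREEMPT_RCU so the field exists; not code from this diff):

	#include <linux/sched.h>

	static void sample_rcu_special_usage(struct task_struct *t)
	{
		/* Flags can be set individually by name... */
		t->rcu_read_unlock_special.b.blocked = true;

		/* ...tested together with a single load... */
		if (t->rcu_read_unlock_special.s)
			;	/* slow path would run here */

		/* ...and cleared together with a single store. */
		t->rcu_read_unlock_special.s = 0;
	}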
@@ -1265,12 +1272,18 @@ struct task_struct {
 
 #ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
-	char rcu_read_unlock_special;
+	union rcu_special rcu_read_unlock_special;
 	struct list_head rcu_node_entry;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#ifdef CONFIG_TASKS_RCU
+	unsigned long rcu_tasks_nvcsw;
+	bool rcu_tasks_holdout;
+	struct list_head rcu_tasks_holdout_list;
+	int rcu_tasks_idle_cpu;
+#endif /* #ifdef CONFIG_TASKS_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -2014,29 +2027,21 @@ extern void task_clear_jobctl_trapping(struct task_struct *task);
 extern void task_clear_jobctl_pending(struct task_struct *task,
 				      unsigned int mask);
 
-#ifdef CONFIG_PREEMPT_RCU
-
-#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
-
 static inline void rcu_copy_process(struct task_struct *p)
 {
+#ifdef CONFIG_PREEMPT_RCU
 	p->rcu_read_lock_nesting = 0;
-	p->rcu_read_unlock_special = 0;
-#ifdef CONFIG_TREE_PREEMPT_RCU
+	p->rcu_read_unlock_special.s = 0;
 	p->rcu_blocked_node = NULL;
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 	INIT_LIST_HEAD(&p->rcu_node_entry);
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TASKS_RCU
+	p->rcu_tasks_holdout = false;
+	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
+	p->rcu_tasks_idle_cpu = -1;
+#endif /* #ifdef CONFIG_TASKS_RCU */
 }
 
-#else
-
-static inline void rcu_copy_process(struct task_struct *p)
-{
-}
-
-#endif
-
 static inline void tsk_restore_flags(struct task_struct *task,
 				     unsigned long orig_flags, unsigned long flags)
 {
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 5ca58fcbaf1b..7759fc3c622d 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -51,7 +51,7 @@
 
 /* Definitions for online/offline exerciser. */
 int torture_onoff_init(long ooholdoff, long oointerval);
-char *torture_onoff_stats(char *page);
+void torture_onoff_stats(void);
 bool torture_onoff_failures(void);
 
 /* Low-rider random number generator. */
@@ -77,7 +77,8 @@ int torture_stutter_init(int s);
 /* Initialization and cleanup. */
 bool torture_init_begin(char *ttype, bool v, int *runnable);
 void torture_init_end(void);
-bool torture_cleanup(void);
+bool torture_cleanup_begin(void);
+void torture_cleanup_end(void);
 bool torture_must_stop(void);
 bool torture_must_stop_irq(void);
 void torture_kthread_stopping(char *title);
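Splitting torture_cleanup() into begin/end halves lets a torture test stop its own kthreads and print final statistics between the two calls. A rough sketch of how a test's module-exit path might now look (hedged: the per-test helpers are hypothetical, and torture_cleanup_begin() is assumed to return true when cleanup should be skipped, e.g. during a concurrent shutdown):

	#include <linux/torture.h>

	static void sample_torture_cleanup(void)
	{
		if (torture_cleanup_begin())
			return;				/* assumed: skip on concurrent shutdown */

		sample_stop_all_kthreads();		/* hypothetical per-test teardown */
		sample_print_final_stats();		/* hypothetical */

		torture_cleanup_end();
	}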