author    Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:10:49 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:10:49 -0500
commit    37ea95a959d4a49846ecbf2dd45326b6b34bf049 (patch)
tree      43791e1244ce06d8ca18ecbfd0b0f6dcb86ebb8b /include
parent    de0c276b31538fcd56611132f20b63eae2891876 (diff)
parent    630e1e0bcddfda9566462d4f9a0d58b31c29d467 (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU update from Ingo Molnar:
 "The major features of this tree are:

  1. A first version of no-callbacks CPUs.  This version prohibits
     offlining CPU 0, but only when enabled via CONFIG_RCU_NOCB_CPU=y.
     Relaxing this constraint is in progress, but not yet ready for
     prime time.  These commits were posted to LKML at
     https://lkml.org/lkml/2012/10/30/724.

  2. Changes to SRCU that allow statically initialized srcu_struct
     structures.  These commits were posted to LKML at
     https://lkml.org/lkml/2012/10/30/296.

  3. Restructuring of RCU's debugfs output.  These commits were posted
     to LKML at https://lkml.org/lkml/2012/10/30/341.

  4. Additional CPU-hotplug/RCU improvements, posted to LKML at
     https://lkml.org/lkml/2012/10/30/327.  Note that the commit
     eliminating __stop_machine() was judged to be too high a risk,
     so it is deferred to 3.9.

  5. Changes to RCU's idle interface, most notably a new module
     parameter that redirects normal grace-period operations to their
     expedited equivalents.  These were posted to LKML at
     https://lkml.org/lkml/2012/10/30/739.

  6. Additional diagnostics for RCU's CPU stall warning facility,
     posted to LKML at https://lkml.org/lkml/2012/10/30/315.  The most
     notable change reduces the default RCU CPU stall-warning time
     from 60 seconds to 21 seconds, so that it once again happens
     sooner than the softlockup timeout.

  7. Documentation updates, which were posted to LKML at
     https://lkml.org/lkml/2012/10/30/280.  A couple of late-breaking
     changes were posted at https://lkml.org/lkml/2012/11/16/634 and
     https://lkml.org/lkml/2012/11/16/547.

  8. Miscellaneous fixes, which were posted to LKML at
     https://lkml.org/lkml/2012/10/30/309.

  9. Finally, a fix for a lockdep-RCU splat, which was posted to LKML
     at https://lkml.org/lkml/2012/11/7/486."

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (49 commits)
  context_tracking: New context tracking susbsystem
  sched: Mark RCU reader in sched_show_task()
  rcu: Separate accounting of callbacks from callback-free CPUs
  rcu: Add callback-free CPUs
  rcu: Add documentation for the new rcuexp debugfs trace file
  rcu: Update documentation for TREE_RCU debugfs tracing
  rcu: Reduce default RCU CPU stall warning timeout
  rcu: Fix TINY_RCU rcu_is_cpu_rrupt_from_idle check
  rcu: Clarify memory-ordering properties of grace-period primitives
  rcu: Add new rcutorture module parameters to start/end test messages
  rcu: Remove list_for_each_continue_rcu()
  rcu: Fix batch-limit size problem
  rcu: Add tracing for synchronize_sched_expedited()
  rcu: Remove old debugfs interfaces and also RCU flavor name
  rcu: split 'rcuhier' to each flavor
  rcu: split 'rcugp' to each flavor
  rcu: split 'rcuboost' to each flavor
  rcu: split 'rcubarrier' to each flavor
  rcu: Fix tracing formatting
  rcu: Remove the interface "rcudata.csv"
  ...
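The module parameter mentioned in item 5 appears in the posted series as rcu_expedited in kernel/rcupdate.c (the name is taken from the series, not from this include/-only diff, so treat it as an assumption); a minimal sketch of enabling it on the kernel command line:

	rcupdate.rcu_expedited=1

With this set, normal grace-period primitives such as synchronize_rcu() take the expedited path, trading extra IPI traffic for much shorter grace-period latency.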
Diffstat (limited to 'include')
-rw-r--r--  include/linux/context_tracking.h  18
-rw-r--r--  include/linux/rculist.h           17
-rw-r--r--  include/linux/rcupdate.h          29
-rw-r--r--  include/linux/sched.h             10
-rw-r--r--  include/linux/srcu.h              34
-rw-r--r--  include/trace/events/rcu.h         1
6 files changed, 82 insertions(+), 27 deletions(-)
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
new file mode 100644
index 000000000000..e24339ccb7f0
--- /dev/null
+++ b/include/linux/context_tracking.h
@@ -0,0 +1,18 @@
+#ifndef _LINUX_CONTEXT_TRACKING_H
+#define _LINUX_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+#include <linux/sched.h>
+
+extern void user_enter(void);
+extern void user_exit(void);
+extern void context_tracking_task_switch(struct task_struct *prev,
+					 struct task_struct *next);
+#else
+static inline void user_enter(void) { }
+static inline void user_exit(void) { }
+static inline void context_tracking_task_switch(struct task_struct *prev,
+						struct task_struct *next) { }
+#endif /* !CONFIG_CONTEXT_TRACKING */
+
+#endif
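Since this header only declares the hooks, here is a minimal sketch of how an architecture's syscall path is expected to use them; the entry/exit function names are illustrative, not part of this patch:

	#include <linux/context_tracking.h>

	/* Hypothetical arch syscall-entry hook. */
	static void example_syscall_entry(void)
	{
		user_exit();	/* leaving user mode: RCU must watch this CPU again */
		/* ... dispatch the system call ... */
	}

	/* Hypothetical arch return-to-user hook. */
	static void example_syscall_exit(void)
	{
		/* ... finish kernel work ... */
		user_enter();	/* entering user mode: an RCU extended quiescent state */
	}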
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index e0f0fab20415..c92dd28eaa6c 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -286,23 +286,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
 		&pos->member != (head); \
 		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 
-
-/**
- * list_for_each_continue_rcu
- * @pos:	the &struct list_head to use as a loop cursor.
- * @head:	the head for your list.
- *
- * Iterate over an rcu-protected list, continuing after current point.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_continue_rcu(pos, head) \
-	for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \
-	     (pos) != (head); \
-	     (pos) = rcu_dereference_raw(list_next_rcu(pos)))
-
 /**
  * list_for_each_entry_continue_rcu - continue iteration over list of given type
  * @pos:	the type * to use as a loop cursor.
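Callers of the removed primitive are expected to switch to the typed variant that survives this hunk; a minimal sketch (struct item, its list member, and the key field are hypothetical):

	struct item {
		struct list_head list;
		int key;
	};

	/* Resume an RCU-protected scan after 'pos'; caller holds rcu_read_lock(). */
	static struct item *find_next_match(struct item *pos,
					    struct list_head *head, int key)
	{
		list_for_each_entry_continue_rcu(pos, head, list)
			if (pos->key == key)
				return pos;
		return NULL;
	}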
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 7c968e4f929e..275aa3f1062d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -90,6 +90,25 @@ extern void do_trace_rcu_torture_read(char *rcutorturename,
  * that started after call_rcu() was invoked.  RCU read-side critical
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing RCU read-side critical sections.  On systems with
+ * more than one CPU, this means that when "func()" is invoked, each CPU
+ * is guaranteed to have executed a full memory barrier since the end of
+ * its last RCU read-side critical section whose beginning preceded the
+ * call to call_rcu().  It also means that each CPU executing an RCU
+ * read-side critical section that continues beyond the start of "func()"
+ * must have executed a memory barrier after the call_rcu() but before
+ * the beginning of that RCU read-side critical section.  Note that these
+ * guarantees include CPUs that are offline, idle, or executing in user
+ * mode, as well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * resulting RCU callback function "func()", then both CPU A and CPU B
+ * are guaranteed to execute a full memory barrier during the time
+ * interval between the call to call_rcu() and the invocation of "func()"
+ * -- even if CPU A and CPU B are the same CPU (but again only if the
+ * system has more than one CPU).
  */
 extern void call_rcu(struct rcu_head *head,
 		     void (*func)(struct rcu_head *head));
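A minimal sketch of the usage pattern the new comment reasons about (struct foo and both functions are hypothetical):

	struct foo {
		int data;
		struct rcu_head rcu;
	};

	/* Invoked after a grace period; every pre-existing reader has finished. */
	static void foo_reclaim(struct rcu_head *rp)
	{
		struct foo *fp = container_of(rp, struct foo, rcu);
		kfree(fp);
	}

	static void foo_release(struct foo *fp)
	{
		/* Unlink fp first, then defer the free past the grace period. */
		call_rcu(&fp->rcu, foo_reclaim);
	}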
@@ -118,6 +137,9 @@ extern void call_rcu(struct rcu_head *head,
  * OR
  * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
  * These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_bh(struct rcu_head *head,
 			void (*func)(struct rcu_head *head));
@@ -137,6 +159,9 @@ extern void call_rcu_bh(struct rcu_head *head,
  * OR
  * anything that disables preemption.
  * These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
@@ -197,13 +222,13 @@ extern void rcu_user_enter(void);
 extern void rcu_user_exit(void);
 extern void rcu_user_enter_after_irq(void);
 extern void rcu_user_exit_after_irq(void);
-extern void rcu_user_hooks_switch(struct task_struct *prev,
-				  struct task_struct *next);
 #else
 static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
 static inline void rcu_user_enter_after_irq(void) { }
 static inline void rcu_user_exit_after_irq(void) { }
+static inline void rcu_user_hooks_switch(struct task_struct *prev,
+					 struct task_struct *next) { }
 #endif /* CONFIG_RCU_USER_QS */
 
 extern void exit_rcu(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3e387df065fc..29116b853ece 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -109,6 +109,8 @@ extern void update_cpu_load_nohz(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
+extern void dump_cpu_task(int cpu);
+
 struct seq_file;
 struct cfs_rq;
 struct task_group;
@@ -1845,14 +1847,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 
 #endif
 
-static inline void rcu_switch(struct task_struct *prev,
-			      struct task_struct *next)
-{
-#ifdef CONFIG_RCU_USER_QS
-	rcu_user_hooks_switch(prev, next);
-#endif
-}
-
 static inline void tsk_restore_flags(struct task_struct *task,
 				     unsigned long orig_flags, unsigned long flags)
 {
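With rcu_switch() gone, the scheduler is expected to invoke the new context-tracking hook directly; a schematic sketch, not the actual kernel/sched/core.c change (which lies outside this include/-only diffstat):

	/* Somewhere in the scheduler's context-switch path: */
	context_tracking_task_switch(prev, next);	/* stub unless CONFIG_CONTEXT_TRACKING */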
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 55a5c52cbb25..6eb691b08358 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -16,8 +16,10 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  * Copyright (C) IBM Corporation, 2006
+ * Copyright (C) Fujitsu, 2012
  *
  * Author: Paul McKenney <paulmck@us.ibm.com>
+ *	   Lai Jiangshan <laijs@cn.fujitsu.com>
  *
  * For detailed explanation of Read-Copy Update mechanism see -
  *		Documentation/RCU/ *.txt
@@ -40,6 +42,8 @@ struct rcu_batch {
 	struct rcu_head *head, **tail;
 };
 
+#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
+
 struct srcu_struct {
 	unsigned completed;
 	struct srcu_struct_array __percpu *per_cpu_ref;
@@ -70,12 +74,42 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
 	__init_srcu_struct((sp), #sp, &__srcu_key); \
 })
 
+#define __SRCU_DEP_MAP_INIT(srcu_name)	.dep_map = { .name = #srcu_name },
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 int init_srcu_struct(struct srcu_struct *sp);
 
+#define __SRCU_DEP_MAP_INIT(srcu_name)
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+void process_srcu(struct work_struct *work);
+
+#define __SRCU_STRUCT_INIT(name)					\
+	{								\
+		.completed = -300,					\
+		.per_cpu_ref = &name##_srcu_array,			\
+		.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock),	\
+		.running = false,					\
+		.batch_queue = RCU_BATCH_INIT(name.batch_queue),	\
+		.batch_check0 = RCU_BATCH_INIT(name.batch_check0),	\
+		.batch_check1 = RCU_BATCH_INIT(name.batch_check1),	\
+		.batch_done = RCU_BATCH_INIT(name.batch_done),		\
+		.work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
+		__SRCU_DEP_MAP_INIT(name)				\
+	}
+
+/*
+ * Define and initialize an srcu struct at build time.
+ * Don't call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ */
+#define DEFINE_SRCU(name)						\
+	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+	struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+
+#define DEFINE_STATIC_SRCU(name)					\
+	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+	static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+
 /**
  * call_srcu() - Queue a callback for invocation after an SRCU grace period
  * @sp: srcu_struct in which to queue the callback
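A minimal sketch of the build-time initialization these macros enable (my_srcu and both functions are hypothetical):

	DEFINE_STATIC_SRCU(my_srcu);

	static void reader(void)
	{
		int idx = srcu_read_lock(&my_srcu);
		/* ... dereference SRCU-protected data ... */
		srcu_read_unlock(&my_srcu, idx);
	}

	static void updater(void)
	{
		/* ... unlink the old data ... */
		synchronize_srcu(&my_srcu);	/* wait for pre-existing SRCU readers */
		/* ... free the old data ... */
	}

As the comment on the macros says, neither init_srcu_struct() nor cleanup_srcu_struct() may be called on such a statically defined srcu_struct.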
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 5bde94d8585b..d4f559b1ec34 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -549,6 +549,7 @@ TRACE_EVENT(rcu_torture_read,
  *	"EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
  *	"Inc1": rcu_barrier_callback() piggyback check counter incremented.
  *	"Offline": rcu_barrier_callback() found offline CPU
+ *	"OnlineNoCB": rcu_barrier_callback() found online no-CBs CPU.
  *	"OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
  *	"OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
  *	"IRQ": An rcu_barrier_callback() callback posted on remote CPU.