author     Linus Torvalds <torvalds@linux-foundation.org>  2008-10-10 16:10:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-10-10 16:10:51 -0400
commit     b922df7383749a1c0b7ea64c50fa839263d3816b (patch)
tree       dd72306ac173753649eb049d6d2734f4e2b95ff6 /include
parent     c54dcd8ec9f05c8951d1e622e90904aef95379f9 (diff)
parent     cdbb92b31d3c465aa96bd09f2d42c39b87b32bee (diff)
Merge branch 'rcu-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'rcu-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (21 commits)
rcu: RCU-based detection of stalled CPUs for Classic RCU, fix
rcu: RCU-based detection of stalled CPUs for Classic RCU
rcu: add rcu_read_lock_sched() / rcu_read_unlock_sched()
rcu: fix sparse shadowed variable warning
doc/RCU: fix pseudocode in rcuref.txt
rcuclassic: fix compiler warning
rcu: use irq-safe locks
rcuclassic: fix compilation NG
rcu: fix locking cleanup fallout
rcu: remove redundant ACCESS_ONCE definition from rcupreempt.c
rcu: fix classic RCU locking cleanup lockdep problem
rcu: trace fix possible mem-leak
rcu: just rename call_rcu_bh instead of making it a macro
rcu: remove list_for_each_rcu()
rcu: fixes to include/linux/rcupreempt.h
rcu: classic RCU locking and memory-barrier cleanups
rcu: prevent console flood when one CPU sees another AWOL via RCU
rcu, debug: detect stalled grace periods, cleanups
rcu, debug: detect stalled grace periods
rcu classic: new algorithm for callbacks-processing(v2)
...
Diffstat (limited to 'include')
-rw-r--r--  include/linux/compiler.h   |  4
-rw-r--r--  include/linux/rcuclassic.h | 37
-rw-r--r--  include/linux/rculist.h    | 14
-rw-r--r--  include/linux/rcupdate.h   | 20
-rw-r--r--  include/linux/rcupreempt.h | 11
5 files changed, 58 insertions(+), 28 deletions(-)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c8bd2daf95ec..8322141ee480 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
  * ACCESS_ONCE() in different C statements.
  *
  * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time.
+ * merging, or refetching absolutely anything at any time.  Its main intended
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
  */
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
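
The expanded comment pins down what ACCESS_ONCE() is for. As a hedged illustration (not part of the patch; the flag and function names are made up), the pattern it describes looks like this:

/*
 * Sketch: a flag shared between process-level code and an irq handler
 * on the same CPU.  ACCESS_ONCE() forces the compiler to reload the
 * flag on every iteration instead of caching it in a register; it does
 * nothing about CPU-level reordering, which is acceptable here because
 * both parties run on the same CPU.
 */
static int irq_event_seen;		/* set from the irq handler */

static void wait_for_irq_event(void)
{
	while (!ACCESS_ONCE(irq_event_seen))
		cpu_relax();
}
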
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 4ab843622727..5f89b62e6983 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -40,12 +40,21 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+#define RCU_SECONDS_TILL_STALL_CHECK	( 3 * HZ)	/* for rcp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ)	/* for rcp->jiffies_stall */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 /* Global control variables for rcupdate callback mechanism. */
 struct rcu_ctrlblk {
 	long	cur;		/* Current batch number. */
 	long	completed;	/* Number of the last completed batch */
-	int	next_pending;	/* Is the next batch already waiting? */
+	long	pending;	/* Number of the last pending batch */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+	unsigned long gp_start;	/* Time at which GP started in jiffies. */
+	unsigned long jiffies_stall;
+				/* Time at which to check for CPU stalls. */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 	int	signaled;
 
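
A hedged sketch of how these new fields and timeouts fit together; the stall-detection logic itself lives in kernel/rcuclassic.c, outside this 'include' diffstat, so the function bodies below are illustrative only:

/*
 * At grace-period start, record when to start worrying.  Treat these
 * helper names as illustrative, not as the patch's exact code.
 */
static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
{
	rcp->gp_start = jiffies;
	rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
}

/*
 * Called periodically: if the current grace period has overstayed its
 * welcome, complain once, then back off for the longer recheck period
 * to avoid flooding the console.
 */
static void check_gp_stall(struct rcu_ctrlblk *rcp)
{
	if (rcp->cur != rcp->completed &&
	    time_after(jiffies, rcp->jiffies_stall)) {
		printk(KERN_ERR "RCU grace period stalled for %lu jiffies\n",
		       jiffies - rcp->gp_start);
		rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	}
}
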
@@ -66,11 +75,7 @@ static inline int rcu_batch_after(long a, long b)
 	return (a - b) > 0;
 }
 
-/*
- * Per-CPU data for Read-Copy UPdate.
- * nxtlist - new callbacks are added here
- * curlist - current batch for which quiescent cycle started if any
- */
+/* Per-CPU data for Read-Copy UPdate. */
 struct rcu_data {
 	/* 1) quiescent state handling : */
 	long		quiescbatch;	/* Batch # for grace period */
@@ -78,12 +83,24 @@ struct rcu_data {
 	int		qs_pending;	/* core waits for quiesc state */
 
 	/* 2) batch handling */
-	long		batch;		/* Batch # for current RCU batch */
+	/*
+	 * if nxtlist is not NULL, then:
+	 * batch:
+	 *	The batch # for the last entry of nxtlist
+	 * [*nxttail[1], NULL = *nxttail[2]):
+	 *	Entries that batch # <= batch
+	 * [*nxttail[0], *nxttail[1]):
+	 *	Entries that batch # <= batch - 1
+	 * [nxtlist, *nxttail[0]):
+	 *	Entries that batch # <= batch - 2
+	 *	The grace period for these entries has completed, and
+	 *	the other grace-period-completed entries may be moved
+	 *	here temporarily in rcu_process_callbacks().
+	 */
+	long		batch;
 	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail;
+	struct rcu_head **nxttail[3];
 	long		qlen;		/* # of queued callbacks */
-	struct rcu_head *curlist;
-	struct rcu_head **curtail;
 	struct rcu_head *donelist;
 	struct rcu_head **donetail;
 	long		blimit;		/* Upper limit on a processed batch */
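
The new comment is dense, so here is a hedged sketch (illustrative names, donelist handling elided) of the mechanics it describes: one singly linked nxtlist carved into three age segments by the nxttail[] pointers, so callbacks age across batches by copying tail pointers rather than splicing list nodes:

static void sketch_enqueue(struct rcu_data *rdp, struct rcu_head *head)
{
	head->next = NULL;
	*rdp->nxttail[2] = head;	/* append to the newest segment */
	rdp->nxttail[2] = &head->next;
}

static void sketch_age_segments(struct rcu_data *rdp)
{
	/*
	 * The oldest segment [nxtlist, *nxttail[0]) has now waited two
	 * batches; splicing it onto donelist is elided here.  The list
	 * restarts at the next segment ...
	 */
	rdp->nxtlist = *rdp->nxttail[0];
	/*
	 * ... and the remaining segments age by one batch simply by
	 * copying tail pointers -- no list node is touched.
	 */
	rdp->nxttail[0] = rdp->nxttail[1];
	rdp->nxttail[1] = rdp->nxttail[2];
}
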
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index eb4443c7e05b..e649bd3f2c97 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -198,20 +198,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	at->prev = last;
 }
 
-/**
- * list_for_each_rcu - iterate over an rcu-protected list
- * @pos:	the &struct list_head to use as a loop cursor.
- * @head:	the head for your list.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_rcu(pos, head) \
-	for (pos = rcu_dereference((head)->next); \
-		prefetch(pos->next), pos != (head); \
-		pos = rcu_dereference(pos->next))
-
 #define __list_for_each_rcu(pos, head) \
 	for (pos = rcu_dereference((head)->next); \
 		pos != (head); \
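
With list_for_each_rcu() removed (one of the merged commits above), traversals use the entry-based variant that remains in rculist.h and yields typed entries directly. A small sketch with a made-up struct:

struct foo {
	int val;
	struct list_head list;
};

static int foo_find(struct list_head *head, int val)
{
	struct foo *p;
	int found = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(p, head, list) {
		if (p->val == val) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
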
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e8b4039cfb2f..86f1f5e43e33 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -133,6 +133,26 @@ struct rcu_head {
 #define rcu_read_unlock_bh() __rcu_read_unlock_bh()
 
 /**
+ * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
+ *
+ * Should be used with either
+ * - synchronize_sched()
+ * or
+ * - call_rcu_sched() and rcu_barrier_sched()
+ *  on the write-side to insure proper synchronization.
+ */
+#define rcu_read_lock_sched() preempt_disable()
+
+/*
+ * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
+ *
+ * See rcu_read_lock_sched for more information.
+ */
+#define rcu_read_unlock_sched() preempt_enable()
+
+
+
+/**
  * rcu_dereference - fetch an RCU-protected pointer in an
  * RCU read-side critical section. This pointer may later
  * be safely dereferenced.
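
The new primitives are thin wrappers around preempt_disable()/preempt_enable(), named so that the read-side intent is explicit and greppable. A hedged usage sketch (struct, pointer, and function names are made up) pairing them with synchronize_sched() on the update side:

struct my_config {
	int value;
};

static struct my_config *global_cfg;

static int cfg_read_value(void)
{
	int val;

	rcu_read_lock_sched();		/* disables preemption */
	val = rcu_dereference(global_cfg)->value;
	rcu_read_unlock_sched();
	return val;
}

static void cfg_update(struct my_config *new_cfg)
{
	struct my_config *old = global_cfg;

	rcu_assign_pointer(global_cfg, new_cfg);
	synchronize_sched();	/* waits out all preempt-disabled readers */
	kfree(old);
}
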
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 0967f03b0705..3e05c09b54a2 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -57,7 +57,13 @@ static inline void rcu_qsctr_inc(int cpu)
 	rdssp->sched_qs++;
 }
 #define rcu_bh_qsctr_inc(cpu)
-#define call_rcu_bh(head, rcu) call_rcu(head, rcu)
+
+/*
+ * Someone might want to pass call_rcu_bh as a function pointer.
+ * So this needs to just be a rename and not a macro function.
+ *  (no parentheses)
+ */
+#define call_rcu_bh	call_rcu
 
 /**
  * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
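
The comment explains the rename; concretely, the difference only shows up when someone takes call_rcu_bh's address. A hedged illustration (the typedef and variable are made up):

/* Matches the signature of call_rcu()/call_rcu_bh(). */
typedef void (*rcu_queue_fn)(struct rcu_head *head,
			     void (*func)(struct rcu_head *head));

/* Fine with "#define call_rcu_bh call_rcu": this expands to call_rcu. */
static rcu_queue_fn queue_fn = call_rcu_bh;

/*
 * With the old "#define call_rcu_bh(head, rcu) call_rcu(head, rcu)",
 * a bare call_rcu_bh is not followed by an argument list, so the
 * function-like macro does not expand and the build fails on an
 * undeclared symbol.
 */
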
@@ -111,7 +117,6 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
 struct softirq_action;
 
 #ifdef CONFIG_NO_HZ
-DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
 
 static inline void rcu_enter_nohz(void)
 {
@@ -126,8 +131,8 @@ static inline void rcu_exit_nohz(void)
 {
 	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
 
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
 			  &rs);
 }
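
This last hunk is an ordering fix rather than a cleanup: per its own comment, the barrier exists to order the dynticks increment against the RCU read-side critical sections that follow nohz exit, so it must sit after the increment; the old placement before the increment ordered only prior accesses against it. A minimal annotated sketch of the fixed ordering (illustrative function name):

static inline void rcu_exit_nohz_sketch(void)
{
	__get_cpu_var(rcu_dyntick_sched).dynticks++;	/* becomes odd */
	smp_mb();	/* order the ++ before later read-side accesses */
	/*
	 * Read-side critical sections that run after this point are
	 * ordered after the counter update, which is the property the
	 * original comment promises; with the barrier before the
	 * increment, nothing enforced it.
	 */
}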