Diffstat (limited to 'include'):

 include/linux/compiler.h   |  4 +++-
 include/linux/rcuclassic.h | 31 +++++++++++++++++++------------
 include/linux/rculist.h    | 14 --------------
 include/linux/rcupreempt.h | 11 ++++++++---
 4 files changed, 32 insertions(+), 28 deletions(-)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c8bd2daf95ec..8322141ee480 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
  * ACCESS_ONCE() in different C statements.
  *
  * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time.
+ * merging, or refetching absolutely anything at any time.  Its main intended
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
  */
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
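The new comment pins down what ACCESS_ONCE() is actually for. As a rough
illustration (not part of the patch; irq_seen, my_irq_handler and
wait_for_irq are hypothetical names), process-level code polling a flag
set by an irq handler on the same CPU needs ACCESS_ONCE() so the compiler
neither caches the load across loop iterations nor merges or tears the
store:

	/* Hypothetical sketch of the use case named in the new comment. */
	static int irq_seen;	/* shared with the irq handler */

	static irqreturn_t my_irq_handler(int irq, void *dev_id)
	{
		ACCESS_ONCE(irq_seen) = 1;	/* exactly one store */
		return IRQ_HANDLED;
	}

	static void wait_for_irq(void)
	{
		while (!ACCESS_ONCE(irq_seen))	/* reloaded on every pass */
			cpu_relax();
	}

Because both sides run on the same CPU, compiler ordering is the only
concern and no smp_*() barriers are needed, which is exactly the
situation the comment singles out.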
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 4ab843622727..29bf528c7dcc 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -45,7 +45,10 @@
 struct rcu_ctrlblk {
 	long	cur;		/* Current batch number.                      */
 	long	completed;	/* Number of the last completed batch         */
-	int	next_pending;	/* Is the next batch already waiting?         */
+	long	pending;	/* Number of the last pending batch           */
+#ifdef CONFIG_DEBUG_RCU_STALL
+	unsigned long gp_check;	/* Time grace period should end, in seconds. */
+#endif /* #ifdef CONFIG_DEBUG_RCU_STALL */
 
 	int	signaled;
 
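Here the next_pending flag becomes a pending batch number, so all three
grace-period counters (cur, completed, pending) live on the same wrapping
number line, and gp_check (under CONFIG_DEBUG_RCU_STALL) records when the
grace period should end for stall debugging. Batch numbers are compared
with the signed-subtraction helper visible in the next hunk's context; a
standalone userspace sketch (not in the patch) of why that beats a plain
'>' comparison:

	#include <limits.h>
	#include <stdio.h>

	/* The kernel writes this as (a - b) > 0 and builds with wrapping
	 * signed arithmetic; the demo uses the equivalent well-defined
	 * unsigned form. */
	static int rcu_batch_after(long a, long b)
	{
		return (long)((unsigned long)a - (unsigned long)b) > 0;
	}

	int main(void)
	{
		long completed = LONG_MAX;
		long pending = (long)((unsigned long)completed + 2); /* wrapped */

		printf("%d\n", pending > completed);                 /* 0: wrong */
		printf("%d\n", rcu_batch_after(pending, completed)); /* 1: right */
		return 0;
	}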
@@ -66,11 +69,7 @@ static inline int rcu_batch_after(long a, long b)
 	return (a - b) > 0;
 }
 
-/*
- * Per-CPU data for Read-Copy UPdate.
- * nxtlist - new callbacks are added here
- * curlist - current batch for which quiescent cycle started if any
- */
+/* Per-CPU data for Read-Copy UPdate. */
 struct rcu_data {
 	/* 1) quiescent state handling : */
 	long		quiescbatch;     /* Batch # for grace period */
@@ -78,12 +77,24 @@ struct rcu_data {
 	int		qs_pending;	 /* core waits for quiesc state */
 
 	/* 2) batch handling */
-	long		batch;           /* Batch # for current RCU batch */
+	/*
+	 * if nxtlist is not NULL, then:
+	 * batch:
+	 *	The batch # for the last entry of nxtlist
+	 * [*nxttail[1], NULL = *nxttail[2]):
+	 *	Entries that batch # <= batch
+	 * [*nxttail[0], *nxttail[1]):
+	 *	Entries that batch # <= batch - 1
+	 * [nxtlist, *nxttail[0]):
+	 *	Entries that batch # <= batch - 2
+	 *	The grace period for these entries has completed, and
+	 *	the other grace-period-completed entries may be moved
+	 *	here temporarily in rcu_process_callbacks().
+	 */
+	long		batch;
 	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail;
+	struct rcu_head **nxttail[3];
 	long            qlen;		 /* # of queued callbacks */
-	struct rcu_head *curlist;
-	struct rcu_head **curtail;
 	struct rcu_head *donelist;
 	struct rcu_head **donetail;
 	long		blimit;		 /* Upper limit on a processed batch */
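The three-slot nxttail[] replaces the old curlist/curtail pair: a single
singly linked nxtlist now carries callbacks for up to three consecutive
batches, with nxttail[i] pointing at the ->next slot that terminates
segment i (an empty segment is just two tails aimed at the same slot).
A sketch of the layout and the enqueue step it permits (not from the
patch; sketch_enqueue is a hypothetical name, though the pointer
manipulation follows from the new struct):

	/*
	 * nxtlist -> [batch <= b-2]... -> [batch <= b-1]... -> [batch <= b]... -> NULL
	 *                            ^                    ^                   ^
	 *                      *nxttail[0]          *nxttail[1]         *nxttail[2]
	 */
	static void sketch_enqueue(struct rcu_data *rdp, struct rcu_head *head)
	{
		head->next = NULL;
		*rdp->nxttail[2] = head;	/* append to the newest segment */
		rdp->nxttail[2] = &head->next;	/* tail now ends at the new entry */
	}

When the grace period for the oldest segment completes, the entries in
[nxtlist, *nxttail[0]) can be spliced onto donelist and the remaining
tails shifted down, which is the movement the new comment attributes to
rcu_process_callbacks().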
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index eb4443c7e05b..e649bd3f2c97 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -198,20 +198,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	at->prev = last;
 }
 
-/**
- * list_for_each_rcu - iterate over an rcu-protected list
- * @pos:	the &struct list_head to use as a loop cursor.
- * @head:	the head for your list.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_rcu(pos, head) \
-	for (pos = rcu_dereference((head)->next); \
-		prefetch(pos->next), pos != (head); \
-		pos = rcu_dereference(pos->next))
-
 #define __list_for_each_rcu(pos, head) \
 	for (pos = rcu_dereference((head)->next); \
 		pos != (head); \
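With list_for_each_rcu() removed, traversals go through
list_for_each_entry_rcu(), which yields the containing object rather
than a bare list_head cursor. A hypothetical example (struct foo,
foo_head and find_foo are illustrative, not from the patch):

	struct foo {
		int val;
		struct list_head list;
	};

	static LIST_HEAD(foo_head);	/* mutated via list_add_rcu() etc. */

	static bool find_foo(int val)
	{
		struct foo *f;
		bool found = false;

		rcu_read_lock();	/* guards the whole traversal */
		list_for_each_entry_rcu(f, &foo_head, list) {
			if (f->val == val) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}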
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 0967f03b0705..3e05c09b54a2 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -57,7 +57,13 @@ static inline void rcu_qsctr_inc(int cpu)
 	rdssp->sched_qs++;
 }
 #define rcu_bh_qsctr_inc(cpu)
-#define call_rcu_bh(head, rcu)	call_rcu(head, rcu)
+
+/*
+ * Someone might want to pass call_rcu_bh as a function pointer.
+ * So this needs to just be a rename and not a macro function.
+ *  (no parentheses)
+ */
+#define call_rcu_bh	call_rcu
 
 /**
  * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
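The object-like macro matters because a function-like macro expands only
when the name is followed by an argument list; a bare mention of
call_rcu_bh would have been left unexpanded and failed to build. A
hypothetical illustration (crcu_fn is an illustrative name):

	/* With the old '#define call_rcu_bh(head, rcu) call_rcu(head, rcu)',
	 * the initializer below would not expand (no argument list follows
	 * the name).  With the plain rename it expands to call_rcu, whose
	 * address can be taken. */
	static void (*crcu_fn)(struct rcu_head *head,
			       void (*func)(struct rcu_head *head)) = call_rcu_bh;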
@@ -111,7 +117,6 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
 struct softirq_action;
 
 #ifdef CONFIG_NO_HZ
-DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
 
 static inline void rcu_enter_nohz(void)
 {
@@ -126,8 +131,8 @@ static inline void rcu_exit_nohz(void)
 {
 	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
 
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
 			  &rs);
 }
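The last hunk moves the barrier to the side of the increment it is meant
to order: per its own comment, CPUs that observe the incremented dynticks
counter must also observe this CPU's subsequent read-side critical
sections, which requires smp_mb() after the ++, not before it. A sketch
of the intended ordering (not in the patch; sketch_exit_nohz is a
hypothetical name):

	static void sketch_exit_nohz(long *dynticks)
	{
		(*dynticks)++;	/* (A) counter goes odd: CPU is non-idle */
		smp_mb();	/* orders (A) before (B); with the barrier
				 * placed before (A), nothing prevented (A)
				 * and (B) from being reordered */
		/* (B) later RCU read-side critical sections begin here */
	}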