path: root/include/linux
author		Linus Torvalds <torvalds@linux-foundation.org>	2016-01-11 16:46:11 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-11 16:46:11 -0500
commit		9061cbe62adeccf8c986883bcd40f4aeee59ea75 (patch)
tree		3e99c9e86dc03e839558cf2a02f8d47d0e33cf63 /include/linux
parent		ddf1d6238dd13a3bd948e8fcb1109798ef0af49b (diff)
parent		3104fb3dd45bb47ff1382d1c079c251710ddcae3 (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU updates from Ingo Molnar:
 "The changes in this cycle were:

   - Adding transitivity uniformly to rcu_node structure ->lock
     acquisitions.  (This is implemented by the first two commits on top
     of v4.4-rc2 due to the pervasive nature of this change.)

   - Documentation updates, including RCU requirements.

   - Expedited grace-period changes.

   - Miscellaneous fixes.

   - Linked-list fixes, courtesy of KTSAN.

   - Torture-test updates.

   - Late-breaking fix to sysrq-generated crash.

  One thing I should note is that these pieces of documentation are
  fairly large files:

    .../RCU/Design/Requirements/Requirements.html  | 2897 ++++++++++++++++++++
    .../RCU/Design/Requirements/Requirements.htmlx | 2741 ++++++++++++++++++

  and are written in HTML, not the usual .txt style.  I hope they are
  fine"

Paul McKenney explains the html docs:
 "For whatever it is worth, the reason for this unconventional choice
  was that attempts to do the diagrams in ASCII art failed miserably.
  And attempts to do ASCII art for the upcoming documentation of the
  data structures failed even more miserably"

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (49 commits)
  sysrq: Fix warning in sysrq generated crash.
  list: Add lockless list traversal primitives
  rcu: Make rcu_gp_init() be bool rather than int
  rcu: Move wakeup out from under rnp->lock
  rcu: Fix comment for rcu_dereference_raw_notrace
  rcu: Don't redundantly disable irqs in rcu_irq_{enter,exit}()
  rcu: Make cpu_needs_another_gp() be bool
  rcu: Eliminate unused rcu_init_one() argument
  rcu: Remove TINY_RCU bloat from pointless boot parameters
  torture: Place console.log files correctly from the get-go
  torture: Abbreviate console error dump
  rcutorture: Print symbolic name for ->gp_state
  rcutorture: Print symbolic name for rcu_torture_writer_state
  rcutorture: Remove CONFIG_RCU_USER_QS from rcutorture selftest doc
  rcutorture: Default grace period to three minutes, allow override
  rcutorture: Dump stack when GP kthread stalls
  rcutorture: Flag nonexistent RCU GP kthread
  rcutorture: Add batch number to script printout
  Documentation/memory-barriers.txt: Fix ACCESS_ONCE thinko
  documentation: Update RCU requirements based on expedited changes
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/list.h		14
-rw-r--r--	include/linux/list_bl.h		2
-rw-r--r--	include/linux/list_nulls.h	2
-rw-r--r--	include/linux/rculist.h		105
-rw-r--r--	include/linux/rcupdate.h	21
-rw-r--r--	include/linux/rcutiny.h		8
-rw-r--r--	include/linux/rcutree.h		4
-rw-r--r--	include/linux/tracepoint.h	4
8 files changed, 124 insertions(+), 36 deletions(-)
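
The "lockless list traversal primitives" item in the merge message refers to list_entry_lockless() and list_for_each_entry_lockless(), added to include/linux/rculist.h in the diff below. A minimal usage sketch, assuming a hypothetical add-only list; struct foo, foo_head and foo_find() are illustrative names, not part of this merge:

	#include <linux/rculist.h>

	struct foo {
		int val;
		struct list_head list;	/* only ever added to, never deleted from */
	};

	static LIST_HEAD(foo_head);

	/*
	 * Reader in a context where full RCU read-side marking is impossible,
	 * e.g. preemption already disabled and lockdep unusable.
	 */
	static struct foo *foo_find(int val)
	{
		struct foo *p;

		list_for_each_entry_lockless(p, &foo_head, list)
			if (p->val == val)
				return p;
		return NULL;
	}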
diff --git a/include/linux/list.h b/include/linux/list.h
index 993395a2e55c..5356f4d661a7 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -24,7 +24,7 @@
 
 static inline void INIT_LIST_HEAD(struct list_head *list)
 {
-	list->next = list;
+	WRITE_ONCE(list->next, list);
 	list->prev = list;
 }
 
@@ -42,7 +42,7 @@ static inline void __list_add(struct list_head *new,
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
-	prev->next = new;
+	WRITE_ONCE(prev->next, new);
 }
 #else
 extern void __list_add(struct list_head *new,
@@ -186,7 +186,7 @@ static inline int list_is_last(const struct list_head *list,
  */
 static inline int list_empty(const struct list_head *head)
 {
-	return head->next == head;
+	return READ_ONCE(head->next) == head;
 }
 
 /**
@@ -608,7 +608,7 @@ static inline int hlist_unhashed(const struct hlist_node *h)
 
 static inline int hlist_empty(const struct hlist_head *h)
 {
-	return !h->first;
+	return !READ_ONCE(h->first);
 }
 
 static inline void __hlist_del(struct hlist_node *n)
@@ -642,7 +642,7 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
 	n->next = first;
 	if (first)
 		first->pprev = &n->next;
-	h->first = n;
+	WRITE_ONCE(h->first, n);
 	n->pprev = &h->first;
 }
 
@@ -653,14 +653,14 @@ static inline void hlist_add_before(struct hlist_node *n,
 	n->pprev = next->pprev;
 	n->next = next;
 	next->pprev = &n->next;
-	*(n->pprev) = n;
+	WRITE_ONCE(*(n->pprev), n);
 }
 
 static inline void hlist_add_behind(struct hlist_node *n,
 				    struct hlist_node *prev)
 {
 	n->next = prev->next;
-	prev->next = n;
+	WRITE_ONCE(prev->next, n);
 	n->pprev = &prev->next;
 
 	if (n->next)
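
A sketch of the reader/writer pairing these annotations are aimed at, assuming a hypothetical work queue (pending, pending_lock, queue_has_work() and queue_work_item() are illustrative names): the writer is still serialized by a lock, but a reader may poll list_empty() without taking it, so the head-pointer update must be a single untorn store and the check a single untorn load.

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static LIST_HEAD(pending);
	static DEFINE_SPINLOCK(pending_lock);

	/* Lockless poll: list_empty() now does READ_ONCE(pending.next). */
	static bool queue_has_work(void)
	{
		return !list_empty(&pending);
	}

	/* Serialized writer: list_add() now publishes via WRITE_ONCE(). */
	static void queue_work_item(struct list_head *item)
	{
		spin_lock(&pending_lock);
		list_add(item, &pending);
		spin_unlock(&pending_lock);
	}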
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index 8132214e8efd..ee7229a6c06a 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -70,7 +70,7 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h,
 
 static inline int hlist_bl_empty(const struct hlist_bl_head *h)
 {
-	return !((unsigned long)h->first & ~LIST_BL_LOCKMASK);
+	return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK);
 }
 
 static inline void hlist_bl_add_head(struct hlist_bl_node *n,
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index 444d2b1313bd..b01fe1009084 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -57,7 +57,7 @@ static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
 
 static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
 {
-	return is_a_nulls(h->first);
+	return is_a_nulls(READ_ONCE(h->first));
 }
 
 static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
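
list_bl and list_nulls get the same annotation for the same reason: the emptiness check may be issued without holding the bucket lock. A hypothetical RCU hash-bucket lookup showing where hlist_nulls_empty() typically sits; struct obj, obj_lookup() and the bucket layout are illustrative, not from this merge:

	#include <linux/rculist_nulls.h>

	struct obj {
		int key;
		struct hlist_nulls_node node;
	};

	/* Caller holds rcu_read_lock(). */
	static struct obj *obj_lookup(struct hlist_nulls_head *bucket, int key)
	{
		struct obj *o;
		struct hlist_nulls_node *n;

		if (hlist_nulls_empty(bucket))	/* READ_ONCE() of bucket->first */
			return NULL;

		hlist_nulls_for_each_entry_rcu(o, n, bucket, node)
			if (o->key == key)
				return o;
		return NULL;
	}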
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 5ed540986019..14ec1652daf4 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -179,32 +179,31 @@ static inline void list_replace_rcu(struct list_head *old,
 }
 
 /**
- * list_splice_init_rcu - splice an RCU-protected list into an existing list.
+ * __list_splice_init_rcu - join an RCU-protected list into an existing list.
  * @list: the RCU-protected list to splice
- * @head: the place in the list to splice the first list into
+ * @prev: points to the last element of the existing list
+ * @next: points to the first element of the existing list
  * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
  *
- * @head can be RCU-read traversed concurrently with this function.
+ * The list pointed to by @prev and @next can be RCU-read traversed
+ * concurrently with this function.
  *
  * Note that this function blocks.
  *
- * Important note: the caller must take whatever action is necessary to
- * prevent any other updates to @head.  In principle, it is possible
- * to modify the list as soon as sync() begins execution.
- * If this sort of thing becomes necessary, an alternative version
- * based on call_rcu() could be created.  But only if -really-
- * needed -- there is no shortage of RCU API members.
+ * Important note: the caller must take whatever action is necessary to prevent
+ * any other updates to the existing list.  In principle, it is possible to
+ * modify the list as soon as sync() begins execution. If this sort of thing
+ * becomes necessary, an alternative version based on call_rcu() could be
+ * created.  But only if -really- needed -- there is no shortage of RCU API
+ * members.
  */
-static inline void list_splice_init_rcu(struct list_head *list,
-					struct list_head *head,
-					void (*sync)(void))
+static inline void __list_splice_init_rcu(struct list_head *list,
+					  struct list_head *prev,
+					  struct list_head *next,
+					  void (*sync)(void))
 {
 	struct list_head *first = list->next;
 	struct list_head *last = list->prev;
-	struct list_head *at = head->next;
-
-	if (list_empty(list))
-		return;
 
 	/*
 	 * "first" and "last" tracking list, so initialize it.  RCU readers
@@ -231,10 +230,40 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	 * this function.
 	 */
 
-	last->next = at;
-	rcu_assign_pointer(list_next_rcu(head), first);
-	first->prev = head;
-	at->prev = last;
+	last->next = next;
+	rcu_assign_pointer(list_next_rcu(prev), first);
+	first->prev = prev;
+	next->prev = last;
+}
+
+/**
+ * list_splice_init_rcu - splice an RCU-protected list into an existing list,
+ *                        designed for stacks.
+ * @list: the RCU-protected list to splice
+ * @head: the place in the existing list to splice the first list into
+ * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ */
+static inline void list_splice_init_rcu(struct list_head *list,
+					struct list_head *head,
+					void (*sync)(void))
+{
+	if (!list_empty(list))
+		__list_splice_init_rcu(list, head, head->next, sync);
+}
+
+/**
+ * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
+ *                             list, designed for queues.
+ * @list: the RCU-protected list to splice
+ * @head: the place in the existing list to splice the first list into
+ * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ */
+static inline void list_splice_tail_init_rcu(struct list_head *list,
+					     struct list_head *head,
+					     void (*sync)(void))
+{
+	if (!list_empty(list))
+		__list_splice_init_rcu(list, head->prev, head, sync);
 }
 
 /**
@@ -305,6 +334,42 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 
 /**
+ * list_entry_lockless - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu(), but requires some implicit RCU
+ * read-side guarding.  One example is running within a special
+ * exception-time environment where preemption is disabled and where
+ * lockdep cannot be invoked (in which case updaters must use RCU-sched,
+ * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
+ * example is when items are added to the list, but never deleted.
+ */
+#define list_entry_lockless(ptr, type, member) \
+	container_of((typeof(ptr))lockless_dereference(ptr), type, member)
+
+/**
+ * list_for_each_entry_lockless - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu(), but requires some implicit RCU
+ * read-side guarding.  One example is running within a special
+ * exception-time environment where preemption is disabled and where
+ * lockdep cannot be invoked (in which case updaters must use RCU-sched,
+ * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
+ * example is when items are added to the list, but never deleted.
+ */
+#define list_for_each_entry_lockless(pos, head, member) \
+	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
+	     &pos->member != (head); \
+	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
+
+/**
  * list_for_each_entry_continue_rcu - continue iteration over list of given type
  * @pos: the type * to use as a loop cursor.
  * @head: the head for your list.
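
The refactoring above keeps list_splice_init_rcu() as a stack-ordered splice and adds list_splice_tail_init_rcu() for FIFO use. A minimal sketch of the new queue-ordered variant, assuming a hypothetical drain path (pending, pending_mutex and drain_pending() are illustrative names); note that the sync callback blocks, so updaters are serialized with a mutex rather than a spinlock:

	#include <linux/mutex.h>
	#include <linux/rculist.h>

	static LIST_HEAD(pending);		/* appended to with list_add_tail_rcu() */
	static DEFINE_MUTEX(pending_mutex);	/* serializes every updater of 'pending' */

	static void drain_pending(struct list_head *work)
	{
		mutex_lock(&pending_mutex);
		/*
		 * Move everything queued so far onto the tail of 'work' in FIFO
		 * order; synchronize_rcu() is the blocking sync step that lets
		 * concurrent RCU readers of 'pending' finish safely.
		 */
		list_splice_tail_init_rcu(&pending, work, synchronize_rcu);
		mutex_unlock(&pending_mutex);
	}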
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a0189ba67fde..14e6f47ee16f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -48,10 +48,17 @@
 
 #include <asm/barrier.h>
 
+#ifndef CONFIG_TINY_RCU
 extern int rcu_expedited; /* for sysctl */
+extern int rcu_normal;    /* also for sysctl */
+#endif /* #ifndef CONFIG_TINY_RCU */
 
 #ifdef CONFIG_TINY_RCU
 /* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
+static inline bool rcu_gp_is_normal(void)  /* Internal RCU use. */
+{
+	return true;
+}
 static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
 {
 	return false;
@@ -65,6 +72,7 @@ static inline void rcu_unexpedite_gp(void)
 {
 }
 #else /* #ifdef CONFIG_TINY_RCU */
+bool rcu_gp_is_normal(void);     /* Internal RCU use. */
 bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
 void rcu_expedite_gp(void);
 void rcu_unexpedite_gp(void);
@@ -321,7 +329,6 @@ static inline int rcu_preempt_depth(void)
 
 /* Internal to kernel */
 void rcu_init(void);
-void rcu_end_inkernel_boot(void);
 void rcu_sched_qs(void);
 void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
@@ -329,6 +336,12 @@ struct notifier_block;
 int rcu_cpu_notify(struct notifier_block *self,
 		   unsigned long action, void *hcpu);
 
+#ifndef CONFIG_TINY_RCU
+void rcu_end_inkernel_boot(void);
+#else /* #ifndef CONFIG_TINY_RCU */
+static inline void rcu_end_inkernel_boot(void) { }
+#endif /* #ifndef CONFIG_TINY_RCU */
+
 #ifdef CONFIG_RCU_STALL_COMMON
 void rcu_sysrq_start(void);
 void rcu_sysrq_end(void);
@@ -379,9 +392,9 @@ static inline void rcu_init_nohz(void)
  */
 #define RCU_NONIDLE(a) \
 	do { \
-		rcu_irq_enter(); \
+		rcu_irq_enter_irqson(); \
 		do { a; } while (0); \
-		rcu_irq_exit(); \
+		rcu_irq_exit_irqson(); \
 	} while (0)
 
 /*
@@ -741,7 +754,7 @@ static inline void rcu_preempt_sleep_check(void)
  * The tracing infrastructure traces RCU (we want that), but unfortunately
  * some of the RCU checks causes tracing to lock up the system.
  *
- * The tracing version of rcu_dereference_raw() must not call
+ * The no-tracing version of rcu_dereference_raw() must not call
  * rcu_read_lock_held().
  */
 #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
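
With RCU_NONIDLE() now built on the _irqson entry/exit helpers, it can be invoked from code that runs with interrupts enabled in a context RCU regards as idle. A hedged sketch of the usual pattern; report_from_idle() and trace_my_event() are made-up names:

	/*
	 * From the idle loop RCU ignores this CPU's read-side critical
	 * sections, so anything that may use RCU -- tracepoints, for
	 * instance -- is wrapped in RCU_NONIDLE(), which momentarily marks
	 * the CPU non-idle via rcu_irq_enter_irqson()/rcu_irq_exit_irqson().
	 */
	static void report_from_idle(int state)
	{
		RCU_NONIDLE(trace_my_event(state));
	}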
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 4c1aaf9cce7b..64809aea661c 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -181,6 +181,14 @@ static inline void rcu_irq_enter(void)
 {
 }
 
+static inline void rcu_irq_exit_irqson(void)
+{
+}
+
+static inline void rcu_irq_enter_irqson(void)
+{
+}
+
 static inline void rcu_irq_exit(void)
 {
 }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 60d15a080d7c..ad1eda9fa4da 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -37,7 +37,7 @@ void rcu_cpu_stall_reset(void);
 /*
  * Note a virtualization-based context switch.  This is simply a
  * wrapper around rcu_note_context_switch(), which allows TINY_RCU
- * to save a few bytes.
+ * to save a few bytes. The caller must have disabled interrupts.
  */
 static inline void rcu_virt_note_context_switch(int cpu)
 {
@@ -97,6 +97,8 @@ void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
+void rcu_irq_enter_irqson(void);
+void rcu_irq_exit_irqson(void);
 
 void exit_rcu(void);
 
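
rcutiny.h adds empty stubs, matching its existing no-op rcu_irq_enter()/rcu_irq_exit(), while rcutree.h only declares the new functions; the TREE_RCU bodies live in kernel/rcu/tree.c and are outside this include/linux diffstat. A sketch of what that implementation is assumed to amount to, given that rcu_irq_enter()/rcu_irq_exit() require interrupts to be disabled:

	/* Assumed shape of the TREE_RCU implementation (not in this diff):
	 * bracket the irqs-disabled primitives with irq save/restore. */
	void rcu_irq_enter_irqson(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		rcu_irq_enter();
		local_irq_restore(flags);
	}

	void rcu_irq_exit_irqson(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		rcu_irq_exit();
		local_irq_restore(flags);
	}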
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 696a339c592c..7834a8a8bf1e 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -171,8 +171,8 @@ extern void syscall_unregfunc(void);
 				TP_PROTO(data_proto), \
 				TP_ARGS(data_args), \
 				TP_CONDITION(cond), \
-				rcu_irq_enter(), \
-				rcu_irq_exit()); \
+				rcu_irq_enter_irqson(), \
+				rcu_irq_exit_irqson()); \
 	}
 #else
 #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
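
The callers affected here are the _rcuidle tracepoint flavors, which fire from contexts RCU considers idle and, after this change, may do so with interrupts enabled. A hypothetical call site; the surrounding function is illustrative, while trace_cpu_idle_rcuidle() is an existing _rcuidle tracepoint:

	static void enter_idle_state(unsigned int state, unsigned int cpu)
	{
		/* May run with irqs on; the _rcuidle wrapper expands through
		 * __DECLARE_TRACE_RCU() and thus rcu_irq_enter_irqson(). */
		trace_cpu_idle_rcuidle(state, cpu);

		/* ... architecture-specific idle entry would follow ... */
	}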