Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/rculist.h   40
-rw-r--r--  include/linux/rcupdate.h  20
-rw-r--r--  include/linux/rcutiny.h   11
-rw-r--r--  include/linux/rcutree.h   19
-rw-r--r--  include/linux/sched.h     10
-rw-r--r--  include/linux/srcu.h      48
6 files changed, 104 insertions(+), 44 deletions(-)
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index d079290843a..e0f0fab2041 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -30,6 +30,7 @@
  * This is only for internal list manipulation where we know
  * the prev/next entries already!
  */
+#ifndef CONFIG_DEBUG_LIST
 static inline void __list_add_rcu(struct list_head *new,
 		struct list_head *prev, struct list_head *next)
 {
@@ -38,6 +39,10 @@ static inline void __list_add_rcu(struct list_head *new,
 	rcu_assign_pointer(list_next_rcu(prev), new);
 	next->prev = new;
 }
+#else
+extern void __list_add_rcu(struct list_head *new,
+		struct list_head *prev, struct list_head *next);
+#endif
 
 /**
  * list_add_rcu - add a new entry to rcu-protected list
@@ -108,7 +113,7 @@ static inline void list_add_tail_rcu(struct list_head *new,
  */
 static inline void list_del_rcu(struct list_head *entry)
 {
-	__list_del(entry->prev, entry->next);
+	__list_del_entry(entry);
 	entry->prev = LIST_POISON2;
 }
 
@@ -228,18 +233,43 @@ static inline void list_splice_init_rcu(struct list_head *list,
 })
 
 /**
- * list_first_entry_rcu - get the first element from a list
+ * Where are list_empty_rcu() and list_first_entry_rcu()?
+ *
+ * Implementing those functions following their counterparts list_empty() and
+ * list_first_entry() is not advisable because they lead to subtle race
+ * conditions as the following snippet shows:
+ *
+ * if (!list_empty_rcu(mylist)) {
+ *	struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
+ *	do_something(bar);
+ * }
+ *
+ * The list may not be empty when list_empty_rcu checks it, but it may be when
+ * list_first_entry_rcu rereads the ->next pointer.
+ *
+ * Rereading the ->next pointer is not a problem for list_empty() and
+ * list_first_entry() because they would be protected by a lock that blocks
+ * writers.
+ *
+ * See list_first_or_null_rcu for an alternative.
+ */
+
+/**
+ * list_first_or_null_rcu - get the first element from a list
  * @ptr:	the list head to take the element from.
  * @type:	the type of the struct this is embedded in.
  * @member:	the name of the list_struct within the struct.
  *
- * Note, that list is expected to be not empty.
+ * Note that if the list is empty, it returns NULL.
  *
  * This primitive may safely run concurrently with the _rcu list-mutation
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
-#define list_first_entry_rcu(ptr, type, member) \
-	list_entry_rcu((ptr)->next, type, member)
+#define list_first_or_null_rcu(ptr, type, member) \
+	({struct list_head *__ptr = (ptr); \
+	  struct list_head __rcu *__next = list_next_rcu(__ptr); \
+	  likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
+	})
 
 /**
  * list_for_each_entry_rcu - iterate over rcu list of given type
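
For context, a minimal usage sketch of the new list_first_or_null_rcu()
primitive; the struct foo, mylist, and do_something() names are hypothetical,
echoing the snippet in the comment above:

	struct foo {
		struct list_head list_member;
		int data;
	};

	static LIST_HEAD(mylist);	/* updates serialized by the writers' lock */

	static void reader(void)
	{
		struct foo *bar;

		rcu_read_lock();
		/*
		 * Fetches ->next once, so the emptiness check and the entry
		 * fetch cannot disagree as they could with separate
		 * list_empty_rcu()/list_first_entry_rcu() calls.
		 */
		bar = list_first_or_null_rcu(&mylist, struct foo, list_member);
		if (bar)
			do_something(bar);
		rcu_read_unlock();
	}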
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 20fb776a1d4..26d1a47591f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -184,12 +184,14 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
+extern void rcu_preempt_note_context_switch(void);
 extern void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 extern void rcu_idle_enter(void);
 extern void rcu_idle_exit(void);
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
+extern void exit_rcu(void);
 
 /**
  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
@@ -922,6 +924,21 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
 	kfree_call_rcu(head, (rcu_callback)offset);
 }
 
+/*
+ * Does the specified offset indicate that the corresponding rcu_head
+ * structure can be handled by kfree_rcu()?
+ */
+#define __is_kfree_rcu_offset(offset) ((offset) < 4096)
+
+/*
+ * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
+ */
+#define __kfree_rcu(head, offset) \
+	do { \
+		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
+		call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
+	} while (0)
+
 /**
  * kfree_rcu() - kfree an object after a grace period.
  * @ptr:	pointer to kfree
@@ -944,6 +961,9 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
  *
  * Note that the allowable offset might decrease in the future, for example,
  * to allow something like kmem_cache_free_rcu().
+ *
+ * The BUILD_BUG_ON check must not involve any function calls, hence the
+ * checks are done in macros here.
  */
 #define kfree_rcu(ptr, rcu_head) \
 	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
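
A usage sketch of kfree_rcu() in terms of the macros added above; struct foo
and release_foo() are hypothetical. The rcu_head must be embedded in the
structure being freed, and its offset is what the BUILD_BUG_ON validates at
compile time:

	struct foo {
		int data;
		struct rcu_head rcu;	/* offset must be < 4096 */
	};

	static void release_foo(struct foo *fp)
	{
		/*
		 * Queues fp->rcu with the offset of ->rcu encoded as the
		 * "callback" pointer, letting RCU locate and kfree() the
		 * enclosing struct foo after a grace period, with no
		 * callback function to write.
		 */
		kfree_rcu(fp, rcu);
	}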
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e93df77176d..adb5e5a38ca 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -87,14 +87,6 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 
 #ifdef CONFIG_TINY_RCU
 
-static inline void rcu_preempt_note_context_switch(void)
-{
-}
-
-static inline void exit_rcu(void)
-{
-}
-
 static inline int rcu_needs_cpu(int cpu)
 {
 	return 0;
@@ -102,8 +94,6 @@ static inline int rcu_needs_cpu(int cpu)
 
 #else /* #ifdef CONFIG_TINY_RCU */
 
-void rcu_preempt_note_context_switch(void);
-extern void exit_rcu(void);
 int rcu_preempt_needs_cpu(void);
 
 static inline int rcu_needs_cpu(int cpu)
@@ -116,7 +106,6 @@ static inline int rcu_needs_cpu(int cpu)
 static inline void rcu_note_context_switch(int cpu)
 {
 	rcu_sched_qs(cpu);
-	rcu_preempt_note_context_switch();
 }
 
 /*
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index e8ee5dd0854..3c6083cde4f 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -45,18 +45,6 @@ static inline void rcu_virt_note_context_switch(int cpu)
 	rcu_note_context_switch(cpu);
 }
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
-
-extern void exit_rcu(void);
-
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-
-static inline void exit_rcu(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
-
 extern void synchronize_rcu_bh(void);
 extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
@@ -98,13 +86,6 @@ extern void rcu_force_quiescent_state(void);
 extern void rcu_bh_force_quiescent_state(void);
 extern void rcu_sched_force_quiescent_state(void);
 
-/* A context switch is a grace period for RCU-sched and RCU-bh. */
-static inline int rcu_blocking_is_gp(void)
-{
-	might_sleep(); /* Check for RCU read-side critical section. */
-	return num_online_cpus() == 1;
-}
-
 extern void rcu_scheduler_starting(void);
 extern int rcu_scheduler_active __read_mostly;
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 81a173c0897..8f3fd945070 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1905,12 +1905,22 @@ static inline void rcu_copy_process(struct task_struct *p)
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
+static inline void rcu_switch_from(struct task_struct *prev)
+{
+	if (prev->rcu_read_lock_nesting != 0)
+		rcu_preempt_note_context_switch();
+}
+
 #else
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
 }
 
+static inline void rcu_switch_from(struct task_struct *prev)
+{
+}
+
 #endif
 
 #ifdef CONFIG_SMP
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index d3d5fa54f25..55a5c52cbb2 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -29,26 +29,35 @@
 
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
+#include <linux/workqueue.h>
 
 struct srcu_struct_array {
-	int c[2];
+	unsigned long c[2];
+	unsigned long seq[2];
+};
+
+struct rcu_batch {
+	struct rcu_head *head, **tail;
 };
 
 struct srcu_struct {
-	int completed;
+	unsigned completed;
 	struct srcu_struct_array __percpu *per_cpu_ref;
-	struct mutex mutex;
+	spinlock_t queue_lock; /* protect ->batch_queue, ->running */
+	bool running;
+	/* callbacks just queued */
+	struct rcu_batch batch_queue;
+	/* callbacks try to do the first check_zero */
+	struct rcu_batch batch_check0;
+	/* callbacks done with the first check_zero and the flip */
+	struct rcu_batch batch_check1;
+	struct rcu_batch batch_done;
+	struct delayed_work work;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 };
 
-#ifndef CONFIG_PREEMPT
-#define srcu_barrier() barrier()
-#else /* #ifndef CONFIG_PREEMPT */
-#define srcu_barrier()
-#endif /* #else #ifndef CONFIG_PREEMPT */
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 int __init_srcu_struct(struct srcu_struct *sp, const char *name,
@@ -67,12 +76,33 @@ int init_srcu_struct(struct srcu_struct *sp);
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+/**
+ * call_srcu() - Queue a callback for invocation after an SRCU grace period
+ * @sp: srcu_struct in which to queue the callback
+ * @head: structure to be used for queueing the SRCU callback.
+ * @func: function to be invoked after the SRCU grace period
+ *
+ * The callback function will be invoked some time after a full SRCU
+ * grace period elapses, in other words after all pre-existing SRCU
+ * read-side critical sections have completed. However, the callback
+ * function might well execute concurrently with other SRCU read-side
+ * critical sections that started after call_srcu() was invoked. SRCU
+ * read-side critical sections are delimited by srcu_read_lock() and
+ * srcu_read_unlock(), and may be nested.
+ *
+ * The callback will be invoked from process context, but must nevertheless
+ * be fast and must not block.
+ */
+void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
+		void (*func)(struct rcu_head *head));
+
 void cleanup_srcu_struct(struct srcu_struct *sp);
 int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
 void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
 void synchronize_srcu(struct srcu_struct *sp);
 void synchronize_srcu_expedited(struct srcu_struct *sp);
 long srcu_batches_completed(struct srcu_struct *sp);
+void srcu_barrier(struct srcu_struct *sp);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
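
A minimal sketch of the call_srcu() API added above, together with the SRCU
read side it synchronizes against; my_srcu, struct foo, foo_reclaim(), and
update_foo() are hypothetical names:

	static struct srcu_struct my_srcu;	/* init_srcu_struct() at init time */

	struct foo {
		struct rcu_head rcu;
		int data;
	};

	static void foo_reclaim(struct rcu_head *head)
	{
		/* Runs from process context once all pre-existing SRCU readers finish. */
		kfree(container_of(head, struct foo, rcu));
	}

	static void update_foo(struct foo *old)
	{
		call_srcu(&my_srcu, &old->rcu, foo_reclaim);
	}

	static void read_foo(void)
	{
		int idx = srcu_read_lock(&my_srcu);
		/* ... dereference SRCU-protected pointers; readers may block ... */
		srcu_read_unlock(&my_srcu, idx);
	}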