-rw-r--r--  include/linux/rculist.h    2
-rw-r--r--  include/linux/rcupdate.h  22
-rw-r--r--  include/linux/srcu.h       1
-rw-r--r--  kernel/rcu/srcutree.c      2
-rw-r--r--  kernel/rcu/sync.c          9
-rw-r--r--  kernel/rcu/tree.c         18
6 files changed, 33 insertions, 21 deletions
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index b1fd8bf85fdc..2bea1d5e9930 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -276,7 +276,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
 #define list_entry_rcu(ptr, type, member) \
 	container_of(lockless_dereference(ptr), type, member)
 
-/**
+/*
  * Where are list_empty_rcu() and list_first_entry_rcu()?
  *
  * Implementing those functions following their counterparts list_empty() and
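
The comment block re-marked above is the one explaining why there is no list_empty_rcu()/list_first_entry_rcu(). As an illustrative sketch only (struct foo, foo_head and foo_first_data() are hypothetical names, not from the diff), the documented pattern of list_entry_rcu() plus a head comparison under rcu_read_lock() looks roughly like this:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

/* Hypothetical element type; "foo" and "foo_head" are illustrative only. */
struct foo {
	struct list_head list;
	int data;
};

static LIST_HEAD(foo_head);

/* Read the first element's data under rcu_read_lock(); the list head itself
 * is never freed, so fetching ->next via list_entry_rcu() is safe, and the
 * head comparison detects the empty-list case. */
static int foo_first_data(void)
{
	struct foo *f;
	int ret = -1;

	rcu_read_lock();
	f = list_entry_rcu(foo_head.next, struct foo, list);
	if (&f->list != &foo_head)
		ret = f->data;
	rcu_read_unlock();
	return ret;
}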
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index de50d8a4cf41..1a9f70d44af9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -523,7 +523,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  * Return the value of the specified RCU-protected pointer, but omit
  * both the smp_read_barrier_depends() and the READ_ONCE(). This
  * is useful in cases where update-side locks prevent the value of the
- * pointer from changing. Please note that this primitive does -not-
+ * pointer from changing. Please note that this primitive does *not*
  * prevent the compiler from repeating this reference or combining it
  * with other references, so it should not be used without protection
  * of appropriate locks.
@@ -568,7 +568,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  * is handed off from RCU to some other synchronization mechanism, for
  * example, reference counting or locking. In C11, it would map to
  * kill_dependency(). It could be used as follows:
- *
+ * ``
  * rcu_read_lock();
  * p = rcu_dereference(gp);
  * long_lived = is_long_lived(p);
@@ -579,6 +579,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  * p = rcu_pointer_handoff(p);
  * }
  * rcu_read_unlock();
+ *``
  */
 #define rcu_pointer_handoff(p) (p)
 
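
To make the handoff example quoted above concrete (a hypothetical sketch, not part of this diff; struct obj, gp and obj_get() are invented names), the same idea expressed with a kref rather than a bare atomic refcount could look like:

#include <linux/kref.h>
#include <linux/rcupdate.h>

/* Hypothetical RCU-protected object published through gp. */
struct obj {
	struct kref ref;
	int data;
};

static struct obj __rcu *gp;

/* Take a long-lived reference: once kref_get_unless_zero() succeeds, the
 * object is protected by its reference count, so the pointer is handed off
 * from RCU before leaving the read-side critical section. */
static struct obj *obj_get(void)
{
	struct obj *p;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p && kref_get_unless_zero(&p->ref))
		p = rcu_pointer_handoff(p);
	else
		p = NULL;
	rcu_read_unlock();
	return p;
}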
@@ -778,18 +779,21 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 
 /**
  * RCU_INIT_POINTER() - initialize an RCU protected pointer
+ * @p: The pointer to be initialized.
+ * @v: The value to initialized the pointer to.
  *
  * Initialize an RCU-protected pointer in special cases where readers
  * do not need ordering constraints on the CPU or the compiler. These
  * special cases are:
  *
- * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
+ * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
  * 2. The caller has taken whatever steps are required to prevent
- *    RCU readers from concurrently accessing this pointer -or-
+ *    RCU readers from concurrently accessing this pointer *or*
  * 3. The referenced data structure has already been exposed to
- *    readers either at compile time or via rcu_assign_pointer() -and-
- *    a. You have not made -any- reader-visible changes to
- *       this structure since then -or-
+ *    readers either at compile time or via rcu_assign_pointer() *and*
+ *
+ *    a. You have not made *any* reader-visible changes to
+ *       this structure since then *or*
  *    b. It is OK for readers accessing this structure from its
  *       new location to see the old state of the structure. (For
  *       example, the changes were to statistical counters or to
@@ -805,7 +809,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  * by a single external-to-structure RCU-protected pointer, then you may
  * use RCU_INIT_POINTER() to initialize the internal RCU-protected
  * pointers, but you must use rcu_assign_pointer() to initialize the
- * external-to-structure pointer -after- you have completely initialized
+ * external-to-structure pointer *after* you have completely initialized
  * the reader-accessible portions of the linked structure.
  *
  * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
@@ -819,6 +823,8 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 
 /**
  * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
+ * @p: The pointer to be initialized.
+ * @v: The value to initialized the pointer to.
  *
  * GCC-style initialization for an RCU-protected pointer in a structure field.
  */
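
As a rough usage sketch for the two macros whose @p/@v lines are added above (struct conf, conf_holder and the helper functions are hypothetical, not part of the diff):

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical configuration blob published through an RCU-protected pointer. */
struct conf {
	int threshold;
};

struct conf_holder {
	struct conf __rcu *cur;
};

static struct conf default_conf = { .threshold = 10 };

/* RCU_POINTER_INITIALIZER(): static initialization needs no ordering,
 * because the structure exists before any reader can run. */
static struct conf_holder holder = {
	RCU_POINTER_INITIALIZER(cur, &default_conf),
};

static int publish_new_conf(int threshold)
{
	struct conf *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return -ENOMEM;
	c->threshold = threshold;
	/* Readers may already be traversing holder.cur, so publication must
	 * use rcu_assign_pointer(); RCU_INIT_POINTER() is legal only in the
	 * special cases enumerated in the comment above.  (Freeing the old
	 * value after a grace period is omitted for brevity.) */
	rcu_assign_pointer(holder.cur, c);
	return 0;
}

static void retract_conf(void)
{
	/* NULLing the pointer out is special case 1: no ordering needed. */
	RCU_INIT_POINTER(holder.cur, NULL);
}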
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 39af9bc0f653..62be8966e837 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -78,6 +78,7 @@ void synchronize_srcu(struct srcu_struct *sp);
 
 /**
  * srcu_read_lock_held - might we be in SRCU read-side critical section?
+ * @sp: The srcu_struct structure to check
  *
  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
  * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
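
For orientation (an invented sketch; my_srcu, struct cfg and read_cfg_value() are not from the diff): srcu_read_lock_held() is typically consumed by lockdep-style assertions inside an SRCU read-side critical section, along these lines:

#include <linux/kernel.h>
#include <linux/srcu.h>

/* Hypothetical SRCU domain and SRCU-protected pointer. */
DEFINE_SRCU(my_srcu);

struct cfg {
	int value;
};
static struct cfg __rcu *my_cfg;

static int read_cfg_value(void)
{
	struct cfg *c;
	int idx, val = 0;

	idx = srcu_read_lock(&my_srcu);
	/* With CONFIG_DEBUG_LOCK_ALLOC this check is meaningful; without it,
	 * srcu_read_lock_held() errs on the side of returning nonzero, as
	 * the comment above explains. */
	WARN_ON_ONCE(!srcu_read_lock_held(&my_srcu));
	c = srcu_dereference(my_cfg, &my_srcu);
	if (c)
		val = c->value;
	srcu_read_unlock(&my_srcu, idx);
	return val;
}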
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 729a8706751d..6d5880089ff6 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -854,7 +854,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 /**
  * call_srcu() - Queue a callback for invocation after an SRCU grace period
  * @sp: srcu_struct in queue the callback
- * @head: structure to be used for queueing the SRCU callback.
+ * @rhp: structure to be used for queueing the SRCU callback.
  * @func: function to be invoked after the SRCU grace period
  *
  * The callback function will be invoked some time after a full SRCU
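
As an illustration of the renamed @rhp parameter (hypothetical names, not part of the diff): the rcu_head handed to call_srcu() is normally embedded in the object being freed and recovered with container_of() in the callback:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/srcu.h>

DEFINE_SRCU(my_srcu);		/* hypothetical SRCU domain */

struct item {			/* hypothetical SRCU-protected object */
	int data;
	struct rcu_head rh;	/* queued via call_srcu() */
};

static void item_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct item, rh));
}

/* Free @it once all pre-existing SRCU readers of my_srcu have finished. */
static void item_free(struct item *it)
{
	call_srcu(&my_srcu, &it->rh, item_free_cb);
}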
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 50d1861f7759..3f943efcf61c 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -85,6 +85,9 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
 }
 
 /**
+ * rcu_sync_enter_start - Force readers onto slow path for multiple updates
+ * @rsp: Pointer to rcu_sync structure to use for synchronization
+ *
  * Must be called after rcu_sync_init() and before first use.
  *
  * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
@@ -142,7 +145,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
 
 /**
  * rcu_sync_func() - Callback function managing reader access to fastpath
- * @rsp: Pointer to rcu_sync structure to use for synchronization
+ * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
  *
  * This function is passed to one of the call_rcu() functions by
  * rcu_sync_exit(), so that it is invoked after a grace period following the
@@ -158,9 +161,9 @@ void rcu_sync_enter(struct rcu_sync *rsp)
  * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
  * can again use their fastpaths.
  */
-static void rcu_sync_func(struct rcu_head *rcu)
+static void rcu_sync_func(struct rcu_head *rhp)
 {
-	struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head);
+	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
 	unsigned long flags;
 
 	BUG_ON(rsp->gp_state != GP_PASSED);
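
For context (a hypothetical sketch; my_rss and the helpers are invented, and real users normally reach rcu_sync only through wrappers such as percpu_rw_semaphore): rcu_sync_func() is internal plumbing, while the exported surface is rcu_sync_init()/rcu_sync_enter()/rcu_sync_exit(), with readers branching on rcu_sync_is_idle():

#include <linux/rcu_sync.h>
#include <linux/rcupdate.h>

static struct rcu_sync my_rss;		/* hypothetical fast/slow-path gate */

static void my_gate_init(void)
{
	rcu_sync_init(&my_rss, RCU_SYNC);	/* vanilla-RCU flavored gate */
}

/* Reader: the common case costs only rcu_read_lock(); once a writer has
 * called rcu_sync_enter(), rcu_sync_is_idle() goes false and the reader
 * must take whatever slow path the enclosing primitive defines. */
static bool my_reader_on_fastpath(void)
{
	bool fast;

	rcu_read_lock();
	fast = rcu_sync_is_idle(&my_rss);
	/* ... fastpath or slowpath read-side work here ... */
	rcu_read_unlock();
	return fast;
}

/* Writer: force readers onto the slow path for the duration of the update. */
static void my_update(void)
{
	rcu_sync_enter(&my_rss);	/* blocks for a grace period */
	/* ... update-side work while readers take the slow path ... */
	rcu_sync_exit(&my_rss);		/* readers drift back via rcu_sync_func() */
}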
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b0ad62b0e7b8..3e3650e94ae6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3097,9 +3097,10 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
  * read-side critical sections have completed. call_rcu_sched() assumes
  * that the read-side critical sections end on enabling of preemption
  * or on voluntary preemption.
- * RCU read-side critical sections are delimited by :
- * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
- * - anything that disables preemption.
+ * RCU read-side critical sections are delimited by:
+ *
+ * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
+ * - anything that disables preemption.
  *
  * These may be nested.
  *
@@ -3124,11 +3125,12 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
  * handler. This means that read-side critical sections in process
  * context must not be interrupted by softirqs. This interface is to be
  * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by :
- * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
- * OR
+ * RCU read-side critical sections are delimited by:
+ *
+ * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
  * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- * These may be nested.
+ *
+ * These may be nested.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
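
Finally, a usage sketch tying the two reflowed comment blocks together (struct sample and the helper functions are hypothetical, not part of the diff): both call_rcu_sched() and call_rcu_bh() take an embedded rcu_head plus a callback, exactly like call_rcu():

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct sample {			/* hypothetical RCU-protected object */
	int data;
	struct rcu_head rh;
};

static void sample_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct sample, rh));
}

/* Readers delimited by preemption-disabled regions (or rcu_read_lock_sched())
 * are waited for by call_rcu_sched(). */
static void sample_free_sched(struct sample *s)
{
	call_rcu_sched(&s->rh, sample_free_cb);
}

/* Readers delimited by rcu_read_lock_bh()/local_bh_disable() are waited for
 * by call_rcu_bh(). */
static void sample_free_bh(struct sample *s)
{
	call_rcu_bh(&s->rh, sample_free_cb);
}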