-rw-r--r--   Documentation/RCU/checklist.txt  |   2
-rw-r--r--   Documentation/RCU/rcuref.txt     |  16
-rw-r--r--   Documentation/RCU/whatisRCU.txt  |   2
-rw-r--r--   include/linux/compiler.h         |   4
-rw-r--r--   include/linux/rcuclassic.h       |  37
-rw-r--r--   include/linux/rculist.h          |  14
-rw-r--r--   include/linux/rcupdate.h         |  20
-rw-r--r--   include/linux/rcupreempt.h       |  11
-rw-r--r--   kernel/rcuclassic.c              | 337
-rw-r--r--   kernel/rcupreempt.c              |   8
-rw-r--r--   kernel/rcupreempt_trace.c        |   7
-rw-r--r--   lib/Kconfig.debug                |  13
12 files changed, 337 insertions(+), 134 deletions(-)
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index cf5562cbe356..6e253407b3dc 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
| @@ -210,7 +210,7 @@ over a rather long period of time, but improvements are always welcome! | |||
| 210 | number of updates per grace period. | 210 | number of updates per grace period. |
| 211 | 211 | ||
| 212 | 9. All RCU list-traversal primitives, which include | 212 | 9. All RCU list-traversal primitives, which include |
| 213 | rcu_dereference(), list_for_each_rcu(), list_for_each_entry_rcu(), | 213 | rcu_dereference(), list_for_each_entry_rcu(), |
| 214 | list_for_each_continue_rcu(), and list_for_each_safe_rcu(), | 214 | list_for_each_continue_rcu(), and list_for_each_safe_rcu(), |
| 215 | must be either within an RCU read-side critical section or | 215 | must be either within an RCU read-side critical section or |
| 216 | must be protected by appropriate update-side locks. RCU | 216 | must be protected by appropriate update-side locks. RCU |
diff --git a/Documentation/RCU/rcuref.txt b/Documentation/RCU/rcuref.txt
index 451de2ad8329..4202ad093130 100644
--- a/Documentation/RCU/rcuref.txt
+++ b/Documentation/RCU/rcuref.txt
| @@ -29,9 +29,9 @@ release_referenced() delete() | |||
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | If this list/array is made lock free using RCU as in changing the | 31 | If this list/array is made lock free using RCU as in changing the |
| 32 | write_lock() in add() and delete() to spin_lock and changing read_lock | 32 | write_lock() in add() and delete() to spin_lock() and changing read_lock() |
| 33 | in search_and_reference to rcu_read_lock(), the atomic_get in | 33 | in search_and_reference() to rcu_read_lock(), the atomic_inc() in |
| 34 | search_and_reference could potentially hold reference to an element which | 34 | search_and_reference() could potentially hold reference to an element which |
| 35 | has already been deleted from the list/array. Use atomic_inc_not_zero() | 35 | has already been deleted from the list/array. Use atomic_inc_not_zero() |
| 36 | in this scenario as follows: | 36 | in this scenario as follows: |
| 37 | 37 | ||
| @@ -40,20 +40,20 @@ add() search_and_reference() | |||
| 40 | { { | 40 | { { |
| 41 | alloc_object rcu_read_lock(); | 41 | alloc_object rcu_read_lock(); |
| 42 | ... search_for_element | 42 | ... search_for_element |
| 43 | atomic_set(&el->rc, 1); if (atomic_inc_not_zero(&el->rc)) { | 43 | atomic_set(&el->rc, 1); if (!atomic_inc_not_zero(&el->rc)) { |
| 44 | write_lock(&list_lock); rcu_read_unlock(); | 44 | spin_lock(&list_lock); rcu_read_unlock(); |
| 45 | return FAIL; | 45 | return FAIL; |
| 46 | add_element } | 46 | add_element } |
| 47 | ... ... | 47 | ... ... |
| 48 | write_unlock(&list_lock); rcu_read_unlock(); | 48 | spin_unlock(&list_lock); rcu_read_unlock(); |
| 49 | } } | 49 | } } |
| 50 | 3. 4. | 50 | 3. 4. |
| 51 | release_referenced() delete() | 51 | release_referenced() delete() |
| 52 | { { | 52 | { { |
| 53 | ... write_lock(&list_lock); | 53 | ... spin_lock(&list_lock); |
| 54 | if (atomic_dec_and_test(&el->rc)) ... | 54 | if (atomic_dec_and_test(&el->rc)) ... |
| 55 | call_rcu(&el->head, el_free); delete_element | 55 | call_rcu(&el->head, el_free); delete_element |
| 56 | ... write_unlock(&list_lock); | 56 | ... spin_unlock(&list_lock); |
| 57 | } ... | 57 | } ... |
| 58 | if (atomic_dec_and_test(&el->rc)) | 58 | if (atomic_dec_and_test(&el->rc)) |
| 59 | call_rcu(&el->head, el_free); | 59 | call_rcu(&el->head, el_free); |
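The corrected pseudo-code above is split across two columns; the sketch below pulls the same pattern together in one place. It is only an illustration of the technique the document describes, not code from this patch: struct el, el_get(), el_put(), el_delete(), el_free() and the key field are hypothetical names.

    #include <linux/list.h>
    #include <linux/rculist.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct el {
    	struct list_head list;
    	atomic_t rc;			/* reference count, 1 while on the list */
    	struct rcu_head head;
    	long key;
    };

    static LIST_HEAD(el_list);
    static DEFINE_SPINLOCK(list_lock);

    static void el_free(struct rcu_head *h)
    {
    	kfree(container_of(h, struct el, head));
    }

    /* search_and_reference(): only take a reference if the count is nonzero. */
    static struct el *el_get(long key)
    {
    	struct el *p;

    	rcu_read_lock();
    	list_for_each_entry_rcu(p, &el_list, list) {
    		if (p->key == key && atomic_inc_not_zero(&p->rc)) {
    			rcu_read_unlock();
    			return p;		/* caller now holds a reference */
    		}
    	}
    	rcu_read_unlock();
    	return NULL;			/* absent, or delete() already ran */
    }

    /* release_referenced(): the last reference frees after a grace period. */
    static void el_put(struct el *p)
    {
    	if (atomic_dec_and_test(&p->rc))
    		call_rcu(&p->head, el_free);
    }

    /* delete(): unlink under the update-side lock, then drop the list's reference. */
    static void el_delete(struct el *p)
    {
    	spin_lock(&list_lock);
    	list_del_rcu(&p->list);
    	spin_unlock(&list_lock);
    	el_put(p);
    }

The key point of the correction is visible in el_get(): a reader that loses the race with el_delete() sees a zero refcount, atomic_inc_not_zero() fails, and the reader never touches an element whose grace period is already underway.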
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index e04d643a9f57..96170824a717 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
| @@ -786,8 +786,6 @@ RCU pointer/list traversal: | |||
| 786 | list_for_each_entry_rcu | 786 | list_for_each_entry_rcu |
| 787 | hlist_for_each_entry_rcu | 787 | hlist_for_each_entry_rcu |
| 788 | 788 | ||
| 789 | list_for_each_rcu (to be deprecated in favor of | ||
| 790 | list_for_each_entry_rcu) | ||
| 791 | list_for_each_continue_rcu (to be deprecated in favor of new | 789 | list_for_each_continue_rcu (to be deprecated in favor of new |
| 792 | list_for_each_entry_continue_rcu) | 790 | list_for_each_entry_continue_rcu) |
| 793 | 791 | ||
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c8bd2daf95ec..8322141ee480 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
| @@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *); | |||
| 190 | * ACCESS_ONCE() in different C statements. | 190 | * ACCESS_ONCE() in different C statements. |
| 191 | * | 191 | * |
| 192 | * This macro does absolutely -nothing- to prevent the CPU from reordering, | 192 | * This macro does absolutely -nothing- to prevent the CPU from reordering, |
| 193 | * merging, or refetching absolutely anything at any time. | 193 | * merging, or refetching absolutely anything at any time. Its main intended |
| 194 | * use is to mediate communication between process-level code and irq/NMI | ||
| 195 | * handlers, all running on the same CPU. | ||
| 194 | */ | 196 | */ |
| 195 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) | 197 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) |
| 196 | 198 | ||
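To make the expanded comment concrete, here is a minimal sketch of the intended use case: one variable shared between process-level code and an interrupt handler on the same CPU. The flag name and handler are hypothetical; the point is only that ACCESS_ONCE() forces the compiler to perform the load or store each time, while adding no CPU-level ordering.

    #include <linux/compiler.h>
    #include <linux/interrupt.h>

    static int work_ready;				/* hypothetical shared flag */

    static irqreturn_t demo_irq(int irq, void *dev_id)	/* assumed handler */
    {
    	ACCESS_ONCE(work_ready) = 1;		/* store cannot be merged away */
    	return IRQ_HANDLED;
    }

    static void demo_wait_for_irq(void)
    {
    	/* Without ACCESS_ONCE(), the compiler may hoist the load and spin forever. */
    	while (!ACCESS_ONCE(work_ready))
    		cpu_relax();
    }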
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 4ab843622727..5f89b62e6983 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
| @@ -40,12 +40,21 @@ | |||
| 40 | #include <linux/cpumask.h> | 40 | #include <linux/cpumask.h> |
| 41 | #include <linux/seqlock.h> | 41 | #include <linux/seqlock.h> |
| 42 | 42 | ||
| 43 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
| 44 | #define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */ | ||
| 45 | #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */ | ||
| 46 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
| 43 | 47 | ||
| 44 | /* Global control variables for rcupdate callback mechanism. */ | 48 | /* Global control variables for rcupdate callback mechanism. */ |
| 45 | struct rcu_ctrlblk { | 49 | struct rcu_ctrlblk { |
| 46 | long cur; /* Current batch number. */ | 50 | long cur; /* Current batch number. */ |
| 47 | long completed; /* Number of the last completed batch */ | 51 | long completed; /* Number of the last completed batch */ |
| 48 | int next_pending; /* Is the next batch already waiting? */ | 52 | long pending; /* Number of the last pending batch */ |
| 53 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
| 54 | unsigned long gp_start; /* Time at which GP started in jiffies. */ | ||
| 55 | unsigned long jiffies_stall; | ||
| 56 | /* Time at which to check for CPU stalls. */ | ||
| 57 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
| 49 | 58 | ||
| 50 | int signaled; | 59 | int signaled; |
| 51 | 60 | ||
| @@ -66,11 +75,7 @@ static inline int rcu_batch_after(long a, long b) | |||
| 66 | return (a - b) > 0; | 75 | return (a - b) > 0; |
| 67 | } | 76 | } |
| 68 | 77 | ||
| 69 | /* | 78 | /* Per-CPU data for Read-Copy UPdate. */ |
| 70 | * Per-CPU data for Read-Copy UPdate. | ||
| 71 | * nxtlist - new callbacks are added here | ||
| 72 | * curlist - current batch for which quiescent cycle started if any | ||
| 73 | */ | ||
| 74 | struct rcu_data { | 79 | struct rcu_data { |
| 75 | /* 1) quiescent state handling : */ | 80 | /* 1) quiescent state handling : */ |
| 76 | long quiescbatch; /* Batch # for grace period */ | 81 | long quiescbatch; /* Batch # for grace period */ |
| @@ -78,12 +83,24 @@ struct rcu_data { | |||
| 78 | int qs_pending; /* core waits for quiesc state */ | 83 | int qs_pending; /* core waits for quiesc state */ |
| 79 | 84 | ||
| 80 | /* 2) batch handling */ | 85 | /* 2) batch handling */ |
| 81 | long batch; /* Batch # for current RCU batch */ | 86 | /* |
| 87 | * if nxtlist is not NULL, then: | ||
| 88 | * batch: | ||
| 89 | * The batch # for the last entry of nxtlist | ||
| 90 | * [*nxttail[1], NULL = *nxttail[2]): | ||
| 91 | * Entries that batch # <= batch | ||
| 92 | * [*nxttail[0], *nxttail[1]): | ||
| 93 | * Entries that batch # <= batch - 1 | ||
| 94 | * [nxtlist, *nxttail[0]): | ||
| 95 | * Entries that batch # <= batch - 2 | ||
| 96 | * The grace period for these entries has completed, and | ||
| 97 | * the other grace-period-completed entries may be moved | ||
| 98 | * here temporarily in rcu_process_callbacks(). | ||
| 99 | */ | ||
| 100 | long batch; | ||
| 82 | struct rcu_head *nxtlist; | 101 | struct rcu_head *nxtlist; |
| 83 | struct rcu_head **nxttail; | 102 | struct rcu_head **nxttail[3]; |
| 84 | long qlen; /* # of queued callbacks */ | 103 | long qlen; /* # of queued callbacks */ |
| 85 | struct rcu_head *curlist; | ||
| 86 | struct rcu_head **curtail; | ||
| 87 | struct rcu_head *donelist; | 104 | struct rcu_head *donelist; |
| 88 | struct rcu_head **donetail; | 105 | struct rcu_head **donetail; |
| 89 | long blimit; /* Upper limit on a processed batch */ | 106 | long blimit; /* Upper limit on a processed batch */ |
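A compact restatement of the new nxttail[] layout may help when reading the rcuclassic.c changes below. This is an illustration only; enqueue_callback() is a hypothetical helper mirroring what __call_rcu() does with the newest segment.

    /*
     * Segment boundaries (per the comment in struct rcu_data above):
     *   [nxtlist,     *nxttail[0])  callbacks with batch # <= batch - 2 (grace period done)
     *   [*nxttail[0], *nxttail[1])  callbacks with batch # <= batch - 1
     *   [*nxttail[1], *nxttail[2])  callbacks with batch # <= batch
     *   *nxttail[2] == NULL         the list terminator slot
     */
    static void enqueue_callback(struct rcu_data *rdp, struct rcu_head *head)
    {
    	head->next = NULL;
    	*rdp->nxttail[2] = head;	/* append to the newest segment */
    	rdp->nxttail[2] = &head->next;	/* tail now points at the new NULL slot */
    }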
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index eb4443c7e05b..e649bd3f2c97 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
| @@ -198,20 +198,6 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
| 198 | at->prev = last; | 198 | at->prev = last; |
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | /** | ||
| 202 | * list_for_each_rcu - iterate over an rcu-protected list | ||
| 203 | * @pos: the &struct list_head to use as a loop cursor. | ||
| 204 | * @head: the head for your list. | ||
| 205 | * | ||
| 206 | * This list-traversal primitive may safely run concurrently with | ||
| 207 | * the _rcu list-mutation primitives such as list_add_rcu() | ||
| 208 | * as long as the traversal is guarded by rcu_read_lock(). | ||
| 209 | */ | ||
| 210 | #define list_for_each_rcu(pos, head) \ | ||
| 211 | for (pos = rcu_dereference((head)->next); \ | ||
| 212 | prefetch(pos->next), pos != (head); \ | ||
| 213 | pos = rcu_dereference(pos->next)) | ||
| 214 | |||
| 215 | #define __list_for_each_rcu(pos, head) \ | 201 | #define __list_for_each_rcu(pos, head) \ |
| 216 | for (pos = rcu_dereference((head)->next); \ | 202 | for (pos = rcu_dereference((head)->next); \ |
| 217 | pos != (head); \ | 203 | pos != (head); \ |
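Since list_for_each_rcu() is deleted here, any remaining caller has to move to list_for_each_entry_rcu(). A hedged before/after sketch follows; struct foo, foo_list and sum_foos() are made-up names, not callers removed by this patch.

    #include <linux/rculist.h>

    struct foo {
    	struct list_head list;
    	int val;
    };

    /*
     * Old style, no longer available:
     *
     *	struct list_head *pos;
     *
     *	list_for_each_rcu(pos, &foo_list)
     *		total += list_entry(pos, struct foo, list)->val;
     */
    static int sum_foos(struct list_head *foo_list)
    {
    	struct foo *p;
    	int sum = 0;

    	rcu_read_lock();
    	list_for_each_entry_rcu(p, foo_list, list)
    		sum += p->val;		/* typed cursor, no list_entry() needed */
    	rcu_read_unlock();
    	return sum;
    }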
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e8b4039cfb2f..86f1f5e43e33 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
| @@ -133,6 +133,26 @@ struct rcu_head { | |||
| 133 | #define rcu_read_unlock_bh() __rcu_read_unlock_bh() | 133 | #define rcu_read_unlock_bh() __rcu_read_unlock_bh() |
| 134 | 134 | ||
| 135 | /** | 135 | /** |
| 136 | * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section | ||
| 137 | * | ||
| 138 | * Should be used with either | ||
| 139 | * - synchronize_sched() | ||
| 140 | * or | ||
| 141 | * - call_rcu_sched() and rcu_barrier_sched() | ||
| 142 | * on the write-side to insure proper synchronization. | ||
| 143 | */ | ||
| 144 | #define rcu_read_lock_sched() preempt_disable() | ||
| 145 | |||
| 146 | /* | ||
| 147 | * rcu_read_unlock_sched - marks the end of an RCU-classic critical section | ||
| 148 | * | ||
| 149 | * See rcu_read_lock_sched for more information. | ||
| 150 | */ | ||
| 151 | #define rcu_read_unlock_sched() preempt_enable() | ||
| 152 | |||
| 153 | |||
| 154 | |||
| 155 | /** | ||
| 136 | * rcu_dereference - fetch an RCU-protected pointer in an | 156 | * rcu_dereference - fetch an RCU-protected pointer in an |
| 137 | * RCU read-side critical section. This pointer may later | 157 | * RCU read-side critical section. This pointer may later |
| 138 | * be safely dereferenced. | 158 | * be safely dereferenced. |
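A hedged usage sketch for the new read-side markers, pairing them with synchronize_sched() on the update side. The struct cfg type, cur_cfg pointer and function names are hypothetical; only the locking/grace-period pairing is the point.

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct cfg {
    	int threshold;
    };
    static struct cfg *cur_cfg;

    static int read_threshold(void)
    {
    	struct cfg *c;
    	int t;

    	rcu_read_lock_sched();		/* disables preemption */
    	c = rcu_dereference(cur_cfg);
    	t = c ? c->threshold : 0;
    	rcu_read_unlock_sched();
    	return t;
    }

    static void update_cfg(struct cfg *newc)
    {
    	struct cfg *old = cur_cfg;

    	rcu_assign_pointer(cur_cfg, newc);
    	synchronize_sched();		/* wait out all preempt-disabled readers */
    	kfree(old);
    }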
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 0967f03b0705..3e05c09b54a2 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
| @@ -57,7 +57,13 @@ static inline void rcu_qsctr_inc(int cpu) | |||
| 57 | rdssp->sched_qs++; | 57 | rdssp->sched_qs++; |
| 58 | } | 58 | } |
| 59 | #define rcu_bh_qsctr_inc(cpu) | 59 | #define rcu_bh_qsctr_inc(cpu) |
| 60 | #define call_rcu_bh(head, rcu) call_rcu(head, rcu) | 60 | |
| 61 | /* | ||
| 62 | * Someone might want to pass call_rcu_bh as a function pointer. | ||
| 63 | * So this needs to just be a rename and not a macro function. | ||
| 64 | * (no parentheses) | ||
| 65 | */ | ||
| 66 | #define call_rcu_bh call_rcu | ||
| 61 | 67 | ||
| 62 | /** | 68 | /** |
| 63 | * call_rcu_sched - Queue RCU callback for invocation after sched grace period. | 69 | * call_rcu_sched - Queue RCU callback for invocation after sched grace period. |
| @@ -111,7 +117,6 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); | |||
| 111 | struct softirq_action; | 117 | struct softirq_action; |
| 112 | 118 | ||
| 113 | #ifdef CONFIG_NO_HZ | 119 | #ifdef CONFIG_NO_HZ |
| 114 | DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); | ||
| 115 | 120 | ||
| 116 | static inline void rcu_enter_nohz(void) | 121 | static inline void rcu_enter_nohz(void) |
| 117 | { | 122 | { |
| @@ -126,8 +131,8 @@ static inline void rcu_exit_nohz(void) | |||
| 126 | { | 131 | { |
| 127 | static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); | 132 | static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); |
| 128 | 133 | ||
| 129 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | ||
| 130 | __get_cpu_var(rcu_dyntick_sched).dynticks++; | 134 | __get_cpu_var(rcu_dyntick_sched).dynticks++; |
| 135 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | ||
| 131 | WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1), | 136 | WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1), |
| 132 | &rs); | 137 | &rs); |
| 133 | } | 138 | } |
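The "no parentheses" comment above is about taking the address of call_rcu_bh. A hedged sketch of the kind of caller that motivates it (rcu_queue_fn and queue_one() are hypothetical): with "#define call_rcu_bh call_rcu", the name still resolves to a real function whose address can be stored, which an argument-taking macro could not provide.

    #include <linux/rcupdate.h>

    typedef void (*rcu_queue_fn)(struct rcu_head *head,
    			     void (*func)(struct rcu_head *rcu));

    /* Under CONFIG_PREEMPT_RCU this initializer simply becomes &call_rcu. */
    static rcu_queue_fn queue_fn = call_rcu_bh;

    static void queue_one(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
    {
    	queue_fn(head, func);
    }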
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index aad93cdc9f68..37f72e551542 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
| @@ -47,6 +47,7 @@ | |||
| 47 | #include <linux/notifier.h> | 47 | #include <linux/notifier.h> |
| 48 | #include <linux/cpu.h> | 48 | #include <linux/cpu.h> |
| 49 | #include <linux/mutex.h> | 49 | #include <linux/mutex.h> |
| 50 | #include <linux/time.h> | ||
| 50 | 51 | ||
| 51 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 52 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 52 | static struct lock_class_key rcu_lock_key; | 53 | static struct lock_class_key rcu_lock_key; |
| @@ -60,12 +61,14 @@ EXPORT_SYMBOL_GPL(rcu_lock_map); | |||
| 60 | static struct rcu_ctrlblk rcu_ctrlblk = { | 61 | static struct rcu_ctrlblk rcu_ctrlblk = { |
| 61 | .cur = -300, | 62 | .cur = -300, |
| 62 | .completed = -300, | 63 | .completed = -300, |
| 64 | .pending = -300, | ||
| 63 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), | 65 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), |
| 64 | .cpumask = CPU_MASK_NONE, | 66 | .cpumask = CPU_MASK_NONE, |
| 65 | }; | 67 | }; |
| 66 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { | 68 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { |
| 67 | .cur = -300, | 69 | .cur = -300, |
| 68 | .completed = -300, | 70 | .completed = -300, |
| 71 | .pending = -300, | ||
| 69 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), | 72 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), |
| 70 | .cpumask = CPU_MASK_NONE, | 73 | .cpumask = CPU_MASK_NONE, |
| 71 | }; | 74 | }; |
| @@ -83,7 +86,10 @@ static void force_quiescent_state(struct rcu_data *rdp, | |||
| 83 | { | 86 | { |
| 84 | int cpu; | 87 | int cpu; |
| 85 | cpumask_t cpumask; | 88 | cpumask_t cpumask; |
| 89 | unsigned long flags; | ||
| 90 | |||
| 86 | set_need_resched(); | 91 | set_need_resched(); |
| 92 | spin_lock_irqsave(&rcp->lock, flags); | ||
| 87 | if (unlikely(!rcp->signaled)) { | 93 | if (unlikely(!rcp->signaled)) { |
| 88 | rcp->signaled = 1; | 94 | rcp->signaled = 1; |
| 89 | /* | 95 | /* |
| @@ -109,6 +115,7 @@ static void force_quiescent_state(struct rcu_data *rdp, | |||
| 109 | for_each_cpu_mask_nr(cpu, cpumask) | 115 | for_each_cpu_mask_nr(cpu, cpumask) |
| 110 | smp_send_reschedule(cpu); | 116 | smp_send_reschedule(cpu); |
| 111 | } | 117 | } |
| 118 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
| 112 | } | 119 | } |
| 113 | #else | 120 | #else |
| 114 | static inline void force_quiescent_state(struct rcu_data *rdp, | 121 | static inline void force_quiescent_state(struct rcu_data *rdp, |
| @@ -118,6 +125,126 @@ static inline void force_quiescent_state(struct rcu_data *rdp, | |||
| 118 | } | 125 | } |
| 119 | #endif | 126 | #endif |
| 120 | 127 | ||
| 128 | static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp, | ||
| 129 | struct rcu_data *rdp) | ||
| 130 | { | ||
| 131 | long batch; | ||
| 132 | |||
| 133 | head->next = NULL; | ||
| 134 | smp_mb(); /* Read of rcu->cur must happen after any change by caller. */ | ||
| 135 | |||
| 136 | /* | ||
| 137 | * Determine the batch number of this callback. | ||
| 138 | * | ||
| 139 | * Using ACCESS_ONCE to avoid the following error when gcc eliminates | ||
| 140 | * local variable "batch" and emits code like this: | ||
| 141 | * 1) rdp->batch = rcp->cur + 1 # gets old value | ||
| 142 | * ...... | ||
| 143 | * 2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value | ||
| 144 | * then [*nxttail[0], *nxttail[1]) may contain callbacks | ||
| 145 | * whose batch # == rdp->batch; see the comment in struct rcu_data. | ||
| 146 | */ | ||
| 147 | batch = ACCESS_ONCE(rcp->cur) + 1; | ||
| 148 | |||
| 149 | if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) { | ||
| 150 | /* process callbacks */ | ||
| 151 | rdp->nxttail[0] = rdp->nxttail[1]; | ||
| 152 | rdp->nxttail[1] = rdp->nxttail[2]; | ||
| 153 | if (rcu_batch_after(batch - 1, rdp->batch)) | ||
| 154 | rdp->nxttail[0] = rdp->nxttail[2]; | ||
| 155 | } | ||
| 156 | |||
| 157 | rdp->batch = batch; | ||
| 158 | *rdp->nxttail[2] = head; | ||
| 159 | rdp->nxttail[2] = &head->next; | ||
| 160 | |||
| 161 | if (unlikely(++rdp->qlen > qhimark)) { | ||
| 162 | rdp->blimit = INT_MAX; | ||
| 163 | force_quiescent_state(rdp, &rcu_ctrlblk); | ||
| 164 | } | ||
| 165 | } | ||
| 166 | |||
| 167 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
| 168 | |||
| 169 | static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp) | ||
| 170 | { | ||
| 171 | rcp->gp_start = jiffies; | ||
| 172 | rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK; | ||
| 173 | } | ||
| 174 | |||
| 175 | static void print_other_cpu_stall(struct rcu_ctrlblk *rcp) | ||
| 176 | { | ||
| 177 | int cpu; | ||
| 178 | long delta; | ||
| 179 | unsigned long flags; | ||
| 180 | |||
| 181 | /* Only let one CPU complain about others per time interval. */ | ||
| 182 | |||
| 183 | spin_lock_irqsave(&rcp->lock, flags); | ||
| 184 | delta = jiffies - rcp->jiffies_stall; | ||
| 185 | if (delta < 2 || rcp->cur != rcp->completed) { | ||
| 186 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
| 187 | return; | ||
| 188 | } | ||
| 189 | rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; | ||
| 190 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
| 191 | |||
| 192 | /* OK, time to rat on our buddy... */ | ||
| 193 | |||
| 194 | printk(KERN_ERR "RCU detected CPU stalls:"); | ||
| 195 | for_each_possible_cpu(cpu) { | ||
| 196 | if (cpu_isset(cpu, rcp->cpumask)) | ||
| 197 | printk(" %d", cpu); | ||
| 198 | } | ||
| 199 | printk(" (detected by %d, t=%ld jiffies)\n", | ||
| 200 | smp_processor_id(), (long)(jiffies - rcp->gp_start)); | ||
| 201 | } | ||
| 202 | |||
| 203 | static void print_cpu_stall(struct rcu_ctrlblk *rcp) | ||
| 204 | { | ||
| 205 | unsigned long flags; | ||
| 206 | |||
| 207 | printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu jiffies)\n", | ||
| 208 | smp_processor_id(), jiffies, | ||
| 209 | jiffies - rcp->gp_start); | ||
| 210 | dump_stack(); | ||
| 211 | spin_lock_irqsave(&rcp->lock, flags); | ||
| 212 | if ((long)(jiffies - rcp->jiffies_stall) >= 0) | ||
| 213 | rcp->jiffies_stall = | ||
| 214 | jiffies + RCU_SECONDS_TILL_STALL_RECHECK; | ||
| 215 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
| 216 | set_need_resched(); /* kick ourselves to get things going. */ | ||
| 217 | } | ||
| 218 | |||
| 219 | static void check_cpu_stall(struct rcu_ctrlblk *rcp) | ||
| 220 | { | ||
| 221 | long delta; | ||
| 222 | |||
| 223 | delta = jiffies - rcp->jiffies_stall; | ||
| 224 | if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0) { | ||
| 225 | |||
| 226 | /* We haven't checked in, so go dump stack. */ | ||
| 227 | print_cpu_stall(rcp); | ||
| 228 | |||
| 229 | } else if (rcp->cur != rcp->completed && delta >= 2) { | ||
| 230 | |||
| 231 | /* They had two seconds to dump stack, so complain. */ | ||
| 232 | print_other_cpu_stall(rcp); | ||
| 233 | } | ||
| 234 | } | ||
| 235 | |||
| 236 | #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
| 237 | |||
| 238 | static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp) | ||
| 239 | { | ||
| 240 | } | ||
| 241 | |||
| 242 | static inline void check_cpu_stall(struct rcu_ctrlblk *rcp) | ||
| 243 | { | ||
| 244 | } | ||
| 245 | |||
| 246 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
| 247 | |||
| 121 | /** | 248 | /** |
| 122 | * call_rcu - Queue an RCU callback for invocation after a grace period. | 249 | * call_rcu - Queue an RCU callback for invocation after a grace period. |
| 123 | * @head: structure to be used for queueing the RCU updates. | 250 | * @head: structure to be used for queueing the RCU updates. |
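The batch bookkeeping in __call_rcu() and in the struct rcu_data comment leans on the wraparound-tolerant comparisons rcu_batch_before()/rcu_batch_after() from rcuclassic.h. A small user-space illustration of how those comparisons behave follows; the kernel is built with wrapping signed arithmetic, so the subtraction stays meaningful even when the counters eventually wrap.

    #include <assert.h>

    /* Same shape as the helpers in include/linux/rcuclassic.h. */
    static int rcu_batch_before(long a, long b) { return (a - b) < 0; }
    static int rcu_batch_after(long a, long b)  { return (a - b) > 0; }

    int main(void)
    {
    	long cur = -300;		/* initial .cur/.completed/.pending value */
    	long batch = cur + 1;		/* batch number assigned in __call_rcu() */

    	assert(rcu_batch_after(batch, cur));	/* new callbacks wait for cur + 1 */
    	assert(rcu_batch_before(cur, batch));
    	assert(!rcu_batch_after(cur, cur));	/* equal batches compare neither way */
    	return 0;
    }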
| @@ -133,18 +260,10 @@ void call_rcu(struct rcu_head *head, | |||
| 133 | void (*func)(struct rcu_head *rcu)) | 260 | void (*func)(struct rcu_head *rcu)) |
| 134 | { | 261 | { |
| 135 | unsigned long flags; | 262 | unsigned long flags; |
| 136 | struct rcu_data *rdp; | ||
| 137 | 263 | ||
| 138 | head->func = func; | 264 | head->func = func; |
| 139 | head->next = NULL; | ||
| 140 | local_irq_save(flags); | 265 | local_irq_save(flags); |
| 141 | rdp = &__get_cpu_var(rcu_data); | 266 | __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data)); |
| 142 | *rdp->nxttail = head; | ||
| 143 | rdp->nxttail = &head->next; | ||
| 144 | if (unlikely(++rdp->qlen > qhimark)) { | ||
| 145 | rdp->blimit = INT_MAX; | ||
| 146 | force_quiescent_state(rdp, &rcu_ctrlblk); | ||
| 147 | } | ||
| 148 | local_irq_restore(flags); | 267 | local_irq_restore(flags); |
| 149 | } | 268 | } |
| 150 | EXPORT_SYMBOL_GPL(call_rcu); | 269 | EXPORT_SYMBOL_GPL(call_rcu); |
| @@ -169,20 +288,10 @@ void call_rcu_bh(struct rcu_head *head, | |||
| 169 | void (*func)(struct rcu_head *rcu)) | 288 | void (*func)(struct rcu_head *rcu)) |
| 170 | { | 289 | { |
| 171 | unsigned long flags; | 290 | unsigned long flags; |
| 172 | struct rcu_data *rdp; | ||
| 173 | 291 | ||
| 174 | head->func = func; | 292 | head->func = func; |
| 175 | head->next = NULL; | ||
| 176 | local_irq_save(flags); | 293 | local_irq_save(flags); |
| 177 | rdp = &__get_cpu_var(rcu_bh_data); | 294 | __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); |
| 178 | *rdp->nxttail = head; | ||
| 179 | rdp->nxttail = &head->next; | ||
| 180 | |||
| 181 | if (unlikely(++rdp->qlen > qhimark)) { | ||
| 182 | rdp->blimit = INT_MAX; | ||
| 183 | force_quiescent_state(rdp, &rcu_bh_ctrlblk); | ||
| 184 | } | ||
| 185 | |||
| 186 | local_irq_restore(flags); | 295 | local_irq_restore(flags); |
| 187 | } | 296 | } |
| 188 | EXPORT_SYMBOL_GPL(call_rcu_bh); | 297 | EXPORT_SYMBOL_GPL(call_rcu_bh); |
| @@ -211,12 +320,6 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); | |||
| 211 | static inline void raise_rcu_softirq(void) | 320 | static inline void raise_rcu_softirq(void) |
| 212 | { | 321 | { |
| 213 | raise_softirq(RCU_SOFTIRQ); | 322 | raise_softirq(RCU_SOFTIRQ); |
| 214 | /* | ||
| 215 | * The smp_mb() here is required to ensure that this cpu's | ||
| 216 | * __rcu_process_callbacks() reads the most recently updated | ||
| 217 | * value of rcu->cur. | ||
| 218 | */ | ||
| 219 | smp_mb(); | ||
| 220 | } | 323 | } |
| 221 | 324 | ||
| 222 | /* | 325 | /* |
| @@ -225,6 +328,7 @@ static inline void raise_rcu_softirq(void) | |||
| 225 | */ | 328 | */ |
| 226 | static void rcu_do_batch(struct rcu_data *rdp) | 329 | static void rcu_do_batch(struct rcu_data *rdp) |
| 227 | { | 330 | { |
| 331 | unsigned long flags; | ||
| 228 | struct rcu_head *next, *list; | 332 | struct rcu_head *next, *list; |
| 229 | int count = 0; | 333 | int count = 0; |
| 230 | 334 | ||
| @@ -239,9 +343,9 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
| 239 | } | 343 | } |
| 240 | rdp->donelist = list; | 344 | rdp->donelist = list; |
| 241 | 345 | ||
| 242 | local_irq_disable(); | 346 | local_irq_save(flags); |
| 243 | rdp->qlen -= count; | 347 | rdp->qlen -= count; |
| 244 | local_irq_enable(); | 348 | local_irq_restore(flags); |
| 245 | if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark) | 349 | if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark) |
| 246 | rdp->blimit = blimit; | 350 | rdp->blimit = blimit; |
| 247 | 351 | ||
| @@ -269,6 +373,7 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
| 269 | * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace | 373 | * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace |
| 270 | * period (if necessary). | 374 | * period (if necessary). |
| 271 | */ | 375 | */ |
| 376 | |||
| 272 | /* | 377 | /* |
| 273 | * Register a new batch of callbacks, and start it up if there is currently no | 378 | * Register a new batch of callbacks, and start it up if there is currently no |
| 274 | * active batch and the batch to be registered has not already occurred. | 379 | * active batch and the batch to be registered has not already occurred. |
| @@ -276,15 +381,10 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
| 276 | */ | 381 | */ |
| 277 | static void rcu_start_batch(struct rcu_ctrlblk *rcp) | 382 | static void rcu_start_batch(struct rcu_ctrlblk *rcp) |
| 278 | { | 383 | { |
| 279 | if (rcp->next_pending && | 384 | if (rcp->cur != rcp->pending && |
| 280 | rcp->completed == rcp->cur) { | 385 | rcp->completed == rcp->cur) { |
| 281 | rcp->next_pending = 0; | ||
| 282 | /* | ||
| 283 | * next_pending == 0 must be visible in | ||
| 284 | * __rcu_process_callbacks() before it can see new value of cur. | ||
| 285 | */ | ||
| 286 | smp_wmb(); | ||
| 287 | rcp->cur++; | 386 | rcp->cur++; |
| 387 | record_gp_stall_check_time(rcp); | ||
| 288 | 388 | ||
| 289 | /* | 389 | /* |
| 290 | * Accessing nohz_cpu_mask before incrementing rcp->cur needs a | 390 | * Accessing nohz_cpu_mask before incrementing rcp->cur needs a |
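Replacing the next_pending flag with a pending batch number gives a small counter state machine: a CPU that wants a grace period records its batch number in rcp->pending, and rcu_start_batch() advances rcp->cur only when no grace period is running. Below is a hedged, user-space-sized model of just the three counters; the real code also manages the cpumask, the stall timestamps, and the locking.

    #include <assert.h>

    struct ctrl { long cur, completed, pending; };

    /* rcu_start_batch(): begin a grace period only if one is wanted and none runs. */
    static void start_batch(struct ctrl *rcp)
    {
    	if (rcp->cur != rcp->pending && rcp->completed == rcp->cur)
    		rcp->cur++;
    }

    int main(void)
    {
    	struct ctrl c = { .cur = -300, .completed = -300, .pending = -300 };

    	c.pending = c.cur + 1;		/* a CPU queued callbacks for cur + 1 */
    	start_batch(&c);
    	assert(c.cur == -299);		/* grace period -299 is now running */

    	c.pending = c.cur + 1;		/* more callbacks arrive meanwhile */
    	start_batch(&c);
    	assert(c.cur == -299);		/* refused: previous GP not completed */

    	c.completed = c.cur;		/* cpu_quiet() ends the grace period */
    	start_batch(&c);
    	assert(c.cur == -298);		/* the pending request is now honored */
    	return 0;
    }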
| @@ -322,6 +422,8 @@ static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) | |||
| 322 | static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, | 422 | static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, |
| 323 | struct rcu_data *rdp) | 423 | struct rcu_data *rdp) |
| 324 | { | 424 | { |
| 425 | unsigned long flags; | ||
| 426 | |||
| 325 | if (rdp->quiescbatch != rcp->cur) { | 427 | if (rdp->quiescbatch != rcp->cur) { |
| 326 | /* start new grace period: */ | 428 | /* start new grace period: */ |
| 327 | rdp->qs_pending = 1; | 429 | rdp->qs_pending = 1; |
| @@ -345,7 +447,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, | |||
| 345 | return; | 447 | return; |
| 346 | rdp->qs_pending = 0; | 448 | rdp->qs_pending = 0; |
| 347 | 449 | ||
| 348 | spin_lock(&rcp->lock); | 450 | spin_lock_irqsave(&rcp->lock, flags); |
| 349 | /* | 451 | /* |
| 350 | * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync | 452 | * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync |
| 351 | * during cpu startup. Ignore the quiescent state. | 453 | * during cpu startup. Ignore the quiescent state. |
| @@ -353,7 +455,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, | |||
| 353 | if (likely(rdp->quiescbatch == rcp->cur)) | 455 | if (likely(rdp->quiescbatch == rcp->cur)) |
| 354 | cpu_quiet(rdp->cpu, rcp); | 456 | cpu_quiet(rdp->cpu, rcp); |
| 355 | 457 | ||
| 356 | spin_unlock(&rcp->lock); | 458 | spin_unlock_irqrestore(&rcp->lock, flags); |
| 357 | } | 459 | } |
| 358 | 460 | ||
| 359 | 461 | ||
| @@ -364,33 +466,38 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, | |||
| 364 | * which is dead and hence not processing interrupts. | 466 | * which is dead and hence not processing interrupts. |
| 365 | */ | 467 | */ |
| 366 | static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, | 468 | static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, |
| 367 | struct rcu_head **tail) | 469 | struct rcu_head **tail, long batch) |
| 368 | { | 470 | { |
| 369 | local_irq_disable(); | 471 | unsigned long flags; |
| 370 | *this_rdp->nxttail = list; | 472 | |
| 371 | if (list) | 473 | if (list) { |
| 372 | this_rdp->nxttail = tail; | 474 | local_irq_save(flags); |
| 373 | local_irq_enable(); | 475 | this_rdp->batch = batch; |
| 476 | *this_rdp->nxttail[2] = list; | ||
| 477 | this_rdp->nxttail[2] = tail; | ||
| 478 | local_irq_restore(flags); | ||
| 479 | } | ||
| 374 | } | 480 | } |
| 375 | 481 | ||
| 376 | static void __rcu_offline_cpu(struct rcu_data *this_rdp, | 482 | static void __rcu_offline_cpu(struct rcu_data *this_rdp, |
| 377 | struct rcu_ctrlblk *rcp, struct rcu_data *rdp) | 483 | struct rcu_ctrlblk *rcp, struct rcu_data *rdp) |
| 378 | { | 484 | { |
| 379 | /* if the cpu going offline owns the grace period | 485 | unsigned long flags; |
| 486 | |||
| 487 | /* | ||
| 488 | * if the cpu going offline owns the grace period | ||
| 380 | * we can block indefinitely waiting for it, so flush | 489 | * we can block indefinitely waiting for it, so flush |
| 381 | * it here | 490 | * it here |
| 382 | */ | 491 | */ |
| 383 | spin_lock_bh(&rcp->lock); | 492 | spin_lock_irqsave(&rcp->lock, flags); |
| 384 | if (rcp->cur != rcp->completed) | 493 | if (rcp->cur != rcp->completed) |
| 385 | cpu_quiet(rdp->cpu, rcp); | 494 | cpu_quiet(rdp->cpu, rcp); |
| 386 | spin_unlock_bh(&rcp->lock); | 495 | rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1); |
| 387 | rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail); | 496 | rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1); |
| 388 | rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail); | 497 | spin_unlock(&rcp->lock); |
| 389 | rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail); | ||
| 390 | 498 | ||
| 391 | local_irq_disable(); | ||
| 392 | this_rdp->qlen += rdp->qlen; | 499 | this_rdp->qlen += rdp->qlen; |
| 393 | local_irq_enable(); | 500 | local_irq_restore(flags); |
| 394 | } | 501 | } |
| 395 | 502 | ||
| 396 | static void rcu_offline_cpu(int cpu) | 503 | static void rcu_offline_cpu(int cpu) |
| @@ -420,38 +527,52 @@ static void rcu_offline_cpu(int cpu) | |||
| 420 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, | 527 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, |
| 421 | struct rcu_data *rdp) | 528 | struct rcu_data *rdp) |
| 422 | { | 529 | { |
| 423 | if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) { | 530 | unsigned long flags; |
| 424 | *rdp->donetail = rdp->curlist; | 531 | long completed_snap; |
| 425 | rdp->donetail = rdp->curtail; | ||
| 426 | rdp->curlist = NULL; | ||
| 427 | rdp->curtail = &rdp->curlist; | ||
| 428 | } | ||
| 429 | 532 | ||
| 430 | if (rdp->nxtlist && !rdp->curlist) { | 533 | if (rdp->nxtlist) { |
| 431 | local_irq_disable(); | 534 | local_irq_save(flags); |
| 432 | rdp->curlist = rdp->nxtlist; | 535 | completed_snap = ACCESS_ONCE(rcp->completed); |
| 433 | rdp->curtail = rdp->nxttail; | ||
| 434 | rdp->nxtlist = NULL; | ||
| 435 | rdp->nxttail = &rdp->nxtlist; | ||
| 436 | local_irq_enable(); | ||
| 437 | 536 | ||
| 438 | /* | 537 | /* |
| 439 | * start the next batch of callbacks | 538 | * move the other grace-period-completed entries to |
| 539 | * [rdp->nxtlist, *rdp->nxttail[0]) temporarily | ||
| 440 | */ | 540 | */ |
| 541 | if (!rcu_batch_before(completed_snap, rdp->batch)) | ||
| 542 | rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2]; | ||
| 543 | else if (!rcu_batch_before(completed_snap, rdp->batch - 1)) | ||
| 544 | rdp->nxttail[0] = rdp->nxttail[1]; | ||
| 441 | 545 | ||
| 442 | /* determine batch number */ | 546 | /* |
| 443 | rdp->batch = rcp->cur + 1; | 547 | * the grace period for entries in |
| 444 | /* see the comment and corresponding wmb() in | 548 | * [rdp->nxtlist, *rdp->nxttail[0]) has completed and |
| 445 | * the rcu_start_batch() | 549 | * move these entries to donelist |
| 446 | */ | 550 | */ |
| 447 | smp_rmb(); | 551 | if (rdp->nxttail[0] != &rdp->nxtlist) { |
| 552 | *rdp->donetail = rdp->nxtlist; | ||
| 553 | rdp->donetail = rdp->nxttail[0]; | ||
| 554 | rdp->nxtlist = *rdp->nxttail[0]; | ||
| 555 | *rdp->donetail = NULL; | ||
| 556 | |||
| 557 | if (rdp->nxttail[1] == rdp->nxttail[0]) | ||
| 558 | rdp->nxttail[1] = &rdp->nxtlist; | ||
| 559 | if (rdp->nxttail[2] == rdp->nxttail[0]) | ||
| 560 | rdp->nxttail[2] = &rdp->nxtlist; | ||
| 561 | rdp->nxttail[0] = &rdp->nxtlist; | ||
| 562 | } | ||
| 563 | |||
| 564 | local_irq_restore(flags); | ||
| 565 | |||
| 566 | if (rcu_batch_after(rdp->batch, rcp->pending)) { | ||
| 567 | unsigned long flags2; | ||
| 448 | 568 | ||
| 449 | if (!rcp->next_pending) { | ||
| 450 | /* and start it/schedule start if it's a new batch */ | 569 | /* and start it/schedule start if it's a new batch */ |
| 451 | spin_lock(&rcp->lock); | 570 | spin_lock_irqsave(&rcp->lock, flags2); |
| 452 | rcp->next_pending = 1; | 571 | if (rcu_batch_after(rdp->batch, rcp->pending)) { |
| 453 | rcu_start_batch(rcp); | 572 | rcp->pending = rdp->batch; |
| 454 | spin_unlock(&rcp->lock); | 573 | rcu_start_batch(rcp); |
| 574 | } | ||
| 575 | spin_unlock_irqrestore(&rcp->lock, flags2); | ||
| 455 | } | 576 | } |
| 456 | } | 577 | } |
| 457 | 578 | ||
| @@ -462,21 +583,53 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, | |||
| 462 | 583 | ||
| 463 | static void rcu_process_callbacks(struct softirq_action *unused) | 584 | static void rcu_process_callbacks(struct softirq_action *unused) |
| 464 | { | 585 | { |
| 586 | /* | ||
| 587 | * Memory references from any prior RCU read-side critical sections | ||
| 588 | * executed by the interrupted code must be seen before any RCU | ||
| 589 | * grace-period manipulations below. | ||
| 590 | */ | ||
| 591 | |||
| 592 | smp_mb(); /* See above block comment. */ | ||
| 593 | |||
| 465 | __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); | 594 | __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); |
| 466 | __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); | 595 | __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); |
| 596 | |||
| 597 | /* | ||
| 598 | * Memory references from any later RCU read-side critical sections | ||
| 599 | * executed by the interrupted code must be seen after any RCU | ||
| 600 | * grace-period manipulations above. | ||
| 601 | */ | ||
| 602 | |||
| 603 | smp_mb(); /* See above block comment. */ | ||
| 467 | } | 604 | } |
| 468 | 605 | ||
| 469 | static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) | 606 | static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) |
| 470 | { | 607 | { |
| 471 | /* This cpu has pending rcu entries and the grace period | 608 | /* Check for CPU stalls, if enabled. */ |
| 472 | * for them has completed. | 609 | check_cpu_stall(rcp); |
| 473 | */ | ||
| 474 | if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) | ||
| 475 | return 1; | ||
| 476 | 610 | ||
| 477 | /* This cpu has no pending entries, but there are new entries */ | 611 | if (rdp->nxtlist) { |
| 478 | if (!rdp->curlist && rdp->nxtlist) | 612 | long completed_snap = ACCESS_ONCE(rcp->completed); |
| 479 | return 1; | 613 | |
| 614 | /* | ||
| 615 | * This cpu has pending rcu entries and the grace period | ||
| 616 | * for them has completed. | ||
| 617 | */ | ||
| 618 | if (!rcu_batch_before(completed_snap, rdp->batch)) | ||
| 619 | return 1; | ||
| 620 | if (!rcu_batch_before(completed_snap, rdp->batch - 1) && | ||
| 621 | rdp->nxttail[0] != rdp->nxttail[1]) | ||
| 622 | return 1; | ||
| 623 | if (rdp->nxttail[0] != &rdp->nxtlist) | ||
| 624 | return 1; | ||
| 625 | |||
| 626 | /* | ||
| 627 | * This cpu has pending rcu entries and the new batch | ||
| 628 | * for then hasn't been started nor scheduled start | ||
| 629 | */ | ||
| 630 | if (rcu_batch_after(rdp->batch, rcp->pending)) | ||
| 631 | return 1; | ||
| 632 | } | ||
| 480 | 633 | ||
| 481 | /* This cpu has finished callbacks to invoke */ | 634 | /* This cpu has finished callbacks to invoke */ |
| 482 | if (rdp->donelist) | 635 | if (rdp->donelist) |
| @@ -512,9 +665,15 @@ int rcu_needs_cpu(int cpu) | |||
| 512 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); | 665 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); |
| 513 | struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); | 666 | struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); |
| 514 | 667 | ||
| 515 | return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu)); | 668 | return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu); |
| 516 | } | 669 | } |
| 517 | 670 | ||
| 671 | /* | ||
| 672 | * Top-level function driving RCU grace-period detection, normally | ||
| 673 | * invoked from the scheduler-clock interrupt. This function simply | ||
| 674 | * increments counters that are read only from softirq by this same | ||
| 675 | * CPU, so there are no memory barriers required. | ||
| 676 | */ | ||
| 518 | void rcu_check_callbacks(int cpu, int user) | 677 | void rcu_check_callbacks(int cpu, int user) |
| 519 | { | 678 | { |
| 520 | if (user || | 679 | if (user || |
| @@ -558,14 +717,17 @@ void rcu_check_callbacks(int cpu, int user) | |||
| 558 | static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, | 717 | static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, |
| 559 | struct rcu_data *rdp) | 718 | struct rcu_data *rdp) |
| 560 | { | 719 | { |
| 720 | unsigned long flags; | ||
| 721 | |||
| 722 | spin_lock_irqsave(&rcp->lock, flags); | ||
| 561 | memset(rdp, 0, sizeof(*rdp)); | 723 | memset(rdp, 0, sizeof(*rdp)); |
| 562 | rdp->curtail = &rdp->curlist; | 724 | rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist; |
| 563 | rdp->nxttail = &rdp->nxtlist; | ||
| 564 | rdp->donetail = &rdp->donelist; | 725 | rdp->donetail = &rdp->donelist; |
| 565 | rdp->quiescbatch = rcp->completed; | 726 | rdp->quiescbatch = rcp->completed; |
| 566 | rdp->qs_pending = 0; | 727 | rdp->qs_pending = 0; |
| 567 | rdp->cpu = cpu; | 728 | rdp->cpu = cpu; |
| 568 | rdp->blimit = blimit; | 729 | rdp->blimit = blimit; |
| 730 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
| 569 | } | 731 | } |
| 570 | 732 | ||
| 571 | static void __cpuinit rcu_online_cpu(int cpu) | 733 | static void __cpuinit rcu_online_cpu(int cpu) |
| @@ -610,6 +772,9 @@ static struct notifier_block __cpuinitdata rcu_nb = { | |||
| 610 | */ | 772 | */ |
| 611 | void __init __rcu_init(void) | 773 | void __init __rcu_init(void) |
| 612 | { | 774 | { |
| 775 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
| 776 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | ||
| 777 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
| 613 | rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, | 778 | rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, |
| 614 | (void *)(long)smp_processor_id()); | 779 | (void *)(long)smp_processor_id()); |
| 615 | /* Register notifier for non-boot CPUs */ | 780 | /* Register notifier for non-boot CPUs */ |
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 27827931ca0d..ca4bbbe04aa4 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
| @@ -59,14 +59,6 @@ | |||
| 59 | #include <linux/rcupreempt_trace.h> | 59 | #include <linux/rcupreempt_trace.h> |
| 60 | 60 | ||
| 61 | /* | 61 | /* |
| 62 | * Macro that prevents the compiler from reordering accesses, but does | ||
| 63 | * absolutely -nothing- to prevent CPUs from reordering. This is used | ||
| 64 | * only to mediate communication between mainline code and hardware | ||
| 65 | * interrupt and NMI handlers. | ||
| 66 | */ | ||
| 67 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) | ||
| 68 | |||
| 69 | /* | ||
| 70 | * PREEMPT_RCU data structures. | 62 | * PREEMPT_RCU data structures. |
| 71 | */ | 63 | */ |
| 72 | 64 | ||
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
index 5edf82c34bbc..35c2d3360ecf 100644
--- a/kernel/rcupreempt_trace.c
+++ b/kernel/rcupreempt_trace.c
| @@ -308,11 +308,16 @@ out: | |||
| 308 | 308 | ||
| 309 | static int __init rcupreempt_trace_init(void) | 309 | static int __init rcupreempt_trace_init(void) |
| 310 | { | 310 | { |
| 311 | int ret; | ||
| 312 | |||
| 311 | mutex_init(&rcupreempt_trace_mutex); | 313 | mutex_init(&rcupreempt_trace_mutex); |
| 312 | rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL); | 314 | rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL); |
| 313 | if (!rcupreempt_trace_buf) | 315 | if (!rcupreempt_trace_buf) |
| 314 | return 1; | 316 | return 1; |
| 315 | return rcupreempt_debugfs_init(); | 317 | ret = rcupreempt_debugfs_init(); |
| 318 | if (ret) | ||
| 319 | kfree(rcupreempt_trace_buf); | ||
| 320 | return ret; | ||
| 316 | } | 321 | } |
| 317 | 322 | ||
| 318 | static void __exit rcupreempt_trace_cleanup(void) | 323 | static void __exit rcupreempt_trace_cleanup(void) |
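The rcupreempt_trace_init() fix above is an instance of a general init-time unwind rule: every later failure must release what earlier steps allocated. A hedged sketch of the same shape with hypothetical names follows (the original returns 1 rather than -ENOMEM on allocation failure; the unwind structure is what matters).

    #include <linux/init.h>
    #include <linux/slab.h>

    #define DEMO_TRACE_BUF_SIZE 4096	/* hypothetical size */

    static char *demo_trace_buf;

    static int demo_debugfs_init(void);	/* hypothetical second init step */

    static int __init demo_trace_init(void)
    {
    	int ret;

    	demo_trace_buf = kmalloc(DEMO_TRACE_BUF_SIZE, GFP_KERNEL);
    	if (!demo_trace_buf)
    		return -ENOMEM;

    	ret = demo_debugfs_init();
    	if (ret)
    		kfree(demo_trace_buf);	/* undo step 1 if step 2 fails */
    	return ret;
    }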
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0b504814e378..9fee969dd60e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
| @@ -597,6 +597,19 @@ config RCU_TORTURE_TEST_RUNNABLE | |||
| 597 | Say N here if you want the RCU torture tests to start only | 597 | Say N here if you want the RCU torture tests to start only |
| 598 | after being manually enabled via /proc. | 598 | after being manually enabled via /proc. |
| 599 | 599 | ||
| 600 | config RCU_CPU_STALL_DETECTOR | ||
| 601 | bool "Check for stalled CPUs delaying RCU grace periods" | ||
| 602 | depends on CLASSIC_RCU | ||
| 603 | default n | ||
| 604 | help | ||
| 605 | This option causes RCU to printk information on which | ||
| 606 | CPUs are delaying the current grace period, but only when | ||
| 607 | the grace period extends for excessive time periods. | ||
| 608 | |||
| 609 | Say Y if you want RCU to perform such checks. | ||
| 610 | |||
| 611 | Say N if you are unsure. | ||
| 612 | |||
| 600 | config KPROBES_SANITY_TEST | 613 | config KPROBES_SANITY_TEST |
| 601 | bool "Kprobes sanity tests" | 614 | bool "Kprobes sanity tests" |
| 602 | depends on DEBUG_KERNEL | 615 | depends on DEBUG_KERNEL |
