Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c                      |  2
-rw-r--r--  kernel/fork.c                      |  9
-rw-r--r--  kernel/lockdep.c                   |  1
-rw-r--r--  kernel/pid.c                       |  4
-rw-r--r--  kernel/rcutree.h                   | 21
-rw-r--r--  kernel/rcutree_plugin.h            |  8
-rw-r--r--  kernel/sched_fair.c                |  2
-rw-r--r--  kernel/trace/ftrace.c              | 22
-rw-r--r--  kernel/trace/trace_event_profile.c |  4

9 files changed, 49 insertions(+), 24 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index ce1e48c2d93d..cce59cb5ee6a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -87,7 +87,7 @@ static void __exit_signal(struct task_struct *tsk)
 
 	sighand = rcu_dereference_check(tsk->sighand,
 					rcu_read_lock_held() ||
-					lockdep_is_held(&tasklist_lock));
+					lockdep_tasklist_lock_is_held());
 	spin_lock(&sighand->siglock);
 
 	posix_cpu_timers_exit(tsk);
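Note: this hunk swaps the open-coded lockdep_is_held(&tasklist_lock) for the new wrapper introduced in kernel/fork.c below. For context, rcu_dereference_check() takes a boolean "this dereference is safe" condition; a simplified sketch of its 2.6.34-era shape (the real macro lives in include/linux/rcupdate.h and routes through a couple of helper layers):

	/* Simplified sketch -- not the literal kernel macro. */
	#define rcu_dereference_check(p, c) \
		({ \
			if (debug_lockdep_rcu_enabled() && !(c)) \
				lockdep_rcu_dereference(__FILE__, __LINE__); \
			rcu_dereference_raw(p); \
		})

When CONFIG_PROVE_RCU is off, the condition compiles away and this degenerates to a plain rcu_dereference().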
diff --git a/kernel/fork.c b/kernel/fork.c
index 1beb6c303c41..4799c5f0e6d0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -86,7 +86,14 @@ int max_threads;		/* tunable limit on nr_threads */
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
-EXPORT_SYMBOL_GPL(tasklist_lock);
+
+#ifdef CONFIG_PROVE_RCU
+int lockdep_tasklist_lock_is_held(void)
+{
+	return lockdep_is_held(&tasklist_lock);
+}
+EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
+#endif /* #ifdef CONFIG_PROVE_RCU */
 
 int nr_processes(void)
 {
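Note: rather than exporting tasklist_lock itself to modules, the patch exports only a predicate reporting whether the lock is held, which is all the lockdep-based RCU checks need. The matching declaration lands on the header side, outside this listing's 'kernel' path filter; it would presumably look like:

	/* Assumed header-side declaration; not shown in this listing. */
	#ifdef CONFIG_PROVE_RCU
	extern int lockdep_tasklist_lock_is_held(void);
	#endif /* #ifdef CONFIG_PROVE_RCU */

This keeps the lock itself private to the core kernel while letting any rcu_dereference_check() caller name the condition.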
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 0c30d0455de1..681bc2e1e187 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3822,6 +3822,7 @@ void lockdep_rcu_dereference(const char *file, const int line)
 	printk("%s:%d invoked rcu_dereference_check() without protection!\n",
 	       file, line);
 	printk("\nother info that might help us debug this:\n\n");
+	printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
 	lockdep_print_held_locks(curr);
 	printk("\nstack backtrace:\n");
 	dump_stack();
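Note: the extra printk() makes the splat more self-diagnosing: rcu_scheduler_active = 0 means the warning fired during early boot before RCU was fully up, and debug_locks = 0 means lockdep has already disabled itself, so the verdict may be stale. A warning from, say, the pid.c hunk below would read roughly like (illustrative reconstruction, placeholders in angle brackets):

	kernel/pid.c:370 invoked rcu_dereference_check() without protection!

	other info that might help us debug this:

	rcu_scheduler_active = 1, debug_locks = 1
	<held locks, if any, from lockdep_print_held_locks()>

	stack backtrace:
	<dump_stack() output>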
diff --git a/kernel/pid.c b/kernel/pid.c
index 86b296943e5f..aebb30d9c233 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -367,7 +367,9 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
 	struct task_struct *result = NULL;
 	if (pid) {
 		struct hlist_node *first;
-		first = rcu_dereference_check(pid->tasks[type].first, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock));
+		first = rcu_dereference_check(pid->tasks[type].first,
+					      rcu_read_lock_held() ||
+					      lockdep_tasklist_lock_is_held());
 		if (first)
 			result = hlist_entry(first, struct task_struct, pids[(type)].node);
 	}
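Note: the condition documents the two legitimate ways to call pid_task(): inside an RCU read-side critical section, or while holding tasklist_lock. A typical RCU-side caller looks something like this (illustrative sketch, not taken from this patch; nr is a hypothetical pid number):

	struct task_struct *task;

	rcu_read_lock();
	task = pid_task(find_vpid(nr), PIDTYPE_PID);
	if (task)
		get_task_struct(task);	/* pin it before leaving the RCU section */
	rcu_read_unlock();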
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 1439eb504c22..4a525a30e08e 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -246,12 +246,21 @@ struct rcu_data {
 
 #define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_STALL_RAT_DELAY		2	   /* Allow other CPUs time */
-						   /*  to take at least one */
-						   /*  scheduling clock irq */
-						   /*  before ratting on them. */
+
+#ifdef CONFIG_PROVE_RCU
+#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
+#else
+#define RCU_STALL_DELAY_DELTA	       0
+#endif
+
+#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ + RCU_STALL_DELAY_DELTA)
+						/* for rsp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA)
+						/* for rsp->jiffies_stall */
+#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
+						/*  to take at least one */
+						/*  scheduling clock irq */
+						/*  before ratting on them. */
 
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
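Note: lockdep checking under CONFIG_PROVE_RCU slows everything down, so the stall detector gets extra slack to avoid false positives. Working out the arithmetic (HZ jiffies equal one second):

	/*
	 * Effective stall-warning deadlines:
	 *   CONFIG_PROVE_RCU=y:  first check at 10*HZ + 5*HZ = 15 s,
	 *                        recheck at 30*HZ + 5*HZ = 35 s
	 *   otherwise:           first check at 10 s, recheck at 30 s
	 */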
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 464ad2cdee00..79b53bda8943 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1010,6 +1010,10 @@ int rcu_needs_cpu(int cpu)
 	int c = 0;
 	int thatcpu;
 
+	/* Check for being in the holdoff period. */
+	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
+		return rcu_needs_cpu_quick_check(cpu);
+
 	/* Don't bother unless we are the last non-dyntick-idle CPU. */
 	for_each_cpu_not(thatcpu, nohz_cpu_mask)
 		if (thatcpu != cpu) {
@@ -1041,10 +1045,8 @@ int rcu_needs_cpu(int cpu)
 	}
 
 	/* If RCU callbacks are still pending, RCU still needs this CPU. */
-	if (c) {
+	if (c)
 		raise_softirq(RCU_SOFTIRQ);
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-	}
 	return c;
 }
 
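Note: rcu_dyntick_holdoff records the last jiffy on which this CPU tried to accelerate grace periods. Checking it at function entry puts the whole attempt, not just the callback-raising tail, under the one-attempt-per-jiffy rate limit. The resulting control flow, condensed from the two hunks above (illustrative, not a literal copy of the function):

	int rcu_needs_cpu(int cpu)
	{
		/* At most one acceleration attempt per jiffy per CPU. */
		if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
			return rcu_needs_cpu_quick_check(cpu);	/* cheap fallback test */

		/* ...try to push this CPU's callbacks through, setting c... */

		if (c)
			raise_softirq(RCU_SOFTIRQ);
		return c;
	}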
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3e1fd96c6cf9..5a5ea2cd924f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3476,7 +3476,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 
 static inline int on_null_domain(int cpu)
 {
-	return !rcu_dereference(cpu_rq(cpu)->sd);
+	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
 }
 
 /*
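Note: scheduler data structures are protected by RCU-sched (readers run with preemption disabled) rather than by rcu_read_lock(), so a plain rcu_dereference() would trip the new lockdep check here. rcu_dereference_sched() encodes the correct condition; its definition at the time was approximately:

	/* Approximate 2.6.34-era definition from include/linux/rcupdate.h. */
	#define rcu_dereference_sched(p) \
		rcu_dereference_check(p, rcu_read_lock_sched_held())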
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index bb53edbb5c8c..d9062f5cc0c0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -27,6 +27,7 @@
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/hash.h>
+#include <linux/rcupdate.h>
 
 #include <trace/events/sched.h>
 
@@ -84,18 +85,22 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
+/*
+ * Traverse the ftrace_list, invoking all entries.  The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism.  The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
-	struct ftrace_ops *op = ftrace_list;
-
-	/* in case someone actually ports this to alpha! */
-	read_barrier_depends();
+	struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
 
 	while (op != &ftrace_list_end) {
-		/* silly alpha */
-		read_barrier_depends();
 		op->func(ip, parent_ip);
-		op = op->next;
+		op = rcu_dereference_raw(op->next); /*see above*/
 	};
 }
 
@@ -150,8 +155,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	 * the ops->next pointer is valid before another CPU sees
 	 * the ops pointer included into the ftrace_list.
 	 */
-	smp_wmb();
-	ftrace_list = ops;
+	rcu_assign_pointer(ftrace_list, ops);
 
 	if (ftrace_enabled) {
 		ftrace_func_t func;
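Note: reader and writer now pair up explicitly. On the read side, rcu_dereference_raw() replaces the open-coded read_barrier_depends() calls (the raw variant is used because, per the new comment, removed entries are simply leaked and need no grace period). On the write side, rcu_assign_pointer() folds the old smp_wmb()/store pair into a single publish operation; a simplified sketch of that era's macro (the real one also skips the barrier when assigning a compile-time NULL):

	/* Simplified sketch -- not the literal kernel macro. */
	#define rcu_assign_pointer(p, v) \
		({ \
			smp_wmb();	/* order initialization before publication */ \
			(p) = (v); \
		})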
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index f0d693005075..c1cc3ab633de 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -138,9 +138,9 @@ __kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
 	cpu = smp_processor_id();
 
 	if (in_nmi())
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);
+		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 	else
-		trace_buf = rcu_dereference(perf_trace_buf);
+		trace_buf = rcu_dereference_sched(perf_trace_buf);
 
 	if (!trace_buf)
 		goto err;
