author		Paul E. McKenney <paulmck@linux.ibm.com>	2018-11-06 21:44:52 -0500
committer	Paul E. McKenney <paulmck@linux.ibm.com>	2018-11-27 12:21:41 -0500
commit		7440172974e85b1828bdd84ac6b23b5bcad9c5eb (patch)
tree		876817c8e09a9f55ffac2ae0fd4ad0d53040100f
parent		c93ffc15cceb057924410f9178e679120ee12353 (diff)
tracing: Replace synchronize_sched() and call_rcu_sched()
Now that synchronize_rcu() waits for preempt-disable regions of code as well
as RCU read-side critical sections, synchronize_sched() can be replaced by
synchronize_rcu(). Similarly, call_rcu_sched() can be replaced by call_rcu().
This commit therefore makes these changes.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: <linux-kernel@vger.kernel.org>
Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
-rw-r--r--  include/linux/tracepoint.h          |  2
-rw-r--r--  kernel/trace/ftrace.c               | 24
-rw-r--r--  kernel/trace/ring_buffer.c          | 12
-rw-r--r--  kernel/trace/trace.c                | 10
-rw-r--r--  kernel/trace/trace_events_filter.c  |  4
-rw-r--r--  kernel/trace/trace_kprobe.c         |  2
-rw-r--r--  kernel/tracepoint.c                 |  4
7 files changed, 29 insertions, 29 deletions
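As a minimal sketch of the pattern this change relies on (the names probe_data, read_probe_val(), replace_probe() and probe_free_cb() below are hypothetical, invented for illustration and not taken from the patch): under the consolidated RCU implementation, a preempt-disabled region counts as an RCU read-side critical section, so an updater that previously needed call_rcu_sched()/synchronize_sched() to wait for such readers can now use call_rcu()/synchronize_rcu() directly.

/* Sketch only: assumes consolidated RCU, i.e. call_rcu()/synchronize_rcu()
 * also wait for preempt-disabled regions. All names are hypothetical. */
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct probe_data {
	struct rcu_head	rcu;
	int		val;
};

static struct probe_data __rcu *cur_probe;

/* Reader side: a preempt-disabled region, as used throughout ftrace. */
static int read_probe_val(void)
{
	struct probe_data *p;
	int val = -1;

	preempt_disable();
	p = rcu_dereference_sched(cur_probe);
	if (p)
		val = p->val;
	preempt_enable();
	return val;
}

static void probe_free_cb(struct rcu_head *head)
{
	kfree(container_of(head, struct probe_data, rcu));
}

/* Updater side: before the flavor consolidation this had to use
 * call_rcu_sched()/synchronize_sched() to wait for the reader above;
 * call_rcu()/synchronize_rcu() are now sufficient. */
static void replace_probe(struct probe_data *new)
{
	struct probe_data *old;

	old = rcu_dereference_protected(cur_probe, 1);
	rcu_assign_pointer(cur_probe, new);
	if (old)
		call_rcu(&old->rcu, probe_free_cb);
}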
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 538ba1a58f5b..432080b59c26 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -82,7 +82,7 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
 static inline void tracepoint_synchronize_unregister(void)
 {
 	synchronize_srcu(&tracepoint_srcu);
-	synchronize_sched();
+	synchronize_rcu();
 }
 #else
 static inline void tracepoint_synchronize_unregister(void)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f536f601bd46..5b4f73e4fd56 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -173,7 +173,7 @@ static void ftrace_sync(struct work_struct *work)
 {
 	/*
 	 * This function is just a stub to implement a hard force
-	 * of synchronize_sched(). This requires synchronizing
+	 * of synchronize_rcu(). This requires synchronizing
 	 * tasks even in userspace and idle.
 	 *
 	 * Yes, function tracing is rude.
@@ -934,7 +934,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
 			ftrace_profile_enabled = 0;
 			/*
 			 * unregister_ftrace_profiler calls stop_machine
-			 * so this acts like an synchronize_sched.
+			 * so this acts like an synchronize_rcu.
 			 */
 			unregister_ftrace_profiler();
 		}
@@ -1086,7 +1086,7 @@ struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
 
 	/*
 	 * Some of the ops may be dynamically allocated,
-	 * they are freed after a synchronize_sched().
+	 * they are freed after a synchronize_rcu().
 	 */
 	preempt_disable_notrace();
 
@@ -1286,7 +1286,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 {
 	if (!hash || hash == EMPTY_HASH)
 		return;
-	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
+	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
 }
 
 void ftrace_free_filter(struct ftrace_ops *ops)
@@ -1501,7 +1501,7 @@ static bool hash_contains_ip(unsigned long ip,
  * the ip is not in the ops->notrace_hash.
  *
  * This needs to be called with preemption disabled as
- * the hashes are freed with call_rcu_sched().
+ * the hashes are freed with call_rcu().
  */
 static int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
@@ -4496,7 +4496,7 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
 	if (ftrace_enabled && !ftrace_hash_empty(hash))
 		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
 				       &old_hash_ops);
-	synchronize_sched();
+	synchronize_rcu();
 
 	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
 		hlist_del(&entry->hlist);
@@ -5314,7 +5314,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 		mutex_unlock(&graph_lock);
 
 		/* Wait till all users are no longer using the old hash */
-		synchronize_sched();
+		synchronize_rcu();
 
 		free_ftrace_hash(old_hash);
 	}
@@ -5707,7 +5707,7 @@ void ftrace_release_mod(struct module *mod)
 	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
 		if (mod_map->mod == mod) {
 			list_del_rcu(&mod_map->list);
-			call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
+			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
 			break;
 		}
 	}
@@ -5927,7 +5927,7 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
 	struct ftrace_mod_map *mod_map;
 	const char *ret = NULL;
 
-	/* mod_map is freed via call_rcu_sched() */
+	/* mod_map is freed via call_rcu() */
 	preempt_disable();
 	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
 		ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
@@ -6262,7 +6262,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 
 	/*
 	 * Some of the ops may be dynamically allocated,
-	 * they must be freed after a synchronize_sched().
+	 * they must be freed after a synchronize_rcu().
 	 */
 	preempt_disable_notrace();
 
@@ -6433,7 +6433,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
 	rcu_assign_pointer(tr->function_pids, NULL);
 
 	/* Wait till all users are no longer using pid filtering */
-	synchronize_sched();
+	synchronize_rcu();
 
 	trace_free_pid_list(pid_list);
 }
@@ -6580,7 +6580,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
 	rcu_assign_pointer(tr->function_pids, pid_list);
 
 	if (filtered_pids) {
-		synchronize_sched();
+		synchronize_rcu();
 		trace_free_pid_list(filtered_pids);
 	} else if (pid_list) {
 		/* Register a probe to set whether to ignore the tracing of a task */
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 65bd4616220d..4f3247a53259 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1834,7 +1834,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 		 * There could have been a race between checking
 		 * record_disable and incrementing it.
 		 */
-		synchronize_sched();
+		synchronize_rcu();
 		for_each_buffer_cpu(buffer, cpu) {
 			cpu_buffer = buffer->buffers[cpu];
 			rb_check_pages(cpu_buffer);
@@ -3151,7 +3151,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
  * This prevents all writes to the buffer. Any attempt to write
  * to the buffer after this will fail and return NULL.
  *
- * The caller should call synchronize_sched() after this.
+ * The caller should call synchronize_rcu() after this.
  */
 void ring_buffer_record_disable(struct ring_buffer *buffer)
 {
@@ -3253,7 +3253,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
  * This prevents all writes to the buffer. Any attempt to write
  * to the buffer after this will fail and return NULL.
  *
- * The caller should call synchronize_sched() after this.
+ * The caller should call synchronize_rcu() after this.
  */
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 {
@@ -4191,7 +4191,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
 void
 ring_buffer_read_prepare_sync(void)
 {
-	synchronize_sched();
+	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
 
@@ -4363,7 +4363,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	atomic_inc(&cpu_buffer->record_disabled);
 
 	/* Make sure all commits have finished */
-	synchronize_sched();
+	synchronize_rcu();
 
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
@@ -4496,7 +4496,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 		goto out;
 
 	/*
-	 * We can't do a synchronize_sched here because this
+	 * We can't do a synchronize_rcu here because this
 	 * function can be called in atomic context.
 	 * Normally this will be called from the same CPU as cpu.
 	 * If not it's up to the caller to protect this.
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ff1c4b20cd0a..51612b4a603f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1681,7 +1681,7 @@ void tracing_reset(struct trace_buffer *buf, int cpu)
 	ring_buffer_record_disable(buffer);
 
 	/* Make sure all commits have finished */
-	synchronize_sched();
+	synchronize_rcu();
 	ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
@@ -1698,7 +1698,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
 	ring_buffer_record_disable(buffer);
 
 	/* Make sure all commits have finished */
-	synchronize_sched();
+	synchronize_rcu();
 
 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
 
@@ -2250,7 +2250,7 @@ void trace_buffered_event_disable(void)
 	preempt_enable();
 
 	/* Wait for all current users to finish */
-	synchronize_sched();
+	synchronize_rcu();
 
 	for_each_tracing_cpu(cpu) {
 		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
@@ -5398,7 +5398,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
 	if (tr->current_trace->reset)
 		tr->current_trace->reset(tr);
 
-	/* Current trace needs to be nop_trace before synchronize_sched */
+	/* Current trace needs to be nop_trace before synchronize_rcu */
 	tr->current_trace = &nop_trace;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -5412,7 +5412,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
 		 * The update_max_tr is called from interrupts disabled
 		 * so a synchronized_sched() is sufficient.
 		 */
-		synchronize_sched();
+		synchronize_rcu();
 		free_snapshot(tr);
 	}
 #endif
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 84a65173b1e9..35f3aa55be85 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1614,7 +1614,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir,
 
 	/*
 	 * The calls can still be using the old filters.
-	 * Do a synchronize_sched() and to ensure all calls are
+	 * Do a synchronize_rcu() and to ensure all calls are
 	 * done with them before we free them.
 	 */
 	tracepoint_synchronize_unregister();
@@ -1845,7 +1845,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
 	if (filter) {
 		/*
 		 * No event actually uses the system filter
-		 * we can free it without synchronize_sched().
+		 * we can free it without synchronize_rcu().
 		 */
 		__free_filter(system->filter);
 		system->filter = filter;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index fec67188c4d2..adc153ab51c0 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -333,7 +333,7 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 		 * event_call related objects, which will be accessed in
 		 * the kprobe_trace_func/kretprobe_trace_func.
 		 */
-		synchronize_sched();
+		synchronize_rcu();
 		kfree(link);	/* Ignored if link == NULL */
 	}
 
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index a3be42304485..46f2ab1e08a9 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -92,7 +92,7 @@ static __init int release_early_probes(void)
 	while (early_probes) {
 		tmp = early_probes;
 		early_probes = tmp->next;
-		call_rcu_sched(tmp, rcu_free_old_probes);
+		call_rcu(tmp, rcu_free_old_probes);
 	}
 
 	return 0;
@@ -123,7 +123,7 @@ static inline void release_probes(struct tracepoint_func *old)
 		 * cover both cases. So let us chain the SRCU and sched RCU
 		 * callbacks to wait for both grace periods.
 		 */
-		call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
+		call_rcu(&tp_probes->rcu, rcu_free_old_probes);
 	}
 }
 
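For the last hunk above, the "chaining" the comment refers to can be sketched roughly as follows. This is a hedged reconstruction, not quoted from the patch: it assumes tracepoint_srcu and a struct tp_probes with an embedded rcu_head, as used in kernel/tracepoint.c, and the actual helpers there may differ in detail. The call_rcu() callback re-queues the same rcu_head with call_srcu(), so the memory is freed only after both an RCU and an SRCU grace period have elapsed.

/* Hedged sketch of chained grace periods. */
static void srcu_free_old_probes(struct rcu_head *head)
{
	/* Runs after the SRCU grace period: both flavors have now elapsed. */
	kfree(container_of(head, struct tp_probes, rcu));
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	/* Runs after the RCU grace period; re-queue behind an SRCU one. */
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}

/* Release path, as in the hunk above:
 *	call_rcu(&tp_probes->rcu, rcu_free_old_probes);
 */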