author		Paul E. McKenney <paulmck@linux.ibm.com>	2018-11-06 21:44:52 -0500
committer	Paul E. McKenney <paulmck@linux.ibm.com>	2018-11-27 12:21:41 -0500
commit		7440172974e85b1828bdd84ac6b23b5bcad9c5eb (patch)
tree		876817c8e09a9f55ffac2ae0fd4ad0d53040100f /kernel/trace/ftrace.c
parent		c93ffc15cceb057924410f9178e679120ee12353 (diff)
tracing: Replace synchronize_sched() and call_rcu_sched()
Now that synchronize_rcu() waits for preempt-disable regions of code
as well as RCU read-side critical sections, synchronize_sched() can
be replaced by synchronize_rcu(). Similarly, call_rcu_sched() can be
replaced by call_rcu(). This commit therefore makes these changes.
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: <linux-kernel@vger.kernel.org>
Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
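
[Illustrative sketch, not part of the commit.] The substitution is safe because, after the RCU flavor consolidation this commit message refers to, a preempt-disabled region counts as a full RCU read-side critical section, so synchronize_rcu() and call_rcu() now wait for such readers too. Below is a minimal sketch of the reader/updater pattern the ftrace call sites rely on; struct my_hash, current_hash, reader(), update_hash(), and my_free_hash_rcu() are hypothetical names, while the RCU and preemption APIs are the real kernel ones.

	#include <linux/kernel.h>
	#include <linux/preempt.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	/* Hypothetical stand-in for a structure such as struct ftrace_hash. */
	struct my_hash {
		struct rcu_head rcu;
		/* ... payload ... */
	};

	static struct my_hash __rcu *current_hash;

	/* Reader: a preempt-disabled region is now a full RCU read-side
	 * critical section, so it holds off call_rcu() callbacks. */
	static void reader(void)
	{
		struct my_hash *hash;

		preempt_disable();
		hash = rcu_dereference_sched(current_hash);
		/* ... look things up in hash ... */
		preempt_enable();
	}

	static void my_free_hash_rcu(struct rcu_head *rhp)
	{
		kfree(container_of(rhp, struct my_hash, rcu));
	}

	/* Updater: publish the new hash, then free the old one only after
	 * all pre-existing readers, including preempt-disabled ones, have
	 * finished.  Before the consolidation this had to use
	 * call_rcu_sched() to cover the preempt-disabled readers. */
	static void update_hash(struct my_hash *new_hash)
	{
		struct my_hash *old = rcu_dereference_protected(current_hash, 1);

		rcu_assign_pointer(current_hash, new_hash);
		if (old)
			call_rcu(&old->rcu, my_free_hash_rcu);
	}

Previously, only synchronize_sched()/call_rcu_sched() were guaranteed to wait for a reader like the one above; the consolidated grace period covers both reader styles, which is why each call site in the diff below can switch flavor without changing its locking.
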
Diffstat (limited to 'kernel/trace/ftrace.c')
 -rw-r--r--  kernel/trace/ftrace.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f536f601bd46..5b4f73e4fd56 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -173,7 +173,7 @@ static void ftrace_sync(struct work_struct *work)
 {
 	/*
 	 * This function is just a stub to implement a hard force
-	 * of synchronize_sched(). This requires synchronizing
+	 * of synchronize_rcu(). This requires synchronizing
 	 * tasks even in userspace and idle.
 	 *
 	 * Yes, function tracing is rude.
@@ -934,7 +934,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
 		ftrace_profile_enabled = 0;
 		/*
 		 * unregister_ftrace_profiler calls stop_machine
-		 * so this acts like an synchronize_sched.
+		 * so this acts like an synchronize_rcu.
 		 */
 		unregister_ftrace_profiler();
 	}
@@ -1086,7 +1086,7 @@ struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
 
 	/*
 	 * Some of the ops may be dynamically allocated,
-	 * they are freed after a synchronize_sched().
+	 * they are freed after a synchronize_rcu().
 	 */
 	preempt_disable_notrace();
 
@@ -1286,7 +1286,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 {
 	if (!hash || hash == EMPTY_HASH)
 		return;
-	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
+	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
 }
 
 void ftrace_free_filter(struct ftrace_ops *ops)
@@ -1501,7 +1501,7 @@ static bool hash_contains_ip(unsigned long ip,
  * the ip is not in the ops->notrace_hash.
  *
  * This needs to be called with preemption disabled as
- * the hashes are freed with call_rcu_sched().
+ * the hashes are freed with call_rcu().
  */
 static int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
@@ -4496,7 +4496,7 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
 	if (ftrace_enabled && !ftrace_hash_empty(hash))
 		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
 				       &old_hash_ops);
-	synchronize_sched();
+	synchronize_rcu();
 
 	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
 		hlist_del(&entry->hlist);
@@ -5314,7 +5314,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 		mutex_unlock(&graph_lock);
 
 		/* Wait till all users are no longer using the old hash */
-		synchronize_sched();
+		synchronize_rcu();
 
 		free_ftrace_hash(old_hash);
 	}
@@ -5707,7 +5707,7 @@ void ftrace_release_mod(struct module *mod)
 	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
 		if (mod_map->mod == mod) {
 			list_del_rcu(&mod_map->list);
-			call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
+			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
 			break;
 		}
 	}
@@ -5927,7 +5927,7 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
 	struct ftrace_mod_map *mod_map;
 	const char *ret = NULL;
 
-	/* mod_map is freed via call_rcu_sched() */
+	/* mod_map is freed via call_rcu() */
 	preempt_disable();
 	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
 		ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
@@ -6262,7 +6262,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 
 	/*
 	 * Some of the ops may be dynamically allocated,
-	 * they must be freed after a synchronize_sched().
+	 * they must be freed after a synchronize_rcu().
 	 */
 	preempt_disable_notrace();
 
@@ -6433,7 +6433,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
 	rcu_assign_pointer(tr->function_pids, NULL);
 
 	/* Wait till all users are no longer using pid filtering */
-	synchronize_sched();
+	synchronize_rcu();
 
 	trace_free_pid_list(pid_list);
 }
@@ -6580,7 +6580,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
 	rcu_assign_pointer(tr->function_pids, pid_list);
 
 	if (filtered_pids) {
-		synchronize_sched();
+		synchronize_rcu();
 		trace_free_pid_list(filtered_pids);
 	} else if (pid_list) {
 		/* Register a probe to set whether to ignore the tracing of a task */