diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-26 16:07:19 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-26 16:07:19 -0500 |
commit | 792bf4d871dea8b69be2aaabdd320d7c6ed15985 (patch) | |
tree | 8cec3755ff6df5f82b12420fb6ad6a4d531ebfd1 /kernel/trace/ftrace.c | |
parent | eed9688f8513189295887e5a27ec7f576754b60e (diff) | |
parent | 4bbfd7467cfc7d42e18d3008fa6a28ffd56e901a (diff) |
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU updates from Ingo Molnar:
"The biggest RCU changes in this cycle were:
- Convert RCU's BUG_ON() and similar calls to WARN_ON() and similar.
- Replace calls of RCU-bh and RCU-sched update-side functions to
their vanilla RCU counterparts. This series is a step towards
complete removal of the RCU-bh and RCU-sched update-side functions.
( Note that some of these conversions are going upstream via their
respective maintainers. )
- Documentation updates, including a number of flavor-consolidation
updates from Joel Fernandes.
- Miscellaneous fixes.
- Automate generation of the initrd filesystem used for rcutorture
testing.
- Convert spin_is_locked() assertions to instead use lockdep.
( Note that some of these conversions are going upstream via their
respective maintainers. )
- SRCU updates, especially including a fix from Dennis Krein for a
bag-on-head-class bug.
- RCU torture-test updates"
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (112 commits)
rcutorture: Don't do busted forward-progress testing
rcutorture: Use 100ms buckets for forward-progress callback histograms
rcutorture: Recover from OOM during forward-progress tests
rcutorture: Print forward-progress test age upon failure
rcutorture: Print time since GP end upon forward-progress failure
rcutorture: Print histogram of CB invocation at OOM time
rcutorture: Print GP age upon forward-progress failure
rcu: Print per-CPU callback counts for forward-progress failures
rcu: Account for nocb-CPU callback counts in RCU CPU stall warnings
rcutorture: Dump grace-period diagnostics upon forward-progress OOM
rcutorture: Prepare for asynchronous access to rcu_fwd_startat
torture: Remove unnecessary "ret" variables
rcutorture: Affinity forward-progress test to avoid housekeeping CPUs
rcutorture: Break up too-long rcu_torture_fwd_prog() function
rcutorture: Remove cbflood facility
torture: Bring any extra CPUs online during kernel startup
rcutorture: Add call_rcu() flooding forward-progress tests
rcutorture/formal: Replace synchronize_sched() with synchronize_rcu()
tools/kernel.h: Replace synchronize_sched() with synchronize_rcu()
net/decnet: Replace rcu_barrier_bh() with rcu_barrier()
...
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r-- | kernel/trace/ftrace.c | 24 |
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index e23eb9fc77aa..f0ff24173a0b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -173,7 +173,7 @@ static void ftrace_sync(struct work_struct *work) | |||
173 | { | 173 | { |
174 | /* | 174 | /* |
175 | * This function is just a stub to implement a hard force | 175 | * This function is just a stub to implement a hard force |
176 | * of synchronize_sched(). This requires synchronizing | 176 | * of synchronize_rcu(). This requires synchronizing |
177 | * tasks even in userspace and idle. | 177 | * tasks even in userspace and idle. |
178 | * | 178 | * |
179 | * Yes, function tracing is rude. | 179 | * Yes, function tracing is rude. |
@@ -934,7 +934,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf, | |||
934 | ftrace_profile_enabled = 0; | 934 | ftrace_profile_enabled = 0; |
935 | /* | 935 | /* |
936 | * unregister_ftrace_profiler calls stop_machine | 936 | * unregister_ftrace_profiler calls stop_machine |
937 | * so this acts like an synchronize_sched. | 937 | * so this acts like an synchronize_rcu. |
938 | */ | 938 | */ |
939 | unregister_ftrace_profiler(); | 939 | unregister_ftrace_profiler(); |
940 | } | 940 | } |
@@ -1086,7 +1086,7 @@ struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr) | |||
1086 | 1086 | ||
1087 | /* | 1087 | /* |
1088 | * Some of the ops may be dynamically allocated, | 1088 | * Some of the ops may be dynamically allocated, |
1089 | * they are freed after a synchronize_sched(). | 1089 | * they are freed after a synchronize_rcu(). |
1090 | */ | 1090 | */ |
1091 | preempt_disable_notrace(); | 1091 | preempt_disable_notrace(); |
1092 | 1092 | ||
@@ -1286,7 +1286,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash) | |||
1286 | { | 1286 | { |
1287 | if (!hash || hash == EMPTY_HASH) | 1287 | if (!hash || hash == EMPTY_HASH) |
1288 | return; | 1288 | return; |
1289 | call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); | 1289 | call_rcu(&hash->rcu, __free_ftrace_hash_rcu); |
1290 | } | 1290 | } |
1291 | 1291 | ||
1292 | void ftrace_free_filter(struct ftrace_ops *ops) | 1292 | void ftrace_free_filter(struct ftrace_ops *ops) |
@@ -1501,7 +1501,7 @@ static bool hash_contains_ip(unsigned long ip, | |||
1501 | * the ip is not in the ops->notrace_hash. | 1501 | * the ip is not in the ops->notrace_hash. |
1502 | * | 1502 | * |
1503 | * This needs to be called with preemption disabled as | 1503 | * This needs to be called with preemption disabled as |
1504 | * the hashes are freed with call_rcu_sched(). | 1504 | * the hashes are freed with call_rcu(). |
1505 | */ | 1505 | */ |
1506 | static int | 1506 | static int |
1507 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) | 1507 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
@@ -4496,7 +4496,7 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, | |||
4496 | if (ftrace_enabled && !ftrace_hash_empty(hash)) | 4496 | if (ftrace_enabled && !ftrace_hash_empty(hash)) |
4497 | ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, | 4497 | ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, |
4498 | &old_hash_ops); | 4498 | &old_hash_ops); |
4499 | synchronize_sched(); | 4499 | synchronize_rcu(); |
4500 | 4500 | ||
4501 | hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { | 4501 | hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { |
4502 | hlist_del(&entry->hlist); | 4502 | hlist_del(&entry->hlist); |
@@ -5314,7 +5314,7 @@ ftrace_graph_release(struct inode *inode, struct file *file) | |||
5314 | mutex_unlock(&graph_lock); | 5314 | mutex_unlock(&graph_lock); |
5315 | 5315 | ||
5316 | /* Wait till all users are no longer using the old hash */ | 5316 | /* Wait till all users are no longer using the old hash */ |
5317 | synchronize_sched(); | 5317 | synchronize_rcu(); |
5318 | 5318 | ||
5319 | free_ftrace_hash(old_hash); | 5319 | free_ftrace_hash(old_hash); |
5320 | } | 5320 | } |
@@ -5708,7 +5708,7 @@ void ftrace_release_mod(struct module *mod) | |||
5708 | list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { | 5708 | list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { |
5709 | if (mod_map->mod == mod) { | 5709 | if (mod_map->mod == mod) { |
5710 | list_del_rcu(&mod_map->list); | 5710 | list_del_rcu(&mod_map->list); |
5711 | call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map); | 5711 | call_rcu(&mod_map->rcu, ftrace_free_mod_map); |
5712 | break; | 5712 | break; |
5713 | } | 5713 | } |
5714 | } | 5714 | } |
@@ -5928,7 +5928,7 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, | |||
5928 | struct ftrace_mod_map *mod_map; | 5928 | struct ftrace_mod_map *mod_map; |
5929 | const char *ret = NULL; | 5929 | const char *ret = NULL; |
5930 | 5930 | ||
5931 | /* mod_map is freed via call_rcu_sched() */ | 5931 | /* mod_map is freed via call_rcu() */ |
5932 | preempt_disable(); | 5932 | preempt_disable(); |
5933 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { | 5933 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { |
5934 | ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); | 5934 | ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); |
@@ -6263,7 +6263,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |||
6263 | 6263 | ||
6264 | /* | 6264 | /* |
6265 | * Some of the ops may be dynamically allocated, | 6265 | * Some of the ops may be dynamically allocated, |
6266 | * they must be freed after a synchronize_sched(). | 6266 | * they must be freed after a synchronize_rcu(). |
6267 | */ | 6267 | */ |
6268 | preempt_disable_notrace(); | 6268 | preempt_disable_notrace(); |
6269 | 6269 | ||
@@ -6434,7 +6434,7 @@ static void clear_ftrace_pids(struct trace_array *tr) | |||
6434 | rcu_assign_pointer(tr->function_pids, NULL); | 6434 | rcu_assign_pointer(tr->function_pids, NULL); |
6435 | 6435 | ||
6436 | /* Wait till all users are no longer using pid filtering */ | 6436 | /* Wait till all users are no longer using pid filtering */ |
6437 | synchronize_sched(); | 6437 | synchronize_rcu(); |
6438 | 6438 | ||
6439 | trace_free_pid_list(pid_list); | 6439 | trace_free_pid_list(pid_list); |
6440 | } | 6440 | } |
@@ -6581,7 +6581,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf, | |||
6581 | rcu_assign_pointer(tr->function_pids, pid_list); | 6581 | rcu_assign_pointer(tr->function_pids, pid_list); |
6582 | 6582 | ||
6583 | if (filtered_pids) { | 6583 | if (filtered_pids) { |
6584 | synchronize_sched(); | 6584 | synchronize_rcu(); |
6585 | trace_free_pid_list(filtered_pids); | 6585 | trace_free_pid_list(filtered_pids); |
6586 | } else if (pid_list) { | 6586 | } else if (pid_list) { |
6587 | /* Register a probe to set whether to ignore the tracing of a task */ | 6587 | /* Register a probe to set whether to ignore the tracing of a task */ |