path: root/kernel/trace/ftrace.c
author    Ingo Molnar <mingo@kernel.org>  2018-12-04 01:52:30 -0500
committer Ingo Molnar <mingo@kernel.org>  2018-12-04 01:52:30 -0500
commit    4bbfd7467cfc7d42e18d3008fa6a28ffd56e901a (patch)
tree      3b6d27e740976d0393fd13ae675ae6a0e07812a9 /kernel/trace/ftrace.c
parent    2595646791c319cadfdbf271563aac97d0843dc7 (diff)
parent    5ac7cdc29897e5fc3f5e214f3f8c8b03ef8d7029 (diff)
Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU changes from Paul E. McKenney:

- Convert RCU's BUG_ON() and similar calls to WARN_ON() and similar.

- Replace calls of RCU-bh and RCU-sched update-side functions to their
  vanilla RCU counterparts. This series is a step towards complete
  removal of the RCU-bh and RCU-sched update-side functions.

  (Note that some of these conversions are going upstream via their
  respective maintainers.)

- Documentation updates, including a number of flavor-consolidation
  updates from Joel Fernandes.

- Miscellaneous fixes.

- Automate generation of the initrd filesystem used for rcutorture testing.

- Convert spin_is_locked() assertions to instead use lockdep.

  (Note that some of these conversions are going upstream via their
  respective maintainers.)

- SRCU updates, especially including a fix from Dennis Krein for a
  bag-on-head-class bug.

- RCU torture-test updates.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
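[Editor's note] The second item above is the one that touches this file: every
RCU-sched update-side call becomes its vanilla RCU counterpart. For readers
unfamiliar with the pattern being converted, here is a minimal deferred-free
sketch in the same style; struct foo, free_foo_rcu() and release_foo() are
hypothetical names for illustration, not taken from this patch.

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int data;
		struct rcu_head rcu;	/* embedded handle for call_rcu() */
	};

	static void free_foo_rcu(struct rcu_head *head)
	{
		/* Runs once all pre-existing RCU readers have finished. */
		kfree(container_of(head, struct foo, rcu));
	}

	static void release_foo(struct foo *f)
	{
		/*
		 * Before flavor consolidation this would have been
		 * call_rcu_sched(&f->rcu, free_foo_rcu); with the
		 * consolidated grace period, plain call_rcu() suffices.
		 */
		call_rcu(&f->rcu, free_foo_rcu);
	}

This mirrors what the free_ftrace_hash_rcu() and ftrace_release_mod() hunks
below do to ftrace's own structures.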
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 77734451cb05..c375e33239f7 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -173,7 +173,7 @@ static void ftrace_sync(struct work_struct *work)
 {
 	/*
 	 * This function is just a stub to implement a hard force
-	 * of synchronize_sched(). This requires synchronizing
+	 * of synchronize_rcu(). This requires synchronizing
 	 * tasks even in userspace and idle.
 	 *
 	 * Yes, function tracing is rude.
@@ -934,7 +934,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
 		ftrace_profile_enabled = 0;
 		/*
 		 * unregister_ftrace_profiler calls stop_machine
-		 * so this acts like an synchronize_sched.
+		 * so this acts like an synchronize_rcu.
 		 */
 		unregister_ftrace_profiler();
 	}
@@ -1086,7 +1086,7 @@ struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
 
 	/*
 	 * Some of the ops may be dynamically allocated,
-	 * they are freed after a synchronize_sched().
+	 * they are freed after a synchronize_rcu().
 	 */
 	preempt_disable_notrace();
 
@@ -1286,7 +1286,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 {
 	if (!hash || hash == EMPTY_HASH)
 		return;
-	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
+	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
 }
 
 void ftrace_free_filter(struct ftrace_ops *ops)
@@ -1501,7 +1501,7 @@ static bool hash_contains_ip(unsigned long ip,
  * the ip is not in the ops->notrace_hash.
  *
  * This needs to be called with preemption disabled as
- * the hashes are freed with call_rcu_sched().
+ * the hashes are freed with call_rcu().
  */
 static int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
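[Editor's note] The comment in the hunk above leans on the consolidated RCU
semantics that make this whole conversion safe: after the flavor merge, a
region with preemption disabled is honored as a read-side critical section by
synchronize_rcu() and call_rcu(). A minimal reader/updater pairing under that
assumption, reusing the hypothetical struct foo from the earlier sketch (slot
and the update-side locking are likewise illustrative, not from this file):

	#include <linux/preempt.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	/* Reader: preemption disabled stands in for rcu_read_lock(). */
	static int read_foo(struct foo __rcu **slot)
	{
		struct foo *f;
		int val;

		preempt_disable();
		f = rcu_dereference_sched(*slot); /* legal while preemption is off */
		val = f ? f->data : -1;
		preempt_enable();
		return val;
	}

	/* Updater: publish the replacement, wait out the readers, then free. */
	static void replace_foo(struct foo __rcu **slot, struct foo *newf)
	{
		struct foo *old;

		old = rcu_dereference_protected(*slot, 1); /* caller serializes updates */
		rcu_assign_pointer(*slot, newf);
		synchronize_rcu(); /* pre-consolidation code needed synchronize_sched() here */
		kfree(old);
	}

ftrace_ops_test() plays the reader role, with its callers holding
preempt_disable(), while free_ftrace_hash_rcu() above is the call_rcu()-based
variant of the updater.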
@@ -4496,7 +4496,7 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
 	if (ftrace_enabled && !ftrace_hash_empty(hash))
 		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
 				       &old_hash_ops);
-	synchronize_sched();
+	synchronize_rcu();
 
 	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
 		hlist_del(&entry->hlist);
@@ -5314,7 +5314,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 		mutex_unlock(&graph_lock);
 
 		/* Wait till all users are no longer using the old hash */
-		synchronize_sched();
+		synchronize_rcu();
 
 		free_ftrace_hash(old_hash);
 	}
@@ -5707,7 +5707,7 @@ void ftrace_release_mod(struct module *mod)
 	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
 		if (mod_map->mod == mod) {
 			list_del_rcu(&mod_map->list);
-			call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
+			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
 			break;
 		}
 	}
@@ -5927,7 +5927,7 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
 	struct ftrace_mod_map *mod_map;
 	const char *ret = NULL;
 
-	/* mod_map is freed via call_rcu_sched() */
+	/* mod_map is freed via call_rcu() */
 	preempt_disable();
 	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
 		ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
@@ -6262,7 +6262,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 
 	/*
 	 * Some of the ops may be dynamically allocated,
-	 * they must be freed after a synchronize_sched().
+	 * they must be freed after a synchronize_rcu().
 	 */
 	preempt_disable_notrace();
 
@@ -6433,7 +6433,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
 	rcu_assign_pointer(tr->function_pids, NULL);
 
 	/* Wait till all users are no longer using pid filtering */
-	synchronize_sched();
+	synchronize_rcu();
 
 	trace_free_pid_list(pid_list);
 }
@@ -6580,7 +6580,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
 	rcu_assign_pointer(tr->function_pids, pid_list);
 
 	if (filtered_pids) {
-		synchronize_sched();
+		synchronize_rcu();
 		trace_free_pid_list(filtered_pids);
 	} else if (pid_list) {
 		/* Register a probe to set whether to ignore the tracing of a task */