author		Paul E. McKenney <paulmck@linux.ibm.com>	2018-11-07 17:16:57 -0500
committer	Paul E. McKenney <paulmck@linux.ibm.com>	2018-12-01 15:38:50 -0500
commit		6932689e4145f545062ca8c86cf76f38854d63d0
tree		49aaaa1e34aab31830da715e073d4510880030cc
parent		2af3024cd78f120d027cb44b454186ba9d7dab24
livepatch: Replace synchronize_sched() with synchronize_rcu()
Now that synchronize_rcu() waits for preempt-disable regions of code
as well as RCU read-side critical sections, synchronize_sched() can be
replaced by synchronize_rcu(). This commit therefore makes this change,
even though the change affects only comments.
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
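
For context, the guarantee the commit message relies on can be sketched with an ordinary reader/updater pair. The example below is illustrative only: struct foo, gp_ptr, reader() and updater() are made-up names, not code from this patch. The point is that, after the RCU flavors were consolidated, a preempt_disable() region counts as a read-side critical section for synchronize_rcu(), so callers that used synchronize_sched() for exactly this purpose can switch over.

/*
 * Illustrative sketch only: struct foo, gp_ptr, reader() and updater()
 * are hypothetical and are not part of this patch.
 */
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int val;
};

static struct foo __rcu *gp_ptr;

static void reader(void)
{
	struct foo *p;

	preempt_disable();	/* Now also a synchronize_rcu() read side. */
	p = rcu_dereference_sched(gp_ptr);
	if (p)
		pr_info("val=%d\n", p->val);
	preempt_enable();
}

static void updater(struct foo *newp)
{
	struct foo *oldp;

	oldp = rcu_dereference_protected(gp_ptr, 1);
	rcu_assign_pointer(gp_ptr, newp);
	synchronize_rcu();	/* Formerly would have been synchronize_sched(). */
	kfree(oldp);
}

Before the flavors were merged, the update side of such a reader needed synchronize_sched(); now rcu_read_lock()/rcu_read_unlock() readers and preempt-disable readers are waited on by the same grace period.
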
 kernel/livepatch/patch.c      | 4 ++--
 kernel/livepatch/transition.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 82d584225dc6..7702cb4064fc 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -61,7 +61,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 	ops = container_of(fops, struct klp_ops, fops);
 
 	/*
-	 * A variant of synchronize_sched() is used to allow patching functions
+	 * A variant of synchronize_rcu() is used to allow patching functions
 	 * where RCU is not watching, see klp_synchronize_transition().
 	 */
 	preempt_disable_notrace();
@@ -72,7 +72,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 	/*
 	 * func should never be NULL because preemption should be disabled here
 	 * and unregister_ftrace_function() does the equivalent of a
-	 * synchronize_sched() before the func_stack removal.
+	 * synchronize_rcu() before the func_stack removal.
 	 */
 	if (WARN_ON_ONCE(!func))
 		goto unlock;
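
The two comments updated above describe the read side of this scheme. A condensed, hypothetical sketch of that pattern follows; struct ex_func, ex_stack, ex_resolve() and ex_unpatch() are invented names standing in for the livepatch internals, not the actual handler code.

/*
 * Hypothetical sketch of the pattern the patch.c comments describe;
 * the ex_* names are made up for illustration.
 */
#include <linux/list.h>
#include <linux/preempt.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct ex_func {
	struct list_head stack_node;
	unsigned long new_addr;
};

static LIST_HEAD(ex_stack);

/* Read side: runs from an ftrace handler, possibly where RCU is not watching. */
static unsigned long ex_resolve(void)
{
	struct ex_func *func;
	unsigned long addr = 0;

	preempt_disable_notrace();
	func = list_first_or_null_rcu(&ex_stack, struct ex_func, stack_node);
	if (func)
		addr = func->new_addr;
	preempt_enable_notrace();

	return addr;
}

/* Update side: wait out preempt-disabled readers before freeing. */
static void ex_unpatch(struct ex_func *func)
{
	list_del_rcu(&func->stack_node);
	synchronize_rcu();	/* Livepatch itself uses klp_synchronize_transition(). */
	kfree(func);
}

Because the reader relies only on disabled preemption, the grace period on the update side must be one that waits for preempt-disable regions, which synchronize_rcu() now does.
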
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 5bc349805e03..304d5eb8a98c 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -52,7 +52,7 @@ static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
 
 /*
  * This function is just a stub to implement a hard force
- * of synchronize_sched(). This requires synchronizing
+ * of synchronize_rcu(). This requires synchronizing
  * tasks even in userspace and idle.
  */
 static void klp_sync(struct work_struct *work)
@@ -175,7 +175,7 @@ void klp_cancel_transition(void)
 void klp_update_patch_state(struct task_struct *task)
 {
 	/*
-	 * A variant of synchronize_sched() is used to allow patching functions
+	 * A variant of synchronize_rcu() is used to allow patching functions
 	 * where RCU is not watching, see klp_synchronize_transition().
 	 */
 	preempt_disable_notrace();
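
For reference, the "hard force" mentioned in the transition.c comment is implemented by scheduling a no-op work item on every CPU, which drags each CPU through the scheduler and thus past any preempt-disabled or RCU-not-watching section. The sketch below is reproduced from memory for context and is not part of this diff; see kernel/livepatch/transition.c for the authoritative version.

#include <linux/workqueue.h>

/* No-op work function; its only purpose is to force a schedule on each CPU. */
static void klp_sync(struct work_struct *work)
{
}

/*
 * Wait for every CPU, including those running in userspace or idle,
 * to pass through the scheduler at least once.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}
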