about summary refs log tree commit diff stats
path: root/kernel/livepatch
diff options
context:
space:
mode:
author	Linus Torvalds <torvalds@linux-foundation.org>	2017-06-21 15:02:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-06-21 15:02:48 -0400
commit	dcba71086e0d1abf4f00cd381530b11d0db7fa1d (patch)
tree	8f89180f41c3592529b9bfecb77e63a683b414fb /kernel/livepatch
parent	021f601980ccf07e65bc03e40d0321243f782d34 (diff)
parent	842c08846420baa619fe3cb8c9af538efdb89428 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching
Pull livepatching fix from Jiri Kosina:

 "Fix the way how livepatches are being stacked with respect to RCU,
  from Petr Mladek"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: Fix stacking of patches with respect to RCU
Diffstat (limited to 'kernel/livepatch')
-rw-r--r--	kernel/livepatch/patch.c	8
-rw-r--r--	kernel/livepatch/transition.c	36
2 files changed, 37 insertions, 7 deletions
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index f8269036bf0b..52c4e907c14b 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
59 59
60 ops = container_of(fops, struct klp_ops, fops); 60 ops = container_of(fops, struct klp_ops, fops);
61 61
62 rcu_read_lock(); 62 /*
63 * A variant of synchronize_sched() is used to allow patching functions
64 * where RCU is not watching, see klp_synchronize_transition().
65 */
66 preempt_disable_notrace();
63 67
64 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, 68 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
65 stack_node); 69 stack_node);
@@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
115 119
116 klp_arch_set_pc(regs, (unsigned long)func->new_func); 120 klp_arch_set_pc(regs, (unsigned long)func->new_func);
117unlock: 121unlock:
118 rcu_read_unlock(); 122 preempt_enable_notrace();
119} 123}
120 124
121/* 125/*
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index adc0cc64aa4b..b004a1fb6032 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -49,6 +49,28 @@ static void klp_transition_work_fn(struct work_struct *work)
49static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn); 49static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
50 50
51/* 51/*
52 * This function is just a stub to implement a hard force
53 * of synchronize_sched(). This requires synchronizing
54 * tasks even in userspace and idle.
55 */
56static void klp_sync(struct work_struct *work)
57{
58}
59
60/*
61 * We allow to patch also functions where RCU is not watching,
62 * e.g. before user_exit(). We can not rely on the RCU infrastructure
63 * to do the synchronization. Instead hard force the sched synchronization.
64 *
65 * This approach allows to use RCU functions for manipulating func_stack
66 * safely.
67 */
68static void klp_synchronize_transition(void)
69{
70 schedule_on_each_cpu(klp_sync);
71}
72
73/*
52 * The transition to the target patch state is complete. Clean up the data 74 * The transition to the target patch state is complete. Clean up the data
53 * structures. 75 * structures.
54 */ 76 */
@@ -73,7 +95,7 @@ static void klp_complete_transition(void)
73 * func->transition gets cleared, the handler may choose a 95 * func->transition gets cleared, the handler may choose a
74 * removed function. 96 * removed function.
75 */ 97 */
76 synchronize_rcu(); 98 klp_synchronize_transition();
77 } 99 }
78 100
79 if (klp_transition_patch->immediate) 101 if (klp_transition_patch->immediate)
@@ -92,7 +114,7 @@ static void klp_complete_transition(void)
92 114
93 /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ 115 /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
94 if (klp_target_state == KLP_PATCHED) 116 if (klp_target_state == KLP_PATCHED)
95 synchronize_rcu(); 117 klp_synchronize_transition();
96 118
97 read_lock(&tasklist_lock); 119 read_lock(&tasklist_lock);
98 for_each_process_thread(g, task) { 120 for_each_process_thread(g, task) {
@@ -136,7 +158,11 @@ void klp_cancel_transition(void)
136 */ 158 */
137void klp_update_patch_state(struct task_struct *task) 159void klp_update_patch_state(struct task_struct *task)
138{ 160{
139 rcu_read_lock(); 161 /*
162 * A variant of synchronize_sched() is used to allow patching functions
163 * where RCU is not watching, see klp_synchronize_transition().
164 */
165 preempt_disable_notrace();
140 166
141 /* 167 /*
142 * This test_and_clear_tsk_thread_flag() call also serves as a read 168 * This test_and_clear_tsk_thread_flag() call also serves as a read
@@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task)
153 if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING)) 179 if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
154 task->patch_state = READ_ONCE(klp_target_state); 180 task->patch_state = READ_ONCE(klp_target_state);
155 181
156 rcu_read_unlock(); 182 preempt_enable_notrace();
157} 183}
158 184
159/* 185/*
@@ -539,7 +565,7 @@ void klp_reverse_transition(void)
539 clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING); 565 clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
540 566
541 /* Let any remaining calls to klp_update_patch_state() complete */ 567 /* Let any remaining calls to klp_update_patch_state() complete */
542 synchronize_rcu(); 568 klp_synchronize_transition();
543 569
544 klp_start_transition(); 570 klp_start_transition();
545} 571}