Diffstat (limited to 'kernel/livepatch/patch.c')
-rw-r--r--	kernel/livepatch/patch.c	272
1 file changed, 272 insertions, 0 deletions
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
new file mode 100644
index 000000000000..f8269036bf0b
--- /dev/null
+++ b/kernel/livepatch/patch.c
@@ -0,0 +1,272 @@
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
21 | |||
22 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
23 | |||
24 | #include <linux/livepatch.h> | ||
25 | #include <linux/list.h> | ||
26 | #include <linux/ftrace.h> | ||
27 | #include <linux/rculist.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/bug.h> | ||
30 | #include <linux/printk.h> | ||
31 | #include "patch.h" | ||
32 | #include "transition.h" | ||
33 | |||
static LIST_HEAD(klp_ops);

struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_sched() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

void klp_unpatch_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->patched)
			klp_unpatch_func(func);

	obj->patched = false;
}

int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_unpatch_object(obj);
}
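
For context, the klp_patch_object()/klp_unpatch_object() entry points above are driven by the livepatch core on behalf of a patch module. The following minimal module is a sketch modeled on the kernel's samples/livepatch/livepatch-sample.c for this kernel era; the replaced function (cmdline_proc_show) and the klp_register_patch()/klp_enable_patch() registration flow come from that sample, not from this commit:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

/* Replacement for cmdline_proc_show(); same signature as the original. */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* name being NULL means vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	int ret;

	ret = klp_register_patch(&patch);
	if (ret)
		return ret;

	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}

	return 0;
}

static void livepatch_exit(void)
{
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");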