path: root/kernel
author	Josh Poimboeuf <jpoimboe@redhat.com>	2015-01-20 10:26:19 -0500
committer	Jiri Kosina <jkosina@suse.cz>	2015-01-20 14:09:41 -0500
commit	3c33f5b99d688deafd21d4a770303691c7c3a320 (patch)
tree	1b133a30910364c15190202616f65c08d306f55e /kernel
parent	83a90bb1345767f0cb96d242fd8b9db44b2b0e17 (diff)
livepatch: support for repatching a function
Add support for patching a function multiple times. If multiple patches affect a function, the function in the most recently enabled patch "wins". This enables a cumulative patch upgrade path, where each patch is a superset of previous patches.

This requires restructuring the data a little bit. With the current design, where each klp_func struct has its own ftrace_ops, we'd have to unregister the old ops and then register the new ops, because FTRACE_OPS_FL_IPMODIFY prevents us from having two ops registered for the same function at the same time. That would leave a regression window where the function isn't patched at all (not good for a patch upgrade path).

This patch replaces the per-klp_func ftrace_ops with a global klp_ops list, with one ftrace_ops per original function. A single ftrace_ops is shared between all klp_funcs which have the same old_addr. This allows the switch between function versions to happen instantaneously by updating the klp_ops struct's func_stack list. The winner is the klp_func at the top of the func_stack (front of the list).

[ jkosina@suse.cz: turn WARN_ON() into WARN_ON_ONCE() in ftrace handler to avoid storm in pathological cases ]

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Reviewed-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
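[Editor's note] To make the "most recently enabled patch wins" rule concrete, here is a minimal userspace sketch of the per-function stack the commit message describes: enabling pushes a version on top, disabling pops it, and calls always resolve to whatever is on top. Plain C with illustrative names only; this is not kernel code.

#include <stdio.h>

struct func_version {
        const char *patch_name;        /* which patch provides this version */
        struct func_version *next;     /* next (older) version down the stack */
};

/* One stack per patched function; the entry on top "wins". */
static struct func_version *func_stack;

static void enable_version(struct func_version *v)
{
        v->next = func_stack;          /* push on top: becomes the winner */
        func_stack = v;
}

static void disable_version(void)
{
        if (func_stack)
                func_stack = func_stack->next;  /* older version wins again */
}

static const char *resolve(void)
{
        /* Mirrors the handler: redirect to the top entry, if any. */
        return func_stack ? func_stack->patch_name : "original function";
}

int main(void)
{
        struct func_version v1 = { "patch-1" }, v2 = { "patch-2" };

        printf("%s\n", resolve());     /* original function */
        enable_version(&v1);
        printf("%s\n", resolve());     /* patch-1 */
        enable_version(&v2);           /* cumulative upgrade: no unpatched window */
        printf("%s\n", resolve());     /* patch-2 */
        disable_version();
        printf("%s\n", resolve());     /* patch-1 wins again */
        return 0;
}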
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/livepatch/core.c | 170
1 file changed, 119 insertions(+), 51 deletions(-)
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 2401e7f955d3..bc05d390ce85 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -29,17 +29,53 @@
 #include <linux/kallsyms.h>
 #include <linux/livepatch.h>
 
+/**
+ * struct klp_ops - structure for tracking registered ftrace ops structs
+ *
+ * A single ftrace_ops is shared between all enabled replacement functions
+ * (klp_func structs) which have the same old_addr. This allows the switch
+ * between function versions to happen instantaneously by updating the klp_ops
+ * struct's func_stack list. The winner is the klp_func at the top of the
+ * func_stack (front of the list).
+ *
+ * @node:       node for the global klp_ops list
+ * @func_stack: list head for the stack of klp_func's (active func is on top)
+ * @fops:       registered ftrace ops struct
+ */
+struct klp_ops {
+        struct list_head node;
+        struct list_head func_stack;
+        struct ftrace_ops fops;
+};
+
 /*
- * The klp_mutex protects the klp_patches list and state transitions of any
- * structure reachable from the patches list. References to any structure must
- * be obtained under mutex protection.
+ * The klp_mutex protects the global lists and state transitions of any
+ * structure reachable from them. References to any structure must be obtained
+ * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
+ * ensure it gets consistent data).
  */
 static DEFINE_MUTEX(klp_mutex);
+
 static LIST_HEAD(klp_patches);
+static LIST_HEAD(klp_ops);
 
 static struct kobject *klp_root_kobj;
 
+static struct klp_ops *klp_find_ops(unsigned long old_addr)
+{
+        struct klp_ops *ops;
+        struct klp_func *func;
+
+        list_for_each_entry(ops, &klp_ops, node) {
+                func = list_first_entry(&ops->func_stack, struct klp_func,
+                                        stack_node);
+                if (func->old_addr == old_addr)
+                        return ops;
+        }
+
+        return NULL;
+}
+
 static bool klp_is_module(struct klp_object *obj)
 {
         return obj->name;
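[Editor's note] The hunk above introduces the two-level structure: a global klp_ops list, and a per-ops func_stack. klp_find_ops() keys its lookup off the top-of-stack entry's old_addr, which works because every klp_func on one stack patches the same original function. A userspace sketch of the same shape, with plain pointers standing in for the kernel's list_head machinery (names are illustrative, not the kernel API):

#include <stdio.h>

struct func {                          /* stand-in for struct klp_func */
        unsigned long old_addr;        /* address of the original function */
        struct func *next;             /* down the func_stack */
};

struct ops {                           /* stand-in for struct klp_ops */
        struct func *func_stack;       /* top of stack = active version */
        struct ops *next;              /* next entry on the global list */
};

/*
 * Like klp_find_ops(): key the lookup off the top-of-stack old_addr,
 * since every func on one stack patches the same original function.
 */
static struct ops *find_ops(struct ops *head, unsigned long old_addr)
{
        for (struct ops *o = head; o; o = o->next)
                if (o->func_stack && o->func_stack->old_addr == old_addr)
                        return o;
        return NULL;
}

int main(void)
{
        struct func f = { .old_addr = 0x1000 };
        struct ops o = { .func_stack = &f };

        printf("%s\n", find_ops(&o, 0x1000) ? "found" : "not found");
        return 0;
}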
@@ -267,16 +303,28 @@ static int klp_write_object_relocations(struct module *pmod,
 
 static void notrace klp_ftrace_handler(unsigned long ip,
                                        unsigned long parent_ip,
-                                       struct ftrace_ops *ops,
+                                       struct ftrace_ops *fops,
                                        struct pt_regs *regs)
 {
-        struct klp_func *func = ops->private;
+        struct klp_ops *ops;
+        struct klp_func *func;
+
+        ops = container_of(fops, struct klp_ops, fops);
+
+        rcu_read_lock();
+        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
+                                      stack_node);
+        rcu_read_unlock();
+
+        if (WARN_ON_ONCE(!func))
+                return;
 
         klp_arch_set_pc(regs, (unsigned long)func->new_func);
 }
 
 static int klp_disable_func(struct klp_func *func)
 {
+        struct klp_ops *ops;
         int ret;
 
         if (WARN_ON(func->state != KLP_ENABLED))
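[Editor's note] Because the ftrace_ops is now embedded in struct klp_ops, the handler no longer needs ops->private: it recovers the enclosing klp_ops from the fops pointer with container_of(). A self-contained userspace demonstration of that pattern; container_of() is a kernel macro, redefined locally here for illustration, and the struct names are hypothetical:

#include <stdio.h>
#include <stddef.h>

/* Simplified version of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_fops { int dummy; };

struct fake_klp_ops {
        int id;
        struct fake_fops fops;  /* embedded, like fops in struct klp_ops */
};

/* A callback that, like klp_ftrace_handler(), only receives &ops->fops. */
static void handler(struct fake_fops *fops)
{
        struct fake_klp_ops *ops =
                container_of(fops, struct fake_klp_ops, fops);

        printf("recovered klp_ops with id %d\n", ops->id);
}

int main(void)
{
        struct fake_klp_ops ops = { .id = 42 };

        handler(&ops.fops);     /* prints: recovered klp_ops with id 42 */
        return 0;
}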
@@ -285,16 +333,28 @@ static int klp_disable_func(struct klp_func *func)
         if (WARN_ON(!func->old_addr))
                 return -EINVAL;
 
-        ret = unregister_ftrace_function(func->fops);
-        if (ret) {
-                pr_err("failed to unregister ftrace handler for function '%s' (%d)\n",
-                       func->old_name, ret);
-                return ret;
-        }
+        ops = klp_find_ops(func->old_addr);
+        if (WARN_ON(!ops))
+                return -EINVAL;
 
-        ret = ftrace_set_filter_ip(func->fops, func->old_addr, 1, 0);
-        if (ret)
-                pr_warn("function unregister succeeded but failed to clear the filter\n");
+        if (list_is_singular(&ops->func_stack)) {
+                ret = unregister_ftrace_function(&ops->fops);
+                if (ret) {
+                        pr_err("failed to unregister ftrace handler for function '%s' (%d)\n",
+                               func->old_name, ret);
+                        return ret;
+                }
+
+                ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+                if (ret)
+                        pr_warn("function unregister succeeded but failed to clear the filter\n");
+
+                list_del_rcu(&func->stack_node);
+                list_del(&ops->node);
+                kfree(ops);
+        } else {
+                list_del_rcu(&func->stack_node);
+        }
 
         func->state = KLP_DISABLED;
 
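[Editor's note] The disable path frees the shared klp_ops only when the departing klp_func is the last one on the stack; otherwise it just unlinks the func and the next version down becomes the winner. A userspace sketch of the list_is_singular() test driving that decision, using a kernel-style circular doubly linked list (local reimplementation for illustration):

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

/* Matches the kernel helper: non-empty, and first entry == last entry. */
static bool list_is_singular(const struct list_head *h)
{
        return h->next != h && h->next == h->prev;
}

int main(void)
{
        struct list_head stack, f1, f2;

        list_init(&stack);
        list_add(&f1, &stack);
        list_add(&f2, &stack);
        printf("%d\n", list_is_singular(&stack));  /* 0: two funcs, just unlink */
        list_del(&f2);
        printf("%d\n", list_is_singular(&stack));  /* 1: last func, free the ops */
        return 0;
}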
@@ -303,6 +363,7 @@ static int klp_disable_func(struct klp_func *func)
 
 static int klp_enable_func(struct klp_func *func)
 {
+        struct klp_ops *ops;
         int ret;
 
         if (WARN_ON(!func->old_addr))
@@ -311,22 +372,50 @@ static int klp_enable_func(struct klp_func *func)
         if (WARN_ON(func->state != KLP_DISABLED))
                 return -EINVAL;
 
-        ret = ftrace_set_filter_ip(func->fops, func->old_addr, 0, 0);
-        if (ret) {
-                pr_err("failed to set ftrace filter for function '%s' (%d)\n",
-                       func->old_name, ret);
-                return ret;
-        }
+        ops = klp_find_ops(func->old_addr);
+        if (!ops) {
+                ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+                if (!ops)
+                        return -ENOMEM;
+
+                ops->fops.func = klp_ftrace_handler;
+                ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
+                                  FTRACE_OPS_FL_DYNAMIC |
+                                  FTRACE_OPS_FL_IPMODIFY;
+
+                list_add(&ops->node, &klp_ops);
+
+                INIT_LIST_HEAD(&ops->func_stack);
+                list_add_rcu(&func->stack_node, &ops->func_stack);
+
+                ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
+                if (ret) {
+                        pr_err("failed to set ftrace filter for function '%s' (%d)\n",
+                               func->old_name, ret);
+                        goto err;
+                }
+
+                ret = register_ftrace_function(&ops->fops);
+                if (ret) {
+                        pr_err("failed to register ftrace handler for function '%s' (%d)\n",
+                               func->old_name, ret);
+                        ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+                        goto err;
+                }
+
 
-        ret = register_ftrace_function(func->fops);
-        if (ret) {
-                pr_err("failed to register ftrace handler for function '%s' (%d)\n",
-                       func->old_name, ret);
-                ftrace_set_filter_ip(func->fops, func->old_addr, 1, 0);
-        } else {
-                func->state = KLP_ENABLED;
+        } else {
+                list_add_rcu(&func->stack_node, &ops->func_stack);
         }
 
+        func->state = KLP_ENABLED;
+
+        return ret;
+
+err:
+        list_del_rcu(&func->stack_node);
+        list_del(&ops->node);
+        kfree(ops);
         return ret;
 }
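[Editor's note] One ordering detail in the enable path above is worth noting: for a newly allocated klp_ops, the func is pushed onto func_stack before register_ftrace_function() is called, so by the time the handler can run there is already a winner on the stack; the err: label then unwinds the allocation and list linkage in one place. A compact sketch of that allocate/link/arm/unwind shape, with hypothetical names (not the kernel API):

#include <errno.h>
#include <stdlib.h>

struct hook { int armed; };

static int arm_hook(struct hook *h) { h->armed = 1; return 0; }

static int enable_hook(struct hook **out)
{
        struct hook *h;
        int ret;

        h = calloc(1, sizeof(*h));      /* like kzalloc() above */
        if (!h)
                return -ENOMEM;

        /*
         * Make the hook findable *before* arming it, mirroring how the
         * func is pushed onto func_stack before register_ftrace_function():
         * once the callback can fire, it must already see a winner.
         */
        *out = h;

        ret = arm_hook(h);              /* like register_ftrace_function() */
        if (ret)
                goto err;               /* single unwind point, as above */

        return 0;

err:
        *out = NULL;                    /* undo the linkage in reverse order */
        free(h);
        return ret;
}

int main(void)
{
        struct hook *h;

        return enable_hook(&h);
}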
@@ -582,10 +671,6 @@ static struct kobj_type klp_ktype_patch = {
 
 static void klp_kobj_release_func(struct kobject *kobj)
 {
-        struct klp_func *func;
-
-        func = container_of(kobj, struct klp_func, kobj);
-        kfree(func->fops);
 }
 
 static struct kobj_type klp_ktype_func = {
@@ -642,28 +727,11 @@ static void klp_free_patch(struct klp_patch *patch)
 
 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 {
-        struct ftrace_ops *ops;
-        int ret;
-
-        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
-        if (!ops)
-                return -ENOMEM;
-
-        ops->private = func;
-        ops->func = klp_ftrace_handler;
-        ops->flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_DYNAMIC |
-                     FTRACE_OPS_FL_IPMODIFY;
-        func->fops = ops;
+        INIT_LIST_HEAD(&func->stack_node);
         func->state = KLP_DISABLED;
 
-        ret = kobject_init_and_add(&func->kobj, &klp_ktype_func,
-                                   obj->kobj, func->old_name);
-        if (ret) {
-                kfree(func->fops);
-                return ret;
-        }
-
-        return 0;
+        return kobject_init_and_add(&func->kobj, &klp_ktype_func,
+                                    obj->kobj, func->old_name);
 }
 
 /* parts of the initialization that is done only when the object is loaded */