author     Josh Poimboeuf <jpoimboe@redhat.com>    2017-02-13 20:42:37 -0500
committer  Jiri Kosina <jkosina@suse.cz>           2017-03-08 03:23:40 -0500
commit     c349cdcaba589fb49cf105093ebc695eb8b9ff08 (patch)
tree       0ab3165baae80dbfed236b5e2c23afd3d14ba14d /kernel/livepatch/core.c
parent     aa82dc3e00da63751bb9dfab26983037b79fc39d (diff)
livepatch: move patching functions into patch.c
Move functions related to the actual patching of functions and objects
into a new patch.c file.
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Miroslav Benes <mbenes@suse.cz>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Reviewed-by: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
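
Note: the diff below is limited to kernel/livepatch/core.c, so the new kernel/livepatch/patch.c and its header are not shown. After this change core.c includes "patch.h" and keeps calling the object-level entry points from its enable/disable paths. A minimal sketch of what that header plausibly declares, inferred from the code removed below (the exact contents are an assumption, not part of this diff):

/* Hypothetical sketch of kernel/livepatch/patch.h -- inferred, not shown in this diff. */
#ifndef _LIVEPATCH_PATCH_H
#define _LIVEPATCH_PATCH_H

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>

/*
 * struct klp_ops - moved out of core.c below; one ftrace_ops shared by all
 * replacement functions (klp_func) that patch the same old_addr.
 */
struct klp_ops {
        struct list_head node;          /* node for the global klp_ops list */
        struct list_head func_stack;    /* stack of klp_func's; active func on top */
        struct ftrace_ops fops;         /* registered ftrace ops struct */
};

/* Object-level patching entry points still used by core.c's enable/disable paths. */
int klp_patch_object(struct klp_object *obj);
void klp_unpatch_object(struct klp_object *obj);

#endif /* _LIVEPATCH_PATCH_H */

The rest of the removed code (klp_find_ops(), klp_ftrace_handler(), klp_get_ftrace_location(), klp_patch_func(), klp_unpatch_func()) presumably has no remaining users in core.c and can stay private to patch.c.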
Diffstat (limited to 'kernel/livepatch/core.c')
-rw-r--r--  kernel/livepatch/core.c  202
1 file changed, 1 insertion(+), 201 deletions(-)
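
For orientation while reading the removed hunks: all klp_func replacements that target the same old_addr share a single ftrace_ops through struct klp_ops, and the registered handler redirects the saved program counter to whichever klp_func sits at the top of ops->func_stack, which is what lets a version switch take effect as a simple list update. A condensed rendering of the handler being moved (the full version appears in the removed code below):

/* Condensed from the klp_ftrace_handler() removed from core.c below. */
static void notrace klp_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                                       struct ftrace_ops *fops, struct pt_regs *regs)
{
        /* Recover the klp_ops wrapper that embeds this ftrace_ops. */
        struct klp_ops *ops = container_of(fops, struct klp_ops, fops);
        struct klp_func *func;

        rcu_read_lock();
        /* The active replacement is the first entry on the func_stack. */
        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, stack_node);
        if (!WARN_ON_ONCE(!func))
                /* Resume execution in the new function instead of the old one. */
                klp_arch_set_pc(regs, (unsigned long)func->new_func);
        rcu_read_unlock();
}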
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 47ed643a6362..6a137e1f4490 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -24,32 +24,13 @@
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/ftrace.h>
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/livepatch.h>
 #include <linux/elf.h>
 #include <linux/moduleloader.h>
 #include <asm/cacheflush.h>
-
-/**
- * struct klp_ops - structure for tracking registered ftrace ops structs
- *
- * A single ftrace_ops is shared between all enabled replacement functions
- * (klp_func structs) which have the same old_addr. This allows the switch
- * between function versions to happen instantaneously by updating the klp_ops
- * struct's func_stack list. The winner is the klp_func at the top of the
- * func_stack (front of the list).
- *
- * @node: node for the global klp_ops list
- * @func_stack: list head for the stack of klp_func's (active func is on top)
- * @fops: registered ftrace ops struct
- */
-struct klp_ops {
-        struct list_head node;
-        struct list_head func_stack;
-        struct ftrace_ops fops;
-};
+#include "patch.h"
 
 /*
  * The klp_mutex protects the global lists and state transitions of any
@@ -60,28 +41,12 @@ struct klp_ops {
 static DEFINE_MUTEX(klp_mutex);
 
 static LIST_HEAD(klp_patches);
-static LIST_HEAD(klp_ops);
 
 static struct kobject *klp_root_kobj;
 
 /* TODO: temporary stub */
 void klp_update_patch_state(struct task_struct *task) {}
 
-static struct klp_ops *klp_find_ops(unsigned long old_addr)
-{
-        struct klp_ops *ops;
-        struct klp_func *func;
-
-        list_for_each_entry(ops, &klp_ops, node) {
-                func = list_first_entry(&ops->func_stack, struct klp_func,
-                                        stack_node);
-                if (func->old_addr == old_addr)
-                        return ops;
-        }
-
-        return NULL;
-}
-
 static bool klp_is_module(struct klp_object *obj)
 {
         return obj->name;
@@ -314,171 +279,6 @@ static int klp_write_object_relocations(struct module *pmod,
         return ret;
 }
 
-static void notrace klp_ftrace_handler(unsigned long ip,
-                                       unsigned long parent_ip,
-                                       struct ftrace_ops *fops,
-                                       struct pt_regs *regs)
-{
-        struct klp_ops *ops;
-        struct klp_func *func;
-
-        ops = container_of(fops, struct klp_ops, fops);
-
-        rcu_read_lock();
-        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
-                                      stack_node);
-        if (WARN_ON_ONCE(!func))
-                goto unlock;
-
-        klp_arch_set_pc(regs, (unsigned long)func->new_func);
-unlock:
-        rcu_read_unlock();
-}
-
-/*
- * Convert a function address into the appropriate ftrace location.
- *
- * Usually this is just the address of the function, but on some architectures
- * it's more complicated so allow them to provide a custom behaviour.
- */
-#ifndef klp_get_ftrace_location
-static unsigned long klp_get_ftrace_location(unsigned long faddr)
-{
-        return faddr;
-}
-#endif
-
-static void klp_unpatch_func(struct klp_func *func)
-{
-        struct klp_ops *ops;
-
-        if (WARN_ON(!func->patched))
-                return;
-        if (WARN_ON(!func->old_addr))
-                return;
-
-        ops = klp_find_ops(func->old_addr);
-        if (WARN_ON(!ops))
-                return;
-
-        if (list_is_singular(&ops->func_stack)) {
-                unsigned long ftrace_loc;
-
-                ftrace_loc = klp_get_ftrace_location(func->old_addr);
-                if (WARN_ON(!ftrace_loc))
-                        return;
-
-                WARN_ON(unregister_ftrace_function(&ops->fops));
-                WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
-
-                list_del_rcu(&func->stack_node);
-                list_del(&ops->node);
-                kfree(ops);
-        } else {
-                list_del_rcu(&func->stack_node);
-        }
-
-        func->patched = false;
-}
-
-static int klp_patch_func(struct klp_func *func)
-{
-        struct klp_ops *ops;
-        int ret;
-
-        if (WARN_ON(!func->old_addr))
-                return -EINVAL;
-
-        if (WARN_ON(func->patched))
-                return -EINVAL;
-
-        ops = klp_find_ops(func->old_addr);
-        if (!ops) {
-                unsigned long ftrace_loc;
-
-                ftrace_loc = klp_get_ftrace_location(func->old_addr);
-                if (!ftrace_loc) {
-                        pr_err("failed to find location for function '%s'\n",
-                               func->old_name);
-                        return -EINVAL;
-                }
-
-                ops = kzalloc(sizeof(*ops), GFP_KERNEL);
-                if (!ops)
-                        return -ENOMEM;
-
-                ops->fops.func = klp_ftrace_handler;
-                ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
-                                  FTRACE_OPS_FL_DYNAMIC |
-                                  FTRACE_OPS_FL_IPMODIFY;
-
-                list_add(&ops->node, &klp_ops);
-
-                INIT_LIST_HEAD(&ops->func_stack);
-                list_add_rcu(&func->stack_node, &ops->func_stack);
-
-                ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
-                if (ret) {
-                        pr_err("failed to set ftrace filter for function '%s' (%d)\n",
-                               func->old_name, ret);
-                        goto err;
-                }
-
-                ret = register_ftrace_function(&ops->fops);
-                if (ret) {
-                        pr_err("failed to register ftrace handler for function '%s' (%d)\n",
-                               func->old_name, ret);
-                        ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
-                        goto err;
-                }
-
-
-        } else {
-                list_add_rcu(&func->stack_node, &ops->func_stack);
-        }
-
-        func->patched = true;
-
-        return 0;
-
-err:
-        list_del_rcu(&func->stack_node);
-        list_del(&ops->node);
-        kfree(ops);
-        return ret;
-}
-
-static void klp_unpatch_object(struct klp_object *obj)
-{
-        struct klp_func *func;
-
-        klp_for_each_func(obj, func)
-                if (func->patched)
-                        klp_unpatch_func(func);
-
-        obj->patched = false;
-}
-
-static int klp_patch_object(struct klp_object *obj)
-{
-        struct klp_func *func;
-        int ret;
-
-        if (WARN_ON(obj->patched))
-                return -EINVAL;
-
-        klp_for_each_func(obj, func) {
-                ret = klp_patch_func(func);
-                if (ret) {
-                        klp_unpatch_object(obj);
-                        return ret;
-                }
-        }
-        obj->patched = true;
-
-        return 0;
-}
-
 static int __klp_disable_patch(struct klp_patch *patch)
 {
         struct klp_object *obj;