author    Petr Mladek <pmladek@suse.com>    2019-01-09 07:43:26 -0500
committer Jiri Kosina <jkosina@suse.cz>     2019-01-11 14:51:24 -0500
commit    d697bad588eb4e76311193e6eaacc7c7aaa5a4ba (patch)
tree      53f96c1a0ddbec0307bc3c7f2c1bfda8c5368fc7 /kernel/livepatch/core.c
parent    e1452b607c48c642caf57299f4da83aa002f8533 (diff)
livepatch: Remove Nop structures when unused
Replaced patches are removed from the stack when the transition is
finished. This means that the Nop structures will never be needed
again and can be removed. Why should we care?

  + Nop structures give the impression that the function is patched
    even though the ftrace handler has no effect.

  + Ftrace handlers do not come for free. They cause slowdown that
    might be visible in some workloads. The ftrace-related slowdown
    might actually be the reason why the function is no longer patched
    in the new cumulative patch. One would expect that a cumulative
    patch would help solve these problems as well.

  + Cumulative patches are supposed to replace any earlier version of
    the patch. The number of NOPs depends on which version was
    replaced. This multiplies the number of scenarios that might
    happen.

    One might say that NOPs are innocent. But there are even optimized
    NOP instructions for different processors, for example, see
    arch/x86/kernel/alternative.c. And klp_ftrace_handler() is much
    more complicated.

  + It sounds natural to clean up a mess that is no longer needed.
    It could only get worse if we do not do it.

This patch allows the dynamic structures to be unpatched and freed
independently when the transition finishes.

The free part is a bit tricky because kobject free callbacks are
called asynchronously and cannot easily be waited for. Fortunately,
we do not have to wait: any further access is avoided by removing the
structures from the dynamic lists first.

Signed-off-by: Petr Mladek <pmladek@suse.com>
Acked-by: Miroslav Benes <mbenes@suse.cz>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'kernel/livepatch/core.c')
-rw-r--r--  kernel/livepatch/core.c | 48
1 file changed, 45 insertions(+), 3 deletions(-)
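For context, the commit message frees the dynamic structures when the transition finishes, and the new comment in the diff below insists that klp_discard_nops() runs right after the replaced patches are removed. A minimal sketch of the assumed call site in kernel/livepatch/transition.c follows; it is not part of the core.c diff shown here, the surrounding code is abridged, and the exact shape may differ:

/* Assumed excerpt from klp_complete_transition(), abridged for illustration. */
if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
	/* Drop the replaced patches from the stack first... */
	klp_discard_replaced_patches(klp_transition_patch);
	/* ...then unpatch and free the no-longer-needed NOP structures. */
	klp_discard_nops(klp_transition_patch);
}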
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index ecb7660f1d8b..113645ee86b6 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -611,11 +611,16 @@ static struct kobj_type klp_ktype_func = {
 	.sysfs_ops = &kobj_sysfs_ops,
 };
 
-static void klp_free_funcs(struct klp_object *obj)
+static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
 {
 	struct klp_func *func, *tmp_func;
 
 	klp_for_each_func_safe(obj, func, tmp_func) {
+		if (nops_only && !func->nop)
+			continue;
+
+		list_del(&func->node);
+
 		/* Might be called from klp_init_patch() error path. */
 		if (func->kobj_added) {
 			kobject_put(&func->kobj);
@@ -640,12 +645,17 @@ static void klp_free_object_loaded(struct klp_object *obj)
 	}
 }
 
-static void klp_free_objects(struct klp_patch *patch)
+static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
 {
 	struct klp_object *obj, *tmp_obj;
 
 	klp_for_each_object_safe(patch, obj, tmp_obj) {
-		klp_free_funcs(obj);
+		__klp_free_funcs(obj, nops_only);
+
+		if (nops_only && !obj->dynamic)
+			continue;
+
+		list_del(&obj->node);
 
 		/* Might be called from klp_init_patch() error path. */
 		if (obj->kobj_added) {
@@ -656,6 +666,16 @@ static void klp_free_objects(struct klp_patch *patch)
 	}
 }
 
+static void klp_free_objects(struct klp_patch *patch)
+{
+	__klp_free_objects(patch, false);
+}
+
+static void klp_free_objects_dynamic(struct klp_patch *patch)
+{
+	__klp_free_objects(patch, true);
+}
+
 /*
  * This function implements the free operations that can be called safely
  * under klp_mutex.
@@ -1085,6 +1105,28 @@ void klp_discard_replaced_patches(struct klp_patch *new_patch)
 }
 
 /*
+ * This function removes the dynamically allocated 'nop' functions.
+ *
+ * We could be pretty aggressive. NOPs do not change the existing
+ * behavior except for adding unnecessary delay by the ftrace handler.
+ *
+ * It is safe even when the transition was forced. The ftrace handler
+ * will see a valid ops->func_stack entry thanks to RCU.
+ *
+ * We could even free the NOP structures. They must be the last entry
+ * in ops->func_stack. Therefore unregister_ftrace_function() is called.
+ * It does the same as klp_synchronize_transition() to make sure that
+ * nobody is inside the ftrace handler once the operation finishes.
+ *
+ * IMPORTANT: It must be called right after removing the replaced patches!
+ */
+void klp_discard_nops(struct klp_patch *new_patch)
+{
+	klp_unpatch_objects_dynamic(klp_transition_patch);
+	klp_free_objects_dynamic(klp_transition_patch);
+}
+
+/*
  * Remove parts of patches that touch a given kernel module. The list of
  * patches processed might be limited. When limit is NULL, all patches
  * will be handled.
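klp_discard_nops() above calls klp_unpatch_objects_dynamic(), which lives in kernel/livepatch/patch.c and is therefore outside this diffstat view. A hedged sketch of how that helper is expected to mirror the nops_only split used by __klp_free_funcs()/__klp_free_objects() in this file; the details are inferred from the symmetry, not quoted from the patch:

/* Assumed shape of the patch.c counterpart (illustrative, not from this diff). */
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		/* Skip statically defined functions when only NOPs are wanted. */
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	/*
	 * When only NOPs are removed, a statically defined object keeps
	 * obj->patched set because its real functions stay patched.
	 */
	if (nops_only && !obj->dynamic)
		return;

	obj->patched = false;
}

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}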