author		Petr Mladek <pmladek@suse.com>	2019-01-09 07:43:22 -0500
committer	Jiri Kosina <jkosina@suse.cz>	2019-01-11 14:51:24 -0500
commit		68007289bf3cd937a5b8fc4987d2787167bd06ca (patch)
tree		2c913929da4c5e65fcacdfc5afb47399d86cf446 /kernel/livepatch/transition.c
parent		0430f78bf38f9972f0cf0522709cc63d49fa164c (diff)
livepatch: Don't block the removal of patches loaded after a forced transition
module_put() is currently never called in klp_complete_transition() when klp_force is set. As a result, we might keep the reference count even when klp_enable_patch() fails and klp_cancel_transition() is called.

This might give the impression that a module could get blocked in some strange init state. Fortunately, that is not the case: the reference count is ignored when mod->init fails, and erroneous modules are always removed. Still, the behavior is confusing.

Instead, move the global klp_forced flag into struct klp_patch. As a result, only modules that might still be in use after a forced transition are blocked from removal. Livepatches loaded afterwards can eventually be removed completely. This is not a big deal, but the code is at least consistent with reality.

Signed-off-by: Petr Mladek <pmladek@suse.com>
Acked-by: Joe Lawrence <joe.lawrence@redhat.com>
Acked-by: Miroslav Benes <mbenes@suse.cz>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
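For orientation, here is a minimal sketch of the idea, not the kernel's actual definitions: the bool forced member is assumed to be added to struct klp_patch alongside this change (the real struct lives in include/linux/livepatch.h and the diffstat below is limited to transition.c), the struct layout is abbreviated, and sketch_complete_transition() is a hypothetical helper standing in for the relevant part of klp_complete_transition().

	/*
	 * Illustrative sketch only -- the real struct klp_patch has many more
	 * members.  The point is the per-patch 'forced' flag that replaces the
	 * old file-scope klp_forced boolean.
	 */
	struct klp_patch {
		struct module *mod;	/* the livepatch module itself */
		struct list_head list;	/* node on the global klp_patches list */
		bool forced;		/* set once a forced transition has touched this patch */
		/* ... */
	};

	/*
	 * Hypothetical helper mirroring the new rule in klp_complete_transition():
	 * the module reference taken when the patch was enabled is dropped on a
	 * completed unpatch only if this particular patch was never forced.
	 * Previously, one global flag kept every later livepatch module pinned too.
	 */
	static void sketch_complete_transition(struct klp_patch *patch, int target_state)
	{
		if (!patch->forced && target_state == KLP_UNPATCHED)
			module_put(patch->mod);
	}

The design choice is simply to track "was forced" per patch rather than per system, so the refcount pessimism applies only where a forced transition actually happened.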
Diffstat (limited to 'kernel/livepatch/transition.c')
-rw-r--r--	kernel/livepatch/transition.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index f27a378ad5e1..a4c921364003 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -33,8 +33,6 @@ struct klp_patch *klp_transition_patch;
 
 static int klp_target_state = KLP_UNDEFINED;
 
-static bool klp_forced = false;
-
 /*
  * This work can be performed periodically to finish patching or unpatching any
  * "straggler" tasks which failed to transition in the first attempt.
@@ -137,10 +135,10 @@ static void klp_complete_transition(void)
 		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
 	/*
-	 * klp_forced set implies unbounded increase of module's ref count if
+	 * patch->forced set implies unbounded increase of module's ref count if
 	 * the module is disabled/enabled in a loop.
 	 */
-	if (!klp_forced && klp_target_state == KLP_UNPATCHED)
+	if (!klp_transition_patch->forced && klp_target_state == KLP_UNPATCHED)
 		module_put(klp_transition_patch->mod);
 
 	klp_target_state = KLP_UNDEFINED;
@@ -620,6 +618,7 @@ void klp_send_signals(void)
  */
 void klp_force_transition(void)
 {
+	struct klp_patch *patch;
 	struct task_struct *g, *task;
 	unsigned int cpu;
 
@@ -633,5 +632,6 @@ void klp_force_transition(void)
 	for_each_possible_cpu(cpu)
 		klp_update_patch_state(idle_task(cpu));
 
-	klp_forced = true;
+	list_for_each_entry(patch, &klp_patches, list)
+		patch->forced = true;
 }