-rw-r--r--   Documentation/ABI/testing/sysfs-kernel-livepatch   14
-rw-r--r--   Documentation/livepatch/livepatch.txt              18
-rw-r--r--   kernel/livepatch/core.c                            30
-rw-r--r--   kernel/livepatch/transition.c                      36
-rw-r--r--   kernel/livepatch/transition.h                       1
5 files changed, 95 insertions, 4 deletions
diff --git a/Documentation/ABI/testing/sysfs-kernel-livepatch b/Documentation/ABI/testing/sysfs-kernel-livepatch
index 3bb9d5bc1ce3..dac7e1e62a8b 100644
--- a/Documentation/ABI/testing/sysfs-kernel-livepatch
+++ b/Documentation/ABI/testing/sysfs-kernel-livepatch
@@ -45,6 +45,20 @@ Description:
 		signal pending structures). Tasks are interrupted or woken up,
 		and forced to change their patched state.
 
+What:		/sys/kernel/livepatch/<patch>/force
+Date:		Nov 2017
+KernelVersion:	4.15.0
+Contact:	live-patching@vger.kernel.org
+Description:
+		A writable attribute that allows administrator to affect the
+		course of an existing transition. Writing 1 clears
+		TIF_PATCH_PENDING flag of all tasks and thus forces the tasks to
+		the patched or unpatched state. Administrator should not
+		use this feature without a clearance from a patch
+		distributor. Removal (rmmod) of patch modules is permanently
+		disabled when the feature is used. See
+		Documentation/livepatch/livepatch.txt for more information.
+
 What:		/sys/kernel/livepatch/<patch>/<object>
 Date:		Nov 2014
 KernelVersion:	3.19.0
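
For illustration only (not part of the patch): a minimal userspace sketch of
driving the new attribute, assuming a loaded, transitioning patch whose sysfs
directory is named "my_livepatch" (a placeholder).

    /* Force a stuck livepatch transition to finish (placeholder patch name). */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/kernel/livepatch/my_livepatch/force";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Writing 1 clears TIF_PATCH_PENDING for all tasks. */
            if (write(fd, "1", 1) != 1) {
                    perror("write");
                    close(fd);
                    return 1;
            }

            close(fd);
            return 0;
    }

A plain shell redirect of "1" into the attribute achieves the same thing; once
the attribute has been used, the patch module can no longer be removed.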
diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt
index 9bcdef277a36..896ba8941702 100644
--- a/Documentation/livepatch/livepatch.txt
+++ b/Documentation/livepatch/livepatch.txt
@@ -183,6 +183,20 @@ tasks. No proper signal is actually delivered (there is no data in signal
 pending structures). Tasks are interrupted or woken up, and forced to change
 their patched state.
 
+Administrator can also affect a transition through
+/sys/kernel/livepatch/<patch>/force attribute. Writing 1 there clears
+TIF_PATCH_PENDING flag of all tasks and thus forces the tasks to the patched
+state. Important note! The force attribute is intended for cases when the
+transition gets stuck for a long time because of a blocking task. Administrator
+is expected to collect all necessary data (namely stack traces of such blocking
+tasks) and request a clearance from a patch distributor to force the transition.
+Unauthorized usage may cause harm to the system. It depends on the nature of the
+patch, which functions are (un)patched, and which functions the blocking tasks
+are sleeping in (/proc/<pid>/stack may help here). Removal (rmmod) of patch
+modules is permanently disabled when the force feature is used. It cannot be
+guaranteed there is no task sleeping in such module. It implies unbounded
+reference count if a patch module is disabled and enabled in a loop.
+
 3.1 Adding consistency model support to new architectures
 ---------------------------------------------------------
 
@@ -439,8 +453,8 @@ Information about the registered patches can be found under
 /sys/kernel/livepatch. The patches could be enabled and disabled
 by writing there.
 
-/sys/kernel/livepatch/<patch>/signal attribute allows administrator to affect a
-patching operation.
+/sys/kernel/livepatch/<patch>/signal and /sys/kernel/livepatch/<patch>/force
+attributes allow administrator to affect a patching operation.
 
 See Documentation/ABI/testing/sysfs-kernel-livepatch for more details.
 
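
The text above asks the administrator to first collect stack traces of the
blocking tasks. A rough sketch of that collection step, again with a
placeholder patch name ("my_livepatch") and a placeholder PID (1234):

    /* Check the transition state and dump a suspect task's kernel stack. */
    #include <stdio.h>

    static void dump_file(const char *path)
    {
            char line[256];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror(path);
                    return;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
    }

    int main(void)
    {
            /* Reads "1" while the patch is still in transition. */
            dump_file("/sys/kernel/livepatch/my_livepatch/transition");

            /* Which function is the suspected blocking task sleeping in? */
            dump_file("/proc/1234/stack");

            return 0;
    }

Only after a patch distributor has reviewed such traces and given clearance
should 1 be written to the force attribute.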
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 88766bd91803..1c3c9b27c916 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -455,6 +455,7 @@ EXPORT_SYMBOL_GPL(klp_enable_patch);
  * /sys/kernel/livepatch/<patch>/enabled
  * /sys/kernel/livepatch/<patch>/transition
  * /sys/kernel/livepatch/<patch>/signal
+ * /sys/kernel/livepatch/<patch>/force
  * /sys/kernel/livepatch/<patch>/<object>
  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
  */
@@ -556,13 +557,42 @@ static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
 	return count;
 }
 
+static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct klp_patch *patch;
+	int ret;
+	bool val;
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+
+	/*
+	 * klp_mutex lock is not grabbed here intentionally. It is not really
+	 * needed. The race window is harmless and grabbing the lock would only
+	 * hold the action back.
+	 */
+	if (patch != klp_transition_patch)
+		return -EINVAL;
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		return ret;
+
+	if (val)
+		klp_force_transition();
+
+	return count;
+}
+
 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
 static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
+static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
 static struct attribute *klp_patch_attrs[] = {
 	&enabled_kobj_attr.attr,
 	&transition_kobj_attr.attr,
 	&signal_kobj_attr.attr,
+	&force_kobj_attr.attr,
 	NULL
 };
 
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index edcfcb8ebb2d..be5bfa533ee8 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -33,6 +33,8 @@ struct klp_patch *klp_transition_patch;
 
 static int klp_target_state = KLP_UNDEFINED;
 
+static bool klp_forced = false;
+
 /*
  * This work can be performed periodically to finish patching or unpatching any
  * "straggler" tasks which failed to transition in the first attempt.
@@ -146,9 +148,12 @@ done:
 	/*
 	 * See complementary comment in __klp_enable_patch() for why we
 	 * keep the module reference for immediate patches.
+	 *
+	 * klp_forced or immediate_func set implies unbounded increase of
+	 * module's ref count if the module is disabled/enabled in a loop.
 	 */
-	if (!klp_transition_patch->immediate && !immediate_func &&
-	    klp_target_state == KLP_UNPATCHED) {
+	if (!klp_forced && !klp_transition_patch->immediate &&
+	    !immediate_func && klp_target_state == KLP_UNPATCHED) {
 		module_put(klp_transition_patch->mod);
 	}
 
@@ -649,3 +654,30 @@ void klp_send_signals(void)
 	}
 	read_unlock(&tasklist_lock);
 }
+
+/*
+ * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
+ * existing transition to finish.
+ *
+ * NOTE: klp_update_patch_state(task) requires the task to be inactive or
+ * 'current'. This is not the case here and the consistency model could be
+ * broken. Administrator, who is the only one to execute the
+ * klp_force_transitions(), has to be aware of this.
+ */
+void klp_force_transition(void)
+{
+	struct task_struct *g, *task;
+	unsigned int cpu;
+
+	pr_warn("forcing remaining tasks to the patched state\n");
+
+	read_lock(&tasklist_lock);
+	for_each_process_thread(g, task)
+		klp_update_patch_state(task);
+	read_unlock(&tasklist_lock);
+
+	for_each_possible_cpu(cpu)
+		klp_update_patch_state(idle_task(cpu));
+
+	klp_forced = true;
+}
diff --git a/kernel/livepatch/transition.h b/kernel/livepatch/transition.h
index 40522795a5f6..f9d0bc016067 100644
--- a/kernel/livepatch/transition.h
+++ b/kernel/livepatch/transition.h
@@ -12,5 +12,6 @@ void klp_start_transition(void);
 void klp_try_complete_transition(void);
 void klp_reverse_transition(void);
 void klp_send_signals(void);
+void klp_force_transition(void);
 
 #endif /* _LIVEPATCH_TRANSITION_H */