aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/livepatch/core.c
diff options
context:
space:
mode:
authorPetr Mladek <pmladek@suse.com>2019-01-09 07:43:23 -0500
committerJiri Kosina <jkosina@suse.cz>2019-01-11 14:51:24 -0500
commit958ef1e39d24d6cb8bf2a7406130a98c9564230f (patch)
tree07694df3fe9ac15dbfc1130ed5151f85f0d6a87c /kernel/livepatch/core.c
parent68007289bf3cd937a5b8fc4987d2787167bd06ca (diff)
livepatch: Simplify API by removing registration step
The possibility to re-enable a registered patch was useful for immediate patches where the livepatch module had to stay until the system reboot. The improved consistency model allows to achieve the same result by unloading and loading the livepatch module again. Also we are going to add a feature called atomic replace. It will allow to create a patch that would replace all already registered patches. The aim is to handle dependent patches more securely. It will obsolete the stack of patches that helped to handle the dependencies so far. Then it might be unclear when a cumulative patch re-enabling is safe. It would be complicated to support the many modes. Instead we could actually make the API and code easier to understand. Therefore, remove the two step public API. All the checks and init calls are moved from klp_register_patch() to klp_enable_patch(). Also the patch is automatically freed, including the sysfs interface when the transition to the disabled state is completed. As a result, there is never a disabled patch on the top of the stack. Therefore we do not need to check the stack in __klp_enable_patch(). And we could simplify the check in __klp_disable_patch(). Also the API and logic are much easier. It is enough to call klp_enable_patch() in module_init() call. The patch can be disabled by writing '0' into /sys/kernel/livepatch/<patch>/enabled. Then the module can be removed once the transition finishes and sysfs interface is freed. The only problem is how to free the structures and kobjects safely. The operation is triggered from the sysfs interface. We could not put the related kobject from there because it would cause lock inversion between klp_mutex and kernfs locks, see kn->count lockdep map. Therefore, offload the free task to a workqueue. It is perfectly fine: + The patch can no longer be used in the livepatch operations. + The module could not be removed until the free operation finishes and module_put() is called. 
+ The operation is asynchronous already when the first klp_try_complete_transition() fails and another call is queued with a delay. Suggested-by: Josh Poimboeuf <jpoimboe@redhat.com> Signed-off-by: Petr Mladek <pmladek@suse.com> Acked-by: Miroslav Benes <mbenes@suse.cz> Acked-by: Josh Poimboeuf <jpoimboe@redhat.com> Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'kernel/livepatch/core.c')
-rw-r--r--kernel/livepatch/core.c280
1 files changed, 94 insertions, 186 deletions
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index e77c5017ae0c..bd41b03a72d5 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -45,7 +45,11 @@
45 */ 45 */
46DEFINE_MUTEX(klp_mutex); 46DEFINE_MUTEX(klp_mutex);
47 47
48/* Registered patches */ 48/*
49 * Actively used patches: enabled or in transition. Note that replaced
50 * or disabled patches are not listed even though the related kernel
51 * module still can be loaded.
52 */
49LIST_HEAD(klp_patches); 53LIST_HEAD(klp_patches);
50 54
51static struct kobject *klp_root_kobj; 55static struct kobject *klp_root_kobj;
@@ -83,17 +87,6 @@ static void klp_find_object_module(struct klp_object *obj)
83 mutex_unlock(&module_mutex); 87 mutex_unlock(&module_mutex);
84} 88}
85 89
86static bool klp_is_patch_registered(struct klp_patch *patch)
87{
88 struct klp_patch *mypatch;
89
90 list_for_each_entry(mypatch, &klp_patches, list)
91 if (mypatch == patch)
92 return true;
93
94 return false;
95}
96
97static bool klp_initialized(void) 90static bool klp_initialized(void)
98{ 91{
99 return !!klp_root_kobj; 92 return !!klp_root_kobj;
@@ -292,7 +285,6 @@ static int klp_write_object_relocations(struct module *pmod,
292 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos> 285 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
293 */ 286 */
294static int __klp_disable_patch(struct klp_patch *patch); 287static int __klp_disable_patch(struct klp_patch *patch);
295static int __klp_enable_patch(struct klp_patch *patch);
296 288
297static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, 289static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
298 const char *buf, size_t count) 290 const char *buf, size_t count)
@@ -309,40 +301,32 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
309 301
310 mutex_lock(&klp_mutex); 302 mutex_lock(&klp_mutex);
311 303
312 if (!klp_is_patch_registered(patch)) {
313 /*
314 * Module with the patch could either disappear meanwhile or is
315 * not properly initialized yet.
316 */
317 ret = -EINVAL;
318 goto err;
319 }
320
321 if (patch->enabled == enabled) { 304 if (patch->enabled == enabled) {
322 /* already in requested state */ 305 /* already in requested state */
323 ret = -EINVAL; 306 ret = -EINVAL;
324 goto err; 307 goto out;
325 } 308 }
326 309
327 if (patch == klp_transition_patch) { 310 /*
311 * Allow to reverse a pending transition in both ways. It might be
312 * necessary to complete the transition without forcing and breaking
313 * the system integrity.
314 *
315 * Do not allow to re-enable a disabled patch.
316 */
317 if (patch == klp_transition_patch)
328 klp_reverse_transition(); 318 klp_reverse_transition();
329 } else if (enabled) { 319 else if (!enabled)
330 ret = __klp_enable_patch(patch);
331 if (ret)
332 goto err;
333 } else {
334 ret = __klp_disable_patch(patch); 320 ret = __klp_disable_patch(patch);
335 if (ret) 321 else
336 goto err; 322 ret = -EINVAL;
337 }
338 323
324out:
339 mutex_unlock(&klp_mutex); 325 mutex_unlock(&klp_mutex);
340 326
327 if (ret)
328 return ret;
341 return count; 329 return count;
342
343err:
344 mutex_unlock(&klp_mutex);
345 return ret;
346} 330}
347 331
348static ssize_t enabled_show(struct kobject *kobj, 332static ssize_t enabled_show(struct kobject *kobj,
@@ -508,7 +492,7 @@ static void klp_free_objects(struct klp_patch *patch)
508 * The operation must be completed by calling klp_free_patch_finish() 492 * The operation must be completed by calling klp_free_patch_finish()
509 * outside klp_mutex. 493 * outside klp_mutex.
510 */ 494 */
511static void klp_free_patch_start(struct klp_patch *patch) 495void klp_free_patch_start(struct klp_patch *patch)
512{ 496{
513 if (!list_empty(&patch->list)) 497 if (!list_empty(&patch->list))
514 list_del(&patch->list); 498 list_del(&patch->list);
@@ -536,6 +520,23 @@ static void klp_free_patch_finish(struct klp_patch *patch)
536 kobject_put(&patch->kobj); 520 kobject_put(&patch->kobj);
537 wait_for_completion(&patch->finish); 521 wait_for_completion(&patch->finish);
538 } 522 }
523
524 /* Put the module after the last access to struct klp_patch. */
525 if (!patch->forced)
526 module_put(patch->mod);
527}
528
529/*
530 * The livepatch might be freed from sysfs interface created by the patch.
531 * This work allows to wait until the interface is destroyed in a separate
532 * context.
533 */
534static void klp_free_patch_work_fn(struct work_struct *work)
535{
536 struct klp_patch *patch =
537 container_of(work, struct klp_patch, free_work);
538
539 klp_free_patch_finish(patch);
539} 540}
540 541
541static int klp_init_func(struct klp_object *obj, struct klp_func *func) 542static int klp_init_func(struct klp_object *obj, struct klp_func *func)
@@ -661,6 +662,7 @@ static int klp_init_patch_early(struct klp_patch *patch)
661 patch->kobj_added = false; 662 patch->kobj_added = false;
662 patch->enabled = false; 663 patch->enabled = false;
663 patch->forced = false; 664 patch->forced = false;
665 INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
664 init_completion(&patch->finish); 666 init_completion(&patch->finish);
665 667
666 klp_for_each_object(patch, obj) { 668 klp_for_each_object(patch, obj) {
@@ -673,6 +675,9 @@ static int klp_init_patch_early(struct klp_patch *patch)
673 func->kobj_added = false; 675 func->kobj_added = false;
674 } 676 }
675 677
678 if (!try_module_get(patch->mod))
679 return -ENODEV;
680
676 return 0; 681 return 0;
677} 682}
678 683
@@ -681,115 +686,22 @@ static int klp_init_patch(struct klp_patch *patch)
681 struct klp_object *obj; 686 struct klp_object *obj;
682 int ret; 687 int ret;
683 688
684 mutex_lock(&klp_mutex);
685
686 ret = klp_init_patch_early(patch);
687 if (ret) {
688 mutex_unlock(&klp_mutex);
689 return ret;
690 }
691
692 ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch, 689 ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
693 klp_root_kobj, "%s", patch->mod->name); 690 klp_root_kobj, "%s", patch->mod->name);
694 if (ret) { 691 if (ret)
695 mutex_unlock(&klp_mutex);
696 return ret; 692 return ret;
697 }
698 patch->kobj_added = true; 693 patch->kobj_added = true;
699 694
700 klp_for_each_object(patch, obj) { 695 klp_for_each_object(patch, obj) {
701 ret = klp_init_object(patch, obj); 696 ret = klp_init_object(patch, obj);
702 if (ret) 697 if (ret)
703 goto free; 698 return ret;
704 } 699 }
705 700
706 list_add_tail(&patch->list, &klp_patches); 701 list_add_tail(&patch->list, &klp_patches);
707 702
708 mutex_unlock(&klp_mutex);
709
710 return 0;
711
712free:
713 klp_free_patch_start(patch);
714
715 mutex_unlock(&klp_mutex);
716
717 klp_free_patch_finish(patch);
718
719 return ret;
720}
721
722/**
723 * klp_unregister_patch() - unregisters a patch
724 * @patch: Disabled patch to be unregistered
725 *
726 * Frees the data structures and removes the sysfs interface.
727 *
728 * Return: 0 on success, otherwise error
729 */
730int klp_unregister_patch(struct klp_patch *patch)
731{
732 int ret;
733
734 mutex_lock(&klp_mutex);
735
736 if (!klp_is_patch_registered(patch)) {
737 ret = -EINVAL;
738 goto err;
739 }
740
741 if (patch->enabled) {
742 ret = -EBUSY;
743 goto err;
744 }
745
746 klp_free_patch_start(patch);
747
748 mutex_unlock(&klp_mutex);
749
750 klp_free_patch_finish(patch);
751
752 return 0; 703 return 0;
753err:
754 mutex_unlock(&klp_mutex);
755 return ret;
756}
757EXPORT_SYMBOL_GPL(klp_unregister_patch);
758
759/**
760 * klp_register_patch() - registers a patch
761 * @patch: Patch to be registered
762 *
763 * Initializes the data structure associated with the patch and
764 * creates the sysfs interface.
765 *
766 * There is no need to take the reference on the patch module here. It is done
767 * later when the patch is enabled.
768 *
769 * Return: 0 on success, otherwise error
770 */
771int klp_register_patch(struct klp_patch *patch)
772{
773 if (!patch || !patch->mod)
774 return -EINVAL;
775
776 if (!is_livepatch_module(patch->mod)) {
777 pr_err("module %s is not marked as a livepatch module\n",
778 patch->mod->name);
779 return -EINVAL;
780 }
781
782 if (!klp_initialized())
783 return -ENODEV;
784
785 if (!klp_have_reliable_stack()) {
786 pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
787 return -ENOSYS;
788 }
789
790 return klp_init_patch(patch);
791} 704}
792EXPORT_SYMBOL_GPL(klp_register_patch);
793 705
794static int __klp_disable_patch(struct klp_patch *patch) 706static int __klp_disable_patch(struct klp_patch *patch)
795{ 707{
@@ -802,8 +714,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
802 return -EBUSY; 714 return -EBUSY;
803 715
804 /* enforce stacking: only the last enabled patch can be disabled */ 716 /* enforce stacking: only the last enabled patch can be disabled */
805 if (!list_is_last(&patch->list, &klp_patches) && 717 if (!list_is_last(&patch->list, &klp_patches))
806 list_next_entry(patch, list)->enabled)
807 return -EBUSY; 718 return -EBUSY;
808 719
809 klp_init_transition(patch, KLP_UNPATCHED); 720 klp_init_transition(patch, KLP_UNPATCHED);
@@ -822,44 +733,12 @@ static int __klp_disable_patch(struct klp_patch *patch)
822 smp_wmb(); 733 smp_wmb();
823 734
824 klp_start_transition(); 735 klp_start_transition();
825 klp_try_complete_transition();
826 patch->enabled = false; 736 patch->enabled = false;
737 klp_try_complete_transition();
827 738
828 return 0; 739 return 0;
829} 740}
830 741
831/**
832 * klp_disable_patch() - disables a registered patch
833 * @patch: The registered, enabled patch to be disabled
834 *
835 * Unregisters the patched functions from ftrace.
836 *
837 * Return: 0 on success, otherwise error
838 */
839int klp_disable_patch(struct klp_patch *patch)
840{
841 int ret;
842
843 mutex_lock(&klp_mutex);
844
845 if (!klp_is_patch_registered(patch)) {
846 ret = -EINVAL;
847 goto err;
848 }
849
850 if (!patch->enabled) {
851 ret = -EINVAL;
852 goto err;
853 }
854
855 ret = __klp_disable_patch(patch);
856
857err:
858 mutex_unlock(&klp_mutex);
859 return ret;
860}
861EXPORT_SYMBOL_GPL(klp_disable_patch);
862
863static int __klp_enable_patch(struct klp_patch *patch) 742static int __klp_enable_patch(struct klp_patch *patch)
864{ 743{
865 struct klp_object *obj; 744 struct klp_object *obj;
@@ -871,17 +750,8 @@ static int __klp_enable_patch(struct klp_patch *patch)
871 if (WARN_ON(patch->enabled)) 750 if (WARN_ON(patch->enabled))
872 return -EINVAL; 751 return -EINVAL;
873 752
874 /* enforce stacking: only the first disabled patch can be enabled */ 753 if (!patch->kobj_added)
875 if (patch->list.prev != &klp_patches && 754 return -EINVAL;
876 !list_prev_entry(patch, list)->enabled)
877 return -EBUSY;
878
879 /*
880 * A reference is taken on the patch module to prevent it from being
881 * unloaded.
882 */
883 if (!try_module_get(patch->mod))
884 return -ENODEV;
885 755
886 pr_notice("enabling patch '%s'\n", patch->mod->name); 756 pr_notice("enabling patch '%s'\n", patch->mod->name);
887 757
@@ -916,8 +786,8 @@ static int __klp_enable_patch(struct klp_patch *patch)
916 } 786 }
917 787
918 klp_start_transition(); 788 klp_start_transition();
919 klp_try_complete_transition();
920 patch->enabled = true; 789 patch->enabled = true;
790 klp_try_complete_transition();
921 791
922 return 0; 792 return 0;
923err: 793err:
@@ -928,11 +798,15 @@ err:
928} 798}
929 799
930/** 800/**
931 * klp_enable_patch() - enables a registered patch 801 * klp_enable_patch() - enable the livepatch
932 * @patch: The registered, disabled patch to be enabled 802 * @patch: patch to be enabled
933 * 803 *
934 * Performs the needed symbol lookups and code relocations, 804 * Initializes the data structure associated with the patch, creates the sysfs
935 * then registers the patched functions with ftrace. 805 * interface, performs the needed symbol lookups and code relocations,
806 * registers the patched functions with ftrace.
807 *
808 * This function is supposed to be called from the livepatch module_init()
809 * callback.
936 * 810 *
937 * Return: 0 on success, otherwise error 811 * Return: 0 on success, otherwise error
938 */ 812 */
@@ -940,17 +814,51 @@ int klp_enable_patch(struct klp_patch *patch)
940{ 814{
941 int ret; 815 int ret;
942 816
817 if (!patch || !patch->mod)
818 return -EINVAL;
819
820 if (!is_livepatch_module(patch->mod)) {
821 pr_err("module %s is not marked as a livepatch module\n",
822 patch->mod->name);
823 return -EINVAL;
824 }
825
826 if (!klp_initialized())
827 return -ENODEV;
828
829 if (!klp_have_reliable_stack()) {
830 pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
831 return -ENOSYS;
832 }
833
834
943 mutex_lock(&klp_mutex); 835 mutex_lock(&klp_mutex);
944 836
945 if (!klp_is_patch_registered(patch)) { 837 ret = klp_init_patch_early(patch);
946 ret = -EINVAL; 838 if (ret) {
947 goto err; 839 mutex_unlock(&klp_mutex);
840 return ret;
948 } 841 }
949 842
843 ret = klp_init_patch(patch);
844 if (ret)
845 goto err;
846
950 ret = __klp_enable_patch(patch); 847 ret = __klp_enable_patch(patch);
848 if (ret)
849 goto err;
850
851 mutex_unlock(&klp_mutex);
852
853 return 0;
951 854
952err: 855err:
856 klp_free_patch_start(patch);
857
953 mutex_unlock(&klp_mutex); 858 mutex_unlock(&klp_mutex);
859
860 klp_free_patch_finish(patch);
861
954 return ret; 862 return ret;
955} 863}
956EXPORT_SYMBOL_GPL(klp_enable_patch); 864EXPORT_SYMBOL_GPL(klp_enable_patch);