author		Petr Mladek <pmladek@suse.com>	2019-01-09 07:43:20 -0500
committer	Jiri Kosina <jkosina@suse.cz>	2019-01-11 14:51:23 -0500
commit		26c3e98e2f8e44e856cd36c12b3eaefcc6eafb16 (patch)
tree		c9358b5d626b51f837e3651a25d4c70cfe695cb0 /kernel/livepatch/core.c
parent		19514910d021c93c7823ec32067e6b7dea224f0f (diff)
livepatch: Shuffle klp_enable_patch()/klp_disable_patch() code
We are going to simplify the API and code by removing the registration step. This would require calling the init/free functions from the enable/disable ones.

This patch just moves the code to prevent more forward declarations. It does not change the code except for adding two forward declarations.

Signed-off-by: Petr Mladek <pmladek@suse.com>
Acked-by: Miroslav Benes <mbenes@suse.cz>
Acked-by: Joe Lawrence <joe.lawrence@redhat.com>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
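For context on where the series is heading, here is a minimal, illustrative sketch (not part of this commit) of a livepatch module under the simplified API the message anticipates, in which klp_enable_patch() is called straight from the patch module's init function and there is no separate registration call. The patched function (cmdline_proc_show) and the structure layout follow the existing samples/livepatch/livepatch-sample.c; the exact post-series API is an assumption here, not something this patch introduces.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

/* Replacement for vmlinux's cmdline_proc_show(), as in the upstream sample. */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* a NULL name means the patched object is vmlinux itself */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	/*
	 * Assumed post-series flow: no klp_register_patch() step,
	 * the patch is initialized and enabled in a single call.
	 */
	return klp_enable_patch(&patch);
}

static void livepatch_exit(void)
{
	/* Disabling is driven via the sysfs "enabled" attribute. */
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");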
Diffstat (limited to 'kernel/livepatch/core.c')
-rw-r--r--	kernel/livepatch/core.c	330
1 file changed, 166 insertions, 164 deletions
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index cb59c7fb94cb..20589da35194 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -278,170 +278,6 @@ static int klp_write_object_relocations(struct module *pmod,
 	return ret;
 }
 
-static int __klp_disable_patch(struct klp_patch *patch)
-{
-	struct klp_object *obj;
-
-	if (WARN_ON(!patch->enabled))
-		return -EINVAL;
-
-	if (klp_transition_patch)
-		return -EBUSY;
-
-	/* enforce stacking: only the last enabled patch can be disabled */
-	if (!list_is_last(&patch->list, &klp_patches) &&
-	    list_next_entry(patch, list)->enabled)
-		return -EBUSY;
-
-	klp_init_transition(patch, KLP_UNPATCHED);
-
-	klp_for_each_object(patch, obj)
-		if (obj->patched)
-			klp_pre_unpatch_callback(obj);
-
-	/*
-	 * Enforce the order of the func->transition writes in
-	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
-	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
-	 * is called shortly after klp_update_patch_state() switches the task,
-	 * this ensures the handler sees that func->transition is set.
-	 */
-	smp_wmb();
-
-	klp_start_transition();
-	klp_try_complete_transition();
-	patch->enabled = false;
-
-	return 0;
-}
-
-/**
- * klp_disable_patch() - disables a registered patch
- * @patch: The registered, enabled patch to be disabled
- *
- * Unregisters the patched functions from ftrace.
- *
- * Return: 0 on success, otherwise error
- */
-int klp_disable_patch(struct klp_patch *patch)
-{
-	int ret;
-
-	mutex_lock(&klp_mutex);
-
-	if (!klp_is_patch_registered(patch)) {
-		ret = -EINVAL;
-		goto err;
-	}
-
-	if (!patch->enabled) {
-		ret = -EINVAL;
-		goto err;
-	}
-
-	ret = __klp_disable_patch(patch);
-
-err:
-	mutex_unlock(&klp_mutex);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(klp_disable_patch);
-
-static int __klp_enable_patch(struct klp_patch *patch)
-{
-	struct klp_object *obj;
-	int ret;
-
-	if (klp_transition_patch)
-		return -EBUSY;
-
-	if (WARN_ON(patch->enabled))
-		return -EINVAL;
-
-	/* enforce stacking: only the first disabled patch can be enabled */
-	if (patch->list.prev != &klp_patches &&
-	    !list_prev_entry(patch, list)->enabled)
-		return -EBUSY;
-
-	/*
-	 * A reference is taken on the patch module to prevent it from being
-	 * unloaded.
-	 */
-	if (!try_module_get(patch->mod))
-		return -ENODEV;
-
-	pr_notice("enabling patch '%s'\n", patch->mod->name);
-
-	klp_init_transition(patch, KLP_PATCHED);
-
-	/*
-	 * Enforce the order of the func->transition writes in
-	 * klp_init_transition() and the ops->func_stack writes in
-	 * klp_patch_object(), so that klp_ftrace_handler() will see the
-	 * func->transition updates before the handler is registered and the
-	 * new funcs become visible to the handler.
-	 */
-	smp_wmb();
-
-	klp_for_each_object(patch, obj) {
-		if (!klp_is_object_loaded(obj))
-			continue;
-
-		ret = klp_pre_patch_callback(obj);
-		if (ret) {
-			pr_warn("pre-patch callback failed for object '%s'\n",
-				klp_is_module(obj) ? obj->name : "vmlinux");
-			goto err;
-		}
-
-		ret = klp_patch_object(obj);
-		if (ret) {
-			pr_warn("failed to patch object '%s'\n",
-				klp_is_module(obj) ? obj->name : "vmlinux");
-			goto err;
-		}
-	}
-
-	klp_start_transition();
-	klp_try_complete_transition();
-	patch->enabled = true;
-
-	return 0;
-err:
-	pr_warn("failed to enable patch '%s'\n", patch->mod->name);
-
-	klp_cancel_transition();
-	return ret;
-}
-
-/**
- * klp_enable_patch() - enables a registered patch
- * @patch: The registered, disabled patch to be enabled
- *
- * Performs the needed symbol lookups and code relocations,
- * then registers the patched functions with ftrace.
- *
- * Return: 0 on success, otherwise error
- */
-int klp_enable_patch(struct klp_patch *patch)
-{
-	int ret;
-
-	mutex_lock(&klp_mutex);
-
-	if (!klp_is_patch_registered(patch)) {
-		ret = -EINVAL;
-		goto err;
-	}
-
-	ret = __klp_enable_patch(patch);
-
-err:
-	mutex_unlock(&klp_mutex);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(klp_enable_patch);
-
 /*
  * Sysfs Interface
  *
@@ -454,6 +290,8 @@ EXPORT_SYMBOL_GPL(klp_enable_patch);
  * /sys/kernel/livepatch/<patch>/<object>
  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
  */
+static int __klp_disable_patch(struct klp_patch *patch);
+static int __klp_enable_patch(struct klp_patch *patch);
 
 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 			     const char *buf, size_t count)
@@ -904,6 +742,170 @@ int klp_register_patch(struct klp_patch *patch)
 }
 EXPORT_SYMBOL_GPL(klp_register_patch);
 
+static int __klp_disable_patch(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+
+	if (WARN_ON(!patch->enabled))
+		return -EINVAL;
+
+	if (klp_transition_patch)
+		return -EBUSY;
+
+	/* enforce stacking: only the last enabled patch can be disabled */
+	if (!list_is_last(&patch->list, &klp_patches) &&
+	    list_next_entry(patch, list)->enabled)
+		return -EBUSY;
+
+	klp_init_transition(patch, KLP_UNPATCHED);
+
+	klp_for_each_object(patch, obj)
+		if (obj->patched)
+			klp_pre_unpatch_callback(obj);
+
+	/*
+	 * Enforce the order of the func->transition writes in
+	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
+	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
+	 * is called shortly after klp_update_patch_state() switches the task,
+	 * this ensures the handler sees that func->transition is set.
+	 */
+	smp_wmb();
+
+	klp_start_transition();
+	klp_try_complete_transition();
+	patch->enabled = false;
+
+	return 0;
+}
+
+/**
+ * klp_disable_patch() - disables a registered patch
+ * @patch: The registered, enabled patch to be disabled
+ *
+ * Unregisters the patched functions from ftrace.
+ *
+ * Return: 0 on success, otherwise error
+ */
+int klp_disable_patch(struct klp_patch *patch)
+{
+	int ret;
+
+	mutex_lock(&klp_mutex);
+
+	if (!klp_is_patch_registered(patch)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (!patch->enabled) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = __klp_disable_patch(patch);
+
+err:
+	mutex_unlock(&klp_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(klp_disable_patch);
+
+static int __klp_enable_patch(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	int ret;
+
+	if (klp_transition_patch)
+		return -EBUSY;
+
+	if (WARN_ON(patch->enabled))
+		return -EINVAL;
+
+	/* enforce stacking: only the first disabled patch can be enabled */
+	if (patch->list.prev != &klp_patches &&
+	    !list_prev_entry(patch, list)->enabled)
+		return -EBUSY;
+
+	/*
+	 * A reference is taken on the patch module to prevent it from being
+	 * unloaded.
+	 */
+	if (!try_module_get(patch->mod))
+		return -ENODEV;
+
+	pr_notice("enabling patch '%s'\n", patch->mod->name);
+
+	klp_init_transition(patch, KLP_PATCHED);
+
+	/*
+	 * Enforce the order of the func->transition writes in
+	 * klp_init_transition() and the ops->func_stack writes in
+	 * klp_patch_object(), so that klp_ftrace_handler() will see the
+	 * func->transition updates before the handler is registered and the
+	 * new funcs become visible to the handler.
+	 */
+	smp_wmb();
+
+	klp_for_each_object(patch, obj) {
+		if (!klp_is_object_loaded(obj))
+			continue;
+
+		ret = klp_pre_patch_callback(obj);
+		if (ret) {
+			pr_warn("pre-patch callback failed for object '%s'\n",
+				klp_is_module(obj) ? obj->name : "vmlinux");
+			goto err;
+		}
+
+		ret = klp_patch_object(obj);
+		if (ret) {
+			pr_warn("failed to patch object '%s'\n",
+				klp_is_module(obj) ? obj->name : "vmlinux");
+			goto err;
+		}
+	}
+
+	klp_start_transition();
+	klp_try_complete_transition();
+	patch->enabled = true;
+
+	return 0;
+err:
+	pr_warn("failed to enable patch '%s'\n", patch->mod->name);
+
+	klp_cancel_transition();
+	return ret;
+}
+
+/**
+ * klp_enable_patch() - enables a registered patch
+ * @patch: The registered, disabled patch to be enabled
+ *
+ * Performs the needed symbol lookups and code relocations,
+ * then registers the patched functions with ftrace.
+ *
+ * Return: 0 on success, otherwise error
+ */
+int klp_enable_patch(struct klp_patch *patch)
+{
+	int ret;
+
+	mutex_lock(&klp_mutex);
+
+	if (!klp_is_patch_registered(patch)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = __klp_enable_patch(patch);
+
+err:
+	mutex_unlock(&klp_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(klp_enable_patch);
+
 /*
  * Remove parts of patches that touch a given kernel module. The list of
  * patches processed might be limited. When limit is NULL, all patches