diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-01-31 16:02:18 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-01-31 16:02:18 -0500 |
commit | e1c70f32386c4984ed8ca1a7aedb9bbff9ed3414 (patch) | |
tree | 09a9092c1ea56b1d9c218a77b2510036b509f225 /kernel/livepatch | |
parent | 183b6366cf473ff0e706a6751adc082faa44843d (diff) | |
parent | d05b695c25bf0d704c74e0e1375de893531b9424 (diff) |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching
Pull livepatching updates from Jiri Kosina:
- handle 'infinitely'-long sleeping tasks, from Miroslav Benes
- remove 'immediate' feature, as it turns out it doesn't provide the
originally expected semantics, and brings more issues than value
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
livepatch: add locking to force and signal functions
livepatch: Remove immediate feature
livepatch: force transition to finish
livepatch: send a fake signal to all blocking tasks
Diffstat (limited to 'kernel/livepatch')
-rw-r--r-- | kernel/livepatch/core.c | 76 | ||||
-rw-r--r-- | kernel/livepatch/transition.c | 116 | ||||
-rw-r--r-- | kernel/livepatch/transition.h | 2 |
3 files changed, 142 insertions, 52 deletions
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index de9e45dca70f..3a4656fb7047 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c | |||
@@ -366,11 +366,6 @@ static int __klp_enable_patch(struct klp_patch *patch) | |||
366 | /* | 366 | /* |
367 | * A reference is taken on the patch module to prevent it from being | 367 | * A reference is taken on the patch module to prevent it from being |
368 | * unloaded. | 368 | * unloaded. |
369 | * | ||
370 | * Note: For immediate (no consistency model) patches we don't allow | ||
371 | * patch modules to unload since there is no safe/sane method to | ||
372 | * determine if a thread is still running in the patched code contained | ||
373 | * in the patch module once the ftrace registration is successful. | ||
374 | */ | 369 | */ |
375 | if (!try_module_get(patch->mod)) | 370 | if (!try_module_get(patch->mod)) |
376 | return -ENODEV; | 371 | return -ENODEV; |
@@ -454,6 +449,8 @@ EXPORT_SYMBOL_GPL(klp_enable_patch); | |||
454 | * /sys/kernel/livepatch/<patch> | 449 | * /sys/kernel/livepatch/<patch> |
455 | * /sys/kernel/livepatch/<patch>/enabled | 450 | * /sys/kernel/livepatch/<patch>/enabled |
456 | * /sys/kernel/livepatch/<patch>/transition | 451 | * /sys/kernel/livepatch/<patch>/transition |
452 | * /sys/kernel/livepatch/<patch>/signal | ||
453 | * /sys/kernel/livepatch/<patch>/force | ||
457 | * /sys/kernel/livepatch/<patch>/<object> | 454 | * /sys/kernel/livepatch/<patch>/<object> |
458 | * /sys/kernel/livepatch/<patch>/<object>/<function,sympos> | 455 | * /sys/kernel/livepatch/<patch>/<object>/<function,sympos> |
459 | */ | 456 | */ |
@@ -528,11 +525,73 @@ static ssize_t transition_show(struct kobject *kobj, | |||
528 | patch == klp_transition_patch); | 525 | patch == klp_transition_patch); |
529 | } | 526 | } |
530 | 527 | ||
528 | static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr, | ||
529 | const char *buf, size_t count) | ||
530 | { | ||
531 | struct klp_patch *patch; | ||
532 | int ret; | ||
533 | bool val; | ||
534 | |||
535 | ret = kstrtobool(buf, &val); | ||
536 | if (ret) | ||
537 | return ret; | ||
538 | |||
539 | if (!val) | ||
540 | return count; | ||
541 | |||
542 | mutex_lock(&klp_mutex); | ||
543 | |||
544 | patch = container_of(kobj, struct klp_patch, kobj); | ||
545 | if (patch != klp_transition_patch) { | ||
546 | mutex_unlock(&klp_mutex); | ||
547 | return -EINVAL; | ||
548 | } | ||
549 | |||
550 | klp_send_signals(); | ||
551 | |||
552 | mutex_unlock(&klp_mutex); | ||
553 | |||
554 | return count; | ||
555 | } | ||
556 | |||
557 | static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr, | ||
558 | const char *buf, size_t count) | ||
559 | { | ||
560 | struct klp_patch *patch; | ||
561 | int ret; | ||
562 | bool val; | ||
563 | |||
564 | ret = kstrtobool(buf, &val); | ||
565 | if (ret) | ||
566 | return ret; | ||
567 | |||
568 | if (!val) | ||
569 | return count; | ||
570 | |||
571 | mutex_lock(&klp_mutex); | ||
572 | |||
573 | patch = container_of(kobj, struct klp_patch, kobj); | ||
574 | if (patch != klp_transition_patch) { | ||
575 | mutex_unlock(&klp_mutex); | ||
576 | return -EINVAL; | ||
577 | } | ||
578 | |||
579 | klp_force_transition(); | ||
580 | |||
581 | mutex_unlock(&klp_mutex); | ||
582 | |||
583 | return count; | ||
584 | } | ||
585 | |||
531 | static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled); | 586 | static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled); |
532 | static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition); | 587 | static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition); |
588 | static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal); | ||
589 | static struct kobj_attribute force_kobj_attr = __ATTR_WO(force); | ||
533 | static struct attribute *klp_patch_attrs[] = { | 590 | static struct attribute *klp_patch_attrs[] = { |
534 | &enabled_kobj_attr.attr, | 591 | &enabled_kobj_attr.attr, |
535 | &transition_kobj_attr.attr, | 592 | &transition_kobj_attr.attr, |
593 | &signal_kobj_attr.attr, | ||
594 | &force_kobj_attr.attr, | ||
536 | NULL | 595 | NULL |
537 | }; | 596 | }; |
538 | 597 | ||
@@ -830,12 +889,7 @@ int klp_register_patch(struct klp_patch *patch) | |||
830 | if (!klp_initialized()) | 889 | if (!klp_initialized()) |
831 | return -ENODEV; | 890 | return -ENODEV; |
832 | 891 | ||
833 | /* | 892 | if (!klp_have_reliable_stack()) { |
834 | * Architectures without reliable stack traces have to set | ||
835 | * patch->immediate because there's currently no way to patch kthreads | ||
836 | * with the consistency model. | ||
837 | */ | ||
838 | if (!klp_have_reliable_stack() && !patch->immediate) { | ||
839 | pr_err("This architecture doesn't have support for the livepatch consistency model.\n"); | 893 | pr_err("This architecture doesn't have support for the livepatch consistency model.\n"); |
840 | return -ENOSYS; | 894 | return -ENOSYS; |
841 | } | 895 | } |
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index 56add6327736..7c6631e693bc 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c | |||
@@ -33,6 +33,8 @@ struct klp_patch *klp_transition_patch; | |||
33 | 33 | ||
34 | static int klp_target_state = KLP_UNDEFINED; | 34 | static int klp_target_state = KLP_UNDEFINED; |
35 | 35 | ||
36 | static bool klp_forced = false; | ||
37 | |||
36 | /* | 38 | /* |
37 | * This work can be performed periodically to finish patching or unpatching any | 39 | * This work can be performed periodically to finish patching or unpatching any |
38 | * "straggler" tasks which failed to transition in the first attempt. | 40 | * "straggler" tasks which failed to transition in the first attempt. |
@@ -80,7 +82,6 @@ static void klp_complete_transition(void) | |||
80 | struct klp_func *func; | 82 | struct klp_func *func; |
81 | struct task_struct *g, *task; | 83 | struct task_struct *g, *task; |
82 | unsigned int cpu; | 84 | unsigned int cpu; |
83 | bool immediate_func = false; | ||
84 | 85 | ||
85 | pr_debug("'%s': completing %s transition\n", | 86 | pr_debug("'%s': completing %s transition\n", |
86 | klp_transition_patch->mod->name, | 87 | klp_transition_patch->mod->name, |
@@ -102,16 +103,9 @@ static void klp_complete_transition(void) | |||
102 | klp_synchronize_transition(); | 103 | klp_synchronize_transition(); |
103 | } | 104 | } |
104 | 105 | ||
105 | if (klp_transition_patch->immediate) | 106 | klp_for_each_object(klp_transition_patch, obj) |
106 | goto done; | 107 | klp_for_each_func(obj, func) |
107 | |||
108 | klp_for_each_object(klp_transition_patch, obj) { | ||
109 | klp_for_each_func(obj, func) { | ||
110 | func->transition = false; | 108 | func->transition = false; |
111 | if (func->immediate) | ||
112 | immediate_func = true; | ||
113 | } | ||
114 | } | ||
115 | 109 | ||
116 | /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ | 110 | /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ |
117 | if (klp_target_state == KLP_PATCHED) | 111 | if (klp_target_state == KLP_PATCHED) |
@@ -130,7 +124,6 @@ static void klp_complete_transition(void) | |||
130 | task->patch_state = KLP_UNDEFINED; | 124 | task->patch_state = KLP_UNDEFINED; |
131 | } | 125 | } |
132 | 126 | ||
133 | done: | ||
134 | klp_for_each_object(klp_transition_patch, obj) { | 127 | klp_for_each_object(klp_transition_patch, obj) { |
135 | if (!klp_is_object_loaded(obj)) | 128 | if (!klp_is_object_loaded(obj)) |
136 | continue; | 129 | continue; |
@@ -144,13 +137,11 @@ done: | |||
144 | klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); | 137 | klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); |
145 | 138 | ||
146 | /* | 139 | /* |
147 | * See complementary comment in __klp_enable_patch() for why we | 140 | * klp_forced set implies unbounded increase of module's ref count if |
148 | * keep the module reference for immediate patches. | 141 | * the module is disabled/enabled in a loop. |
149 | */ | 142 | */ |
150 | if (!klp_transition_patch->immediate && !immediate_func && | 143 | if (!klp_forced && klp_target_state == KLP_UNPATCHED) |
151 | klp_target_state == KLP_UNPATCHED) { | ||
152 | module_put(klp_transition_patch->mod); | 144 | module_put(klp_transition_patch->mod); |
153 | } | ||
154 | 145 | ||
155 | klp_target_state = KLP_UNDEFINED; | 146 | klp_target_state = KLP_UNDEFINED; |
156 | klp_transition_patch = NULL; | 147 | klp_transition_patch = NULL; |
@@ -218,9 +209,6 @@ static int klp_check_stack_func(struct klp_func *func, | |||
218 | struct klp_ops *ops; | 209 | struct klp_ops *ops; |
219 | int i; | 210 | int i; |
220 | 211 | ||
221 | if (func->immediate) | ||
222 | return 0; | ||
223 | |||
224 | for (i = 0; i < trace->nr_entries; i++) { | 212 | for (i = 0; i < trace->nr_entries; i++) { |
225 | address = trace->entries[i]; | 213 | address = trace->entries[i]; |
226 | 214 | ||
@@ -383,13 +371,6 @@ void klp_try_complete_transition(void) | |||
383 | WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED); | 371 | WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED); |
384 | 372 | ||
385 | /* | 373 | /* |
386 | * If the patch can be applied or reverted immediately, skip the | ||
387 | * per-task transitions. | ||
388 | */ | ||
389 | if (klp_transition_patch->immediate) | ||
390 | goto success; | ||
391 | |||
392 | /* | ||
393 | * Try to switch the tasks to the target patch state by walking their | 374 | * Try to switch the tasks to the target patch state by walking their |
394 | * stacks and looking for any to-be-patched or to-be-unpatched | 375 | * stacks and looking for any to-be-patched or to-be-unpatched |
395 | * functions. If such functions are found on a stack, or if the stack | 376 | * functions. If such functions are found on a stack, or if the stack |
@@ -432,7 +413,6 @@ void klp_try_complete_transition(void) | |||
432 | return; | 413 | return; |
433 | } | 414 | } |
434 | 415 | ||
435 | success: | ||
436 | /* we're done, now cleanup the data structures */ | 416 | /* we're done, now cleanup the data structures */ |
437 | klp_complete_transition(); | 417 | klp_complete_transition(); |
438 | } | 418 | } |
@@ -453,13 +433,6 @@ void klp_start_transition(void) | |||
453 | klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); | 433 | klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); |
454 | 434 | ||
455 | /* | 435 | /* |
456 | * If the patch can be applied or reverted immediately, skip the | ||
457 | * per-task transitions. | ||
458 | */ | ||
459 | if (klp_transition_patch->immediate) | ||
460 | return; | ||
461 | |||
462 | /* | ||
463 | * Mark all normal tasks as needing a patch state update. They'll | 436 | * Mark all normal tasks as needing a patch state update. They'll |
464 | * switch either in klp_try_complete_transition() or as they exit the | 437 | * switch either in klp_try_complete_transition() or as they exit the |
465 | * kernel. | 438 | * kernel. |
@@ -509,13 +482,6 @@ void klp_init_transition(struct klp_patch *patch, int state) | |||
509 | klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); | 482 | klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); |
510 | 483 | ||
511 | /* | 484 | /* |
512 | * If the patch can be applied or reverted immediately, skip the | ||
513 | * per-task transitions. | ||
514 | */ | ||
515 | if (patch->immediate) | ||
516 | return; | ||
517 | |||
518 | /* | ||
519 | * Initialize all tasks to the initial patch state to prepare them for | 485 | * Initialize all tasks to the initial patch state to prepare them for |
520 | * switching to the target state. | 486 | * switching to the target state. |
521 | */ | 487 | */ |
@@ -608,3 +574,71 @@ void klp_copy_process(struct task_struct *child) | |||
608 | 574 | ||
609 | /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */ | 575 | /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */ |
610 | } | 576 | } |
577 | |||
578 | /* | ||
579 | * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set. | ||
580 | * Kthreads with TIF_PATCH_PENDING set are woken up. Only admin can request this | ||
581 | * action currently. | ||
582 | */ | ||
583 | void klp_send_signals(void) | ||
584 | { | ||
585 | struct task_struct *g, *task; | ||
586 | |||
587 | pr_notice("signaling remaining tasks\n"); | ||
588 | |||
589 | read_lock(&tasklist_lock); | ||
590 | for_each_process_thread(g, task) { | ||
591 | if (!klp_patch_pending(task)) | ||
592 | continue; | ||
593 | |||
594 | /* | ||
595 | * There is a small race here. We could see TIF_PATCH_PENDING | ||
596 | * set and decide to wake up a kthread or send a fake signal. | ||
597 | * Meanwhile the task could migrate itself and the action | ||
598 | * would be meaningless. It is not serious though. | ||
599 | */ | ||
600 | if (task->flags & PF_KTHREAD) { | ||
601 | /* | ||
602 | * Wake up a kthread which sleeps interruptibly and | ||
603 | * still has not been migrated. | ||
604 | */ | ||
605 | wake_up_state(task, TASK_INTERRUPTIBLE); | ||
606 | } else { | ||
607 | /* | ||
608 | * Send fake signal to all non-kthread tasks which are | ||
609 | * still not migrated. | ||
610 | */ | ||
611 | spin_lock_irq(&task->sighand->siglock); | ||
612 | signal_wake_up(task, 0); | ||
613 | spin_unlock_irq(&task->sighand->siglock); | ||
614 | } | ||
615 | } | ||
616 | read_unlock(&tasklist_lock); | ||
617 | } | ||
618 | |||
619 | /* | ||
620 | * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an | ||
621 | * existing transition to finish. | ||
622 | * | ||
623 | * NOTE: klp_update_patch_state(task) requires the task to be inactive or | ||
624 | * 'current'. This is not the case here and the consistency model could be | ||
625 | * broken. Administrator, who is the only one to execute the | ||
626 | * klp_force_transition(), has to be aware of this. | ||
627 | */ | ||
628 | void klp_force_transition(void) | ||
629 | { | ||
630 | struct task_struct *g, *task; | ||
631 | unsigned int cpu; | ||
632 | |||
633 | pr_warn("forcing remaining tasks to the patched state\n"); | ||
634 | |||
635 | read_lock(&tasklist_lock); | ||
636 | for_each_process_thread(g, task) | ||
637 | klp_update_patch_state(task); | ||
638 | read_unlock(&tasklist_lock); | ||
639 | |||
640 | for_each_possible_cpu(cpu) | ||
641 | klp_update_patch_state(idle_task(cpu)); | ||
642 | |||
643 | klp_forced = true; | ||
644 | } | ||
diff --git a/kernel/livepatch/transition.h b/kernel/livepatch/transition.h index 0f6e27c481f9..f9d0bc016067 100644 --- a/kernel/livepatch/transition.h +++ b/kernel/livepatch/transition.h | |||
@@ -11,5 +11,7 @@ void klp_cancel_transition(void); | |||
11 | void klp_start_transition(void); | 11 | void klp_start_transition(void); |
12 | void klp_try_complete_transition(void); | 12 | void klp_try_complete_transition(void); |
13 | void klp_reverse_transition(void); | 13 | void klp_reverse_transition(void); |
14 | void klp_send_signals(void); | ||
15 | void klp_force_transition(void); | ||
14 | 16 | ||
15 | #endif /* _LIVEPATCH_TRANSITION_H */ | 17 | #endif /* _LIVEPATCH_TRANSITION_H */ |