path: root/kernel
author	Jiri Kosina <jkosina@suse.cz>	2019-03-05 09:56:28 -0500
committer	Jiri Kosina <jkosina@suse.cz>	2019-03-05 09:56:28 -0500
commit	7185a96981a2f8bb523dd87cad20a6b96c721ad5 (patch)
tree	887d82d0adb3c0f5d7b2dbebfcd2698524355353 /kernel
parent	67bae14adc8cdb650b042319136b74cffbad23c8 (diff)
parent	0b3d52790e1cfd6b80b826a245d24859e89632f7 (diff)
Merge branch 'for-5.1/fake-signal' into for-linus
Ability to send a fake signal to blocking tasks automatically, instead of requiring manual intervention, from Miroslav Benes
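In practical terms, this merge drops the manually triggered /sys/kernel/livepatch/<patch>/signal knob and makes the signaling automatic: klp_try_complete_transition() now calls klp_send_signals() on every 15th (SIGNALS_TIMEOUT) unsuccessful retry, and klp_start_transition() resets the counter. The stand-alone C sketch below only illustrates that trigger cadence; transition_complete() and send_fake_signals() are hypothetical stand-ins, not kernel code, and nothing here shows how the fake signal is actually delivered.

#include <stdbool.h>
#include <stdio.h>

#define SIGNALS_TIMEOUT 15	/* same cadence as the constant added in transition.c */

static unsigned int signals_cnt;	/* stands in for klp_signals_cnt */

/* Hypothetical stand-in: pretend the transition completes on attempt 40. */
static bool transition_complete(unsigned int attempt)
{
	return attempt >= 40;
}

/* Hypothetical stand-in for klp_send_signals(). */
static void send_fake_signals(void)
{
	printf("attempt %u: signaling remaining tasks\n", signals_cnt);
}

int main(void)
{
	unsigned int attempt;

	for (attempt = 0; ; attempt++) {
		if (transition_complete(attempt))
			break;	/* nothing left to signal */

		/* Trigger condition mirrored from klp_try_complete_transition(). */
		if (signals_cnt && !(signals_cnt % SIGNALS_TIMEOUT))
			send_fake_signals();
		signals_cnt++;
	}
	return 0;
}

Run as a normal userspace program, this prints a "signaling remaining tasks" line on the 15th and 30th failed attempts, matching the periodic behaviour the patch introduces.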
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/livepatch/core.c	32
-rw-r--r--	kernel/livepatch/transition.c	92
-rw-r--r--	kernel/livepatch/transition.h	1
3 files changed, 51 insertions, 74 deletions
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index adca5cf07f7e..fe1993399823 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -313,7 +313,6 @@ static int klp_write_object_relocations(struct module *pmod,
  * /sys/kernel/livepatch/<patch>
  * /sys/kernel/livepatch/<patch>/enabled
  * /sys/kernel/livepatch/<patch>/transition
- * /sys/kernel/livepatch/<patch>/signal
  * /sys/kernel/livepatch/<patch>/force
  * /sys/kernel/livepatch/<patch>/<object>
  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
@@ -382,35 +381,6 @@ static ssize_t transition_show(struct kobject *kobj,
 			 patch == klp_transition_patch);
 }
 
-static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
-			    const char *buf, size_t count)
-{
-	struct klp_patch *patch;
-	int ret;
-	bool val;
-
-	ret = kstrtobool(buf, &val);
-	if (ret)
-		return ret;
-
-	if (!val)
-		return count;
-
-	mutex_lock(&klp_mutex);
-
-	patch = container_of(kobj, struct klp_patch, kobj);
-	if (patch != klp_transition_patch) {
-		mutex_unlock(&klp_mutex);
-		return -EINVAL;
-	}
-
-	klp_send_signals();
-
-	mutex_unlock(&klp_mutex);
-
-	return count;
-}
-
 static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
 			   const char *buf, size_t count)
 {
@@ -442,12 +412,10 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
 
 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
-static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
 static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
 static struct attribute *klp_patch_attrs[] = {
 	&enabled_kobj_attr.attr,
 	&transition_kobj_attr.attr,
-	&signal_kobj_attr.attr,
 	&force_kobj_attr.attr,
 	NULL
 };
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 300273819674..183b2086ba03 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -29,10 +29,14 @@
 #define MAX_STACK_ENTRIES	100
 #define STACK_ERR_BUF_SIZE	128
 
+#define SIGNALS_TIMEOUT 15
+
 struct klp_patch *klp_transition_patch;
 
 static int klp_target_state = KLP_UNDEFINED;
 
+static unsigned int klp_signals_cnt;
+
 /*
  * This work can be performed periodically to finish patching or unpatching any
  * "straggler" tasks which failed to transition in the first attempt.
@@ -344,6 +348,47 @@ done:
 }
 
 /*
+ * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
+ * Kthreads with TIF_PATCH_PENDING set are woken up.
+ */
+static void klp_send_signals(void)
+{
+	struct task_struct *g, *task;
+
+	if (klp_signals_cnt == SIGNALS_TIMEOUT)
+		pr_notice("signaling remaining tasks\n");
+
+	read_lock(&tasklist_lock);
+	for_each_process_thread(g, task) {
+		if (!klp_patch_pending(task))
+			continue;
+
+		/*
+		 * There is a small race here. We could see TIF_PATCH_PENDING
+		 * set and decide to wake up a kthread or send a fake signal.
+		 * Meanwhile the task could migrate itself and the action
+		 * would be meaningless. It is not serious though.
+		 */
+		if (task->flags & PF_KTHREAD) {
+			/*
+			 * Wake up a kthread which sleeps interruptedly and
+			 * still has not been migrated.
+			 */
+			wake_up_state(task, TASK_INTERRUPTIBLE);
+		} else {
+			/*
+			 * Send fake signal to all non-kthread tasks which are
+			 * still not migrated.
+			 */
+			spin_lock_irq(&task->sighand->siglock);
+			signal_wake_up(task, 0);
+			spin_unlock_irq(&task->sighand->siglock);
+		}
+	}
+	read_unlock(&tasklist_lock);
+}
+
+/*
  * Try to switch all remaining tasks to the target patch state by walking the
  * stacks of sleeping tasks and looking for any to-be-patched or
  * to-be-unpatched functions. If such functions are found, the task can't be
@@ -393,6 +438,10 @@ void klp_try_complete_transition(void)
 	put_online_cpus();
 
 	if (!complete) {
+		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
+			klp_send_signals();
+		klp_signals_cnt++;
+
 		/*
 		 * Some tasks weren't able to be switched over. Try again
 		 * later and/or wait for other methods like kernel exit
@@ -454,6 +503,8 @@ void klp_start_transition(void)
 		if (task->patch_state != klp_target_state)
 			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
 	}
+
+	klp_signals_cnt = 0;
 }
 
 /*
@@ -577,47 +628,6 @@ void klp_copy_process(struct task_struct *child)
 }
 
 /*
- * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
- * Kthreads with TIF_PATCH_PENDING set are woken up. Only admin can request this
- * action currently.
- */
-void klp_send_signals(void)
-{
-	struct task_struct *g, *task;
-
-	pr_notice("signaling remaining tasks\n");
-
-	read_lock(&tasklist_lock);
-	for_each_process_thread(g, task) {
-		if (!klp_patch_pending(task))
-			continue;
-
-		/*
-		 * There is a small race here. We could see TIF_PATCH_PENDING
-		 * set and decide to wake up a kthread or send a fake signal.
-		 * Meanwhile the task could migrate itself and the action
-		 * would be meaningless. It is not serious though.
-		 */
-		if (task->flags & PF_KTHREAD) {
-			/*
-			 * Wake up a kthread which sleeps interruptedly and
-			 * still has not been migrated.
-			 */
-			wake_up_state(task, TASK_INTERRUPTIBLE);
-		} else {
-			/*
-			 * Send fake signal to all non-kthread tasks which are
-			 * still not migrated.
-			 */
-			spin_lock_irq(&task->sighand->siglock);
-			signal_wake_up(task, 0);
-			spin_unlock_irq(&task->sighand->siglock);
-		}
-	}
-	read_unlock(&tasklist_lock);
-}
-
-/*
  * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
  * existing transition to finish.
  *
diff --git a/kernel/livepatch/transition.h b/kernel/livepatch/transition.h
index f9d0bc016067..322db16233de 100644
--- a/kernel/livepatch/transition.h
+++ b/kernel/livepatch/transition.h
@@ -11,7 +11,6 @@ void klp_cancel_transition(void);
 void klp_start_transition(void);
 void klp_try_complete_transition(void);
 void klp_reverse_transition(void);
-void klp_send_signals(void);
 void klp_force_transition(void);
 
 #endif /* _LIVEPATCH_TRANSITION_H */