Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/cpuset.c         |  9
 -rw-r--r--  kernel/events/core.c    |  2
 -rw-r--r--  kernel/livepatch/core.c | 30
 -rw-r--r--  kernel/module.c         |  2
 -rw-r--r--  kernel/trace/ftrace.c   | 40
 -rw-r--r--  kernel/workqueue.c      | 56
 6 files changed, 113 insertions, 26 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1d1fe9361d29..fc7f4748d34a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -548,9 +548,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
 
 	rcu_read_lock();
 	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-		if (cp == root_cs)
-			continue;
-
 		/* skip the whole subtree if @cp doesn't have any CPU */
 		if (cpumask_empty(cp->cpus_allowed)) {
 			pos_css = css_rightmost_descendant(pos_css);
@@ -873,7 +870,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some CPUs.
		 */
-		if (cpumask_empty(new_cpus))
+		if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus))
 			cpumask_copy(new_cpus, parent->effective_cpus);
 
 		/* Skip the whole subtree if the cpumask remains the same. */
@@ -1129,7 +1126,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some MEMs.
		 */
-		if (nodes_empty(*new_mems))
+		if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems))
 			*new_mems = parent->effective_mems;
 
 		/* Skip the whole subtree if the nodemask remains the same. */
@@ -1979,7 +1976,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 
 	spin_lock_irq(&callback_lock);
 	cs->mems_allowed = parent->mems_allowed;
+	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
 	spin_unlock_irq(&callback_lock);
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f04daabfd1cf..453ef61311d4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3591,7 +3591,7 @@ static void put_event(struct perf_event *event)
 	ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
 	WARN_ON_ONCE(ctx->parent_ctx);
 	perf_remove_from_context(event, true);
-	mutex_unlock(&ctx->mutex);
+	perf_event_ctx_unlock(event, ctx);
 
 	_free_event(event);
 }
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 01ca08804f51..3f9f1d6b4c2e 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -89,16 +89,28 @@ static bool klp_is_object_loaded(struct klp_object *obj)
 /* sets obj->mod if object is not vmlinux and module is found */
 static void klp_find_object_module(struct klp_object *obj)
 {
+	struct module *mod;
+
 	if (!klp_is_module(obj))
 		return;
 
 	mutex_lock(&module_mutex);
 	/*
-	 * We don't need to take a reference on the module here because we have
-	 * the klp_mutex, which is also taken by the module notifier. This
-	 * prevents any module from unloading until we release the klp_mutex.
+	 * We do not want to block removal of patched modules and therefore
+	 * we do not take a reference here. The patches are removed by
+	 * a going module handler instead.
+	 */
+	mod = find_module(obj->name);
+	/*
+	 * Do not mess work of the module coming and going notifiers.
+	 * Note that the patch might still be needed before the going handler
+	 * is called. Module functions can be called even in the GOING state
+	 * until mod->exit() finishes. This is especially important for
+	 * patches that modify semantic of the functions.
	 */
-	obj->mod = find_module(obj->name);
+	if (mod && mod->klp_alive)
+		obj->mod = mod;
+
 	mutex_unlock(&module_mutex);
 }
 
@@ -767,6 +779,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 		return -EINVAL;
 
 	obj->state = KLP_DISABLED;
+	obj->mod = NULL;
 
 	klp_find_object_module(obj);
 
@@ -961,6 +974,15 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action,
 
 	mutex_lock(&klp_mutex);
 
+	/*
+	 * Each module has to know that the notifier has been called.
+	 * We never know what module will get patched by a new patch.
+	 */
+	if (action == MODULE_STATE_COMING)
+		mod->klp_alive = true;
+	else /* MODULE_STATE_GOING */
+		mod->klp_alive = false;
+
 	list_for_each_entry(patch, &klp_patches, list) {
 		for (obj = patch->objs; obj->funcs; obj++) {
 			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
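For context on the klp_object fields that klp_find_object_module() and klp_module_notify() walk above (obj->name, obj->funcs, obj->mod), a livepatch module declares them roughly as in the sketch below. This is only an illustration modeled on samples/livepatch/livepatch-sample.c; the "foo" module and foo_do_work() are made-up names, and the new mod->klp_alive flag is assumed to be declared in struct module outside this kernel/-only diffstat.

/*
 * Illustrative sketch only: a patch targeting a function in a module named
 * "foo". When "foo" loads, the MODULE_STATE_COMING notifier above sets
 * mod->klp_alive and applies the patch; once the GOING notifier clears it,
 * klp_find_object_module() refuses to latch obj->mod onto the dying module.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>

static int livepatch_foo_do_work(void *arg)
{
	/* replacement body for foo_do_work() in the "foo" module */
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "foo_do_work",
		.new_func = livepatch_foo_do_work,
	}, { }
};

static struct klp_object objs[] = {
	{
		.name = "foo",	/* non-NULL name => module object, see klp_is_module() */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int patch_init(void)
{
	int ret;

	ret = klp_register_patch(&patch);
	if (ret)
		return ret;
	ret = klp_enable_patch(&patch);
	if (ret)
		WARN_ON(klp_unregister_patch(&patch));
	return ret;
}

static void patch_exit(void)
{
	WARN_ON(klp_disable_patch(&patch));
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(patch_init);
module_exit(patch_exit);
MODULE_LICENSE("GPL");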
diff --git a/kernel/module.c b/kernel/module.c
index cc93cf68653c..b3d634ed06c9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -56,7 +56,6 @@
 #include <linux/async.h>
 #include <linux/percpu.h>
 #include <linux/kmemleak.h>
-#include <linux/kasan.h>
 #include <linux/jump_label.h>
 #include <linux/pfn.h>
 #include <linux/bsearch.h>
@@ -1814,7 +1813,6 @@ static void unset_module_init_ro_nx(struct module *mod) { }
 void __weak module_memfree(void *module_region)
 {
 	vfree(module_region);
-	kasan_module_free(module_region);
 }
 
 void __weak module_arch_cleanup(struct module *mod)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 45e5cb143d17..4f228024055b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int ftrace_graph_active;
+#else
+# define ftrace_graph_active 0
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static struct ftrace_ops *removed_ops;
@@ -2041,8 +2047,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
 		if (!ftrace_rec_count(rec))
 			rec->flags = 0;
 		else
-			/* Just disable the record (keep REGS state) */
-			rec->flags &= ~FTRACE_FL_ENABLED;
+			/*
+			 * Just disable the record, but keep the ops TRAMP
+			 * and REGS states. The _EN flags must be disabled though.
+			 */
+			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
+					FTRACE_FL_REGS_EN);
 	}
 
 	return FTRACE_UPDATE_MAKE_NOP;
@@ -2688,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
 static void ftrace_startup_sysctl(void)
 {
+	int command;
+
 	if (unlikely(ftrace_disabled))
 		return;
 
 	/* Force update next time */
 	saved_ftrace_func = NULL;
 	/* ftrace_start_up is true if we want ftrace running */
-	if (ftrace_start_up)
-		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+	if (ftrace_start_up) {
+		command = FTRACE_UPDATE_CALLS;
+		if (ftrace_graph_active)
+			command |= FTRACE_START_FUNC_RET;
+		ftrace_startup_enable(command);
+	}
 }
 
 static void ftrace_shutdown_sysctl(void)
 {
+	int command;
+
 	if (unlikely(ftrace_disabled))
 		return;
 
 	/* ftrace_start_up is true if ftrace is running */
-	if (ftrace_start_up)
-		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
+	if (ftrace_start_up) {
+		command = FTRACE_DISABLE_CALLS;
+		if (ftrace_graph_active)
+			command |= FTRACE_STOP_FUNC_RET;
+		ftrace_run_update_code(command);
+	}
 }
 
 static cycle_t ftrace_update_time;
@@ -5558,12 +5580,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 	if (ftrace_enabled) {
 
-		ftrace_startup_sysctl();
-
 		/* we are starting ftrace again */
 		if (ftrace_ops_list != &ftrace_list_end)
 			update_ftrace_function();
 
+		ftrace_startup_sysctl();
+
 	} else {
 		/* stopping ftrace calls (just send to ftrace_stub) */
 		ftrace_trace_function = ftrace_stub;
@@ -5590,8 +5612,6 @@ static struct ftrace_ops graph_ops = {
 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
 };
 
-static int ftrace_graph_active;
-
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
 	return 0;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f28849394791..41ff75b478c6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
+struct cwt_wait {
+	wait_queue_t		wait;
+	struct work_struct	*work;
+};
+
+static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
+
+	if (cwait->work != key)
+		return 0;
+	return autoremove_wake_function(wait, mode, sync, key);
+}
+
 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 {
+	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
 	unsigned long flags;
 	int ret;
 
 	do {
 		ret = try_to_grab_pending(work, is_dwork, &flags);
 		/*
-		 * If someone else is canceling, wait for the same event it
-		 * would be waiting for before retrying.
+		 * If someone else is already canceling, wait for it to
+		 * finish.  flush_work() doesn't work for PREEMPT_NONE
+		 * because we may get scheduled between @work's completion
+		 * and the other canceling task resuming and clearing
+		 * CANCELING - flush_work() will return false immediately
+		 * as @work is no longer busy, try_to_grab_pending() will
+		 * return -ENOENT as @work is still being canceled and the
+		 * other canceling task won't be able to clear CANCELING as
+		 * we're hogging the CPU.
+		 *
+		 * Let's wait for completion using a waitqueue.  As this
+		 * may lead to the thundering herd problem, use a custom
+		 * wake function which matches @work along with exclusive
+		 * wait and wakeup.
 		 */
-		if (unlikely(ret == -ENOENT))
-			flush_work(work);
+		if (unlikely(ret == -ENOENT)) {
+			struct cwt_wait cwait;
+
+			init_wait(&cwait.wait);
+			cwait.wait.func = cwt_wakefn;
+			cwait.work = work;
+
+			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
+						  TASK_UNINTERRUPTIBLE);
+			if (work_is_canceling(work))
+				schedule();
+			finish_wait(&cancel_waitq, &cwait.wait);
+		}
 	} while (unlikely(ret < 0));
 
 	/* tell other tasks trying to grab @work to back off */
@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 
 	flush_work(work);
 	clear_work_data(work);
+
+	/*
+	 * Paired with prepare_to_wait() above so that either
+	 * waitqueue_active() is visible here or !work_is_canceling() is
+	 * visible there.
+	 */
+	smp_mb();
+	if (waitqueue_active(&cancel_waitq))
+		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
+
 	return ret;
 }
 
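The long comment added to __cancel_work_timer() describes two tasks canceling the same work item concurrently on a PREEMPT_NONE kernel. A minimal, hypothetical caller-side sketch of that situation follows; the demo_* names are made up for illustration, and only the workqueue API calls shown are real.

/*
 * Hypothetical illustration, not part of the patch: two paths that may both
 * call cancel_work_sync() on the same work item (e.g. a device-removal path
 * and an error handler running concurrently). Before this change the loser
 * of the race could busy-loop on try_to_grab_pending() returning -ENOENT on
 * a non-preemptible kernel; with it, the loser sleeps on cancel_waitq until
 * the winner clears the CANCELING bit.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_work_fn(struct work_struct *work)
{
	/* some deferred processing */
}

static DECLARE_WORK(demo_work, demo_work_fn);

static void demo_teardown(void)		/* e.g. the .remove() path */
{
	cancel_work_sync(&demo_work);
}

static void demo_error_recovery(void)	/* e.g. an error handler */
{
	cancel_work_sync(&demo_work);
}

static int __init demo_init(void)
{
	schedule_work(&demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* in real code these two could run concurrently from other contexts */
	demo_error_recovery();
	demo_teardown();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");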