Diffstat (limited to 'kernel')
 kernel/bpf/core.c           |  2
 kernel/bpf/syscall.c        | 25
 kernel/cgroup.c             |  2
 kernel/debug/kdb/kdb_main.c |  2
 kernel/events/core.c        | 15
 kernel/kprobes.c            |  2
 kernel/module.c             | 91
 kernel/params.c             |  3
 kernel/range.c              | 10
 kernel/sched/core.c         |  5
 kernel/sys.c                |  4
 kernel/time/ntp.c           |  7
 kernel/time/time.c          |  4
 kernel/trace/ftrace.c       | 53
 kernel/trace/trace.c        |  1
 kernel/trace/trace_events.c | 69
 kernel/workqueue.c          | 25
 17 files changed, 235 insertions(+), 85 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d6594e457a25..a64e7a207d2b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -163,7 +163,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 
 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
-	module_free(NULL, hdr);
+	module_memfree(hdr);
 }
 #endif /* CONFIG_BPF_JIT */
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 088ac0b1b106..536edc2be307 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr)
 	int ufd = attr->map_fd;
 	struct fd f = fdget(ufd);
 	struct bpf_map *map;
-	void *key, *value;
+	void *key, *value, *ptr;
 	int err;
 
 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr)
 	if (copy_from_user(key, ukey, map->key_size) != 0)
 		goto free_key;
 
-	err = -ENOENT;
-	rcu_read_lock();
-	value = map->ops->map_lookup_elem(map, key);
+	err = -ENOMEM;
+	value = kmalloc(map->value_size, GFP_USER);
 	if (!value)
-		goto err_unlock;
+		goto free_key;
+
+	rcu_read_lock();
+	ptr = map->ops->map_lookup_elem(map, key);
+	if (ptr)
+		memcpy(value, ptr, map->value_size);
+	rcu_read_unlock();
+
+	err = -ENOENT;
+	if (!ptr)
+		goto free_value;
 
 	err = -EFAULT;
 	if (copy_to_user(uvalue, value, map->value_size) != 0)
-		goto err_unlock;
+		goto free_value;
 
 	err = 0;
 
-err_unlock:
-	rcu_read_unlock();
+free_value:
+	kfree(value);
 free_key:
 	kfree(key);
 err_put:
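
Note: the map_lookup_elem() change above exists because copy_to_user() can fault and sleep, which is not allowed inside an rcu_read_lock() section; the fix snapshots the element into a private kmalloc() buffer under the RCU read lock and performs the user copy afterwards. Below is a minimal userspace sketch of that snapshot-then-copy pattern, with a pthread mutex standing in for the RCU read side; all names are illustrative, not kernel API.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static char shared_value[64] = "hello";  /* stands in for the map element */

    /* Snapshot under the lock; do the slow copy outside it. */
    static int lookup_copy(char *out, size_t len)
    {
        char *snap = malloc(len);            /* like kmalloc(value_size, GFP_USER) */

        if (!snap)
            return -1;

        pthread_mutex_lock(&table_lock);     /* rcu_read_lock() in the kernel */
        memcpy(snap, shared_value, len);     /* fast, never blocks */
        pthread_mutex_unlock(&table_lock);   /* rcu_read_unlock() */

        memcpy(out, snap, len);              /* the possibly-faulting copy_to_user() step */
        free(snap);
        return 0;
    }

    int main(void)
    {
        char out[64];

        if (lookup_copy(out, sizeof(out)) == 0)
            printf("copied: %s\n", out);
        return 0;
    }
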
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index bb263d0caab3..04cfe8ace520 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1909,7 +1909,7 @@ static void cgroup_kill_sb(struct super_block *sb)
 	 *
 	 * And don't kill the default root.
 	 */
-	if (css_has_online_children(&root->cgrp.self) ||
+	if (!list_empty(&root->cgrp.self.children) ||
 	    root == &cgrp_dfl_root)
 		cgroup_put(&root->cgrp);
 	else
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index f191bddf64b8..7b40c5f07dce 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2023,7 +2023,7 @@ static int kdb_lsmod(int argc, const char **argv)
 		kdb_printf("%-20s%8u 0x%p ", mod->name,
 			   mod->core_size, (void *)mod);
 #ifdef CONFIG_MODULE_UNLOAD
-		kdb_printf("%4ld ", module_refcount(mod));
+		kdb_printf("%4d ", module_refcount(mod));
 #endif
 		if (mod->state == MODULE_STATE_GOING)
 			kdb_printf(" (Unloading)");
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 882f835a0d85..19efcf13375a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6776,7 +6776,6 @@ skip_type:
 		__perf_event_init_context(&cpuctx->ctx);
 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
-		cpuctx->ctx.type = cpu_context;
 		cpuctx->ctx.pmu = pmu;
 
 		__perf_cpu_hrtimer_init(cpuctx, cpu);
@@ -7420,7 +7419,19 @@ SYSCALL_DEFINE5(perf_event_open,
 		 * task or CPU context:
 		 */
 		if (move_group) {
-			if (group_leader->ctx->type != ctx->type)
+			/*
+			 * Make sure we're both on the same task, or both
+			 * per-cpu events.
+			 */
+			if (group_leader->ctx->task != ctx->task)
+				goto err_context;
+
+			/*
+			 * Make sure we're both events for the same CPU;
+			 * grouping events for different CPUs is broken; since
+			 * you can never concurrently schedule them anyhow.
+			 */
+			if (group_leader->cpu != event->cpu)
 				goto err_context;
 		} else {
 			if (group_leader->ctx != ctx)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 06f58309fed2..ee619929cf90 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -127,7 +127,7 @@ static void *alloc_insn_page(void)
 
 static void free_insn_page(void *page)
 {
-	module_free(NULL, page);
+	module_memfree(page);
 }
 
 struct kprobe_insn_cache kprobe_insn_slots = {
diff --git a/kernel/module.c b/kernel/module.c
index 3965511ae133..d856e96a3cce 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -772,9 +772,18 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 	return 0;
 }
 
-unsigned long module_refcount(struct module *mod)
+/**
+ * module_refcount - return the refcount or -1 if unloading
+ *
+ * @mod:	the module we're checking
+ *
+ * Returns:
+ *	-1 if the module is in the process of unloading
+ *	otherwise the number of references in the kernel to the module
+ */
+int module_refcount(struct module *mod)
 {
-	return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
+	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
 }
 EXPORT_SYMBOL(module_refcount);
 
@@ -856,7 +865,7 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
 	struct module_use *use;
 	int printed_something = 0;
 
-	seq_printf(m, " %lu ", module_refcount(mod));
+	seq_printf(m, " %i ", module_refcount(mod));
 
 	/*
 	 * Always include a trailing , so userspace can differentiate
@@ -908,7 +917,7 @@ EXPORT_SYMBOL_GPL(symbol_put_addr);
 static ssize_t show_refcnt(struct module_attribute *mattr,
 			   struct module_kobject *mk, char *buffer)
 {
-	return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
+	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
 }
 
 static struct module_attribute modinfo_refcnt =
@@ -1795,7 +1804,7 @@ static void unset_module_core_ro_nx(struct module *mod) { }
 static void unset_module_init_ro_nx(struct module *mod) { }
 #endif
 
-void __weak module_free(struct module *mod, void *module_region)
+void __weak module_memfree(void *module_region)
 {
 	vfree(module_region);
 }
@@ -1804,6 +1813,10 @@ void __weak module_arch_cleanup(struct module *mod)
 {
 }
 
+void __weak module_arch_freeing_init(struct module *mod)
+{
+}
+
 /* Free a module, remove from lists, etc. */
 static void free_module(struct module *mod)
 {
@@ -1841,7 +1854,8 @@ static void free_module(struct module *mod)
 
 	/* This may be NULL, but that's OK */
 	unset_module_init_ro_nx(mod);
-	module_free(mod, mod->module_init);
+	module_arch_freeing_init(mod);
+	module_memfree(mod->module_init);
 	kfree(mod->args);
 	percpu_modfree(mod);
 
@@ -1850,7 +1864,7 @@ static void free_module(struct module *mod)
 
 	/* Finally, free the core (containing the module structure) */
 	unset_module_core_ro_nx(mod);
-	module_free(mod, mod->module_core);
+	module_memfree(mod->module_core);
 
 #ifdef CONFIG_MPU
 	update_protections(current->mm);
@@ -2785,7 +2799,7 @@ static int move_module(struct module *mod, struct load_info *info)
 	 */
 	kmemleak_ignore(ptr);
 	if (!ptr) {
-		module_free(mod, mod->module_core);
+		module_memfree(mod->module_core);
 		return -ENOMEM;
 	}
 	memset(ptr, 0, mod->init_size);
@@ -2930,8 +2944,9 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 static void module_deallocate(struct module *mod, struct load_info *info)
 {
 	percpu_modfree(mod);
-	module_free(mod, mod->module_init);
-	module_free(mod, mod->module_core);
+	module_arch_freeing_init(mod);
+	module_memfree(mod->module_init);
+	module_memfree(mod->module_core);
 }
 
 int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2983,10 +2998,31 @@ static void do_mod_ctors(struct module *mod)
 #endif
 }
 
+/* For freeing module_init on success, in case kallsyms traversing */
+struct mod_initfree {
+	struct rcu_head rcu;
+	void *module_init;
+};
+
+static void do_free_init(struct rcu_head *head)
+{
+	struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
+	module_memfree(m->module_init);
+	kfree(m);
+}
+
 /* This is where the real work happens */
 static int do_init_module(struct module *mod)
 {
 	int ret = 0;
+	struct mod_initfree *freeinit;
+
+	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
+	if (!freeinit) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	freeinit->module_init = mod->module_init;
 
 	/*
 	 * We want to find out whether @mod uses async during init.  Clear
@@ -2999,18 +3035,7 @@ static int do_init_module(struct module *mod)
 	if (mod->init != NULL)
 		ret = do_one_initcall(mod->init);
 	if (ret < 0) {
-		/*
-		 * Init routine failed: abort.  Try to protect us from
-		 * buggy refcounters.
-		 */
-		mod->state = MODULE_STATE_GOING;
-		synchronize_sched();
-		module_put(mod);
-		blocking_notifier_call_chain(&module_notify_list,
-					     MODULE_STATE_GOING, mod);
-		free_module(mod);
-		wake_up_all(&module_wq);
-		return ret;
+		goto fail_free_freeinit;
 	}
 	if (ret > 0) {
 		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
@@ -3055,15 +3080,35 @@
 	mod->strtab = mod->core_strtab;
 #endif
 	unset_module_init_ro_nx(mod);
-	module_free(mod, mod->module_init);
+	module_arch_freeing_init(mod);
 	mod->module_init = NULL;
 	mod->init_size = 0;
 	mod->init_ro_size = 0;
 	mod->init_text_size = 0;
+	/*
+	 * We want to free module_init, but be aware that kallsyms may be
+	 * walking this with preempt disabled. In all the failure paths,
+	 * we call synchronize_rcu/synchronize_sched, but we don't want
+	 * to slow down the success path, so use actual RCU here.
+	 */
+	call_rcu(&freeinit->rcu, do_free_init);
 	mutex_unlock(&module_mutex);
 	wake_up_all(&module_wq);
 
 	return 0;
+
+fail_free_freeinit:
+	kfree(freeinit);
+fail:
+	/* Try to protect us from buggy refcounters. */
+	mod->state = MODULE_STATE_GOING;
+	synchronize_sched();
+	module_put(mod);
+	blocking_notifier_call_chain(&module_notify_list,
+				     MODULE_STATE_GOING, mod);
+	free_module(mod);
+	wake_up_all(&module_wq);
+	return ret;
 }
 
 static int may_init_module(void)
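
Note: do_init_module() now allocates the freeinit container before running the module's init function, so the success path has no allocation left that can fail after init has run, and the init region is freed through call_rcu() so a kallsyms walker traversing init symbols with preemption disabled cannot see the memory vanish underneath it. Below is a userspace sketch of the do_free_init() shape, recovering the container from an embedded callback head with container_of(); the names mirror the patch, but this is illustrative code, not kernel API.

    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cb_head {
        void (*func)(struct cb_head *head);  /* struct rcu_head in the kernel */
    };

    struct mod_initfree {
        struct cb_head rcu;
        void *module_init;
    };

    static void do_free_init(struct cb_head *head)
    {
        /* Recover the enclosing container from the embedded head. */
        struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);

        free(m->module_init);  /* module_memfree() in the kernel */
        free(m);
    }

    int main(void)
    {
        struct mod_initfree *f = malloc(sizeof(*f));

        if (!f)
            return 1;
        f->module_init = malloc(128);
        f->rcu.func = do_free_init;
        /* call_rcu() would run this after a grace period; invoke directly here. */
        f->rcu.func(&f->rcu);
        return 0;
    }
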
diff --git a/kernel/params.c b/kernel/params.c
index 0af9b2c4e56c..728e05b167de 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -642,12 +642,15 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
 	mk->mp->grp.attrs = new_attrs;
 
 	/* Tack new one on the end. */
+	memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
 	sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
 	mk->mp->attrs[mk->mp->num].param = kp;
 	mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
 	/* Do not allow runtime DAC changes to make param writable. */
 	if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
 		mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+	else
+		mk->mp->attrs[mk->mp->num].mattr.store = NULL;
 	mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
 	mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
 	mk->mp->num++;
diff --git a/kernel/range.c b/kernel/range.c
index 322ea8e93e4b..82cfc285b046 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -113,12 +113,12 @@ static int cmp_range(const void *x1, const void *x2)
 {
 	const struct range *r1 = x1;
 	const struct range *r2 = x2;
-	s64 start1, start2;
 
-	start1 = r1->start;
-	start2 = r2->start;
-
-	return start1 - start2;
+	if (r1->start < r2->start)
+		return -1;
+	if (r1->start > r2->start)
+		return 1;
+	return 0;
 }
 
 int clean_sort_range(struct range *range, int az)
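
Note: the cmp_range() rewrite fixes a classic comparator bug. Sort comparators return int, and truncating a 64-bit difference to int can produce the wrong sign when the operands differ only above bit 31. The standalone demo below (illustrative, not the kernel's struct range) shows the subtraction comparator mis-ordering two 64-bit values while the three-way compare gets it right.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int cmp_sub(const void *a, const void *b)
    {
        /* BUG: the 64-bit difference is truncated to int */
        return (int)(*(const uint64_t *)a - *(const uint64_t *)b);
    }

    static int cmp_3way(const void *a, const void *b)
    {
        uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;

        if (x < y)
            return -1;
        if (x > y)
            return 1;
        return 0;
    }

    int main(void)
    {
        uint64_t v[] = { 0x100000000ULL, 1 };  /* differ only above bit 31 */

        /* 2^32 - 1 truncates to 0xFFFFFFFF, typically -1: claims 2^32 < 1 */
        printf("cmp_sub:  %d\n", cmp_sub(&v[0], &v[1]));
        printf("cmp_3way: %d\n", cmp_3way(&v[0], &v[1]));  /* 1: correct */

        qsort(v, 2, sizeof(v[0]), cmp_3way);
        printf("sorted: %llu %llu\n",
               (unsigned long long)v[0], (unsigned long long)v[1]);
        return 0;
    }
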
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c0accc00566e..e628cb11b560 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7292,13 +7292,12 @@ void __might_sleep(const char *file, int line, int preempt_offset)
 	 * since we will exit with TASK_RUNNING make sure we enter with it,
 	 * otherwise we will destroy state.
 	 */
-	if (WARN_ONCE(current->state != TASK_RUNNING,
+	WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
 			"do not call blocking ops when !TASK_RUNNING; "
 			"state=%lx set at [<%p>] %pS\n",
 			current->state,
 			(void *)current->task_state_change,
-			(void *)current->task_state_change))
-		__set_current_state(TASK_RUNNING);
+			(void *)current->task_state_change);
 
 	___might_sleep(file, line, preempt_offset);
 }
diff --git a/kernel/sys.c b/kernel/sys.c
index a8c9f5a7dda6..ea9c88109894 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2210,9 +2210,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		up_write(&me->mm->mmap_sem);
 		break;
 	case PR_MPX_ENABLE_MANAGEMENT:
+		if (arg2 || arg3 || arg4 || arg5)
+			return -EINVAL;
 		error = MPX_ENABLE_MANAGEMENT(me);
 		break;
 	case PR_MPX_DISABLE_MANAGEMENT:
+		if (arg2 || arg3 || arg4 || arg5)
+			return -EINVAL;
 		error = MPX_DISABLE_MANAGEMENT(me);
 		break;
 	default:
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 87a346fd6d61..28bf91c60a0b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -633,6 +633,13 @@ int ntp_validate_timex(struct timex *txc)
 	if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
 		return -EPERM;
 
+	if (txc->modes & ADJ_FREQUENCY) {
+		if (LONG_MIN / PPM_SCALE > txc->freq)
+			return -EINVAL;
+		if (LONG_MAX / PPM_SCALE < txc->freq)
+			return -EINVAL;
+	}
+
 	return 0;
 }
 
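
Note: the ADJ_FREQUENCY check divides the limit instead of multiplying the input, since txc->freq * PPM_SCALE could overflow before any test could catch it, while LONG_MAX / PPM_SCALE < txc->freq is always safe to evaluate. A standalone sketch of the idiom follows, with a stand-in scale constant (the kernel's PPM_SCALE is a different, derived value).

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PPM_SCALE 65536L  /* stand-in; the kernel's constant differs */

    static bool freq_in_range(long freq)
    {
        if (LONG_MIN / PPM_SCALE > freq)
            return false;
        if (LONG_MAX / PPM_SCALE < freq)
            return false;
        return true;  /* freq * PPM_SCALE can no longer overflow */
    }

    int main(void)
    {
        printf("%d\n", freq_in_range(500000));       /* 1: in range */
        printf("%d\n", freq_in_range(LONG_MAX / 2)); /* 0: would overflow */
        return 0;
    }
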
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 6390517e77d4..2c85b7724af4 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -196,6 +196,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
 	if (tv) {
 		if (copy_from_user(&user_tv, tv, sizeof(*tv)))
 			return -EFAULT;
+
+		if (!timeval_valid(&user_tv))
+			return -EINVAL;
+
 		new_ts.tv_sec = user_tv.tv_sec;
 		new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
 	}
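
Note: settimeofday() now rejects out-of-range input instead of silently deriving a bogus nanosecond value from tv_usec * NSEC_PER_USEC. Roughly, the kernel's timeval_valid() helper checks the following, shown here as a standalone sketch (the name and exact body are an approximation, not copied from the kernel):

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/time.h>

    #define USEC_PER_SEC 1000000L

    static bool timeval_is_valid(const struct timeval *tv)
    {
        if (tv->tv_sec < 0)      /* dates before 1970 are bogus */
            return false;
        if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
            return false;        /* not a sub-second microsecond count */
        return true;
    }

    int main(void)
    {
        struct timeval bad = { .tv_sec = 100, .tv_usec = USEC_PER_SEC };

        printf("%d\n", timeval_is_valid(&bad));  /* 0: usec out of range */
        return 0;
    }
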
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 929a733d302e..224e768bdc73 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2497,12 +2497,14 @@ static void ftrace_run_update_code(int command)
 }
 
 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
-				   struct ftrace_hash *old_hash)
+				   struct ftrace_ops_hash *old_hash)
 {
 	ops->flags |= FTRACE_OPS_FL_MODIFYING;
-	ops->old_hash.filter_hash = old_hash;
+	ops->old_hash.filter_hash = old_hash->filter_hash;
+	ops->old_hash.notrace_hash = old_hash->notrace_hash;
 	ftrace_run_update_code(command);
 	ops->old_hash.filter_hash = NULL;
+	ops->old_hash.notrace_hash = NULL;
 	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
 }
 
@@ -3579,7 +3581,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 
 static int ftrace_probe_registered;
 
-static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
+static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
 {
 	int ret;
 	int i;
@@ -3637,6 +3639,7 @@ int
 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 			       void *data)
 {
+	struct ftrace_ops_hash old_hash_ops;
 	struct ftrace_func_probe *entry;
 	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
 	struct ftrace_hash *old_hash = *orig_hash;
@@ -3658,6 +3661,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
 	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
+	old_hash_ops.filter_hash = old_hash;
+	/* Probes only have filters */
+	old_hash_ops.notrace_hash = NULL;
+
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
 	if (!hash) {
 		count = -ENOMEM;
@@ -3718,7 +3725,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
 
-	__enable_ftrace_function_probe(old_hash);
+	__enable_ftrace_function_probe(&old_hash_ops);
 
 	if (!ret)
 		free_ftrace_hash_rcu(old_hash);
@@ -4006,10 +4013,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 }
 
 static void ftrace_ops_update_code(struct ftrace_ops *ops,
-				   struct ftrace_hash *old_hash)
+				   struct ftrace_ops_hash *old_hash)
 {
-	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+	struct ftrace_ops *op;
+
+	if (!ftrace_enabled)
+		return;
+
+	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
 		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
+		return;
+	}
+
+	/*
+	 * If this is the shared global_ops filter, then we need to
+	 * check if there is another ops that shares it, is enabled.
+	 * If so, we still need to run the modify code.
+	 */
+	if (ops->func_hash != &global_ops.local_hash)
+		return;
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (op->func_hash == &global_ops.local_hash &&
+		    op->flags & FTRACE_OPS_FL_ENABLED) {
+			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
+			/* Only need to do this once */
+			return;
+		}
+	} while_for_each_ftrace_op(op);
 }
 
 static int
@@ -4017,6 +4048,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 		unsigned long ip, int remove, int reset, int enable)
 {
 	struct ftrace_hash **orig_hash;
+	struct ftrace_ops_hash old_hash_ops;
 	struct ftrace_hash *old_hash;
 	struct ftrace_hash *hash;
 	int ret;
@@ -4053,9 +4085,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 
 	mutex_lock(&ftrace_lock);
 	old_hash = *orig_hash;
+	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
+	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
 	if (!ret) {
-		ftrace_ops_update_code(ops, old_hash);
+		ftrace_ops_update_code(ops, &old_hash_ops);
 		free_ftrace_hash_rcu(old_hash);
 	}
 	mutex_unlock(&ftrace_lock);
@@ -4267,6 +4301,7 @@ static void __init set_ftrace_early_filters(void)
 int ftrace_regex_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *m = (struct seq_file *)file->private_data;
+	struct ftrace_ops_hash old_hash_ops;
 	struct ftrace_iterator *iter;
 	struct ftrace_hash **orig_hash;
 	struct ftrace_hash *old_hash;
@@ -4300,10 +4335,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
 		mutex_lock(&ftrace_lock);
 		old_hash = *orig_hash;
+		old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
+		old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
 		ret = ftrace_hash_move(iter->ops, filter_hash,
 				       orig_hash, iter->hash);
 		if (!ret) {
-			ftrace_ops_update_code(iter->ops, old_hash);
+			ftrace_ops_update_code(iter->ops, &old_hash_ops);
 			free_ftrace_hash_rcu(old_hash);
 		}
 		mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2e767972e99c..4a9079b9f082 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6918,7 +6918,6 @@ void __init trace_init(void)
 		tracepoint_printk = 0;
 	}
 	tracer_alloc_buffers();
-	init_ftrace_syscalls();
 	trace_event_init();
 }
 
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 366a78a3e61e..b03a0ea77b99 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2429,12 +2429,39 @@ static __init int event_trace_memsetup(void)
 	return 0;
 }
 
+static __init void
+early_enable_events(struct trace_array *tr, bool disable_first)
+{
+	char *buf = bootup_event_buf;
+	char *token;
+	int ret;
+
+	while (true) {
+		token = strsep(&buf, ",");
+
+		if (!token)
+			break;
+		if (!*token)
+			continue;
+
+		/* Restarting syscalls requires that we stop them first */
+		if (disable_first)
+			ftrace_set_clr_event(tr, token, 0);
+
+		ret = ftrace_set_clr_event(tr, token, 1);
+		if (ret)
+			pr_warn("Failed to enable trace event: %s\n", token);
+
+		/* Put back the comma to allow this to be called again */
+		if (buf)
+			*(buf - 1) = ',';
+	}
+}
+
 static __init int event_trace_enable(void)
 {
 	struct trace_array *tr = top_trace_array();
 	struct ftrace_event_call **iter, *call;
-	char *buf = bootup_event_buf;
-	char *token;
 	int ret;
 
 	if (!tr)
@@ -2456,18 +2483,7 @@ static __init int event_trace_enable(void)
 	 */
 	__trace_early_add_events(tr);
 
-	while (true) {
-		token = strsep(&buf, ",");
-
-		if (!token)
-			break;
-		if (!*token)
-			continue;
-
-		ret = ftrace_set_clr_event(tr, token, 1);
-		if (ret)
-			pr_warn("Failed to enable trace event: %s\n", token);
-	}
+	early_enable_events(tr, false);
 
 	trace_printk_start_comm();
 
@@ -2478,6 +2494,31 @@ static __init int event_trace_enable(void)
 	return 0;
 }
 
+/*
+ * event_trace_enable() is called from trace_event_init() first to
+ * initialize events and perhaps start any events that are on the
+ * command line. Unfortunately, there are some events that will not
+ * start this early, like the system call tracepoints that need
+ * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
+ * is called before pid 1 starts, and this flag is never set, making
+ * the syscall tracepoint never get reached, but the event is enabled
+ * regardless (and not doing anything).
+ */
+static __init int event_trace_enable_again(void)
+{
+	struct trace_array *tr;
+
+	tr = top_trace_array();
+	if (!tr)
+		return -ENODEV;
+
+	early_enable_events(tr, true);
+
+	return 0;
+}
+
+early_initcall(event_trace_enable_again);
+
 static __init int event_trace_init(void)
 {
 	struct trace_array *tr;
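
Note: early_enable_events() deliberately repairs the buffer after strsep(), which overwrites each ',' with '\0'; restoring the comma lets event_trace_enable_again() re-parse the same boot-time buffer at early_initcall time. A standalone demo of the trick (illustrative names, not kernel API):

    #define _DEFAULT_SOURCE  /* for strsep() on glibc */
    #include <stdio.h>
    #include <string.h>

    static void walk_events(char *buf)
    {
        char *token;

        while ((token = strsep(&buf, ",")) != NULL) {
            if (*token)
                printf("event: %s\n", token);
            /* strsep() wrote '\0' over the ','; undo that so the
             * buffer survives for a second pass. */
            if (buf)
                *(buf - 1) = ',';
        }
    }

    int main(void)
    {
        char bootup_buf[] = "sched:sched_switch,syscalls:sys_enter_openat";

        walk_events(bootup_buf);  /* first pass, like event_trace_enable() */
        walk_events(bootup_buf);  /* still works: the commas were restored */
        return 0;
    }
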
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6202b08f1933..beeeac9e0e3e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1841,17 +1841,11 @@ static void pool_mayday_timeout(unsigned long __pool)
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.  Called only from
  * manager.
- *
- * Return:
- * %false if no action was taken and pool->lock stayed locked, %true
- * otherwise.
  */
-static bool maybe_create_worker(struct worker_pool *pool)
+static void maybe_create_worker(struct worker_pool *pool)
 __releases(&pool->lock)
 __acquires(&pool->lock)
 {
-	if (!need_to_create_worker(pool))
-		return false;
 restart:
 	spin_unlock_irq(&pool->lock);
 
@@ -1877,7 +1871,6 @@ restart:
 	 */
 	if (need_to_create_worker(pool))
 		goto restart;
-	return true;
 }
 
 /**
@@ -1897,16 +1890,14 @@ restart:
 * multiple times.  Does GFP_KERNEL allocations.
 *
 * Return:
- * %false if the pool don't need management and the caller can safely start
- * processing works, %true indicates that the function released pool->lock
- * and reacquired it to perform some management function and that the
- * conditions that the caller verified while holding the lock before
- * calling the function might no longer be true.
+ * %false if the pool doesn't need management and the caller can safely
+ * start processing works, %true if management function was performed and
+ * the conditions that the caller verified before calling the function may
+ * no longer be true.
 */
 static bool manage_workers(struct worker *worker)
 {
 	struct worker_pool *pool = worker->pool;
-	bool ret = false;
 
 	/*
 	 * Anyone who successfully grabs manager_arb wins the arbitration
@@ -1919,12 +1910,12 @@ static bool manage_workers(struct worker *worker)
 	 * actual management, the pool may stall indefinitely.
 	 */
 	if (!mutex_trylock(&pool->manager_arb))
-		return ret;
+		return false;
 
-	ret |= maybe_create_worker(pool);
+	maybe_create_worker(pool);
 
 	mutex_unlock(&pool->manager_arb);
-	return ret;
+	return true;
 }
 
 /**
