Diffstat (limited to 'kernel')
 -rw-r--r--   kernel/cpuset.c                  |  9
 -rw-r--r--   kernel/irq/manage.c              |  7
 -rw-r--r--   kernel/irq/pm.c                  |  7
 -rw-r--r--   kernel/livepatch/core.c          | 13
 -rw-r--r--   kernel/locking/rtmutex.c         |  1
 -rw-r--r--   kernel/module.c                  |  4
 -rw-r--r--   kernel/printk/console_cmdline.h  |  2
 -rw-r--r--   kernel/printk/printk.c           |  1
 -rw-r--r--   kernel/sched/idle.c              | 54
 -rw-r--r--   kernel/sys.c                     |  3
 -rw-r--r--   kernel/trace/ftrace.c            | 40
 -rw-r--r--   kernel/workqueue.c               | 56
12 files changed, 145 insertions, 52 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1d1fe9361d29..fc7f4748d34a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -548,9 +548,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
 
 	rcu_read_lock();
 	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-		if (cp == root_cs)
-			continue;
-
 		/* skip the whole subtree if @cp doesn't have any CPU */
 		if (cpumask_empty(cp->cpus_allowed)) {
 			pos_css = css_rightmost_descendant(pos_css);
@@ -873,7 +870,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
 		 * If it becomes empty, inherit the effective mask of the
 		 * parent, which is guaranteed to have some CPUs.
 		 */
-		if (cpumask_empty(new_cpus))
+		if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus))
 			cpumask_copy(new_cpus, parent->effective_cpus);
 
 		/* Skip the whole subtree if the cpumask remains the same. */
@@ -1129,7 +1126,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
 		 * If it becomes empty, inherit the effective mask of the
 		 * parent, which is guaranteed to have some MEMs.
 		 */
-		if (nodes_empty(*new_mems))
+		if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems))
 			*new_mems = parent->effective_mems;
 
 		/* Skip the whole subtree if the nodemask remains the same. */
@@ -1979,7 +1976,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 
 	spin_lock_irq(&callback_lock);
 	cs->mems_allowed = parent->mems_allowed;
+	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
 	spin_unlock_irq(&callback_lock);
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 196a06fbc122..886d09e691d5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1474,8 +1474,13 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	 * otherwise we'll have trouble later trying to figure out
 	 * which interrupt is which (messes up the interrupt freeing
 	 * logic etc).
+	 *
+	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
+	 * it cannot be set along with IRQF_NO_SUSPEND.
 	 */
-	if ((irqflags & IRQF_SHARED) && !dev_id)
+	if (((irqflags & IRQF_SHARED) && !dev_id) ||
+	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
+	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
 		return -EINVAL;
 
 	desc = irq_to_desc(irq);
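The expanded check rejects three flag combinations: a shared interrupt registered without a dev_id, IRQF_COND_SUSPEND without IRQF_SHARED, and IRQF_COND_SUSPEND combined with IRQF_NO_SUSPEND. A minimal userspace sketch of the same predicate follows; the flag bit values and the helper name are illustrative stand-ins, not the kernel's definitions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in bit values; the real flags live in <linux/interrupt.h>. */
    #define IRQF_SHARED       0x1u
    #define IRQF_NO_SUSPEND   0x2u
    #define IRQF_COND_SUSPEND 0x4u

    /* Mirrors the combined validation request_threaded_irq() now performs. */
    static bool irqflags_invalid(unsigned int irqflags, const void *dev_id)
    {
        return (((irqflags & IRQF_SHARED) && !dev_id) ||
                (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
                ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)));
    }

    int main(void)
    {
        int cookie;

        printf("%d\n", irqflags_invalid(IRQF_SHARED, NULL));                        /* 1: shared, no dev_id */
        printf("%d\n", irqflags_invalid(IRQF_COND_SUSPEND, &cookie));               /* 1: COND_SUSPEND needs SHARED */
        printf("%d\n", irqflags_invalid(IRQF_SHARED | IRQF_COND_SUSPEND, &cookie)); /* 0: allowed */
        return 0;
    }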
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 3ca532592704..5204a6d1b985 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -43,9 +43,12 @@ void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
 
 	if (action->flags & IRQF_NO_SUSPEND)
 		desc->no_suspend_depth++;
+	else if (action->flags & IRQF_COND_SUSPEND)
+		desc->cond_suspend_depth++;
 
 	WARN_ON_ONCE(desc->no_suspend_depth &&
-		     desc->no_suspend_depth != desc->nr_actions);
+		     (desc->no_suspend_depth +
+		      desc->cond_suspend_depth) != desc->nr_actions);
 }
 
 /*
@@ -61,6 +64,8 @@ void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
 
 	if (action->flags & IRQF_NO_SUSPEND)
 		desc->no_suspend_depth--;
+	else if (action->flags & IRQF_COND_SUSPEND)
+		desc->cond_suspend_depth--;
 }
 
 static bool suspend_device_irq(struct irq_desc *desc, int irq)
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index ff7f47d026ac..01ca08804f51 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -248,11 +248,12 @@ static int klp_find_external_symbol(struct module *pmod, const char *name,
 	/* first, check if it's an exported symbol */
 	preempt_disable();
 	sym = find_symbol(name, NULL, NULL, true, true);
-	preempt_enable();
 	if (sym) {
 		*addr = sym->value;
+		preempt_enable();
 		return 0;
 	}
+	preempt_enable();
 
 	/* otherwise check if it's in another .o within the patch module */
 	return klp_find_object_symbol(pmod->name, name, addr);
@@ -314,12 +315,12 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 	rcu_read_lock();
 	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
 				      stack_node);
-	rcu_read_unlock();
-
 	if (WARN_ON_ONCE(!func))
-		return;
+		goto unlock;
 
 	klp_arch_set_pc(regs, (unsigned long)func->new_func);
+unlock:
+	rcu_read_unlock();
 }
 
 static int klp_disable_func(struct klp_func *func)
@@ -731,7 +732,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 	func->state = KLP_DISABLED;
 
 	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
-				    obj->kobj, func->old_name);
+				    obj->kobj, "%s", func->old_name);
 }
 
 /* parts of the initialization that is done only when the object is loaded */
@@ -807,7 +808,7 @@ static int klp_init_patch(struct klp_patch *patch)
 	patch->state = KLP_DISABLED;
 
 	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
-				   klp_root_kobj, patch->mod->name);
+				   klp_root_kobj, "%s", patch->mod->name);
 	if (ret)
 		goto unlock;
 
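The two kobject_init_and_add() changes stop using the patched function or module name itself as the format string and pass it through "%s" instead. A small userspace illustration of the hazard, using printf() in place of the kobject API and an invented symbol name:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical name that happens to contain a conversion specifier. */
        const char *old_name = "do_fork%d";

        printf("%s\n", old_name);   /* safe: the name is printed verbatim */
        /* printf(old_name); */     /* unsafe: "%d" would be parsed as a format directive */
        return 0;
    }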
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index e16e5542bf13..6357265a31ad 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1193,6 +1193,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
 
 	if (unlikely(ret)) {
+		__set_current_state(TASK_RUNNING);
 		if (rt_mutex_has_waiters(lock))
 			remove_waiter(lock, &waiter);
 		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
diff --git a/kernel/module.c b/kernel/module.c
index b34813f725e9..b3d634ed06c9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -56,7 +56,6 @@
 #include <linux/async.h>
 #include <linux/percpu.h>
 #include <linux/kmemleak.h>
-#include <linux/kasan.h>
 #include <linux/jump_label.h>
 #include <linux/pfn.h>
 #include <linux/bsearch.h>
@@ -1814,7 +1813,6 @@ static void unset_module_init_ro_nx(struct module *mod) { }
 void __weak module_memfree(void *module_region)
 {
 	vfree(module_region);
-	kasan_module_free(module_region);
 }
 
 void __weak module_arch_cleanup(struct module *mod)
@@ -2313,11 +2311,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
 	info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
 	info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
 	mod->core_size += strtab_size;
+	mod->core_size = debug_align(mod->core_size);
 
 	/* Put string table section at end of init part of module. */
 	strsect->sh_flags |= SHF_ALLOC;
 	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
 					 info->index.str) | INIT_OFFSET_MASK;
+	mod->init_size = debug_align(mod->init_size);
 	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
 }
 
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
index cbd69d842341..2ca4a8b5fe57 100644
--- a/kernel/printk/console_cmdline.h
+++ b/kernel/printk/console_cmdline.h
@@ -3,7 +3,7 @@
 
 struct console_cmdline
 {
-	char	name[8];	/* Name of the driver */
+	char	name[16];	/* Name of the driver */
 	int	index;		/* Minor dev. to use */
 	char	*options;	/* Options for the driver */
 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 01cfd69c54c6..bb0635bd74f2 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2464,6 +2464,7 @@ void register_console(struct console *newcon)
 	for (i = 0, c = console_cmdline;
 	     i < MAX_CMDLINECONSOLES && c->name[0];
 	     i++, c++) {
+		BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
 		if (strcmp(c->name, newcon->name) != 0)
 			continue;
 		if (newcon->index >= 0 &&
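The BUILD_BUG_ON() ties the enlarged console_cmdline name buffer to the size of struct console's name field at compile time, so the two cannot silently drift apart again. A rough userspace approximation using C11 _Static_assert with mock struct definitions (the type and field names here are stand-ins for the kernel structures):

    #include <stdio.h>

    struct console_cmdline_like { char name[16]; };  /* mirrors the enlarged field */
    struct console_like         { char name[16]; };  /* mirrors struct console::name */

    _Static_assert(sizeof(((struct console_cmdline_like *)0)->name) ==
                   sizeof(((struct console_like *)0)->name),
                   "console name buffers must stay the same size");

    int main(void)
    {
        puts("name buffer sizes verified at compile time");
        return 0;
    }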
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 94b2d7b88a27..80014a178342 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -82,6 +82,7 @@ static void cpuidle_idle_call(void)
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
 	unsigned int broadcast;
+	bool reflect;
 
 	/*
 	 * Check if the idle task must be rescheduled. If it is the
@@ -105,6 +106,9 @@ static void cpuidle_idle_call(void)
 	 */
 	rcu_idle_enter();
 
+	if (cpuidle_not_available(drv, dev))
+		goto use_default;
+
 	/*
 	 * Suspend-to-idle ("freeze") is a system state in which all user space
 	 * has been frozen, all I/O devices have been suspended and the only
@@ -115,30 +119,24 @@ static void cpuidle_idle_call(void)
 	 * until a proper wakeup interrupt happens.
 	 */
 	if (idle_should_freeze()) {
-		cpuidle_enter_freeze();
-		local_irq_enable();
-		goto exit_idle;
-	}
+		entered_state = cpuidle_enter_freeze(drv, dev);
+		if (entered_state >= 0) {
+			local_irq_enable();
+			goto exit_idle;
+		}
 
-	/*
-	 * Ask the cpuidle framework to choose a convenient idle state.
-	 * Fall back to the default arch idle method on errors.
-	 */
-	next_state = cpuidle_select(drv, dev);
-	if (next_state < 0) {
-use_default:
+		reflect = false;
+		next_state = cpuidle_find_deepest_state(drv, dev);
+	} else {
+		reflect = true;
 		/*
-		 * We can't use the cpuidle framework, let's use the default
-		 * idle routine.
+		 * Ask the cpuidle framework to choose a convenient idle state.
 		 */
-		if (current_clr_polling_and_test())
-			local_irq_enable();
-		else
-			arch_cpu_idle();
-
-		goto exit_idle;
+		next_state = cpuidle_select(drv, dev);
 	}
-
+	/* Fall back to the default arch idle method on errors. */
+	if (next_state < 0)
+		goto use_default;
 
 	/*
 	 * The idle task must be scheduled, it is pointless to
@@ -183,7 +181,8 @@ use_default:
 	/*
	 * Give the governor an opportunity to reflect on the outcome
 	 */
-	cpuidle_reflect(dev, entered_state);
+	if (reflect)
+		cpuidle_reflect(dev, entered_state);
 
 exit_idle:
 	__current_set_polling();
@@ -196,6 +195,19 @@ exit_idle:
 
 	rcu_idle_exit();
 	start_critical_timings();
+	return;
+
+use_default:
+	/*
+	 * We can't use the cpuidle framework, let's use the default
+	 * idle routine.
+	 */
+	if (current_clr_polling_and_test())
+		local_irq_enable();
+	else
+		arch_cpu_idle();
+
+	goto exit_idle;
 }
 
 /*
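The restructured cpuidle_idle_call() boils down to: fall back to the default arch idle routine when cpuidle is unavailable or state selection fails, pick the deepest state directly (bypassing the governor) for suspend-to-idle, and call cpuidle_reflect() only when the governor actually chose the state. A condensed, userspace-only sketch of that decision order with stubbed helpers; every function below is a stand-in for the corresponding kernel API, not the real implementation:

    #include <stdbool.h>
    #include <stdio.h>

    static bool cpuidle_not_available(void)      { return false; } /* stub */
    static bool idle_should_freeze(void)         { return false; } /* stub */
    static int  cpuidle_enter_freeze(void)       { return -1; }    /* stub: freeze path failed */
    static int  cpuidle_find_deepest_state(void) { return 3; }     /* stub */
    static int  cpuidle_select(void)             { return 1; }     /* stub: governor's pick */
    static int  cpuidle_enter(int state)         { return state; } /* stub */
    static void cpuidle_reflect(int state)       { printf("reflect(%d)\n", state); }
    static void arch_cpu_idle(void)              { printf("default arch idle\n"); }

    static void cpuidle_idle_call_sketch(void)
    {
        int next_state, entered_state;
        bool reflect;

        if (cpuidle_not_available())
            goto use_default;

        if (idle_should_freeze()) {
            entered_state = cpuidle_enter_freeze();
            if (entered_state >= 0)
                return;                  /* suspend-to-idle handled it */
            reflect = false;             /* governor was never consulted */
            next_state = cpuidle_find_deepest_state();
        } else {
            reflect = true;
            next_state = cpuidle_select();
        }
        if (next_state < 0)
            goto use_default;

        entered_state = cpuidle_enter(next_state);
        if (reflect)
            cpuidle_reflect(entered_state);
        return;

    use_default:
        arch_cpu_idle();
    }

    int main(void) { cpuidle_idle_call_sketch(); return 0; }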
diff --git a/kernel/sys.c b/kernel/sys.c
index 667b2e62fad2..a03d9cd23ed7 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1108,6 +1108,7 @@ DECLARE_RWSEM(uts_sem);
 /*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
+ * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
 */
 static int override_release(char __user *release, size_t len)
 {
@@ -1127,7 +1128,7 @@ static int override_release(char __user *release, size_t len)
 				break;
 			rest++;
 		}
-		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
 		copy = clamp_t(size_t, len, 1, sizeof(buf));
 		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
 		ret = copy_to_user(release, buf, copy + 1);
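The updated comment and constant implement the mapping for UNAME26 programs: LINUX_VERSION_CODE encodes version a.b.c as (a << 16) + (b << 8) + c, so (code >> 8) & 0xff extracts x from 4.x and adding 60 yields 2.6.60+x. A small worked example of the arithmetic; the helper name is made up for illustration:

    #include <stdio.h>

    #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

    /* Same arithmetic override_release() now applies to 4.x kernels. */
    static unsigned int uname26_minor(unsigned int version_code)
    {
        return ((version_code >> 8) & 0xff) + 60;
    }

    int main(void)
    {
        printf("2.6.%u\n", uname26_minor(KERNEL_VERSION(4, 0, 0))); /* 2.6.60 */
        printf("2.6.%u\n", uname26_minor(KERNEL_VERSION(4, 2, 1))); /* 2.6.62 */
        return 0;
    }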
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 45e5cb143d17..4f228024055b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int ftrace_graph_active;
+#else
+# define ftrace_graph_active 0
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static struct ftrace_ops *removed_ops;
@@ -2041,8 +2047,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
 		if (!ftrace_rec_count(rec))
 			rec->flags = 0;
 		else
-			/* Just disable the record (keep REGS state) */
-			rec->flags &= ~FTRACE_FL_ENABLED;
+			/*
+			 * Just disable the record, but keep the ops TRAMP
+			 * and REGS states. The _EN flags must be disabled though.
+			 */
+			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
+					FTRACE_FL_REGS_EN);
 	}
 
 	return FTRACE_UPDATE_MAKE_NOP;
@@ -2688,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
 static void ftrace_startup_sysctl(void)
 {
+	int command;
+
 	if (unlikely(ftrace_disabled))
 		return;
 
 	/* Force update next time */
 	saved_ftrace_func = NULL;
 	/* ftrace_start_up is true if we want ftrace running */
-	if (ftrace_start_up)
-		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+	if (ftrace_start_up) {
+		command = FTRACE_UPDATE_CALLS;
+		if (ftrace_graph_active)
+			command |= FTRACE_START_FUNC_RET;
+		ftrace_startup_enable(command);
+	}
 }
 
 static void ftrace_shutdown_sysctl(void)
 {
+	int command;
+
 	if (unlikely(ftrace_disabled))
 		return;
 
 	/* ftrace_start_up is true if ftrace is running */
-	if (ftrace_start_up)
-		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
+	if (ftrace_start_up) {
+		command = FTRACE_DISABLE_CALLS;
+		if (ftrace_graph_active)
+			command |= FTRACE_STOP_FUNC_RET;
+		ftrace_run_update_code(command);
+	}
 }
 
 static cycle_t ftrace_update_time;
@@ -5558,12 +5580,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 	if (ftrace_enabled) {
 
-		ftrace_startup_sysctl();
-
 		/* we are starting ftrace again */
 		if (ftrace_ops_list != &ftrace_list_end)
 			update_ftrace_function();
 
+		ftrace_startup_sysctl();
+
 	} else {
 		/* stopping ftrace calls (just send to ftrace_stub) */
 		ftrace_trace_function = ftrace_stub;
@@ -5590,8 +5612,6 @@ static struct ftrace_ops graph_ops = {
 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
 };
 
-static int ftrace_graph_active;
-
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
 	return 0;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f28849394791..41ff75b478c6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
+struct cwt_wait {
+	wait_queue_t		wait;
+	struct work_struct	*work;
+};
+
+static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
+
+	if (cwait->work != key)
+		return 0;
+	return autoremove_wake_function(wait, mode, sync, key);
+}
+
 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 {
+	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
 	unsigned long flags;
 	int ret;
 
 	do {
 		ret = try_to_grab_pending(work, is_dwork, &flags);
 		/*
-		 * If someone else is canceling, wait for the same event it
-		 * would be waiting for before retrying.
+		 * If someone else is already canceling, wait for it to
+		 * finish. flush_work() doesn't work for PREEMPT_NONE
+		 * because we may get scheduled between @work's completion
+		 * and the other canceling task resuming and clearing
+		 * CANCELING - flush_work() will return false immediately
+		 * as @work is no longer busy, try_to_grab_pending() will
+		 * return -ENOENT as @work is still being canceled and the
+		 * other canceling task won't be able to clear CANCELING as
+		 * we're hogging the CPU.
+		 *
+		 * Let's wait for completion using a waitqueue. As this
+		 * may lead to the thundering herd problem, use a custom
+		 * wake function which matches @work along with exclusive
+		 * wait and wakeup.
 		 */
-		if (unlikely(ret == -ENOENT))
-			flush_work(work);
+		if (unlikely(ret == -ENOENT)) {
+			struct cwt_wait cwait;
+
+			init_wait(&cwait.wait);
+			cwait.wait.func = cwt_wakefn;
+			cwait.work = work;
+
+			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
+						  TASK_UNINTERRUPTIBLE);
+			if (work_is_canceling(work))
+				schedule();
+			finish_wait(&cancel_waitq, &cwait.wait);
+		}
 	} while (unlikely(ret < 0));
 
 	/* tell other tasks trying to grab @work to back off */
@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 
 	flush_work(work);
 	clear_work_data(work);
+
+	/*
+	 * Paired with prepare_to_wait() above so that either
+	 * waitqueue_active() is visible here or !work_is_canceling() is
+	 * visible there.
+	 */
+	smp_mb();
+	if (waitqueue_active(&cancel_waitq))
+		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
+
 	return ret;
 }
 
