author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2015-03-09 03:44:23 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2015-03-09 03:44:23 -0400
commit     e94f16a4fde646b3f155788fe37339b61264b0a9 (patch)
tree       28f1cf8b1980b6b25c1c58e3e157610a1b851996 /kernel
parent     d2b5851d8583e690eeb5ac8dfff5da92e1f1468f (diff)
parent     9eccca0843205f87c00404b663188b88eb248051 (diff)
Merge 4.0-rc3 into char-misc-next
We want the mei fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/manage.c              7
-rw-r--r--  kernel/irq/pm.c                  7
-rw-r--r--  kernel/livepatch/core.c         13
-rw-r--r--  kernel/locking/rtmutex.c         1
-rw-r--r--  kernel/module.c                  2
-rw-r--r--  kernel/printk/console_cmdline.h  2
-rw-r--r--  kernel/printk/printk.c           1
-rw-r--r--  kernel/sched/idle.c             54
-rw-r--r--  kernel/sys.c                     3
9 files changed, 59 insertions, 31 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 196a06fbc122..886d09e691d5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1474,8 +1474,13 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	 * otherwise we'll have trouble later trying to figure out
 	 * which interrupt is which (messes up the interrupt freeing
 	 * logic etc).
+	 *
+	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
+	 * it cannot be set along with IRQF_NO_SUSPEND.
 	 */
-	if ((irqflags & IRQF_SHARED) && !dev_id)
+	if (((irqflags & IRQF_SHARED) && !dev_id) ||
+	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
+	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
 		return -EINVAL;
 
 	desc = irq_to_desc(irq);
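The new check enforces three rules at request time: a shared interrupt needs a dev_id, IRQF_COND_SUSPEND is only meaningful on shared lines, and it cannot be combined with IRQF_NO_SUSPEND. A minimal driver sketch of the accepted and rejected flag combinations (hypothetical names, not part of this diff):

#include <linux/interrupt.h>

static irqreturn_t my_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_request(unsigned int irq, void *my_dev)
{
	/* Accepted: shared line whose handler copes with calls during suspend. */
	int err = request_irq(irq, my_handler,
			      IRQF_SHARED | IRQF_COND_SUSPEND, "my-dev", my_dev);
	if (err)
		return err;

	/*
	 * Both of the following now fail with -EINVAL:
	 *   request_irq(irq, my_handler, IRQF_COND_SUSPEND, "my-dev", my_dev);
	 *   request_irq(irq, my_handler, IRQF_SHARED | IRQF_NO_SUSPEND |
	 *               IRQF_COND_SUSPEND, "my-dev", my_dev);
	 */
	return 0;
}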
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 3ca532592704..5204a6d1b985 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -43,9 +43,12 @@ void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
 
 	if (action->flags & IRQF_NO_SUSPEND)
 		desc->no_suspend_depth++;
+	else if (action->flags & IRQF_COND_SUSPEND)
+		desc->cond_suspend_depth++;
 
 	WARN_ON_ONCE(desc->no_suspend_depth &&
-		     desc->no_suspend_depth != desc->nr_actions);
+		     (desc->no_suspend_depth +
+		      desc->cond_suspend_depth) != desc->nr_actions);
 }
 
 /*
@@ -61,6 +64,8 @@ void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
 
 	if (action->flags & IRQF_NO_SUSPEND)
 		desc->no_suspend_depth--;
+	else if (action->flags & IRQF_COND_SUSPEND)
+		desc->cond_suspend_depth--;
 }
 
 static bool suspend_device_irq(struct irq_desc *desc, int irq)
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index ff7f47d026ac..01ca08804f51 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -248,11 +248,12 @@ static int klp_find_external_symbol(struct module *pmod, const char *name,
 	/* first, check if it's an exported symbol */
 	preempt_disable();
 	sym = find_symbol(name, NULL, NULL, true, true);
-	preempt_enable();
 	if (sym) {
 		*addr = sym->value;
+		preempt_enable();
 		return 0;
 	}
+	preempt_enable();
 
 	/* otherwise check if it's in another .o within the patch module */
 	return klp_find_object_symbol(pmod->name, name, addr);
@@ -314,12 +315,12 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 	rcu_read_lock();
 	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
 				      stack_node);
-	rcu_read_unlock();
-
 	if (WARN_ON_ONCE(!func))
-		return;
+		goto unlock;
 
 	klp_arch_set_pc(regs, (unsigned long)func->new_func);
+unlock:
+	rcu_read_unlock();
 }
 
 static int klp_disable_func(struct klp_func *func)
@@ -731,7 +732,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 	func->state = KLP_DISABLED;
 
 	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
-				    obj->kobj, func->old_name);
+				    obj->kobj, "%s", func->old_name);
 }
 
 /* parts of the initialization that is done only when the object is loaded */
@@ -807,7 +808,7 @@ static int klp_init_patch(struct klp_patch *patch)
 	patch->state = KLP_DISABLED;
 
 	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
-				   klp_root_kobj, patch->mod->name);
+				   klp_root_kobj, "%s", patch->mod->name);
 	if (ret)
 		goto unlock;
 
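Both kobject fixes above follow from kobject_init_and_add() taking a printf-style format for the kobject name: routing the symbol or module name through "%s" keeps a stray '%' in that string from being parsed as a conversion specifier. A minimal sketch of the pattern (hypothetical helper, not from this patch):

#include <linux/kobject.h>

/* Hypothetical wrapper illustrating the safe-name pattern. */
static int add_named_kobject(struct kobject *kobj, struct kobj_type *ktype,
			     struct kobject *parent, const char *name)
{
	/*
	 * kobject_init_and_add() treats its fourth argument as a printf-style
	 * format. Passing `name` directly would let a '%' inside it be parsed
	 * as a conversion specifier, so the string is routed through "%s".
	 */
	return kobject_init_and_add(kobj, ktype, parent, "%s", name);
}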
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index e16e5542bf13..6357265a31ad 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1193,6 +1193,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
 
 	if (unlikely(ret)) {
+		__set_current_state(TASK_RUNNING);
 		if (rt_mutex_has_waiters(lock))
 			remove_waiter(lock, &waiter);
 		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
diff --git a/kernel/module.c b/kernel/module.c
index b34813f725e9..cc93cf68653c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2313,11 +2313,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
 	info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
 	info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
 	mod->core_size += strtab_size;
+	mod->core_size = debug_align(mod->core_size);
 
 	/* Put string table section at end of init part of module. */
 	strsect->sh_flags |= SHF_ALLOC;
 	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
 					 info->index.str) | INIT_OFFSET_MASK;
+	mod->init_size = debug_align(mod->init_size);
 	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
 }
 
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
index cbd69d842341..2ca4a8b5fe57 100644
--- a/kernel/printk/console_cmdline.h
+++ b/kernel/printk/console_cmdline.h
@@ -3,7 +3,7 @@
 
 struct console_cmdline
 {
-	char	name[8];	/* Name of the driver	    */
+	char	name[16];	/* Name of the driver	    */
 	int	index;		/* Minor dev. to use	    */
 	char	*options;	/* Options for the driver   */
 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 01cfd69c54c6..bb0635bd74f2 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2464,6 +2464,7 @@ void register_console(struct console *newcon)
 	for (i = 0, c = console_cmdline;
 	     i < MAX_CMDLINECONSOLES && c->name[0];
 	     i++, c++) {
+		BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
 		if (strcmp(c->name, newcon->name) != 0)
 			continue;
 		if (newcon->index >= 0 &&
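struct console's name field is 16 bytes, so widening console_cmdline's buffer to match lets the strcmp() above compare full driver names, and the BUILD_BUG_ON() keeps the two sizes in lockstep at compile time. A minimal sketch of that pattern (hypothetical structures, not kernel code):

#include <linux/bug.h>
#include <linux/string.h>
#include <linux/types.h>

struct cmdline_entry { char name[16]; };
struct registered_con { char name[16]; };

static bool names_match(const struct cmdline_entry *c,
			const struct registered_con *n)
{
	/* Fails the build if the two name buffers ever diverge in size. */
	BUILD_BUG_ON(sizeof(c->name) != sizeof(n->name));
	return strcmp(c->name, n->name) == 0;
}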
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 94b2d7b88a27..80014a178342 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -82,6 +82,7 @@ static void cpuidle_idle_call(void)
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
 	unsigned int broadcast;
+	bool reflect;
 
 	/*
 	 * Check if the idle task must be rescheduled. If it is the
@@ -105,6 +106,9 @@ static void cpuidle_idle_call(void)
 	 */
 	rcu_idle_enter();
 
+	if (cpuidle_not_available(drv, dev))
+		goto use_default;
+
 	/*
 	 * Suspend-to-idle ("freeze") is a system state in which all user space
 	 * has been frozen, all I/O devices have been suspended and the only
@@ -115,30 +119,24 @@ static void cpuidle_idle_call(void)
 	 * until a proper wakeup interrupt happens.
 	 */
 	if (idle_should_freeze()) {
-		cpuidle_enter_freeze();
-		local_irq_enable();
-		goto exit_idle;
-	}
+		entered_state = cpuidle_enter_freeze(drv, dev);
+		if (entered_state >= 0) {
+			local_irq_enable();
+			goto exit_idle;
+		}
 
-	/*
-	 * Ask the cpuidle framework to choose a convenient idle state.
-	 * Fall back to the default arch idle method on errors.
-	 */
-	next_state = cpuidle_select(drv, dev);
-	if (next_state < 0) {
-use_default:
+		reflect = false;
+		next_state = cpuidle_find_deepest_state(drv, dev);
+	} else {
+		reflect = true;
 		/*
-		 * We can't use the cpuidle framework, let's use the default
-		 * idle routine.
+		 * Ask the cpuidle framework to choose a convenient idle state.
 		 */
-		if (current_clr_polling_and_test())
-			local_irq_enable();
-		else
-			arch_cpu_idle();
-
-		goto exit_idle;
+		next_state = cpuidle_select(drv, dev);
 	}
-
+	/* Fall back to the default arch idle method on errors. */
+	if (next_state < 0)
+		goto use_default;
 
 	/*
 	 * The idle task must be scheduled, it is pointless to
@@ -183,7 +181,8 @@ use_default:
 	/*
 	 * Give the governor an opportunity to reflect on the outcome
 	 */
-	cpuidle_reflect(dev, entered_state);
+	if (reflect)
+		cpuidle_reflect(dev, entered_state);
 
 exit_idle:
 	__current_set_polling();
@@ -196,6 +195,19 @@ exit_idle:
 
 	rcu_idle_exit();
 	start_critical_timings();
+	return;
+
+use_default:
+	/*
+	 * We can't use the cpuidle framework, let's use the default
+	 * idle routine.
+	 */
+	if (current_clr_polling_and_test())
+		local_irq_enable();
+	else
+		arch_cpu_idle();
+
+	goto exit_idle;
 }
 
 /*
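Taken together, the idle hunks above restructure cpuidle_idle_call() so that the arch-default fallback is reached through a single use_default label and the governor is only consulted, and reflected back to, outside the suspend-to-idle path. A simplified sketch of the resulting flow, reconstructed from the hunks above (polling-state handling, irq enabling, tick broadcast, RCU and tracing details are omitted; cpuidle_enter() is the existing entry step not shown in this diff):

#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/suspend.h>

/* Illustrative outline only, not the actual kernel function. */
static void cpuidle_idle_flow_sketch(struct cpuidle_driver *drv,
				     struct cpuidle_device *dev)
{
	int next_state, entered_state;
	bool reflect;

	if (cpuidle_not_available(drv, dev))
		goto use_default;

	if (idle_should_freeze()) {
		/* Suspend-to-idle: try to enter the deepest state directly. */
		entered_state = cpuidle_enter_freeze(drv, dev);
		if (entered_state >= 0)
			return;
		/* Freeze failed: pick the deepest state, bypassing the governor. */
		reflect = false;
		next_state = cpuidle_find_deepest_state(drv, dev);
	} else {
		/* Normal idle: let the governor pick a state. */
		reflect = true;
		next_state = cpuidle_select(drv, dev);
	}

	if (next_state < 0)
		goto use_default;

	entered_state = cpuidle_enter(drv, dev, next_state);

	/* Only governor-selected states are reported back to the governor. */
	if (reflect)
		cpuidle_reflect(dev, entered_state);
	return;

use_default:
	/* No usable cpuidle state: fall back to the architecture default. */
	arch_cpu_idle();
}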
diff --git a/kernel/sys.c b/kernel/sys.c
index 667b2e62fad2..a03d9cd23ed7 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1108,6 +1108,7 @@ DECLARE_RWSEM(uts_sem);
 /*
  * Work around broken programs that cannot handle "Linux 3.0".
  * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
+ * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
  */
 static int override_release(char __user *release, size_t len)
 {
@@ -1127,7 +1128,7 @@ static int override_release(char __user *release, size_t len)
 			break;
 		rest++;
 	}
-	v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+	v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
 	copy = clamp_t(size_t, len, 1, sizeof(buf));
 	copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
 	ret = copy_to_user(release, buf, copy + 1);
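Worked example of the new mapping: LINUX_VERSION_CODE packs the version as KERNEL_VERSION(a, b, c) = (a << 16) + (b << 8) + c, so ((code >> 8) & 0xff) extracts the minor number x and a 4.x kernel is reported as 2.6.(60 + x). Since 3.19 mapped to 2.6.59 under the old "+ 40" rule, the faked series stays monotonic. A small userspace sketch of the arithmetic (hypothetical values, not kernel code):

#include <stdio.h>

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	unsigned int code = KERNEL_VERSION(4, 0, 0);	/* stand-in for LINUX_VERSION_CODE */
	unsigned int v = ((code >> 8) & 0xff) + 60;	/* minor number plus the new offset */

	printf("2.6.%u\n", v);	/* prints 2.6.60 */
	return 0;
}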