author     Jens Axboe <axboe@kernel.dk>  2012-05-01 08:29:55 -0400
committer  Jens Axboe <axboe@kernel.dk>  2012-05-01 08:29:55 -0400
commit     0b7877d4eea3f93e3dd941999522bbd8c538cb53 (patch)
tree       ade6d4e411b9b9b569c802e3b2179826162c934c /kernel
parent     bd1a68b59c8e3bce45fb76632c64e1e063c3962d (diff)
parent     69964ea4c7b68c9399f7977aa5b9aa6539a6a98a (diff)
Merge tag 'v3.4-rc5' into for-3.5/core
The core branch is behind driver commits that we want to build on for 3.5,
hence I'm pulling in a later -rc.

Linux 3.4-rc5

Conflicts:
	Documentation/feature-removal-schedule.txt

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c               10
-rw-r--r--  kernel/cred.c                  2
-rw-r--r--  kernel/debug/debug_core.c     53
-rw-r--r--  kernel/debug/kdb/kdb_io.c      2
-rw-r--r--  kernel/events/core.c           2
-rw-r--r--  kernel/irq/Kconfig             2
-rw-r--r--  kernel/irq/debug.h            38
-rw-r--r--  kernel/irq/irqdomain.c        47
-rw-r--r--  kernel/irq_work.c              2
-rw-r--r--  kernel/itimer.c                8
-rw-r--r--  kernel/kmod.c                117
-rw-r--r--  kernel/padata.c               13
-rw-r--r--  kernel/panic.c                 2
-rw-r--r--  kernel/power/hibernate.c      18
-rw-r--r--  kernel/power/process.c         8
-rw-r--r--  kernel/power/qos.c            50
-rw-r--r--  kernel/power/suspend.c         7
-rw-r--r--  kernel/power/swap.c           28
-rw-r--r--  kernel/power/user.c           10
-rw-r--r--  kernel/rcutree.c               1
-rw-r--r--  kernel/sched/core.c           22
-rw-r--r--  kernel/sched/fair.c           18
-rw-r--r--  kernel/sched/features.h        1
-rw-r--r--  kernel/sysctl.c                8
-rw-r--r--  kernel/time/Kconfig            4
-rw-r--r--  kernel/time/tick-broadcast.c  11
-rw-r--r--  kernel/time/tick-sched.c       4
-rw-r--r--  kernel/trace/blktrace.c       18
-rw-r--r--  kernel/trace/trace.c           8
-rw-r--r--  kernel/trace/trace.h           4
-rw-r--r--  kernel/trace/trace_output.c    5
31 files changed, 315 insertions, 208 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2382683617a3..8c8bd652dd12 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -270,11 +270,11 @@ static struct file_system_type cpuset_fs_type = {
  * are online. If none are online, walk up the cpuset hierarchy
  * until we find one that does have some online cpus. If we get
  * all the way to the top and still haven't found any online cpus,
- * return cpu_online_map. Or if passed a NULL cs from an exit'ing
- * task, return cpu_online_map.
+ * return cpu_online_mask. Or if passed a NULL cs from an exit'ing
+ * task, return cpu_online_mask.
  *
  * One way or another, we guarantee to return some non-empty subset
- * of cpu_online_map.
+ * of cpu_online_mask.
  *
  * Call with callback_mutex held.
  */
@@ -867,7 +867,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	int retval;
 	int is_load_balanced;
 
-	/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
+	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
 	if (cs == &top_cpuset)
 		return -EACCES;
 
@@ -2138,7 +2138,7 @@ void __init cpuset_init_smp(void)
  *
  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
  * attached to the specified @tsk. Guaranteed to return some non-empty
- * subset of cpu_online_map, even if this means going outside the
+ * subset of cpu_online_mask, even if this means going outside the
  * tasks cpuset.
  **/
 
diff --git a/kernel/cred.c b/kernel/cred.c
index 97b36eeca4c9..e70683d9ec32 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -386,6 +386,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
 	struct cred *new;
 	int ret;
 
+	p->replacement_session_keyring = NULL;
+
 	if (
 #ifdef CONFIG_KEYS
 		!p->cred->thread_keyring &&
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 1dc53bae56e1..0557f24c6bca 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -160,37 +160,39 @@ early_param("nokgdbroundup", opt_nokgdbroundup);
  * Weak aliases for breakpoint management,
  * can be overriden by architectures when needed:
  */
-int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
+int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 {
 	int err;
 
-	err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
+	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+				BREAK_INSTR_SIZE);
 	if (err)
 		return err;
-
-	return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
-				  BREAK_INSTR_SIZE);
+	err = probe_kernel_write((char *)bpt->bpt_addr,
+				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+	return err;
 }
 
-int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
+int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 {
-	return probe_kernel_write((char *)addr,
-				  (char *)bundle, BREAK_INSTR_SIZE);
+	return probe_kernel_write((char *)bpt->bpt_addr,
+				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
 }
 
 int __weak kgdb_validate_break_address(unsigned long addr)
 {
-	char tmp_variable[BREAK_INSTR_SIZE];
+	struct kgdb_bkpt tmp;
 	int err;
-	/* Validate setting the breakpoint and then removing it. In the
+	/* Validate setting the breakpoint and then removing it. If the
 	 * remove fails, the kernel needs to emit a bad message because we
 	 * are deep trouble not being able to put things back the way we
 	 * found them.
 	 */
-	err = kgdb_arch_set_breakpoint(addr, tmp_variable);
+	tmp.bpt_addr = addr;
+	err = kgdb_arch_set_breakpoint(&tmp);
 	if (err)
 		return err;
-	err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
+	err = kgdb_arch_remove_breakpoint(&tmp);
 	if (err)
 		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
 		   "memory destroyed at: %lx", addr);
@@ -234,7 +236,6 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
  */
 int dbg_activate_sw_breakpoints(void)
 {
-	unsigned long addr;
 	int error;
 	int ret = 0;
 	int i;
@@ -243,16 +244,15 @@ int dbg_activate_sw_breakpoints(void)
 		if (kgdb_break[i].state != BP_SET)
 			continue;
 
-		addr = kgdb_break[i].bpt_addr;
-		error = kgdb_arch_set_breakpoint(addr,
-						 kgdb_break[i].saved_instr);
+		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
 		if (error) {
 			ret = error;
-			printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
+			printk(KERN_INFO "KGDB: BP install failed: %lx",
+			       kgdb_break[i].bpt_addr);
 			continue;
 		}
 
-		kgdb_flush_swbreak_addr(addr);
+		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
 		kgdb_break[i].state = BP_ACTIVE;
 	}
 	return ret;
@@ -301,7 +301,6 @@ int dbg_set_sw_break(unsigned long addr)
 
 int dbg_deactivate_sw_breakpoints(void)
 {
-	unsigned long addr;
 	int error;
 	int ret = 0;
 	int i;
@@ -309,15 +308,14 @@ int dbg_deactivate_sw_breakpoints(void)
 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
 		if (kgdb_break[i].state != BP_ACTIVE)
 			continue;
-		addr = kgdb_break[i].bpt_addr;
-		error = kgdb_arch_remove_breakpoint(addr,
-					kgdb_break[i].saved_instr);
+		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
 		if (error) {
-			printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
+			printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
+			       kgdb_break[i].bpt_addr);
 			ret = error;
 		}
 
-		kgdb_flush_swbreak_addr(addr);
+		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
 		kgdb_break[i].state = BP_SET;
 	}
 	return ret;
@@ -351,7 +349,6 @@ int kgdb_isremovedbreak(unsigned long addr)
 
 int dbg_remove_all_break(void)
 {
-	unsigned long addr;
 	int error;
 	int i;
 
@@ -359,12 +356,10 @@ int dbg_remove_all_break(void)
 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
 		if (kgdb_break[i].state != BP_ACTIVE)
 			goto setundefined;
-		addr = kgdb_break[i].bpt_addr;
-		error = kgdb_arch_remove_breakpoint(addr,
-					kgdb_break[i].saved_instr);
+		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
 		if (error)
 			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
-			       addr);
+			       kgdb_break[i].bpt_addr);
 setundefined:
 		kgdb_break[i].state = BP_UNDEFINED;
 	}
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 9b5f17da1c56..bb9520f0f6ff 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -743,7 +743,7 @@ kdb_printit:
 	kdb_input_flush();
 	c = console_drivers;
 
-	if (!dbg_io_ops->is_console) {
+	if (dbg_io_ops && !dbg_io_ops->is_console) {
 		len = strlen(moreprompt);
 		cp = moreprompt;
 		while (len--) {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a6a9ec4cd8f5..fd126f82b57c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3183,7 +3183,7 @@ static void perf_event_for_each(struct perf_event *event,
 	perf_event_for_each_child(event, func);
 	func(event);
 	list_for_each_entry(sibling, &event->sibling_list, group_entry)
-		perf_event_for_each_child(event, func);
+		perf_event_for_each_child(sibling, func);
 	mutex_unlock(&ctx->mutex);
 }
 
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index cf1a4a68ce44..d1a758bc972a 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -62,7 +62,7 @@ config IRQ_DOMAIN_DEBUG
 	help
 	  This option will show the mapping relationship between hardware irq
 	  numbers and Linux irq numbers. The mapping is exposed via debugfs
-	  in the file "virq_mapping".
+	  in the file "irq_domain_mapping".
 
 	  If you don't know what this means you don't need it.
 
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
index 97a8bfadc88a..e75e29e4434a 100644
--- a/kernel/irq/debug.h
+++ b/kernel/irq/debug.h
@@ -4,10 +4,10 @@
 
 #include <linux/kallsyms.h>
 
-#define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
-#define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
+#define ___P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
+#define ___PS(f) if (desc->istate & f) printk("%14s set\n", #f)
 /* FIXME */
-#define PD(f) do { } while (0)
+#define ___PD(f) do { } while (0)
 
 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
@@ -23,23 +23,23 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 		print_symbol("%s\n", (unsigned long)desc->action->handler);
 	}
 
-	P(IRQ_LEVEL);
-	P(IRQ_PER_CPU);
-	P(IRQ_NOPROBE);
-	P(IRQ_NOREQUEST);
-	P(IRQ_NOTHREAD);
-	P(IRQ_NOAUTOEN);
+	___P(IRQ_LEVEL);
+	___P(IRQ_PER_CPU);
+	___P(IRQ_NOPROBE);
+	___P(IRQ_NOREQUEST);
+	___P(IRQ_NOTHREAD);
+	___P(IRQ_NOAUTOEN);
 
-	PS(IRQS_AUTODETECT);
-	PS(IRQS_REPLAY);
-	PS(IRQS_WAITING);
-	PS(IRQS_PENDING);
+	___PS(IRQS_AUTODETECT);
+	___PS(IRQS_REPLAY);
+	___PS(IRQS_WAITING);
+	___PS(IRQS_PENDING);
 
-	PD(IRQS_INPROGRESS);
-	PD(IRQS_DISABLED);
-	PD(IRQS_MASKED);
+	___PD(IRQS_INPROGRESS);
+	___PD(IRQS_DISABLED);
+	___PD(IRQS_MASKED);
 }
 
-#undef P
-#undef PS
-#undef PD
+#undef ___P
+#undef ___PS
+#undef ___PD
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 3601f3fbf67c..0e0ba5f840b2 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -23,7 +23,6 @@ static LIST_HEAD(irq_domain_list);
 static DEFINE_MUTEX(irq_domain_mutex);
 
 static DEFINE_MUTEX(revmap_trees_mutex);
-static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_domain *irq_default_domain;
 
 /**
@@ -184,13 +183,16 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
 }
 
 struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+					 unsigned int max_irq,
 					 const struct irq_domain_ops *ops,
 					 void *host_data)
 {
 	struct irq_domain *domain = irq_domain_alloc(of_node,
 					IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
-	if (domain)
+	if (domain) {
+		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
 		irq_domain_add(domain);
+	}
 	return domain;
 }
 
@@ -262,22 +264,6 @@ void irq_set_default_host(struct irq_domain *domain)
 	irq_default_domain = domain;
 }
 
-/**
- * irq_set_virq_count() - Set the maximum number of linux irqs
- * @count: number of linux irqs, capped with NR_IRQS
- *
- * This is mainly for use by platforms like iSeries who want to program
- * the virtual irq number in the controller to avoid the reverse mapping
- */
-void irq_set_virq_count(unsigned int count)
-{
-	pr_debug("irq: Trying to set virq count to %d\n", count);
-
-	BUG_ON(count < NUM_ISA_INTERRUPTS);
-	if (count < NR_IRQS)
-		irq_virq_count = count;
-}
-
 static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
 			  irq_hw_number_t hwirq)
 {
@@ -320,13 +306,12 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 		pr_debug("irq: create_direct virq allocation failed\n");
 		return 0;
 	}
-	if (virq >= irq_virq_count) {
+	if (virq >= domain->revmap_data.nomap.max_irq) {
 		pr_err("ERROR: no free irqs available below %i maximum\n",
-			irq_virq_count);
+			domain->revmap_data.nomap.max_irq);
 		irq_free_desc(virq);
 		return 0;
 	}
-
 	pr_debug("irq: create_direct obtained virq %d\n", virq);
 
 	if (irq_setup_virq(domain, virq, virq)) {
@@ -350,7 +335,8 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 unsigned int irq_create_mapping(struct irq_domain *domain,
 				irq_hw_number_t hwirq)
 {
-	unsigned int virq, hint;
+	unsigned int hint;
+	int virq;
 
 	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
 
@@ -377,13 +363,13 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 		return irq_domain_legacy_revmap(domain, hwirq);
 
 	/* Allocate a virtual interrupt number */
-	hint = hwirq % irq_virq_count;
+	hint = hwirq % nr_irqs;
 	if (hint == 0)
 		hint++;
 	virq = irq_alloc_desc_from(hint, 0);
-	if (!virq)
+	if (virq <= 0)
 		virq = irq_alloc_desc_from(1, 0);
-	if (!virq) {
+	if (virq <= 0) {
 		pr_debug("irq: -> virq allocation failed\n");
 		return 0;
 	}
@@ -515,7 +501,7 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
 			      irq_hw_number_t hwirq)
 {
 	unsigned int i;
-	unsigned int hint = hwirq % irq_virq_count;
+	unsigned int hint = hwirq % nr_irqs;
 
 	/* Look for default domain if nececssary */
 	if (domain == NULL)
@@ -536,7 +522,7 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
 		if (data && (data->domain == domain) && (data->hwirq == hwirq))
 			return i;
 		i++;
-		if (i >= irq_virq_count)
+		if (i >= nr_irqs)
 			i = 1;
 	} while(i != hint);
 	return 0;
@@ -642,8 +628,9 @@ static int virq_debug_show(struct seq_file *m, void *private)
 	void *data;
 	int i;
 
-	seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
-		      "chip name", "chip data", "domain name");
+	seq_printf(m, "%-5s %-7s %-15s %-*s %s\n", "irq", "hwirq",
+		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
+		      "domain name");
 
 	for (i = 1; i < nr_irqs; i++) {
 		desc = irq_to_desc(i);
@@ -666,7 +653,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
 		seq_printf(m, "%-15s ", p);
 
 		data = irq_desc_get_chip_data(desc);
-		seq_printf(m, "0x%16p ", data);
+		seq_printf(m, data ? "0x%p " : " %p ", data);
 
 		if (desc->irq_data.domain && desc->irq_data.domain->of_node)
 			p = desc->irq_data.domain->of_node->full_name;
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index c3c46c72046e..1588e3b2871b 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -5,11 +5,13 @@
  * context. The enqueueing is NMI-safe.
  */
 
+#include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/irq_work.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/irqflags.h>
 #include <asm/processor.h>
 
 /*
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 22000c3db0dd..8d262b467573 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -284,8 +284,12 @@ SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
 	if (value) {
 		if(copy_from_user(&set_buffer, value, sizeof(set_buffer)))
 			return -EFAULT;
-	} else
-		memset((char *) &set_buffer, 0, sizeof(set_buffer));
+	} else {
+		memset(&set_buffer, 0, sizeof(set_buffer));
+		printk_once(KERN_WARNING "%s calls setitimer() with new_value NULL pointer."
+			    " Misfeature support will be removed\n",
+			    current->comm);
+	}
 
 	error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL);
 	if (error || !ovalue)
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 957a7aab8ebc..05698a7415fe 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -322,7 +322,7 @@ static void __call_usermodehelper(struct work_struct *work)
  * land has been frozen during a system-wide hibernation or suspend operation).
  * Should always be manipulated under umhelper_sem acquired for write.
  */
-static int usermodehelper_disabled = 1;
+static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;
 
 /* Number of helpers running */
 static atomic_t running_helpers = ATOMIC_INIT(0);
@@ -334,32 +334,110 @@ static atomic_t running_helpers = ATOMIC_INIT(0);
 static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
 
 /*
+ * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
+ * to become 'false'.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);
+
+/*
  * Time to wait for running_helpers to become zero before the setting of
  * usermodehelper_disabled in usermodehelper_disable() fails
  */
 #define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
 
-void read_lock_usermodehelper(void)
+int usermodehelper_read_trylock(void)
 {
+	DEFINE_WAIT(wait);
+	int ret = 0;
+
 	down_read(&umhelper_sem);
+	for (;;) {
+		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
+				TASK_INTERRUPTIBLE);
+		if (!usermodehelper_disabled)
+			break;
+
+		if (usermodehelper_disabled == UMH_DISABLED)
+			ret = -EAGAIN;
+
+		up_read(&umhelper_sem);
+
+		if (ret)
+			break;
+
+		schedule();
+		try_to_freeze();
+
+		down_read(&umhelper_sem);
+	}
+	finish_wait(&usermodehelper_disabled_waitq, &wait);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
+
+long usermodehelper_read_lock_wait(long timeout)
+{
+	DEFINE_WAIT(wait);
+
+	if (timeout < 0)
+		return -EINVAL;
+
+	down_read(&umhelper_sem);
+	for (;;) {
+		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (!usermodehelper_disabled)
+			break;
+
+		up_read(&umhelper_sem);
+
+		timeout = schedule_timeout(timeout);
+		if (!timeout)
+			break;
+
+		down_read(&umhelper_sem);
+	}
+	finish_wait(&usermodehelper_disabled_waitq, &wait);
+	return timeout;
 }
-EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
+EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);
 
-void read_unlock_usermodehelper(void)
+void usermodehelper_read_unlock(void)
 {
 	up_read(&umhelper_sem);
 }
-EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
+EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
 
 /**
- * usermodehelper_disable - prevent new helpers from being started
+ * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
+ * depth: New value to assign to usermodehelper_disabled.
+ *
+ * Change the value of usermodehelper_disabled (under umhelper_sem locked for
+ * writing) and wakeup tasks waiting for it to change.
  */
-int usermodehelper_disable(void)
+void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
+{
+	down_write(&umhelper_sem);
+	usermodehelper_disabled = depth;
+	wake_up(&usermodehelper_disabled_waitq);
+	up_write(&umhelper_sem);
+}
+
+/**
+ * __usermodehelper_disable - Prevent new helpers from being started.
+ * @depth: New value to assign to usermodehelper_disabled.
+ *
+ * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
+ */
+int __usermodehelper_disable(enum umh_disable_depth depth)
 {
 	long retval;
 
+	if (!depth)
+		return -EINVAL;
+
 	down_write(&umhelper_sem);
-	usermodehelper_disabled = 1;
+	usermodehelper_disabled = depth;
 	up_write(&umhelper_sem);
 
 	/*
@@ -374,31 +452,10 @@ int usermodehelper_disable(void)
 	if (retval)
 		return 0;
 
-	down_write(&umhelper_sem);
-	usermodehelper_disabled = 0;
-	up_write(&umhelper_sem);
+	__usermodehelper_set_disable_depth(UMH_ENABLED);
 	return -EAGAIN;
 }
 
-/**
- * usermodehelper_enable - allow new helpers to be started again
- */
-void usermodehelper_enable(void)
-{
-	down_write(&umhelper_sem);
-	usermodehelper_disabled = 0;
-	up_write(&umhelper_sem);
-}
-
-/**
- * usermodehelper_is_disabled - check if new helpers are allowed to be started
- */
-bool usermodehelper_is_disabled(void)
-{
-	return usermodehelper_disabled;
-}
-EXPORT_SYMBOL_GPL(usermodehelper_is_disabled);
-
 static void helper_lock(void)
 {
 	atomic_inc(&running_helpers);
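
Note: the kmod.c rework above replaces the old read_lock_usermodehelper()/read_unlock_usermodehelper() pair with usermodehelper_read_trylock() and usermodehelper_read_unlock(), which now fail or wait while helpers are disabled. A minimal sketch of a caller, assuming only what the hunks above introduce (the function name load_fw_blob() is hypothetical):

	static int load_fw_blob(void)
	{
		int ret;

		/* Returns -EAGAIN while helpers are fully off (UMH_DISABLED). */
		ret = usermodehelper_read_trylock();
		if (ret)
			return ret;

		/* ... safe to spawn a usermode helper here ... */

		usermodehelper_read_unlock();
		return 0;
	}
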
diff --git a/kernel/padata.c b/kernel/padata.c
index 6f10eb285ece..89fe3d1b9efb 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -1,6 +1,8 @@
 /*
  * padata.c - generic interface to process data streams in parallel
  *
+ * See Documentation/padata.txt for an api documentation.
+ *
  * Copyright (C) 2008, 2009 secunet Security Networks AG
  * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
  *
@@ -354,13 +356,13 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
 	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
 		return -ENOMEM;
 
-	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask);
+	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
 	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
 		free_cpumask_var(pd->cpumask.cbcpu);
 		return -ENOMEM;
 	}
 
-	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask);
+	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
 	return 0;
 }
 
@@ -564,7 +566,7 @@ EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
 static bool padata_validate_cpumask(struct padata_instance *pinst,
 				    const struct cpumask *cpumask)
 {
-	if (!cpumask_intersects(cpumask, cpu_active_mask)) {
+	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
 		pinst->flags |= PADATA_INVALID;
 		return false;
 	}
@@ -678,7 +680,7 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
 {
 	struct parallel_data *pd;
 
-	if (cpumask_test_cpu(cpu, cpu_active_mask)) {
+	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
 		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
 				     pinst->cpumask.cbcpu);
 		if (!pd)
@@ -746,6 +748,9 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
 			return -ENOMEM;
 
 		padata_replace(pinst, pd);
+
+		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
+		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
 	}
 
 	return 0;
diff --git a/kernel/panic.c b/kernel/panic.c
index 80aed44e345a..8ed89a175d79 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -97,7 +97,7 @@ void panic(const char *fmt, ...)
 	/*
 	 * Avoid nested stack-dumping if a panic occurs during oops processing
 	 */
-	if (!oops_in_progress)
+	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
 		dump_stack();
 #endif
 
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 0a186cfde788..e09dfbfeecee 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -16,7 +16,6 @@
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/async.h>
-#include <linux/kmod.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
 #include <linux/mount.h>
@@ -611,14 +610,10 @@ int hibernate(void)
 	if (error)
 		goto Exit;
 
-	error = usermodehelper_disable();
-	if (error)
-		goto Exit;
-
 	/* Allocate memory management structures */
 	error = create_basic_memory_bitmaps();
 	if (error)
-		goto Enable_umh;
+		goto Exit;
 
 	printk(KERN_INFO "PM: Syncing filesystems ... ");
 	sys_sync();
@@ -661,8 +656,6 @@ int hibernate(void)
 
  Free_bitmaps:
 	free_basic_memory_bitmaps();
- Enable_umh:
-	usermodehelper_enable();
  Exit:
 	pm_notifier_call_chain(PM_POST_HIBERNATION);
 	pm_restore_console();
@@ -777,15 +770,9 @@ static int software_resume(void)
 	if (error)
 		goto close_finish;
 
-	error = usermodehelper_disable();
-	if (error)
-		goto close_finish;
-
 	error = create_basic_memory_bitmaps();
-	if (error) {
-		usermodehelper_enable();
+	if (error)
 		goto close_finish;
-	}
 
 	pr_debug("PM: Preparing processes for restore.\n");
 	error = freeze_processes();
@@ -806,7 +793,6 @@ static int software_resume(void)
 	thaw_processes();
  Done:
 	free_basic_memory_bitmaps();
-	usermodehelper_enable();
  Finish:
 	pm_notifier_call_chain(PM_POST_RESTORE);
 	pm_restore_console();
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0d2aeb226108..19db29f67558 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -16,6 +16,7 @@
 #include <linux/freezer.h>
 #include <linux/delay.h>
 #include <linux/workqueue.h>
+#include <linux/kmod.h>
 
 /*
  * Timeout for stopping processes
@@ -122,6 +123,10 @@ int freeze_processes(void)
 {
 	int error;
 
+	error = __usermodehelper_disable(UMH_FREEZING);
+	if (error)
+		return error;
+
 	if (!pm_freezing)
 		atomic_inc(&system_freezing_cnt);
 
@@ -130,6 +135,7 @@ int freeze_processes(void)
 	error = try_to_freeze_tasks(true);
 	if (!error) {
 		printk("done.");
+		__usermodehelper_set_disable_depth(UMH_DISABLED);
 		oom_killer_disable();
 	}
 	printk("\n");
@@ -187,6 +193,8 @@ void thaw_processes(void)
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 
+	usermodehelper_enable();
+
 	schedule();
 	printk("done.\n");
 }
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index d6d6dbd1ecc0..6a031e684026 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -230,6 +230,21 @@ int pm_qos_request_active(struct pm_qos_request *req)
 EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
 /**
+ * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
+ * @work: work struct for the delayed work (timeout)
+ *
+ * This cancels the timeout request by falling back to the default at timeout.
+ */
+static void pm_qos_work_fn(struct work_struct *work)
+{
+	struct pm_qos_request *req = container_of(to_delayed_work(work),
+						  struct pm_qos_request,
+						  work);
+
+	pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+}
+
+/**
  * pm_qos_add_request - inserts new qos request into the list
  * @req: pointer to a preallocated handle
  * @pm_qos_class: identifies which list of qos request to use
@@ -253,6 +268,7 @@ void pm_qos_add_request(struct pm_qos_request *req,
 		return;
 	}
 	req->pm_qos_class = pm_qos_class;
+	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
 	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
 			     &req->node, PM_QOS_ADD_REQ, value);
 }
@@ -279,6 +295,9 @@ void pm_qos_update_request(struct pm_qos_request *req,
 		return;
 	}
 
+	if (delayed_work_pending(&req->work))
+		cancel_delayed_work_sync(&req->work);
+
 	if (new_value != req->node.prio)
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
@@ -287,6 +306,34 @@ void pm_qos_update_request(struct pm_qos_request *req,
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
 
 /**
+ * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
+ * @req : handle to list element holding a pm_qos request to use
+ * @new_value: defines the temporal qos request
+ * @timeout_us: the effective duration of this qos request in usecs.
+ *
+ * After timeout_us, this qos request is cancelled automatically.
+ */
+void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
+				   unsigned long timeout_us)
+{
+	if (!req)
+		return;
+	if (WARN(!pm_qos_request_active(req),
+		 "%s called for unknown object.", __func__))
+		return;
+
+	if (delayed_work_pending(&req->work))
+		cancel_delayed_work_sync(&req->work);
+
+	if (new_value != req->node.prio)
+		pm_qos_update_target(
+			pm_qos_array[req->pm_qos_class]->constraints,
+			&req->node, PM_QOS_UPDATE_REQ, new_value);
+
+	schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
+}
+
+/**
  * pm_qos_remove_request - modifies an existing qos request
  * @req: handle to request list element
  *
@@ -305,6 +352,9 @@ void pm_qos_remove_request(struct pm_qos_request *req)
 		return;
 	}
 
+	if (delayed_work_pending(&req->work))
+		cancel_delayed_work_sync(&req->work);
+
 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
 			     &req->node, PM_QOS_REMOVE_REQ,
 			     PM_QOS_DEFAULT_VALUE);
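
Note: the new pm_qos_update_request_timeout() above lets a driver raise a PM QoS constraint only for a bounded window; the delayed work added in this hunk drops it back to PM_QOS_DEFAULT_VALUE when the timeout fires. A minimal usage sketch under those assumptions (demo_req and demo_burst() are hypothetical names, not part of this merge):

	static struct pm_qos_request demo_req;

	static void demo_burst(void)
	{
		pm_qos_add_request(&demo_req, PM_QOS_CPU_DMA_LATENCY,
				   PM_QOS_DEFAULT_VALUE);
		/* Hold a 100 usec latency bound for the next 10000 usecs,
		 * then let the timeout handler restore the default. */
		pm_qos_update_request_timeout(&demo_req, 100, 10000);
	}
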
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 88e5c967370d..396d262b8fd0 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -12,7 +12,6 @@
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/kmod.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/syscalls.h>
@@ -102,17 +101,12 @@ static int suspend_prepare(void)
 	if (error)
 		goto Finish;
 
-	error = usermodehelper_disable();
-	if (error)
-		goto Finish;
-
 	error = suspend_freeze_processes();
 	if (!error)
 		return 0;
 
 	suspend_stats.failed_freeze++;
 	dpm_save_failed_step(SUSPEND_FREEZE);
-	usermodehelper_enable();
  Finish:
 	pm_notifier_call_chain(PM_POST_SUSPEND);
 	pm_restore_console();
@@ -259,7 +253,6 @@ int suspend_devices_and_enter(suspend_state_t state)
 static void suspend_finish(void)
 {
 	suspend_thaw_processes();
-	usermodehelper_enable();
 	pm_notifier_call_chain(PM_POST_SUSPEND);
 	pm_restore_console();
 }
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8742fd013a94..eef311a58a64 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -51,6 +51,23 @@
 
 #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
 
+/*
+ * Number of free pages that are not high.
+ */
+static inline unsigned long low_free_pages(void)
+{
+	return nr_free_pages() - nr_free_highpages();
+}
+
+/*
+ * Number of pages required to be kept free while writing the image. Always
+ * half of all available low pages before the writing starts.
+ */
+static inline unsigned long reqd_free_pages(void)
+{
+	return low_free_pages() / 2;
+}
+
 struct swap_map_page {
 	sector_t entries[MAP_PAGE_ENTRIES];
 	sector_t next_swap;
@@ -72,7 +89,7 @@ struct swap_map_handle {
 	sector_t cur_swap;
 	sector_t first_sector;
 	unsigned int k;
-	unsigned long nr_free_pages, written;
+	unsigned long reqd_free_pages;
 	u32 crc32;
 };
 
@@ -316,8 +333,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
 		goto err_rel;
 	}
 	handle->k = 0;
-	handle->nr_free_pages = nr_free_pages() >> 1;
-	handle->written = 0;
+	handle->reqd_free_pages = reqd_free_pages();
 	handle->first_sector = handle->cur_swap;
 	return 0;
 err_rel:
@@ -352,11 +368,11 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
 		handle->cur_swap = offset;
 		handle->k = 0;
 	}
-	if (bio_chain && ++handle->written > handle->nr_free_pages) {
+	if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
 		error = hib_wait_on_bio_chain(bio_chain);
 		if (error)
 			goto out;
-		handle->written = 0;
+		handle->reqd_free_pages = reqd_free_pages();
 	}
  out:
 	return error;
@@ -618,7 +634,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	 * Adjust number of free pages after all allocations have been done.
 	 * We don't want to run out of pages when writing.
 	 */
-	handle->nr_free_pages = nr_free_pages() >> 1;
+	handle->reqd_free_pages = reqd_free_pages();
 
 	/*
 	 * Start the CRC32 thread.
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 33c4329205af..91b0fd021a95 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -12,7 +12,6 @@
 #include <linux/suspend.h>
 #include <linux/syscalls.h>
 #include <linux/reboot.h>
-#include <linux/kmod.h>
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/miscdevice.h>
@@ -222,14 +221,8 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		sys_sync();
 		printk("done.\n");
 
-		error = usermodehelper_disable();
-		if (error)
-			break;
-
 		error = freeze_processes();
-		if (error)
-			usermodehelper_enable();
-		else
+		if (!error)
 			data->frozen = 1;
 		break;
 
@@ -238,7 +231,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 			break;
 		pm_restore_gfp_mask();
 		thaw_processes();
-		usermodehelper_enable();
 		data->frozen = 0;
 		break;
 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1050d6d3922c..d0c5baf1ab18 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1820,7 +1820,6 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	 * a quiescent state betweentimes.
 	 */
 	local_irq_save(flags);
-	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
 	rdp = this_cpu_ptr(rsp->rda);
 
 	/* Add the callback to our list. */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index afc6d7e71557..6f61fd44a5c5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6405,16 +6405,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
-			if (sd && (sd->flags & SD_OVERLAP))
-				free_sched_groups(sd->groups, 0);
-			kfree(*per_cpu_ptr(sdd->sd, j));
-			kfree(*per_cpu_ptr(sdd->sg, j));
-			kfree(*per_cpu_ptr(sdd->sgp, j));
+			struct sched_domain *sd;
+
+			if (sdd->sd) {
+				sd = *per_cpu_ptr(sdd->sd, j);
+				if (sd && (sd->flags & SD_OVERLAP))
+					free_sched_groups(sd->groups, 0);
+				kfree(*per_cpu_ptr(sdd->sd, j));
+			}
+
+			if (sdd->sg)
+				kfree(*per_cpu_ptr(sdd->sg, j));
+			if (sdd->sgp)
+				kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
+		sdd->sd = NULL;
 		free_percpu(sdd->sg);
+		sdd->sg = NULL;
 		free_percpu(sdd->sgp);
+		sdd->sgp = NULL;
 	}
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0d97ebdc58f0..e9553640c1c3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -784,7 +784,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se))
-		list_add_tail(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
+		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
 #endif
 	cfs_rq->nr_running++;
 }
@@ -3215,6 +3215,8 @@ static int move_one_task(struct lb_env *env)
 
 static unsigned long task_h_load(struct task_struct *p);
 
+static const unsigned int sched_nr_migrate_break = 32;
+
 /*
  * move_tasks tries to move up to load_move weighted load from busiest to
  * this_rq, as part of a balancing operation within domain "sd".
@@ -3242,7 +3244,7 @@ static int move_tasks(struct lb_env *env)
 
 		/* take a breather every nr_migrate tasks */
 		if (env->loop > env->loop_break) {
-			env->loop_break += sysctl_sched_nr_migrate;
+			env->loop_break += sched_nr_migrate_break;
 			env->flags |= LBF_NEED_BREAK;
 			break;
 		}
@@ -3252,7 +3254,7 @@ static int move_tasks(struct lb_env *env)
 
 		load = task_h_load(p);
 
-		if (load < 16 && !env->sd->nr_balance_failed)
+		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
 			goto next;
 
 		if ((load / 2) > env->load_move)
@@ -4407,7 +4409,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.dst_cpu	= this_cpu,
 		.dst_rq		= this_rq,
 		.idle		= idle,
-		.loop_break	= sysctl_sched_nr_migrate,
+		.loop_break	= sched_nr_migrate_break,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4445,10 +4447,10 @@ redo:
 		 * correctly treated as an imbalance.
 		 */
 		env.flags |= LBF_ALL_PINNED;
 		env.load_move = imbalance;
 		env.src_cpu = busiest->cpu;
 		env.src_rq = busiest;
-		env.loop_max = busiest->nr_running;
+		env.loop_max = min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
 		local_irq_save(flags);
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index e61fd73913d0..de00a486c5c6 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -68,3 +68,4 @@ SCHED_FEAT(TTWU_QUEUE, true)
 
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
+SCHED_FEAT(LB_MIN, false)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 52b3a06a02f8..4ab11879aeb4 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -170,7 +170,7 @@ static int proc_taint(struct ctl_table *table, int write,
 #endif
 
 #ifdef CONFIG_PRINTK
-static int proc_dmesg_restrict(struct ctl_table *table, int write,
+static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
 				void __user *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
@@ -703,7 +703,7 @@ static struct ctl_table kern_table[] = {
 		.data = &dmesg_restrict,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = proc_dointvec_minmax,
+		.proc_handler = proc_dointvec_minmax_sysadmin,
 		.extra1 = &zero,
 		.extra2 = &one,
 	},
@@ -712,7 +712,7 @@ static struct ctl_table kern_table[] = {
 		.data = &kptr_restrict,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = proc_dmesg_restrict,
+		.proc_handler = proc_dointvec_minmax_sysadmin,
 		.extra1 = &zero,
 		.extra2 = &two,
 	},
@@ -1943,7 +1943,7 @@ static int proc_taint(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_PRINTK
-static int proc_dmesg_restrict(struct ctl_table *table, int write,
+static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
 			       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	if (write && !capable(CAP_SYS_ADMIN))
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 2cf9cc7aa103..a20dc8a3c949 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -1,6 +1,10 @@
 #
 # Timer subsystem related configuration options
 #
+
+# Core internal switch. Selected by NO_HZ / HIGH_RES_TIMERS. This is
+# only related to the tick functionality. Oneshot clockevent devices
+# are supported independ of this.
 config TICK_ONESHOT
 	bool
 
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index e883f57a3cd3..f113755695e2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -346,7 +346,8 @@ int tick_resume_broadcast(void)
 					   tick_get_broadcast_mask());
 			break;
 		case TICKDEV_MODE_ONESHOT:
-			broadcast = tick_resume_broadcast_oneshot(bc);
+			if (!cpumask_empty(tick_get_broadcast_mask()))
+				broadcast = tick_resume_broadcast_oneshot(bc);
 			break;
 		}
 	}
@@ -373,6 +374,9 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
 {
 	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 
+	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
+		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+
 	return clockevents_program_event(bc, expires, force);
 }
 
@@ -531,7 +535,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 	int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
 
 	bc->event_handler = tick_handle_oneshot_broadcast;
-	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 
 	/* Take the do_timer update */
 	tick_do_timer_cpu = cpu;
@@ -549,6 +552,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 			   to_cpumask(tmpmask));
 
 	if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
+		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 		tick_broadcast_init_next_event(to_cpumask(tmpmask),
 					       tick_next_period);
 		tick_broadcast_set_event(tick_next_period, 1);
@@ -575,15 +579,12 @@ void tick_broadcast_switch_to_oneshot(void)
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
-	if (cpumask_empty(tick_get_broadcast_mask()))
-		goto end;
 
 	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
 	bc = tick_broadcast_device.evtdev;
 	if (bc)
 		tick_broadcast_setup_oneshot(bc);
 
-end:
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3526038f2836..6a3a5b9ff561 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -534,9 +534,9 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 				   hrtimer_get_expires(&ts->sched_timer), 0))
 				break;
 		}
-		/* Update jiffies and reread time */
-		tick_do_update_jiffies64(now);
+		/* Reread time and update jiffies */
 		now = ktime_get();
+		tick_do_update_jiffies64(now);
 	}
 }
 
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index cdea7b56b0c9..c0bd0308741c 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -311,13 +311,6 @@ int blk_trace_remove(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_trace_remove);
 
-static int blk_dropped_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-
-	return 0;
-}
-
 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
 				size_t count, loff_t *ppos)
 {
@@ -331,18 +324,11 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
 
 static const struct file_operations blk_dropped_fops = {
 	.owner = THIS_MODULE,
-	.open = blk_dropped_open,
+	.open = simple_open,
 	.read = blk_dropped_read,
 	.llseek = default_llseek,
 };
 
-static int blk_msg_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-
-	return 0;
-}
-
 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
 			     size_t count, loff_t *ppos)
 {
@@ -371,7 +357,7 @@ static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
 
 static const struct file_operations blk_msg_fops = {
 	.owner = THIS_MODULE,
-	.open = blk_msg_open,
+	.open = simple_open,
 	.write = blk_msg_write,
 	.llseek = noop_llseek,
 };
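
Note: the two removed open routines were byte-for-byte copies of the generic helper that replaces them; simple_open() in fs/libfs.c is essentially:

	int simple_open(struct inode *inode, struct file *file)
	{
		/* Hand the debugfs/i_private cookie to the file. */
		if (inode->i_private)
			file->private_data = inode->i_private;
		return 0;
	}
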
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ed7b5d1e12f4..2a22255c1010 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4629,7 +4629,8 @@ static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
 {
-	struct ring_buffer *buffer = filp->private_data;
+	struct trace_array *tr = filp->private_data;
+	struct ring_buffer *buffer = tr->buffer;
 	char buf[64];
 	int r;
 
@@ -4647,7 +4648,8 @@ static ssize_t
 rb_simple_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
-	struct ring_buffer *buffer = filp->private_data;
+	struct trace_array *tr = filp->private_data;
+	struct ring_buffer *buffer = tr->buffer;
 	unsigned long val;
 	int ret;
 
@@ -4734,7 +4736,7 @@ static __init int tracer_init_debugfs(void)
 			  &trace_clock_fops);
 
 	trace_create_file("tracing_on", 0644, d_tracer,
-			  global_trace.buffer, &rb_simple_fops);
+			  &global_trace, &rb_simple_fops);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 95059f091a24..f95d65da6db8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -836,11 +836,11 @@ extern const char *__stop___trace_bprintk_fmt[];
 	 filter)
 #include "trace_entries.h"
 
-#ifdef CONFIG_FUNCTION_TRACER
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
 int perf_ftrace_event_register(struct ftrace_event_call *call,
 			       enum trace_reg type, void *data);
 #else
 #define perf_ftrace_event_register NULL
-#endif /* CONFIG_FUNCTION_TRACER */
+#endif
 
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 859fae6b1825..df611a0e76c5 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -652,6 +652,8 @@ int trace_print_lat_context(struct trace_iterator *iter)
 {
 	u64 next_ts;
 	int ret;
+	/* trace_find_next_entry will reset ent_size */
+	int ent_size = iter->ent_size;
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry = iter->ent,
 			   *next_entry = trace_find_next_entry(iter, NULL,
@@ -660,6 +662,9 @@ int trace_print_lat_context(struct trace_iterator *iter)
 	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
 	unsigned long rel_usecs;
 
+	/* Restore the original ent_size */
+	iter->ent_size = ent_size;
+
 	if (!next_entry)
 		next_ts = iter->ts;
 	rel_usecs = ns2usecs(next_ts - iter->ts);