author    Mark Brown <broonie@opensource.wolfsonmicro.com>  2012-05-13 08:32:54 -0400
committer Mark Brown <broonie@opensource.wolfsonmicro.com>  2012-05-13 08:32:54 -0400
commit    dc2af52c0d6d00fd530e4a5e300834cdb1bb1c1c (patch)
tree      4573b99fb11e5b93bd011045039b370ddafe45e6 /kernel
parent    f1992dde7fef6713a469a5a142b86812b8a47f9e (diff)
parent    36be50515fe2aef61533b516fa2576a2c7fe7664 (diff)
Merge tag 'v3.4-rc7' into for-3.5
Linux 3.4-rc7

Conflicts:
	drivers/base/regmap/regmap.c (overlap with bug fixes)
	sound/soc/blackfin/bf5xx-ssm2602.c (overlap with bug fixes)
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/compat.c              | 63
-rw-r--r--  kernel/events/core.c         |  2
-rw-r--r--  kernel/fork.c                |  3
-rw-r--r--  kernel/irq/debug.h           | 38
-rw-r--r--  kernel/power/swap.c          | 28
-rw-r--r--  kernel/rcutree.c             |  1
-rw-r--r--  kernel/sched/core.c          | 22
-rw-r--r--  kernel/sched/fair.c          | 18
-rw-r--r--  kernel/sched/features.h      |  1
-rw-r--r--  kernel/time/tick-broadcast.c | 13
-rw-r--r--  kernel/trace/trace.c         |  8
-rw-r--r--  kernel/trace/trace.h         |  4
-rw-r--r--  kernel/trace/trace_output.c  |  5
13 files changed, 136 insertions(+), 70 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index 74ff8498809a..d2c67aa49ae6 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -372,25 +372,54 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
 
 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
 
-asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
-		compat_old_sigset_t __user *oset)
+/*
+ * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the
+ * blocked set of signals to the supplied signal set
+ */
+static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set)
 {
-	old_sigset_t s;
-	long ret;
-	mm_segment_t old_fs;
+	memcpy(blocked->sig, &set, sizeof(set));
+}
 
-	if (set && get_user(s, set))
-		return -EFAULT;
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_sigprocmask(how,
-			      set ? (old_sigset_t __user *) &s : NULL,
-			      oset ? (old_sigset_t __user *) &s : NULL);
-	set_fs(old_fs);
-	if (ret == 0)
-		if (oset)
-			ret = put_user(s, oset);
-	return ret;
+asmlinkage long compat_sys_sigprocmask(int how,
+				       compat_old_sigset_t __user *nset,
+				       compat_old_sigset_t __user *oset)
+{
+	old_sigset_t old_set, new_set;
+	sigset_t new_blocked;
+
+	old_set = current->blocked.sig[0];
+
+	if (nset) {
+		if (get_user(new_set, nset))
+			return -EFAULT;
+		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
+
+		new_blocked = current->blocked;
+
+		switch (how) {
+		case SIG_BLOCK:
+			sigaddsetmask(&new_blocked, new_set);
+			break;
+		case SIG_UNBLOCK:
+			sigdelsetmask(&new_blocked, new_set);
+			break;
+		case SIG_SETMASK:
+			compat_sig_setmask(&new_blocked, new_set);
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		set_current_blocked(&new_blocked);
+	}
+
+	if (oset) {
+		if (put_user(old_set, oset))
+			return -EFAULT;
+	}
+
+	return 0;
 }
 
 #endif
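
Note on the replacement above: compat_sig_setmask() deliberately overwrites only the first, compat-sized word of the blocked mask, leaving any higher signal words intact. A minimal userspace sketch of that word-sized update, using simplified stand-in types rather than the kernel's sigset_t/compat_sigset_word and assuming a little-endian layout:

/* Sketch only: stand-in types, little-endian assumed. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint64_t sig[2]; } ksigset_t;  /* stand-in for sigset_t */
typedef uint32_t compat_word_t;                 /* stand-in for compat_sigset_word */

/* Copy only sizeof(compat word) bytes: the low half of sig[0] changes,
 * the high half and sig[1] keep their old contents. */
static void compat_sig_setmask(ksigset_t *blocked, compat_word_t set)
{
	memcpy(blocked->sig, &set, sizeof(set));
}

int main(void)
{
	ksigset_t blocked = { .sig = { 0xffffffffffffffffULL, 0 } };

	compat_sig_setmask(&blocked, 0x5);
	printf("sig[0] = %#llx\n", (unsigned long long)blocked.sig[0]);
	/* prints sig[0] = 0xffffffff00000005 on little-endian */
	return 0;
}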
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a6a9ec4cd8f5..fd126f82b57c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3183,7 +3183,7 @@ static void perf_event_for_each(struct perf_event *event,
 	perf_event_for_each_child(event, func);
 	func(event);
 	list_for_each_entry(sibling, &event->sibling_list, group_entry)
-		perf_event_for_each_child(event, func);
+		perf_event_for_each_child(sibling, func);
 	mutex_unlock(&ctx->mutex);
 }
 
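
The one-word fix above corrects a classic iteration slip: the loop body was handed the list head (event) instead of the loop cursor (sibling). A generic illustration of the bug class, with a hypothetical node type rather than perf's structures:

#include <stdio.h>

struct node { int id; struct node *next; };

static void visit(struct node *n)
{
	printf("visit %d\n", n->id);
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	for (struct node *cur = &a; cur; cur = cur->next)
		visit(cur);	/* the buggy form passed &a on every pass */
	return 0;
}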
diff --git a/kernel/fork.c b/kernel/fork.c
index b9372a0bff18..687a15d56243 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -47,6 +47,7 @@
 #include <linux/audit.h>
 #include <linux/memcontrol.h>
 #include <linux/ftrace.h>
+#include <linux/proc_fs.h>
 #include <linux/profile.h>
 #include <linux/rmap.h>
 #include <linux/ksm.h>
@@ -1464,6 +1465,8 @@ bad_fork_cleanup_io:
 	if (p->io_context)
 		exit_io_context(p);
 bad_fork_cleanup_namespaces:
+	if (unlikely(clone_flags & CLONE_NEWPID))
+		pid_ns_release_proc(p->nsproxy->pid_ns);
 	exit_task_namespaces(p);
 bad_fork_cleanup_mm:
 	if (p->mm)
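
The fork.c hunk extends the error-path unwind: a new cleanup step is slotted into the chain of bad_fork_* labels so that resources acquired later are released first. A compact sketch of that goto-unwind idiom, with hypothetical resources rather than the real fork state:

#include <stdlib.h>

/* Acquire two resources; on failure, unwind in reverse order of
 * acquisition, exactly like the bad_fork_* label chain. */
static int setup(char **pa, char **pb)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto err;
	b = malloc(16);
	if (!b)
		goto err_free_a;	/* later failures release earlier work */

	*pa = a;
	*pb = b;
	return 0;

err_free_a:
	free(a);
err:
	return -1;
}

int main(void)
{
	char *a, *b;

	if (setup(&a, &b))
		return 1;
	free(b);
	free(a);
	return 0;
}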
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
index 97a8bfadc88a..e75e29e4434a 100644
--- a/kernel/irq/debug.h
+++ b/kernel/irq/debug.h
@@ -4,10 +4,10 @@
 
 #include <linux/kallsyms.h>
 
-#define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
-#define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
+#define ___P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
+#define ___PS(f) if (desc->istate & f) printk("%14s set\n", #f)
 /* FIXME */
-#define PD(f) do { } while (0)
+#define ___PD(f) do { } while (0)
 
 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
@@ -23,23 +23,23 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 		print_symbol("%s\n", (unsigned long)desc->action->handler);
 	}
 
-	P(IRQ_LEVEL);
-	P(IRQ_PER_CPU);
-	P(IRQ_NOPROBE);
-	P(IRQ_NOREQUEST);
-	P(IRQ_NOTHREAD);
-	P(IRQ_NOAUTOEN);
+	___P(IRQ_LEVEL);
+	___P(IRQ_PER_CPU);
+	___P(IRQ_NOPROBE);
+	___P(IRQ_NOREQUEST);
+	___P(IRQ_NOTHREAD);
+	___P(IRQ_NOAUTOEN);
 
-	PS(IRQS_AUTODETECT);
-	PS(IRQS_REPLAY);
-	PS(IRQS_WAITING);
-	PS(IRQS_PENDING);
+	___PS(IRQS_AUTODETECT);
+	___PS(IRQS_REPLAY);
+	___PS(IRQS_WAITING);
+	___PS(IRQS_PENDING);
 
-	PD(IRQS_INPROGRESS);
-	PD(IRQS_DISABLED);
-	PD(IRQS_MASKED);
+	___PD(IRQS_INPROGRESS);
+	___PD(IRQS_DISABLED);
+	___PD(IRQS_MASKED);
 }
 
-#undef P
-#undef PS
-#undef PD
+#undef ___P
+#undef ___PS
+#undef ___PD
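
The renames above presumably guard against macro namespace collisions: a header-exported function-like macro with a name as short as P(f) expands anywhere `P(` appears in an including file. A small illustration (hypothetical, not kernel code):

#include <stdio.h>

/* Prefixed like the patch; a bare P(f) macro would mangle the function
 * definition below, because `P(` would expand as the macro. */
#define ___P(f) printf("%14s set\n", #f)

static int P(int v)	/* legal only because the macro is ___P, not P */
{
	return v;
}

int main(void)
{
	___P(IRQ_LEVEL);	/* stringifies its argument */
	return P(0);
}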
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8742fd013a94..eef311a58a64 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -51,6 +51,23 @@
 
 #define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
 
+/*
+ * Number of free pages that are not high.
+ */
+static inline unsigned long low_free_pages(void)
+{
+	return nr_free_pages() - nr_free_highpages();
+}
+
+/*
+ * Number of pages required to be kept free while writing the image. Always
+ * half of all available low pages before the writing starts.
+ */
+static inline unsigned long reqd_free_pages(void)
+{
+	return low_free_pages() / 2;
+}
+
 struct swap_map_page {
 	sector_t entries[MAP_PAGE_ENTRIES];
 	sector_t next_swap;
@@ -72,7 +89,7 @@ struct swap_map_handle {
 	sector_t cur_swap;
 	sector_t first_sector;
 	unsigned int k;
-	unsigned long nr_free_pages, written;
+	unsigned long reqd_free_pages;
 	u32 crc32;
 };
 
@@ -316,8 +333,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
 		goto err_rel;
 	}
 	handle->k = 0;
-	handle->nr_free_pages = nr_free_pages() >> 1;
-	handle->written = 0;
+	handle->reqd_free_pages = reqd_free_pages();
 	handle->first_sector = handle->cur_swap;
 	return 0;
 err_rel:
@@ -352,11 +368,11 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
 		handle->cur_swap = offset;
 		handle->k = 0;
 	}
-	if (bio_chain && ++handle->written > handle->nr_free_pages) {
+	if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
 		error = hib_wait_on_bio_chain(bio_chain);
 		if (error)
 			goto out;
-		handle->written = 0;
+		handle->reqd_free_pages = reqd_free_pages();
 	}
 out:
 	return error;
@@ -618,7 +634,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	 * Adjust number of free pages after all allocations have been done.
 	 * We don't want to run out of pages when writing.
 	 */
-	handle->nr_free_pages = nr_free_pages() >> 1;
+	handle->reqd_free_pages = reqd_free_pages();
 
 	/*
 	 * Start the CRC32 thread.
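
Taken together, the swap.c hunks replace a pages-written counter with a live measurement: writing throttles whenever actually-free low memory dips below half of what was free at the start, and the threshold is re-derived after each wait. A self-contained sketch of that policy with simulated numbers (free_pages_now, wait_for_io() and all constants are invented for illustration):

#include <stdio.h>

static unsigned long free_pages_now = 1000;	/* simulated low_free_pages() */

static unsigned long reqd_free_pages(void)
{
	return free_pages_now / 2;	/* keep half of what is free right now */
}

static void wait_for_io(void)
{
	free_pages_now += 300;	/* completed writeback frees pages again */
}

int main(void)
{
	unsigned long threshold = reqd_free_pages();

	for (int page = 0; page < 20; page++) {
		free_pages_now -= 60;	/* each queued page consumes memory */
		if (free_pages_now <= threshold) {
			wait_for_io();
			threshold = reqd_free_pages();	/* re-derive threshold */
			printf("throttled at page %d, new threshold %lu\n",
			       page, threshold);
		}
	}
	return 0;
}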
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1050d6d3922c..d0c5baf1ab18 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1820,7 +1820,6 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	 * a quiescent state betweentimes.
 	 */
 	local_irq_save(flags);
-	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
 	rdp = this_cpu_ptr(rsp->rda);
 
 	/* Add the callback to our list. */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4603b9d8f30a..0533a688ce22 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6405,16 +6405,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
-			if (sd && (sd->flags & SD_OVERLAP))
-				free_sched_groups(sd->groups, 0);
-			kfree(*per_cpu_ptr(sdd->sd, j));
-			kfree(*per_cpu_ptr(sdd->sg, j));
-			kfree(*per_cpu_ptr(sdd->sgp, j));
+			struct sched_domain *sd;
+
+			if (sdd->sd) {
+				sd = *per_cpu_ptr(sdd->sd, j);
+				if (sd && (sd->flags & SD_OVERLAP))
+					free_sched_groups(sd->groups, 0);
+				kfree(*per_cpu_ptr(sdd->sd, j));
+			}
+
+			if (sdd->sg)
+				kfree(*per_cpu_ptr(sdd->sg, j));
+			if (sdd->sgp)
+				kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
+		sdd->sd = NULL;
 		free_percpu(sdd->sg);
+		sdd->sg = NULL;
 		free_percpu(sdd->sgp);
+		sdd->sgp = NULL;
 	}
 }
 
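
The __sdt_free() hardening follows a common teardown pattern: test each table pointer before walking it, and NULL the pointers after freeing so a repeated or partially-initialized teardown is harmless. A generic C sketch of the same idea (hypothetical two-table structure, not sd_data):

#include <stdlib.h>

struct tables {
	int **sd;	/* stand-ins for the per-CPU tables */
	int **sg;
};

static void tables_free(struct tables *t, int ncpu)
{
	if (t->sd) {
		for (int j = 0; j < ncpu; j++)
			free(t->sd[j]);	/* free(NULL) is a no-op */
		free(t->sd);
		t->sd = NULL;	/* make a second call harmless */
	}
	if (t->sg) {
		for (int j = 0; j < ncpu; j++)
			free(t->sg[j]);
		free(t->sg);
		t->sg = NULL;
	}
}

int main(void)
{
	struct tables t = { NULL, NULL };

	tables_free(&t, 4);	/* safe even if allocation never ran */
	tables_free(&t, 4);	/* and safe to call twice */
	return 0;
}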
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0d97ebdc58f0..e9553640c1c3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -784,7 +784,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se))
-		list_add_tail(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
+		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
 #endif
 	cfs_rq->nr_running++;
 }
@@ -3215,6 +3215,8 @@ static int move_one_task(struct lb_env *env)
 
 static unsigned long task_h_load(struct task_struct *p);
 
+static const unsigned int sched_nr_migrate_break = 32;
+
 /*
  * move_tasks tries to move up to load_move weighted load from busiest to
  * this_rq, as part of a balancing operation within domain "sd".
@@ -3242,7 +3244,7 @@ static int move_tasks(struct lb_env *env)
 
 		/* take a breather every nr_migrate tasks */
 		if (env->loop > env->loop_break) {
-			env->loop_break += sysctl_sched_nr_migrate;
+			env->loop_break += sched_nr_migrate_break;
 			env->flags |= LBF_NEED_BREAK;
 			break;
 		}
@@ -3252,7 +3254,7 @@ static int move_tasks(struct lb_env *env)
 
 		load = task_h_load(p);
 
-		if (load < 16 && !env->sd->nr_balance_failed)
+		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
 			goto next;
 
 		if ((load / 2) > env->load_move)
@@ -4407,7 +4409,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.dst_cpu	= this_cpu,
 		.dst_rq		= this_rq,
 		.idle		= idle,
-		.loop_break	= sysctl_sched_nr_migrate,
+		.loop_break	= sched_nr_migrate_break,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4445,10 +4447,10 @@ redo:
 		 * correctly treated as an imbalance.
 		 */
 		env.flags |= LBF_ALL_PINNED;
 		env.load_move = imbalance;
 		env.src_cpu = busiest->cpu;
 		env.src_rq = busiest;
-		env.loop_max = busiest->nr_running;
+		env.loop_max = min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
 		local_irq_save(flags);
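
The fair.c changes decouple the lock-dropping "breather" interval from sysctl_sched_nr_migrate: loop_max caps the total work per balance attempt, while loop_break marks where the loop bails out so locks can be released. A simplified, self-contained model of that control flow (constants and counts invented):

#include <stdio.h>

#define NR_MIGRATE_BREAK 32

int main(void)
{
	unsigned int loop = 0, loop_break = NR_MIGRATE_BREAK;
	unsigned int loop_max = 100;	/* min(sysctl, nr_running) upstream */
	int passes = 0;

again:
	passes++;
	while (loop < loop_max) {
		loop++;
		if (loop > loop_break) {
			loop_break += NR_MIGRATE_BREAK;
			goto again;	/* caller would drop locks here */
		}
	}
	printf("%u iterations in %d passes\n", loop, passes);
	return 0;
}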
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index e61fd73913d0..de00a486c5c6 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -68,3 +68,4 @@ SCHED_FEAT(TTWU_QUEUE, true)
 
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
+SCHED_FEAT(LB_MIN, false)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index bf57abdc7bd0..f113755695e2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -346,7 +346,8 @@ int tick_resume_broadcast(void)
 					     tick_get_broadcast_mask());
 		break;
 	case TICKDEV_MODE_ONESHOT:
-		broadcast = tick_resume_broadcast_oneshot(bc);
+		if (!cpumask_empty(tick_get_broadcast_mask()))
+			broadcast = tick_resume_broadcast_oneshot(bc);
 		break;
 	}
 }
@@ -373,6 +374,9 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
 {
 	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 
+	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
+		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+
 	return clockevents_program_event(bc, expires, force);
 }
 
@@ -531,7 +535,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 	int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
 
 	bc->event_handler = tick_handle_oneshot_broadcast;
-	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 
 	/* Take the do_timer update */
 	tick_do_timer_cpu = cpu;
@@ -549,6 +552,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 			   to_cpumask(tmpmask));
 
 	if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
+		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 		tick_broadcast_init_next_event(to_cpumask(tmpmask),
 					       tick_next_period);
 		tick_broadcast_set_event(tick_next_period, 1);
@@ -577,15 +581,10 @@ void tick_broadcast_switch_to_oneshot(void)
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
 	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
-
-	if (cpumask_empty(tick_get_broadcast_mask()))
-		goto end;
-
 	bc = tick_broadcast_device.evtdev;
 	if (bc)
 		tick_broadcast_setup_oneshot(bc);
 
-end:
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
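
The tick-broadcast hunks move the switch to one-shot mode out of setup and into tick_broadcast_set_event(), so the device is only reprogrammed when an event is actually armed. A toy model of that lazy mode switch (hypothetical clockdev type, not the clockevents API):

#include <stdio.h>

enum mode { MODE_PERIODIC, MODE_ONESHOT };

struct clockdev { enum mode mode; };

static void set_mode(struct clockdev *d, enum mode m)
{
	d->mode = m;
	printf("mode switched to %d\n", (int)m);
}

static void program_event(struct clockdev *d, long expires)
{
	if (d->mode != MODE_ONESHOT)	/* switch only when really needed */
		set_mode(d, MODE_ONESHOT);
	printf("programmed for %ld\n", expires);
}

int main(void)
{
	struct clockdev bc = { MODE_PERIODIC };

	program_event(&bc, 1000);	/* switches, then programs */
	program_event(&bc, 2000);	/* already one-shot: no switch */
	return 0;
}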
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ed7b5d1e12f4..2a22255c1010 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4629,7 +4629,8 @@ static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
 {
-	struct ring_buffer *buffer = filp->private_data;
+	struct trace_array *tr = filp->private_data;
+	struct ring_buffer *buffer = tr->buffer;
 	char buf[64];
 	int r;
 
@@ -4647,7 +4648,8 @@ static ssize_t
 rb_simple_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
-	struct ring_buffer *buffer = filp->private_data;
+	struct trace_array *tr = filp->private_data;
+	struct ring_buffer *buffer = tr->buffer;
 	unsigned long val;
 	int ret;
 
@@ -4734,7 +4736,7 @@ static __init int tracer_init_debugfs(void)
 			&trace_clock_fops);
 
 	trace_create_file("tracing_on", 0644, d_tracer,
-			  global_trace.buffer, &rb_simple_fops);
+			  &global_trace, &rb_simple_fops);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
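
The trace.c change stores the longer-lived trace_array in file->private_data and dereferences its buffer on each read/write, instead of caching the ring_buffer pointer at file-creation time. A generic sketch of why resolving through the container is safer when the inner object can be swapped (hypothetical types, not the tracing API):

#include <stdio.h>

struct buffer { int id; };
struct array  { struct buffer *buffer; };	/* like trace_array */
struct file   { void *private_data; };

static int file_read(struct file *filp)
{
	struct array *tr = filp->private_data;	/* container, not buffer */
	return tr->buffer->id;			/* resolved at read time */
}

int main(void)
{
	struct buffer old = { 1 }, new = { 2 };
	struct array tr = { &old };
	struct file f = { &tr };

	printf("%d\n", file_read(&f));
	tr.buffer = &new;		/* buffer swapped out underneath */
	printf("%d\n", file_read(&f));	/* still correct: prints 2 */
	return 0;
}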
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 95059f091a24..f95d65da6db8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -836,11 +836,11 @@ extern const char *__stop___trace_bprintk_fmt[];
 	 filter)
 #include "trace_entries.h"
 
-#ifdef CONFIG_FUNCTION_TRACER
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
 int perf_ftrace_event_register(struct ftrace_event_call *call,
 			       enum trace_reg type, void *data);
 #else
 #define perf_ftrace_event_register NULL
-#endif /* CONFIG_FUNCTION_TRACER */
+#endif
 
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 859fae6b1825..df611a0e76c5 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -652,6 +652,8 @@ int trace_print_lat_context(struct trace_iterator *iter)
 {
 	u64 next_ts;
 	int ret;
+	/* trace_find_next_entry will reset ent_size */
+	int ent_size = iter->ent_size;
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry = iter->ent,
 			   *next_entry = trace_find_next_entry(iter, NULL,
@@ -660,6 +662,9 @@ int trace_print_lat_context(struct trace_iterator *iter)
 	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
 	unsigned long rel_usecs;
 
+	/* Restore the original ent_size */
+	iter->ent_size = ent_size;
+
 	if (!next_entry)
 		next_ts = iter->ts;
 	rel_usecs = ns2usecs(next_ts - iter->ts);
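
The trace_output.c fix is a save/restore around a helper with a side effect: trace_find_next_entry() resets iter->ent_size, so the caller snapshots the field first and puts it back afterwards. A minimal illustration of the idiom (hypothetical iterator struct, not the tracing types):

#include <assert.h>

struct iter { int ent_size; };

static void find_next(struct iter *it)
{
	it->ent_size = 0;	/* side effect: clobbers the field */
}

static void print_context(struct iter *it)
{
	int ent_size = it->ent_size;	/* save */

	find_next(it);
	it->ent_size = ent_size;	/* restore before it is consumed */
}

int main(void)
{
	struct iter it = { 48 };

	print_context(&it);
	assert(it.ent_size == 48);
	return 0;
}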