Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c          2
-rw-r--r--  kernel/irq/debug.h           38
-rw-r--r--  kernel/power/swap.c          28
-rw-r--r--  kernel/rcutree.c              1
-rw-r--r--  kernel/sched/core.c          22
-rw-r--r--  kernel/sched/fair.c          18
-rw-r--r--  kernel/sched/features.h       1
-rw-r--r--  kernel/time/tick-broadcast.c 13
-rw-r--r--  kernel/trace/trace.c          8
-rw-r--r--  kernel/trace/trace.h          4
-rw-r--r--  kernel/trace/trace_events.c   5
-rw-r--r--  kernel/trace/trace_export.c   1
-rw-r--r--  kernel/trace/trace_output.c   5
13 files changed, 92 insertions, 54 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e82c7a1face9..91a445925855 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3189,7 +3189,7 @@ static void perf_event_for_each(struct perf_event *event,
 	perf_event_for_each_child(event, func);
 	func(event);
 	list_for_each_entry(sibling, &event->sibling_list, group_entry)
-		perf_event_for_each_child(event, func);
+		perf_event_for_each_child(sibling, func);
 	mutex_unlock(&ctx->mutex);
 }
 
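The core.c fix above is a wrong-cursor bug: the list_for_each_entry() cursor is sibling, but the loop body kept passing the group leader event, so the leader's children were processed once per sibling and the siblings themselves never were. A minimal user-space model of the corrected pattern (standalone C, all names invented for illustration):

	#include <stdio.h>

	struct node { int id; struct node *next; };

	static void visit(struct node *n) { printf("visit %d\n", n->id); }

	int main(void)
	{
		struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
		struct node *leader = &a;

		visit(leader);
		/* The bug was visit(leader) in this loop body. */
		for (struct node *sibling = leader->next; sibling; sibling = sibling->next)
			visit(sibling);
		return 0;
	}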
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
index 97a8bfadc88a..e75e29e4434a 100644
--- a/kernel/irq/debug.h
+++ b/kernel/irq/debug.h
@@ -4,10 +4,10 @@
 
 #include <linux/kallsyms.h>
 
-#define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
-#define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
+#define ___P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
+#define ___PS(f) if (desc->istate & f) printk("%14s set\n", #f)
 /* FIXME */
-#define PD(f) do { } while (0)
+#define ___PD(f) do { } while (0)
 
 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
@@ -23,23 +23,23 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 		print_symbol("%s\n", (unsigned long)desc->action->handler);
 	}
 
-	P(IRQ_LEVEL);
-	P(IRQ_PER_CPU);
-	P(IRQ_NOPROBE);
-	P(IRQ_NOREQUEST);
-	P(IRQ_NOTHREAD);
-	P(IRQ_NOAUTOEN);
+	___P(IRQ_LEVEL);
+	___P(IRQ_PER_CPU);
+	___P(IRQ_NOPROBE);
+	___P(IRQ_NOREQUEST);
+	___P(IRQ_NOTHREAD);
+	___P(IRQ_NOAUTOEN);
 
-	PS(IRQS_AUTODETECT);
-	PS(IRQS_REPLAY);
-	PS(IRQS_WAITING);
-	PS(IRQS_PENDING);
+	___PS(IRQS_AUTODETECT);
+	___PS(IRQS_REPLAY);
+	___PS(IRQS_WAITING);
+	___PS(IRQS_PENDING);
 
-	PD(IRQS_INPROGRESS);
-	PD(IRQS_DISABLED);
-	PD(IRQS_MASKED);
+	___PD(IRQS_INPROGRESS);
+	___PD(IRQS_DISABLED);
+	___PD(IRQS_MASKED);
 }
 
-#undef P
-#undef PS
-#undef PD
+#undef ___P
+#undef ___PS
+#undef ___PD
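Context for the renames: this header is included by several kernel/irq source files, and one-letter macro names such as P() collide easily with identifiers from other headers those files pull in; the triple-underscore prefix plus the trailing #undef keeps the helpers private to this header. A standalone sketch of the same convention (adapted: the kernel macro implicitly reads a local desc variable, here the flags are passed explicitly):

	#include <stdio.h>

	#define ___P(flags, f) do { if ((flags) & (f)) printf("%14s set\n", #f); } while (0)

	enum { IRQ_LEVEL = 1 << 0, IRQ_PER_CPU = 1 << 1 };

	int main(void)
	{
		unsigned int status = IRQ_LEVEL | IRQ_PER_CPU;

		___P(status, IRQ_LEVEL);
		___P(status, IRQ_PER_CPU);
		return 0;
	}

	#undef ___P	/* as in the header: the name does not leak further */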
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8742fd013a94..eef311a58a64 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -51,6 +51,23 @@
 
 #define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
 
+/*
+ * Number of free pages that are not high.
+ */
+static inline unsigned long low_free_pages(void)
+{
+	return nr_free_pages() - nr_free_highpages();
+}
+
+/*
+ * Number of pages required to be kept free while writing the image. Always
+ * half of all available low pages before the writing starts.
+ */
+static inline unsigned long reqd_free_pages(void)
+{
+	return low_free_pages() / 2;
+}
+
 struct swap_map_page {
 	sector_t entries[MAP_PAGE_ENTRIES];
 	sector_t next_swap;
@@ -72,7 +89,7 @@ struct swap_map_handle {
 	sector_t cur_swap;
 	sector_t first_sector;
 	unsigned int k;
-	unsigned long nr_free_pages, written;
+	unsigned long reqd_free_pages;
 	u32 crc32;
 };
 
@@ -316,8 +333,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
 		goto err_rel;
 	}
 	handle->k = 0;
-	handle->nr_free_pages = nr_free_pages() >> 1;
-	handle->written = 0;
+	handle->reqd_free_pages = reqd_free_pages();
 	handle->first_sector = handle->cur_swap;
 	return 0;
 err_rel:
@@ -352,11 +368,11 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
 		handle->cur_swap = offset;
 		handle->k = 0;
 	}
-	if (bio_chain && ++handle->written > handle->nr_free_pages) {
+	if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
 		error = hib_wait_on_bio_chain(bio_chain);
 		if (error)
 			goto out;
-		handle->written = 0;
+		handle->reqd_free_pages = reqd_free_pages();
 	}
 out:
 	return error;
@@ -618,7 +634,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	 * Adjust number of free pages after all allocations have been done.
 	 * We don't want to run out of pages when writing.
 	 */
-	handle->nr_free_pages = nr_free_pages() >> 1;
+	handle->reqd_free_pages = reqd_free_pages();
 
 	/*
 	 * Start the CRC32 thread.
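The swap.c hunks replace open-loop throttling, waiting after a fixed page count computed once, with feedback throttling: wait for the bio chain whenever free lowmem actually falls to a watermark, and recompute the watermark after each wait. A user-space model of the new control flow (standalone C, every number invented):

	#include <stdio.h>

	/* Stand-ins for nr_free_pages() / nr_free_highpages(). */
	static unsigned long free_pages = 1000, free_highpages = 200;

	static unsigned long low_free_pages(void)
	{
		return free_pages - free_highpages;
	}

	static unsigned long reqd_free_pages(void)
	{
		return low_free_pages() / 2;	/* keep half of low pages free */
	}

	int main(void)
	{
		unsigned long watermark = reqd_free_pages();

		for (int page = 0; page < 600; page++) {
			free_pages--;			/* each written page consumes one */
			if (low_free_pages() <= watermark) {
				printf("page %d: wait on bio chain\n", page);
				free_pages += 50;	/* completed I/O returns pages */
				watermark = reqd_free_pages();
			}
		}
		return 0;
	}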
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1050d6d3922c..d0c5baf1ab18 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1820,7 +1820,6 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	 * a quiescent state betweentimes.
 	 */
 	local_irq_save(flags);
-	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
 	rdp = this_cpu_ptr(rsp->rda);
 
 	/* Add the callback to our list. */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5c692a0a555d..13c38837f2cd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6398,16 +6398,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
-			if (sd && (sd->flags & SD_OVERLAP))
-				free_sched_groups(sd->groups, 0);
-			kfree(*per_cpu_ptr(sdd->sd, j));
-			kfree(*per_cpu_ptr(sdd->sg, j));
-			kfree(*per_cpu_ptr(sdd->sgp, j));
+			struct sched_domain *sd;
+
+			if (sdd->sd) {
+				sd = *per_cpu_ptr(sdd->sd, j);
+				if (sd && (sd->flags & SD_OVERLAP))
+					free_sched_groups(sd->groups, 0);
+				kfree(*per_cpu_ptr(sdd->sd, j));
+			}
+
+			if (sdd->sg)
+				kfree(*per_cpu_ptr(sdd->sg, j));
+			if (sdd->sgp)
+				kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
+		sdd->sd = NULL;
 		free_percpu(sdd->sg);
+		sdd->sg = NULL;
 		free_percpu(sdd->sgp);
+		sdd->sgp = NULL;
 	}
 }
 
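The __sdt_free() rework makes teardown tolerate a partially failed allocation: each per-cpu array is tested before its slots are touched, and the pointers are cleared after free_percpu() so calling the function twice is harmless. The same defensive pattern in standalone C, with malloc/free standing in for the per-cpu allocators:

	#include <stdlib.h>

	struct sd_data { int **sd, **sg, **sgp; };

	/* Test before dereferencing, NULL out after freeing. */
	static void sdt_free(struct sd_data *sdd, int ncpus)
	{
		for (int j = 0; j < ncpus; j++) {
			if (sdd->sd)
				free(sdd->sd[j]);
			if (sdd->sg)
				free(sdd->sg[j]);
			if (sdd->sgp)
				free(sdd->sgp[j]);
		}
		free(sdd->sd);  sdd->sd = NULL;
		free(sdd->sg);  sdd->sg = NULL;
		free(sdd->sgp); sdd->sgp = NULL;
	}

	int main(void)
	{
		/* sg and sgp were never allocated, as after a failed setup. */
		struct sd_data sdd = { calloc(4, sizeof(int *)), NULL, NULL };

		sdt_free(&sdd, 4);
		sdt_free(&sdd, 4);	/* second call is a no-op */
		return 0;
	}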
6413 6423
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0d97ebdc58f0..e9553640c1c3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -784,7 +784,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se))
-		list_add_tail(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
+		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
 #endif
 	cfs_rq->nr_running++;
 }
@@ -3215,6 +3215,8 @@ static int move_one_task(struct lb_env *env)
 
 static unsigned long task_h_load(struct task_struct *p);
 
+static const unsigned int sched_nr_migrate_break = 32;
+
 /*
  * move_tasks tries to move up to load_move weighted load from busiest to
  * this_rq, as part of a balancing operation within domain "sd".
@@ -3242,7 +3244,7 @@ static int move_tasks(struct lb_env *env)
 
 		/* take a breather every nr_migrate tasks */
 		if (env->loop > env->loop_break) {
-			env->loop_break += sysctl_sched_nr_migrate;
+			env->loop_break += sched_nr_migrate_break;
 			env->flags |= LBF_NEED_BREAK;
 			break;
 		}
@@ -3252,7 +3254,7 @@ static int move_tasks(struct lb_env *env)
 
 		load = task_h_load(p);
 
-		if (load < 16 && !env->sd->nr_balance_failed)
+		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
 			goto next;
 
 		if ((load / 2) > env->load_move)
@@ -4407,7 +4409,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.dst_cpu	= this_cpu,
 		.dst_rq		= this_rq,
 		.idle		= idle,
-		.loop_break	= sysctl_sched_nr_migrate,
+		.loop_break	= sched_nr_migrate_break,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4445,10 +4447,10 @@ redo:
 		 * correctly treated as an imbalance.
 		 */
 		env.flags |= LBF_ALL_PINNED;
 		env.load_move = imbalance;
 		env.src_cpu = busiest->cpu;
 		env.src_rq = busiest;
-		env.loop_max = busiest->nr_running;
+		env.loop_max = min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
 		local_irq_save(flags);
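Taken together, the fair.c hunks stop the breather logic from depending on the runtime tunable: loop_break now advances by the fixed internal sched_nr_migrate_break (32), loop_max is clamped to sysctl_sched_nr_migrate instead of covering every runnable task, and the skip-small-loads heuristic only applies when the new LB_MIN feature (added in features.h below, default false) is turned on. A sketch of the clamp (standalone C; min_t reimplemented, task count invented):

	#include <stdio.h>

	#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

	static unsigned int sysctl_sched_nr_migrate = 32;	/* /proc tunable in the kernel */
	static const unsigned int sched_nr_migrate_break = 32;

	int main(void)
	{
		unsigned long nr_running = 1000;	/* busiest->nr_running */
		unsigned long loop_max = min_t(unsigned long, sysctl_sched_nr_migrate,
					       nr_running);

		printf("scan at most %lu tasks, breathe every %u\n",
		       loop_max, sched_nr_migrate_break);
		return 0;
	}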
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index e61fd73913d0..de00a486c5c6 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -68,3 +68,4 @@ SCHED_FEAT(TTWU_QUEUE, true)
 
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
+SCHED_FEAT(LB_MIN, false)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index bf57abdc7bd0..f113755695e2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -346,7 +346,8 @@ int tick_resume_broadcast(void)
 					     tick_get_broadcast_mask());
 			break;
 		case TICKDEV_MODE_ONESHOT:
-			broadcast = tick_resume_broadcast_oneshot(bc);
+			if (!cpumask_empty(tick_get_broadcast_mask()))
+				broadcast = tick_resume_broadcast_oneshot(bc);
 			break;
 		}
 	}
@@ -373,6 +374,9 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
 {
 	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 
+	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
+		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+
 	return clockevents_program_event(bc, expires, force);
 }
 
@@ -531,7 +535,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 	int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
 
 	bc->event_handler = tick_handle_oneshot_broadcast;
-	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 
 	/* Take the do_timer update */
 	tick_do_timer_cpu = cpu;
@@ -549,6 +552,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 			   to_cpumask(tmpmask));
 
 	if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
+		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 		tick_broadcast_init_next_event(to_cpumask(tmpmask),
 					       tick_next_period);
 		tick_broadcast_set_event(tick_next_period, 1);
@@ -577,15 +581,10 @@ void tick_broadcast_switch_to_oneshot(void)
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
 	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
-
-	if (cpumask_empty(tick_get_broadcast_mask()))
-		goto end;
-
 	bc = tick_broadcast_device.evtdev;
 	if (bc)
 		tick_broadcast_setup_oneshot(bc);
 
-end:
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
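The tick-broadcast hunks defer the mode switch: instead of forcing the broadcast device into one-shot mode during setup, tick_broadcast_set_event() now flips the mode only when an event is actually programmed, and resume reprograms only when the broadcast mask is non-empty. A standalone model of switching the mode lazily, right before programming (enum values and names invented):

	#include <stdio.h>

	enum mode { MODE_PERIODIC, MODE_ONESHOT };

	struct clock_event { enum mode mode; };

	static void set_mode(struct clock_event *ce, enum mode m)
	{
		ce->mode = m;
		printf("mode -> %s\n", m == MODE_ONESHOT ? "oneshot" : "periodic");
	}

	static void program_event(struct clock_event *ce, long expires)
	{
		if (ce->mode != MODE_ONESHOT)
			set_mode(ce, MODE_ONESHOT);	/* lazy switch */
		printf("program %ld\n", expires);
	}

	int main(void)
	{
		struct clock_event bc = { MODE_PERIODIC };

		program_event(&bc, 100);	/* switches mode first */
		program_event(&bc, 200);	/* already one-shot */
		return 0;
	}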
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 48ef4960ec90..509e8615f504 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4759,7 +4759,8 @@ static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
 {
-	struct ring_buffer *buffer = filp->private_data;
+	struct trace_array *tr = filp->private_data;
+	struct ring_buffer *buffer = tr->buffer;
 	char buf[64];
 	int r;
 
@@ -4777,7 +4778,8 @@ static ssize_t
 rb_simple_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
-	struct ring_buffer *buffer = filp->private_data;
+	struct trace_array *tr = filp->private_data;
+	struct ring_buffer *buffer = tr->buffer;
 	unsigned long val;
 	int ret;
 
@@ -4864,7 +4866,7 @@ static __init int tracer_init_debugfs(void)
 			  &trace_clock_fops);
 
 	trace_create_file("tracing_on", 0644, d_tracer,
-			  global_trace.buffer, &rb_simple_fops);
+			  &global_trace, &rb_simple_fops);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
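The trace.c hunks change what the debugfs file keeps in private_data: not a ring_buffer pointer captured at file-creation time, but the longer-lived trace_array, so the handlers resolve tr->buffer on every call and stay correct if the buffer is later swapped out. A minimal model (struct layouts invented):

	#include <stdio.h>

	struct ring_buffer { int record_on; };
	struct trace_array { struct ring_buffer *buffer; };

	/* Resolve the buffer at call time via the stable trace_array. */
	static int rb_simple_read(struct trace_array *tr)
	{
		struct ring_buffer *buffer = tr->buffer;
		return buffer->record_on;
	}

	int main(void)
	{
		struct ring_buffer a = { 1 }, b = { 0 };
		struct trace_array tr = { &a };

		printf("%d\n", rb_simple_read(&tr));
		tr.buffer = &b;				/* buffer swapped */
		printf("%d\n", rb_simple_read(&tr));	/* still correct */
		return 0;
	}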
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1c8b7c6f7b3b..6c6f7933eede 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -838,11 +838,11 @@ void trace_printk_init_buffers(void);
 		     filter)
 #include "trace_entries.h"
 
-#ifdef CONFIG_FUNCTION_TRACER
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
 int perf_ftrace_event_register(struct ftrace_event_call *call,
 			       enum trace_reg type, void *data);
 #else
 #define perf_ftrace_event_register NULL
-#endif /* CONFIG_FUNCTION_TRACER */
+#endif
 
 #endif /* _LINUX_KERNEL_TRACE_H */
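The guard fix reflects that the prototype only makes sense when both perf events and the function tracer are configured; with either one missing, the name falls back to NULL. A compilable toy of the same #if defined(...) && defined(...) pattern (the CONFIG_ defines here are stand-ins, not real Kconfig output):

	#include <stdio.h>

	#define CONFIG_PERF_EVENTS 1
	#define CONFIG_FUNCTION_TRACER 1	/* comment out to get the stub */

	#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
	static int perf_ftrace_event_register(void) { return 0; }
	#define REGISTER perf_ftrace_event_register
	#else
	#define REGISTER NULL
	#endif

	int main(void)
	{
		int (*reg)(void) = REGISTER;

		printf("register callback %s\n", reg ? "present" : "NULL");
		return reg ? reg() : 0;
	}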
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 079a93ae8a9d..29111da1d100 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -294,6 +294,9 @@ static int __ftrace_set_clr_event(const char *match, const char *sub,
 		if (!call->name || !call->class || !call->class->reg)
 			continue;
 
+		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
+			continue;
+
 		if (match &&
 		    strcmp(match, call->name) != 0 &&
 		    strcmp(match, call->class->system) != 0)
@@ -1164,7 +1167,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 		return -1;
 	}
 
-	if (call->class->reg)
+	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
 		trace_create_file("enable", 0644, call->dir, call,
 				  enable);
 
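The two trace_events.c hunks consume a new per-event flag: events marked TRACE_EVENT_FL_IGNORE_ENABLE are skipped by wildcard enable/disable and never receive an "enable" control file; trace_export.c below sets that flag on ftrace's own internal events. A standalone sketch of the filtering (flag value and event names invented):

	#include <stdio.h>

	#define TRACE_EVENT_FL_IGNORE_ENABLE (1 << 0)	/* value invented */

	struct event { const char *name; unsigned int flags; };

	int main(void)
	{
		struct event events[] = {
			{ "sched_switch", 0 },
			{ "function", TRACE_EVENT_FL_IGNORE_ENABLE },
		};

		/* Wildcard enable: internal events are silently skipped. */
		for (unsigned int i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
			if (events[i].flags & TRACE_EVENT_FL_IGNORE_ENABLE)
				continue;
			printf("enable %s\n", events[i].name);
		}
		return 0;
	}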
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 3dd15e8bc856..e039906b037d 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -180,6 +180,7 @@ struct ftrace_event_call __used event_##call = { \
 	.event.type		= etype,			\
 	.class			= &event_class_ftrace_##call,	\
 	.print_fmt		= print,			\
+	.flags			= TRACE_EVENT_FL_IGNORE_ENABLE, \
 };								\
 struct ftrace_event_call __used					\
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 859fae6b1825..df611a0e76c5 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -652,6 +652,8 @@ int trace_print_lat_context(struct trace_iterator *iter)
 {
 	u64 next_ts;
 	int ret;
+	/* trace_find_next_entry will reset ent_size */
+	int ent_size = iter->ent_size;
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry = iter->ent,
 			   *next_entry = trace_find_next_entry(iter, NULL,
@@ -660,6 +662,9 @@ int trace_print_lat_context(struct trace_iterator *iter)
 	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
 	unsigned long rel_usecs;
 
+	/* Restore the original ent_size */
+	iter->ent_size = ent_size;
+
 	if (!next_entry)
 		next_ts = iter->ts;
 	rel_usecs = ns2usecs(next_ts - iter->ts);
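The trace_output.c fix works around a side effect: trace_find_next_entry() peeks at the next record and clobbers iter->ent_size while doing so, so the current entry's size is saved before the call and restored afterwards. The save/restore idiom in standalone C (all sizes invented):

	#include <stdio.h>

	struct iterator { int ent_size; };

	/* Like trace_find_next_entry(): peeking clobbers ent_size. */
	static void find_next(struct iterator *it)
	{
		it->ent_size = 64;	/* size of the peeked next entry */
	}

	int main(void)
	{
		struct iterator it = { .ent_size = 24 };	/* current entry */
		int ent_size = it.ent_size;			/* save */

		find_next(&it);
		it.ent_size = ent_size;				/* restore */
		printf("current entry size: %d\n", it.ent_size);
		return 0;
	}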