author     Ingo Molnar <mingo@elte.hu>  2009-08-04 07:58:28 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-08-04 07:58:28 -0400
commit     e16852cfc5580b88cb327413ab8c89375f380592 (patch)
tree       67e7d5b84e2602986f2da689625e5a25d7af7bb4 /kernel
parent     bdff78707f3ce47e891f3201c9666122a70556ce (diff)
parent     74e7ff8c50b6b022e6ffaa736b16a4dc161d3eaf (diff)
Merge branch 'tracing/fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into tracing/urgent
Diffstat (limited to 'kernel')

-rw-r--r--  kernel/fork.c                         |   9
-rw-r--r--  kernel/freezer.c                      |   7
-rw-r--r--  kernel/irq/internals.h                |   3
-rw-r--r--  kernel/irq/manage.c                   |  55
-rw-r--r--  kernel/irq/migration.c                |   2
-rw-r--r--  kernel/kthread.c                      |  10
-rw-r--r--  kernel/module.c                       |   3
-rw-r--r--  kernel/perf_counter.c                 | 186
-rw-r--r--  kernel/sched.c                        |   4
-rw-r--r--  kernel/sched_fair.c                   |  10
-rw-r--r--  kernel/softirq.c                      |  64
-rw-r--r--  kernel/time/clocksource.c             |   2
-rw-r--r--  kernel/timer.c                        |   2
-rw-r--r--  kernel/trace/trace.c                  |  10
-rw-r--r--  kernel/trace/trace_functions_graph.c  |  11

15 files changed, 252 insertions, 126 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index bd2959228871..9b42695f0d14 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1407,14 +1407,11 @@ long do_fork(unsigned long clone_flags,
 	if (clone_flags & CLONE_VFORK) {
 		p->vfork_done = &vfork;
 		init_completion(&vfork);
-	} else if (!(clone_flags & CLONE_VM)) {
-		/*
-		 * vfork will do an exec which will call
-		 * set_task_comm()
-		 */
-		perf_counter_fork(p);
 	}
 
+	if (!(clone_flags & CLONE_THREAD))
+		perf_counter_fork(p);
+
 	audit_finish_fork(p);
 	tracehook_report_clone(regs, clone_flags, nr, p);
 
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 2f4936cf7083..bd1d42b17cb2 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -44,12 +44,19 @@ void refrigerator(void)
 	recalc_sigpending(); /* We sent fake signal, clean it up */
 	spin_unlock_irq(&current->sighand->siglock);
 
+	/* prevent accounting of that task to load */
+	current->flags |= PF_FREEZING;
+
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (!frozen(current))
 			break;
 		schedule();
 	}
+
+	/* Remove the accounting blocker */
+	current->flags &= ~PF_FREEZING;
+
 	pr_debug("%s left refrigerator\n", current->comm);
 	__set_current_state(save);
 }
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 73468253143b..e70ed5592eb9 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -42,8 +42,7 @@ static inline void unregister_handler_proc(unsigned int irq,
 
 extern int irq_select_affinity_usr(unsigned int irq);
 
-extern void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask);
+extern void irq_set_thread_affinity(struct irq_desc *desc);
 
 /*
  * Debugging printout:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 50da67672901..61c679db4687 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -80,14 +80,22 @@ int irq_can_set_affinity(unsigned int irq)
 	return 1;
 }
 
-void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+/**
+ *	irq_set_thread_affinity - Notify irq threads to adjust affinity
+ *	@desc:		irq descriptor which has affitnity changed
+ *
+ *	We just set IRQTF_AFFINITY and delegate the affinity setting
+ *	to the interrupt thread itself. We can not call
+ *	set_cpus_allowed_ptr() here as we hold desc->lock and this
+ *	code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
 {
 	struct irqaction *action = desc->action;
 
 	while (action) {
 		if (action->thread)
-			set_cpus_allowed_ptr(action->thread, cpumask);
+			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 		action = action->next;
 	}
 }
@@ -112,7 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	if (desc->status & IRQ_MOVE_PCNTXT) {
 		if (!desc->chip->set_affinity(irq, cpumask)) {
 			cpumask_copy(desc->affinity, cpumask);
-			irq_set_thread_affinity(desc, cpumask);
+			irq_set_thread_affinity(desc);
 		}
 	}
 	else {
@@ -122,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 #else
 	if (!desc->chip->set_affinity(irq, cpumask)) {
 		cpumask_copy(desc->affinity, cpumask);
-		irq_set_thread_affinity(desc, cpumask);
+		irq_set_thread_affinity(desc);
 	}
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
@@ -176,7 +184,7 @@ int irq_select_affinity_usr(unsigned int irq)
 	spin_lock_irqsave(&desc->lock, flags);
 	ret = setup_affinity(irq, desc);
 	if (!ret)
-		irq_set_thread_affinity(desc, desc->affinity);
+		irq_set_thread_affinity(desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
@@ -443,6 +451,39 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 	return -1;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+	cpumask_var_t mask;
+
+	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+		return;
+
+	/*
+	 * In case we are out of memory we set IRQTF_AFFINITY again and
+	 * try again next time
+	 */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		return;
+	}
+
+	spin_lock_irq(&desc->lock);
+	cpumask_copy(mask, desc->affinity);
+	spin_unlock_irq(&desc->lock);
+
+	set_cpus_allowed_ptr(current, mask);
+	free_cpumask_var(mask);
+}
+#else
+static inline void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
 /*
  * Interrupt handler thread
  */
@@ -458,6 +499,8 @@ static int irq_thread(void *data)
 
 	while (!irq_wait_for_interrupt(action)) {
 
+		irq_thread_check_affinity(desc, action);
+
 		atomic_inc(&desc->threads_active);
 
 		spin_lock_irq(&desc->lock);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index cfe767ca1545..fcb6c96f2627 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -45,7 +45,7 @@ void move_masked_irq(int irq)
 		   < nr_cpu_ids))
 		if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
 			cpumask_copy(desc->affinity, desc->pending_mask);
-			irq_set_thread_affinity(desc, desc->pending_mask);
+			irq_set_thread_affinity(desc);
 		}
 
 	cpumask_clear(desc->pending_mask);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9b1a7de26979..eb8751aa0418 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -180,10 +180,12 @@ EXPORT_SYMBOL(kthread_bind);
  * @k: thread created by kthread_create().
  *
  * Sets kthread_should_stop() for @k to return true, wakes it, and
- * waits for it to exit. Your threadfn() must not call do_exit()
- * itself if you use this function! This can also be called after
- * kthread_create() instead of calling wake_up_process(): the thread
- * will exit without calling threadfn().
+ * waits for it to exit. This can also be called after kthread_create()
+ * instead of calling wake_up_process(): the thread will exit without
+ * calling threadfn().
+ *
+ * If threadfn() may call do_exit() itself, the caller must ensure
+ * task_struct can't go away.
  *
  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
  * was never called.
diff --git a/kernel/module.c b/kernel/module.c
index 0a049837008e..fd1411403558 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1068,7 +1068,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 {
 	const unsigned long *crc;
 
-	if (!find_symbol("module_layout", NULL, &crc, true, false))
+	if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
+			 &crc, true, false))
 		BUG();
 	return check_version(sechdrs, versindex, "module_layout", mod, crc);
 }
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index a641eb753b8c..950931041954 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -146,6 +146,28 @@ static void put_ctx(struct perf_counter_context *ctx)
 	}
 }
 
+static void unclone_ctx(struct perf_counter_context *ctx)
+{
+	if (ctx->parent_ctx) {
+		put_ctx(ctx->parent_ctx);
+		ctx->parent_ctx = NULL;
+	}
+}
+
+/*
+ * If we inherit counters we want to return the parent counter id
+ * to userspace.
+ */
+static u64 primary_counter_id(struct perf_counter *counter)
+{
+	u64 id = counter->id;
+
+	if (counter->parent)
+		id = counter->parent->id;
+
+	return id;
+}
+
 /*
  * Get the perf_counter_context for a task and lock it.
  * This has to cope with with the fact that until it is locked,
@@ -1288,7 +1310,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
 #define MAX_INTERRUPTS (~0ULL)
 
 static void perf_log_throttle(struct perf_counter *counter, int enable);
-static void perf_log_period(struct perf_counter *counter, u64 period);
 
 static void perf_adjust_period(struct perf_counter *counter, u64 events)
 {
@@ -1307,8 +1328,6 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
 	if (!sample_period)
 		sample_period = 1;
 
-	perf_log_period(counter, sample_period);
-
 	hwc->sample_period = sample_period;
 }
 
@@ -1463,10 +1482,8 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
 	/*
 	 * Unclone this context if we enabled any counter.
 	 */
-	if (enabled && ctx->parent_ctx) {
-		put_ctx(ctx->parent_ctx);
-		ctx->parent_ctx = NULL;
-	}
+	if (enabled)
+		unclone_ctx(ctx);
 
 	spin_unlock(&ctx->lock);
 
@@ -1526,7 +1543,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 
 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 {
-	struct perf_counter_context *parent_ctx;
 	struct perf_counter_context *ctx;
 	struct perf_cpu_context *cpuctx;
 	struct task_struct *task;
@@ -1586,11 +1602,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
  retry:
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
-		parent_ctx = ctx->parent_ctx;
-		if (parent_ctx) {
-			put_ctx(parent_ctx);
-			ctx->parent_ctx = NULL; /* no longer a clone */
-		}
+		unclone_ctx(ctx);
 		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
@@ -1704,7 +1716,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 		values[n++] = counter->total_time_running +
 			atomic64_read(&counter->child_total_time_running);
 	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = counter->id;
+		values[n++] = primary_counter_id(counter);
 	mutex_unlock(&counter->child_mutex);
 
 	if (count < n * sizeof(u64))
@@ -1811,8 +1823,6 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
 		counter->attr.sample_freq = value;
 	} else {
-		perf_log_period(counter, value);
-
 		counter->attr.sample_period = value;
 		counter->hw.sample_period = value;
 	}
@@ -2661,10 +2671,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_ID)
 		header.size += sizeof(u64);
 
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		header.size += sizeof(u64);
+
 	if (sample_type & PERF_SAMPLE_CPU) {
 		header.size += sizeof(cpu_entry);
 
 		cpu_entry.cpu = raw_smp_processor_id();
+		cpu_entry.reserved = 0;
 	}
 
 	if (sample_type & PERF_SAMPLE_PERIOD)
@@ -2703,7 +2717,13 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_ADDR)
 		perf_output_put(&handle, data->addr);
 
-	if (sample_type & PERF_SAMPLE_ID)
+	if (sample_type & PERF_SAMPLE_ID) {
+		u64 id = primary_counter_id(counter);
+
+		perf_output_put(&handle, id);
+	}
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
 		perf_output_put(&handle, counter->id);
 
 	if (sample_type & PERF_SAMPLE_CPU)
@@ -2726,7 +2746,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 			if (sub != counter)
 				sub->pmu->read(sub);
 
-			group_entry.id = sub->id;
+			group_entry.id = primary_counter_id(sub);
 			group_entry.counter = atomic64_read(&sub->count);
 
 			perf_output_put(&handle, group_entry);
@@ -2786,15 +2806,8 @@ perf_counter_read_event(struct perf_counter *counter,
 	}
 
 	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		u64 id;
-
 		event.header.size += sizeof(u64);
-		if (counter->parent)
-			id = counter->parent->id;
-		else
-			id = counter->id;
-
-		event.format[i++] = id;
+		event.format[i++] = primary_counter_id(counter);
 	}
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
@@ -2895,8 +2908,11 @@ void perf_counter_fork(struct task_struct *task)
 		.event  = {
 			.header = {
 				.type = PERF_EVENT_FORK,
+				.misc = 0,
 				.size = sizeof(fork_event.event),
 			},
+			/* .pid  */
+			/* .ppid */
 		},
 	};
 
@@ -2968,8 +2984,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
 	struct perf_cpu_context *cpuctx;
 	struct perf_counter_context *ctx;
 	unsigned int size;
-	char *comm = comm_event->task->comm;
+	char comm[TASK_COMM_LEN];
 
+	memset(comm, 0, sizeof(comm));
+	strncpy(comm, comm_event->task->comm, sizeof(comm));
 	size = ALIGN(strlen(comm)+1, sizeof(u64));
 
 	comm_event->comm = comm;
@@ -3004,8 +3022,16 @@ void perf_counter_comm(struct task_struct *task)
 
 	comm_event = (struct perf_comm_event){
 		.task   = task,
+		/* .comm      */
+		/* .comm_size */
 		.event  = {
-			.header = { .type = PERF_EVENT_COMM, },
+			.header = {
+				.type = PERF_EVENT_COMM,
+				.misc = 0,
+				/* .size */
+			},
+			/* .pid */
+			/* .tid */
 		},
 	};
 
@@ -3088,8 +3114,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 	char *buf = NULL;
 	const char *name;
 
+	memset(tmp, 0, sizeof(tmp));
+
 	if (file) {
-		buf = kzalloc(PATH_MAX, GFP_KERNEL);
+		/*
+		 * d_path works from the end of the buffer backwards, so we
+		 * need to add enough zero bytes after the string to handle
+		 * the 64bit alignment we do later.
+		 */
+		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
 		if (!buf) {
 			name = strncpy(tmp, "//enomem", sizeof(tmp));
 			goto got_name;
@@ -3100,9 +3133,11 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 			goto got_name;
 		}
 	} else {
-		name = arch_vma_name(mmap_event->vma);
-		if (name)
+		if (arch_vma_name(mmap_event->vma)) {
+			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
+				       sizeof(tmp));
 			goto got_name;
+		}
 
 		if (!vma->vm_mm) {
 			name = strncpy(tmp, "[vdso]", sizeof(tmp));
@@ -3147,8 +3182,16 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 
 	mmap_event = (struct perf_mmap_event){
 		.vma    = vma,
+		/* .file_name */
+		/* .file_size */
 		.event  = {
-			.header = { .type = PERF_EVENT_MMAP, },
+			.header = {
+				.type = PERF_EVENT_MMAP,
+				.misc = 0,
+				/* .size */
+			},
+			/* .pid */
+			/* .tid */
 			.start  = vma->vm_start,
 			.len    = vma->vm_end - vma->vm_start,
 			.pgoff  = vma->vm_pgoff,
@@ -3159,49 +3202,6 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 }
 
 /*
- * Log sample_period changes so that analyzing tools can re-normalize the
- * event flow.
- */
-
-struct freq_event {
-	struct perf_event_header	header;
-	u64				time;
-	u64				id;
-	u64				period;
-};
-
-static void perf_log_period(struct perf_counter *counter, u64 period)
-{
-	struct perf_output_handle handle;
-	struct freq_event event;
-	int ret;
-
-	if (counter->hw.sample_period == period)
-		return;
-
-	if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
-		return;
-
-	event = (struct freq_event) {
-		.header = {
-			.type = PERF_EVENT_PERIOD,
-			.misc = 0,
-			.size = sizeof(event),
-		},
-		.time = sched_clock(),
-		.id = counter->id,
-		.period = period,
-	};
-
-	ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
-	if (ret)
-		return;
-
-	perf_output_put(&handle, event);
-	perf_output_end(&handle);
-}
-
-/*
  * IRQ throttle logging
  */
 
@@ -3214,16 +3214,21 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
 		struct perf_event_header	header;
 		u64				time;
 		u64				id;
+		u64				stream_id;
 	} throttle_event = {
 		.header = {
-			.type = PERF_EVENT_THROTTLE + 1,
+			.type = PERF_EVENT_THROTTLE,
 			.misc = 0,
 			.size = sizeof(throttle_event),
 		},
 		.time = sched_clock(),
-		.id = counter->id,
+		.id = primary_counter_id(counter),
+		.stream_id = counter->id,
 	};
 
+	if (enable)
+		throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
+
 	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
 	if (ret)
 		return;
@@ -3671,7 +3676,7 @@ static const struct pmu perf_ops_task_clock = {
 void perf_tpcounter_event(int event_id)
 {
 	struct perf_sample_data data = {
-		.regs = get_irq_regs();
+		.regs = get_irq_regs(),
 		.addr = 0,
 	};
 
@@ -3687,16 +3692,12 @@ extern void ftrace_profile_disable(int);
 
 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-	ftrace_profile_disable(perf_event_id(&counter->attr));
+	ftrace_profile_disable(counter->attr.config);
 }
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
-	int event_id = perf_event_id(&counter->attr);
-	int ret;
-
-	ret = ftrace_profile_enable(event_id);
-	if (ret)
+	if (ftrace_profile_enable(counter->attr.config))
 		return NULL;
 
 	counter->destroy = tp_perf_counter_destroy;
@@ -4255,15 +4256,12 @@ void perf_counter_exit_task(struct task_struct *child)
 	 */
 	spin_lock(&child_ctx->lock);
 	child->perf_counter_ctxp = NULL;
-	if (child_ctx->parent_ctx) {
-		/*
-		 * This context is a clone; unclone it so it can't get
-		 * swapped to another process while we're removing all
-		 * the counters from it.
-		 */
-		put_ctx(child_ctx->parent_ctx);
-		child_ctx->parent_ctx = NULL;
-	}
+	/*
+	 * If this context is a clone; unclone it so it can't get
+	 * swapped to another process while we're removing all
+	 * the counters from it.
+	 */
+	unclone_ctx(child_ctx);
 	spin_unlock(&child_ctx->lock);
 	local_irq_restore(flags);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 98972d366fdc..1b59e265273b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7289,6 +7289,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 static void calc_global_load_remove(struct rq *rq)
 {
 	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+	rq->calc_load_active = 0;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -7515,6 +7516,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		task_rq_unlock(rq, &flags);
 		get_task_struct(p);
 		cpu_rq(cpu)->migration_thread = p;
+		rq->calc_load_update = calc_load_update;
 		break;
 
 	case CPU_ONLINE:
@@ -7525,8 +7527,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
-		rq->calc_load_update = calc_load_update;
-		rq->calc_load_active = 0;
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7c248dc30f41..9ffb2b2ceba4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -266,6 +266,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 	return min_vruntime;
 }
 
+static inline int entity_before(struct sched_entity *a,
+				struct sched_entity *b)
+{
+	return (s64)(a->vruntime - b->vruntime) < 0;
+}
+
 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	return se->vruntime - cfs_rq->min_vruntime;
@@ -1017,7 +1023,7 @@ static void yield_task_fair(struct rq *rq)
 	/*
 	 * Already in the rightmost position?
 	 */
-	if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
+	if (unlikely(!rightmost || entity_before(rightmost, se)))
 		return;
 
 	/*
@@ -1713,7 +1719,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 
 	/* 'curr' will be NULL if the child belongs to a different group */
 	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-			curr && curr->vruntime < se->vruntime) {
+			curr && entity_before(curr, se)) {
 		/*
 		 * Upon rescheduling, sched_class::put_prev_task() will place
 		 * 'current' within the tree based on its new key value.
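
The kernel/sched_fair.c hunk above replaces open-coded vruntime comparisons with the new entity_before() helper. A plausible reason, not spelled out in the diff itself, is that vruntime is an unsigned 64-bit quantity that may wrap, and the signed-difference form keeps the ordering correct across a wrap while a plain less-than does not. A small, stand-alone sketch of that property (hypothetical user-space code, not part of the commit):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel helper: "a runs before b", decided via signed difference. */
static int entity_before(uint64_t a_vruntime, uint64_t b_vruntime)
{
	return (int64_t)(a_vruntime - b_vruntime) < 0;
}

int main(void)
{
	uint64_t a = UINT64_MAX - 5;	/* just below the wrap point */
	uint64_t b = a + 10;		/* wrapped around to a small value */

	printf("a < b              -> %d\n", a < b);			/* 0: ordering inverted */
	printf("entity_before(a,b) -> %d\n", entity_before(a, b));	/* 1: still correct */
	return 0;
}

The same reasoning is behind the kernel's time_after() macros for jiffies comparisons.
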
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 3a94905fa5d2..eb5e131a0485 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -345,7 +345,9 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
 	softirq_vec[nr].action = action;
 }
 
-/* Tasklets */
+/*
+ * Tasklets
+ */
 struct tasklet_head
 {
 	struct tasklet_struct *head;
@@ -493,6 +495,66 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+/*
+ * tasklet_hrtimer
+ */
+
+/*
+ * The trampoline is called when the hrtimer expires. If this is
+ * called from the hrtimer interrupt then we schedule the tasklet as
+ * the timer callback function expects to run in softirq context. If
+ * it's called in softirq context anyway (i.e. high resolution timers
+ * disabled) then the hrtimer callback is called right away.
+ */
+static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
+{
+	struct tasklet_hrtimer *ttimer =
+		container_of(timer, struct tasklet_hrtimer, timer);
+
+	if (hrtimer_is_hres_active(timer)) {
+		tasklet_hi_schedule(&ttimer->tasklet);
+		return HRTIMER_NORESTART;
+	}
+	return ttimer->function(timer);
+}
+
+/*
+ * Helper function which calls the hrtimer callback from
+ * tasklet/softirq context
+ */
+static void __tasklet_hrtimer_trampoline(unsigned long data)
+{
+	struct tasklet_hrtimer *ttimer = (void *)data;
+	enum hrtimer_restart restart;
+
+	restart = ttimer->function(&ttimer->timer);
+	if (restart != HRTIMER_NORESTART)
+		hrtimer_restart(&ttimer->timer);
+}
+
+/**
+ * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
+ * @ttimer:	 tasklet_hrtimer which is initialized
+ * @function:	 hrtimer callback funtion which gets called from softirq context
+ * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
+ * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
+ */
+void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
+			  enum hrtimer_restart (*function)(struct hrtimer *),
+			  clockid_t which_clock, enum hrtimer_mode mode)
+{
+	hrtimer_init(&ttimer->timer, which_clock, mode);
+	ttimer->timer.function = __hrtimer_tasklet_trampoline;
+	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
+		     (unsigned long)ttimer);
+	ttimer->function = function;
+}
+EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
+
+/*
+ * Remote softirq bits
+ */
+
 DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
 EXPORT_PER_CPU_SYMBOL(softirq_work_list);
 
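
The kernel/softirq.c hunk above introduces the tasklet_hrtimer infrastructure: the hrtimer fires in hard interrupt context, and the trampolines defer the user's callback to hi-tasklet context so it always runs as a softirq. A minimal usage sketch follows; the driver-side names are hypothetical, and it assumes the struct tasklet_hrtimer definition and the small tasklet_hrtimer_start() wrapper from the matching include/linux/interrupt.h change, which is outside this kernel/-only diffstat:

#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

static struct tasklet_hrtimer poll_timer;	/* hypothetical driver state */

/* Runs in softirq context via the trampolines added above. */
static enum hrtimer_restart poll_timer_fn(struct hrtimer *timer)
{
	/* ... periodic, softirq-safe work goes here ... */

	hrtimer_forward_now(timer, ktime_set(0, 10 * NSEC_PER_MSEC));
	return HRTIMER_RESTART;		/* HRTIMER_NORESTART would stop the cycle */
}

static void poll_timer_setup(void)
{
	tasklet_hrtimer_init(&poll_timer, poll_timer_fn,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&poll_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
			      HRTIMER_MODE_REL);
}

Returning HRTIMER_RESTART from the callback makes __tasklet_hrtimer_trampoline() re-arm the timer via hrtimer_restart(), which is why the callback forwards its expiry first.
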
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 592bf584d1d2..7466cb811251 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -513,7 +513,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 	 * Check to make sure we don't switch to a non-highres capable
 	 * clocksource if the tick code is in oneshot mode (highres or nohz)
 	 */
-	if (tick_oneshot_mode_active() &&
+	if (tick_oneshot_mode_active() && ovr &&
 	    !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) {
 		printk(KERN_WARNING "%s clocksource is not HRT compatible. "
 			"Cannot switch while in HRT/NOHZ mode\n", ovr->name);
diff --git a/kernel/timer.c b/kernel/timer.c
index 0b36b9e5cc8b..a7f07d5a6241 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -714,7 +714,7 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	 * networking code - if the timer is re-modified
 	 * to be the same thing then just return:
 	 */
-	if (timer->expires == expires && timer_pending(timer))
+	if (timer_pending(timer) && timer->expires == expires)
 		return 1;
 
 	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d8ef28574aa1..8930e39b9d8c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3085,7 +3085,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 			break;
 		}
 
-		trace_consume(iter);
+		if (ret != TRACE_TYPE_NO_CONSUME)
+			trace_consume(iter);
 		rem -= count;
 		if (!find_next_entry_inc(iter)) {
 			rem = 0;
@@ -4233,8 +4234,11 @@ static void __ftrace_dump(bool disable_tracing)
 		iter.pos = -1;
 
 		if (find_next_entry_inc(&iter) != NULL) {
-			print_trace_line(&iter);
-			trace_consume(&iter);
+			int ret;
+
+			ret = print_trace_line(&iter);
+			if (ret != TRACE_TYPE_NO_CONSUME)
+				trace_consume(&iter);
 		}
 
 		trace_printk_seq(&iter.seq);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d2249abafb53..420ec3487579 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -843,9 +843,16 @@ print_graph_function(struct trace_iterator *iter)
 
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
-		struct ftrace_graph_ent_entry *field;
+		/*
+		 * print_graph_entry() may consume the current event,
+		 * thus @field may become invalid, so we need to save it.
+		 * sizeof(struct ftrace_graph_ent_entry) is very small,
+		 * it can be safely saved at the stack.
+		 */
+		struct ftrace_graph_ent_entry *field, saved;
 		trace_assign_type(field, entry);
-		return print_graph_entry(field, s, iter);
+		saved = *field;
+		return print_graph_entry(&saved, s, iter);
 	}
 	case TRACE_GRAPH_RET: {
 		struct ftrace_graph_ret_entry *field;