Diffstat (limited to 'kernel')
 -rw-r--r--   kernel/perf_event.c              | 22
 -rw-r--r--   kernel/sched.c                   |  2
 -rw-r--r--   kernel/sched_debug.c             |  4
 -rw-r--r--   kernel/trace/ring_buffer.c       |  8
 -rw-r--r--   kernel/trace/trace_clock.c       |  4
 -rw-r--r--   kernel/trace/trace_event_perf.c  | 11

6 files changed, 31 insertions(+), 20 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 574ee58a3046..681af806d76b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1164,11 +1164,9 @@ void perf_event_task_sched_out(struct task_struct *task,
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 	struct perf_event_context *next_ctx;
 	struct perf_event_context *parent;
-	struct pt_regs *regs;
 	int do_switch = 1;
 
-	regs = task_pt_regs(task);
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
 	if (likely(!ctx || !cpuctx->task_ctx))
 		return;
@@ -2786,12 +2784,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return NULL;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 __weak
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 }
-#endif
+
 
 /*
  * Output
@@ -3378,15 +3375,23 @@ static void perf_event_task_output(struct perf_event *event,
 				   struct perf_task_event *task_event)
 {
 	struct perf_output_handle handle;
-	int size;
 	struct task_struct *task = task_event->task;
-	int ret;
+	unsigned long flags;
+	int size, ret;
+
+	/*
+	 * If this CPU attempts to acquire an rq lock held by a CPU spinning
+	 * in perf_output_lock() from interrupt context, it's game over.
+	 */
+	local_irq_save(flags);
 
 	size = task_event->event_id.header.size;
 	ret = perf_output_begin(&handle, event, size, 0, 0);
 
-	if (ret)
+	if (ret) {
+		local_irq_restore(flags);
 		return;
+	}
 
 	task_event->event_id.pid = perf_event_pid(event, task);
 	task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3397,6 +3402,7 @@ static void perf_event_task_output(struct perf_event *event,
 	perf_output_put(&handle, task_event->event_id);
 
 	perf_output_end(&handle);
+	local_irq_restore(flags);
 }
 
 static int perf_event_task_match(struct perf_event *event)
diff --git a/kernel/sched.c b/kernel/sched.c
index 49d2fa7b687a..528a10592c16 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5387,7 +5387,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 
 		get_task_struct(mt);
 		task_rq_unlock(rq, &flags);
-		wake_up_process(rq->migration_thread);
+		wake_up_process(mt);
 		put_task_struct(mt);
 		wait_for_completion(&req.done);
 		tlb_migrate_finish(p->mm);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 67f95aada4b9..9b49db144037 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -518,8 +518,4 @@ void proc_sched_set_task(struct task_struct *p)
 	p->se.nr_wakeups_idle = 0;
 	p->sched_info.bkl_count = 0;
 #endif
-	p->se.sum_exec_runtime = 0;
-	p->se.prev_sum_exec_runtime = 0;
-	p->nvcsw = 0;
-	p->nivcsw = 0;
 }
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d1187ef20caf..9a0f9bf6a37b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1209,18 +1209,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-			return;
+			goto out;
 		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-		return;
+		goto out;
 
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
+ out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
@@ -1237,7 +1238,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-			return;
+			goto out;
 		p = pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
@@ -1246,6 +1247,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
+ out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 6fbfb8f417b9..9d589d8dcd1a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -84,7 +84,7 @@ u64 notrace trace_clock_global(void)
 	int this_cpu;
 	u64 now;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 
 	this_cpu = raw_smp_processor_id();
 	now = cpu_clock(this_cpu);
@@ -110,7 +110,7 @@ u64 notrace trace_clock_global(void)
 	arch_spin_unlock(&trace_clock_struct.lock);
 
  out:
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
 	return now;
 }
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 81f691eb3a30..0565bb42566f 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
+/*
+ * Force it to be aligned to unsigned long to avoid misaligned accesses
+ * suprises
+ */
+typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
+	perf_trace_t;
 
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	char *trace_buf, *raw_data;
 	int pc, cpu;
 
+	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
 	pc = preempt_count();
 
 	/* Protect the per cpu buffer, begin the rcu read side */
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
 
 	entry = (struct trace_entry *)raw_data;
 	tracing_generic_entry_update(entry, *irq_flags, pc);
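The trace_event_perf.c change can be illustrated with a small standalone sketch: declaring the raw buffer as an array of unsigned long gives it natural word alignment, a compile-time check (the userland analog of BUILD_BUG_ON()) catches sizes that are not a whole number of words, and memset() clears the trailing padding without relying on an aligned 64-bit store. TRACE_BUF_SIZE and trace_buf_t are hypothetical names used only in this sketch, not the kernel's.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for PERF_MAX_TRACE_SIZE. */
#define TRACE_BUF_SIZE 2048

/*
 * An array of unsigned long carries the alignment of unsigned long,
 * so word-sized accesses into the buffer cannot be misaligned.
 */
typedef unsigned long trace_buf_t[TRACE_BUF_SIZE / sizeof(unsigned long)];

/* Userland stand-in for the kernel's BUILD_BUG_ON(). */
_Static_assert(TRACE_BUF_SIZE % sizeof(unsigned long) == 0,
	       "TRACE_BUF_SIZE must be a multiple of sizeof(unsigned long)");

int main(void)
{
	static trace_buf_t buf;
	char *raw_data = (char *)buf;
	size_t size = 24;	/* record size, already rounded up to 8 bytes */

	/* Zero the dead bytes introduced by rounding, without assuming the
	 * last 8 bytes of the record start on an 8-byte boundary. */
	memset(&raw_data[size - sizeof(uint64_t)], 0, sizeof(uint64_t));

	printf("buffer holds %zu words\n", sizeof(buf) / sizeof(buf[0]));
	return 0;
}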