Diffstat (limited to 'kernel')

 kernel/perf_event.c             |  4 +---
 kernel/trace/trace_event_perf.c | 11 +++++++++--
 2 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 574ee58a3046..b0feb4795af3 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1164,11 +1164,9 @@ void perf_event_task_sched_out(struct task_struct *task,
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 	struct perf_event_context *next_ctx;
 	struct perf_event_context *parent;
-	struct pt_regs *regs;
 	int do_switch = 1;
 
-	regs = task_pt_regs(task);
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
 	if (likely(!ctx || !cpuctx->task_ctx))
 		return;
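This hunk drops a task_pt_regs() lookup from every context switch: the software event is now emitted with a NULL pt_regs pointer instead of a register snapshot computed up front. As a rough user-space illustration of the "NULL means no snapshot" convention (the struct and function names below are hypothetical stand-ins, not the kernel's actual perf_sw_event() internals):

#include <stdio.h>

/* Hypothetical stand-ins for pt_regs and the event hook; only the
 * NULL-regs convention mirrors the change above. */
struct regs_snapshot { unsigned long ip, sp; };

static void sw_event(int id, const struct regs_snapshot *regs)
{
	/* The consumer decides what to do when no registers arrive. */
	if (regs)
		printf("event %d at ip=%#lx\n", id, regs->ip);
	else
		printf("event %d (no register snapshot)\n", id);
}

int main(void)
{
	/* Hot path: no snapshot is built that the event may never use. */
	sw_event(3, NULL);
	return 0;
}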
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 81f691eb3a30..0565bb42566f 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t;
+/*
+ * Force it to be aligned to unsigned long to avoid misaligned access
+ * surprises
+ */
+typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
+	perf_trace_t;
 
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;
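The new typedef sizes the buffer in unsigned longs rather than chars, so the type itself carries unsigned long alignment while keeping the same byte size. A minimal, runnable sketch of the effect (the value 2048 for PERF_MAX_TRACE_SIZE is assumed for illustration):

#include <stdio.h>
#include <stdalign.h>

#define PERF_MAX_TRACE_SIZE 2048	/* assumed value for illustration */

typedef char trace_char_t[PERF_MAX_TRACE_SIZE];
typedef unsigned long trace_ulong_t[PERF_MAX_TRACE_SIZE / sizeof(unsigned long)];

int main(void)
{
	/* Same size either way, but only the unsigned long variant
	 * guarantees word alignment for objects of this type. */
	printf("char[]:  size=%zu align=%zu\n",
	       sizeof(trace_char_t), alignof(trace_char_t));
	printf("ulong[]: size=%zu align=%zu\n",
	       sizeof(trace_ulong_t), alignof(trace_ulong_t));
	return 0;
}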
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	char *trace_buf, *raw_data;
 	int pc, cpu;
 
+	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
 	pc = preempt_count();
 
 	/* Protect the per cpu buffer, begin the rcu read side */
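The BUILD_BUG_ON() turns the divisibility requirement into a compile-time check, so the typedef above can never silently truncate the buffer if PERF_MAX_TRACE_SIZE changes. A sketch of how such a negative-array-size assertion works (this follows the classic kernel idiom in spirit; the macro name here is illustrative):

#include <stdio.h>

#define PERF_MAX_TRACE_SIZE 2048	/* assumed value for illustration */

/* Compile-time assertion: a true condition makes the array size
 * negative, which the compiler rejects. */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
	/* Refuses to compile if the size is not a whole number of words. */
	MY_BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
	printf("size check passed at compile time\n");
	return 0;
}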
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
 
 	entry = (struct trace_entry *)raw_data;
 	tracing_generic_entry_update(entry, *irq_flags, pc);
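The last hunk replaces a direct u64 store with memset(): the buffer is only guaranteed unsigned long alignment, which is 4 bytes on 32-bit targets, so an 8-byte store at size - sizeof(u64) could be misaligned there; memset() makes no alignment assumptions. A user-space sketch of the hazard (the fault itself would only fire on strict-alignment architectures):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	char buf[16];
	size_t size = 12;	/* 4-byte aligned end, not 8-byte aligned */

	/* Risky on strict-alignment CPUs: &buf[4] need not be 8-byte
	 * aligned, so this store could fault there. */
	/* *(uint64_t *)(&buf[size - sizeof(uint64_t)]) = 0; */

	/* Safe everywhere: byte-wise clear, no alignment assumed. */
	memset(&buf[size - sizeof(uint64_t)], 0, sizeof(uint64_t));

	printf("cleared %zu trailing bytes\n", sizeof(uint64_t));
	return 0;
}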
