author    Tejun Heo <tj@kernel.org>    2010-04-04 22:37:28 -0400
committer Tejun Heo <tj@kernel.org>    2010-04-04 22:37:28 -0400
commit    336f5899d287f06d8329e208fc14ce50f7ec9698
tree      9b762d450d5eb248a6ff8317badb7e223d93ed58 /kernel/trace
parent    a4ab2773205e8b94c18625455f85e3b6bb9d7ad6
parent    db217dece3003df0841bacf9556b5c06aa097dae

Merge branch 'master' into export-slabh
Diffstat (limited to 'kernel/trace'):

 kernel/trace/ring_buffer.c      |  8 +++++---
 kernel/trace/trace_clock.c      |  4 ++--
 kernel/trace/trace_event_perf.c | 11 +++++++++--
 3 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2c839ca5e5ce..41ca394feb22 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1210,18 +1210,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-			return;
+			goto out;
 		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-		return;
+		goto out;
 
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
@@ -1238,7 +1239,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-			return;
+			goto out;
 		p = pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
@@ -1247,6 +1248,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
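Both hunks in this file fix the same bug: rb_remove_pages() and rb_insert_pages() run with cpu_buffer->reader_lock held, but their RB_WARN_ON() error paths bailed out with a bare return, leaving the lock held forever. Funnelling every exit through a single out: label guarantees the spin_unlock_irq() always runs. Below is a minimal userspace sketch of that single-exit locking pattern, with a pthread mutex standing in for reader_lock; remove_items() and items are hypothetical names for illustration, not kernel code.

/*
 * Single-exit locking pattern: every path out of the critical
 * section, including early error exits, goes through "out".
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int items = 3;

static void remove_items(int nr)
{
	pthread_mutex_lock(&lock);

	for (int i = 0; i < nr; i++) {
		if (items == 0)
			goto out;	/* early exit still releases the lock */
		items--;
	}

	printf("removed %d items, %d left\n", nr, items);
out:
	pthread_mutex_unlock(&lock);	/* single unlock site for every path */
}

int main(void)
{
	remove_items(2);
	remove_items(5);	/* hits the early-exit path safely */
	return 0;
}

The rule the hunks restore: an early exit taken after the lock is acquired must jump past the remaining work, never past the unlock.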
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 6fbfb8f417b9..9d589d8dcd1a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -84,7 +84,7 @@ u64 notrace trace_clock_global(void)
 	int this_cpu;
 	u64 now;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 
 	this_cpu = raw_smp_processor_id();
 	now = cpu_clock(this_cpu);
@@ -110,7 +110,7 @@ u64 notrace trace_clock_global(void)
 	arch_spin_unlock(&trace_clock_struct.lock);
 
 out:
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
 	return now;
 }
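These two hunks switch trace_clock_global() from the raw irq-flags primitives to the plain ones. The raw_ variants disable and restore interrupts without notifying lockdep's irq-state tracking, so using them here could leave that bookkeeping out of sync with reality. A schematic sketch of the relationship, loosely following how include/linux/irqflags.h composes the two when CONFIG_TRACE_IRQFLAGS is enabled; this is simplified for illustration, not a buildable kernel excerpt.

/*
 * Simplified illustration: the plain variant is the raw arch-level
 * disable plus a notification that keeps lockdep/irq tracing in sync.
 */
#define local_irq_save(flags)					\
	do {							\
		raw_local_irq_save(flags);	/* arch-level disable */\
		trace_hardirqs_off();		/* tell lockdep */	\
	} while (0)

local_irq_restore() is symmetric: it calls trace_hardirqs_on() before re-enabling interrupts, so the tracing state never claims interrupts are off while they are actually on.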
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 81f691eb3a30..0565bb42566f 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
+/*
+ * Force it to be aligned to unsigned long to avoid misaligned access
+ * surprises
+ */
+typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
+	perf_trace_t;
 
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	char *trace_buf, *raw_data;
 	int pc, cpu;
 
+	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
 	pc = preempt_count();
 
 	/* Protect the per cpu buffer, begin the rcu read side */
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
 
 	entry = (struct trace_entry *)raw_data;
 	tracing_generic_entry_update(entry, *irq_flags, pc);
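The trace_event_perf.c hunks attack one alignment problem from three sides: perf_trace_t becomes an unsigned long array so the per-cpu buffer is naturally aligned, BUILD_BUG_ON() breaks the build if PERF_MAX_TRACE_SIZE is not a multiple of sizeof(unsigned long), and the tail-zeroing uses memset() instead of a direct u64 store that could itself be a misaligned access. A self-contained userspace sketch of the same three moves; BUF_SIZE and trace_buf_t are stand-ins for PERF_MAX_TRACE_SIZE and perf_trace_t, not kernel names.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 2048

/* unsigned long array: same storage as char[BUF_SIZE], stricter alignment */
typedef unsigned long trace_buf_t[BUF_SIZE / sizeof(unsigned long)];

/* compile-time check standing in for the BUILD_BUG_ON() */
_Static_assert(BUF_SIZE % sizeof(unsigned long) == 0,
	       "size must be a multiple of sizeof(unsigned long)");

int main(void)
{
	static trace_buf_t buf;
	char *raw_data = (char *)buf;
	size_t size = 100;	/* record length; its tail may be unaligned */

	/*
	 * Zero the dead bytes byte-wise: offset 92 is not 8-byte
	 * aligned, so a direct u64 store here would be misaligned.
	 */
	memset(&raw_data[size - sizeof(uint64_t)], 0, sizeof(uint64_t));

	printf("buffer at %p, %zu-byte record padded safely\n",
	       (void *)buf, size);
	return 0;
}

memset() lets the compiler pick byte-safe stores for an address that is only guaranteed char-aligned, whereas the direct u64 write it replaces can trap on strict-alignment architectures whenever the target is not 8-byte aligned.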