diffstat:

-rw-r--r--  arch/Kconfig                      |   2
-rw-r--r--  arch/x86/oprofile/op_model_amd.c  | 224
-rw-r--r--  drivers/oprofile/buffer_sync.c    | 229
-rw-r--r--  drivers/oprofile/cpu_buffer.c     | 393
-rw-r--r--  drivers/oprofile/cpu_buffer.h     |  72
-rw-r--r--  drivers/oprofile/event_buffer.c   |   4
-rw-r--r--  drivers/oprofile/oprof.c          |   4
-rw-r--r--  drivers/oprofile/oprof.h          |   8
-rw-r--r--  drivers/oprofile/oprofile_files.c |  24
-rw-r--r--  include/linux/oprofile.h          |  21
-rw-r--r--  include/linux/ring_buffer.h       |   2
-rw-r--r--  kernel/trace/ring_buffer.c        |  44
-rw-r--r--  kernel/trace/trace.c              |   4
13 files changed, 566 insertions(+), 465 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 471e72dbaf8b..2e13aa261929 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -6,6 +6,8 @@ config OPROFILE
 	tristate "OProfile system profiling (EXPERIMENTAL)"
 	depends on PROFILING
 	depends on HAVE_OPROFILE
+	select TRACING
+	select RING_BUFFER
 	help
 	  OProfile is a profiling system capable of profiling the
 	  whole system, include the kernel, kernel modules, libraries,
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 509513760a6e..8fdf06e4edf9 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -2,7 +2,7 @@
  * @file op_model_amd.c
  * athlon / K7 / K8 / Family 10h model-specific MSR operations
  *
- * @remark Copyright 2002-2008 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon
@@ -10,7 +10,7 @@
  * @author Graydon Hoare
  * @author Robert Richter <robert.richter@amd.com>
  * @author Barry Kasindorf
-*/
+ */
 
 #include <linux/oprofile.h>
 #include <linux/device.h>
@@ -60,56 +60,10 @@ static unsigned long reset_value[NUM_COUNTERS];
 #define IBS_OP_LOW_VALID_BIT	(1ULL<<18)	/* bit 18 */
 #define IBS_OP_LOW_ENABLE	(1ULL<<17)	/* bit 17 */
 
-/* Codes used in cpu_buffer.c */
-/* This produces duplicate code, need to be fixed */
-#define IBS_FETCH_BEGIN 3
-#define IBS_OP_BEGIN    4
-
-/* The function interface needs to be fixed, something like add
-   data. Should then be added to linux/oprofile.h. */
-extern void
-oprofile_add_ibs_sample(struct pt_regs *const regs,
-			unsigned int *const ibs_sample, int ibs_code);
-
-struct ibs_fetch_sample {
-	/* MSRC001_1031 IBS Fetch Linear Address Register */
-	unsigned int ibs_fetch_lin_addr_low;
-	unsigned int ibs_fetch_lin_addr_high;
-	/* MSRC001_1030 IBS Fetch Control Register */
-	unsigned int ibs_fetch_ctl_low;
-	unsigned int ibs_fetch_ctl_high;
-	/* MSRC001_1032 IBS Fetch Physical Address Register */
-	unsigned int ibs_fetch_phys_addr_low;
-	unsigned int ibs_fetch_phys_addr_high;
-};
-
-struct ibs_op_sample {
-	/* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */
-	unsigned int ibs_op_rip_low;
-	unsigned int ibs_op_rip_high;
-	/* MSRC001_1035 IBS Op Data Register */
-	unsigned int ibs_op_data1_low;
-	unsigned int ibs_op_data1_high;
-	/* MSRC001_1036 IBS Op Data 2 Register */
-	unsigned int ibs_op_data2_low;
-	unsigned int ibs_op_data2_high;
-	/* MSRC001_1037 IBS Op Data 3 Register */
-	unsigned int ibs_op_data3_low;
-	unsigned int ibs_op_data3_high;
-	/* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */
-	unsigned int ibs_dc_linear_low;
-	unsigned int ibs_dc_linear_high;
-	/* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */
-	unsigned int ibs_dc_phys_low;
-	unsigned int ibs_dc_phys_high;
-};
-
-/*
- * unitialize the APIC for the IBS interrupts if needed on AMD Family10h+
- */
-static void clear_ibs_nmi(void);
+#define IBS_FETCH_SIZE	6
+#define IBS_OP_SIZE	12
 
-static int ibs_allowed;	/* AMD Family10h and later */
+static int has_ibs;	/* AMD Family10h and later */
 
 struct op_ibs_config {
 	unsigned long op_enabled;
@@ -200,31 +154,29 @@ static inline int
 op_amd_handle_ibs(struct pt_regs * const regs,
 		  struct op_msrs const * const msrs)
 {
-	unsigned int low, high;
-	struct ibs_fetch_sample ibs_fetch;
-	struct ibs_op_sample ibs_op;
+	u32 low, high;
+	u64 msr;
+	struct op_entry entry;
 
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return 1;
 
 	if (ibs_config.fetch_enabled) {
 		rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 		if (high & IBS_FETCH_HIGH_VALID_BIT) {
-			ibs_fetch.ibs_fetch_ctl_high = high;
-			ibs_fetch.ibs_fetch_ctl_low = low;
-			rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high);
-			ibs_fetch.ibs_fetch_lin_addr_high = high;
-			ibs_fetch.ibs_fetch_lin_addr_low = low;
-			rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high);
-			ibs_fetch.ibs_fetch_phys_addr_high = high;
-			ibs_fetch.ibs_fetch_phys_addr_low = low;
-
-			oprofile_add_ibs_sample(regs,
-						(unsigned int *)&ibs_fetch,
-						IBS_FETCH_BEGIN);
-
-			/*reenable the IRQ */
-			rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+			rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr);
+			oprofile_write_reserve(&entry, regs, msr,
+					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			oprofile_add_data(&entry, low);
+			oprofile_add_data(&entry, high);
+			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			oprofile_write_commit(&entry);
+
+			/* reenable the IRQ */
 			high &= ~IBS_FETCH_HIGH_VALID_BIT;
 			high |= IBS_FETCH_HIGH_ENABLE;
 			low &= IBS_FETCH_LOW_MAX_CNT_MASK;
@@ -235,30 +187,29 @@ op_amd_handle_ibs(struct pt_regs * const regs,
 	if (ibs_config.op_enabled) {
 		rdmsr(MSR_AMD64_IBSOPCTL, low, high);
 		if (low & IBS_OP_LOW_VALID_BIT) {
-			rdmsr(MSR_AMD64_IBSOPRIP, low, high);
-			ibs_op.ibs_op_rip_low = low;
-			ibs_op.ibs_op_rip_high = high;
-			rdmsr(MSR_AMD64_IBSOPDATA, low, high);
-			ibs_op.ibs_op_data1_low = low;
-			ibs_op.ibs_op_data1_high = high;
-			rdmsr(MSR_AMD64_IBSOPDATA2, low, high);
-			ibs_op.ibs_op_data2_low = low;
-			ibs_op.ibs_op_data2_high = high;
-			rdmsr(MSR_AMD64_IBSOPDATA3, low, high);
-			ibs_op.ibs_op_data3_low = low;
-			ibs_op.ibs_op_data3_high = high;
-			rdmsr(MSR_AMD64_IBSDCLINAD, low, high);
-			ibs_op.ibs_dc_linear_low = low;
-			ibs_op.ibs_dc_linear_high = high;
-			rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high);
-			ibs_op.ibs_dc_phys_low = low;
-			ibs_op.ibs_dc_phys_high = high;
+			rdmsrl(MSR_AMD64_IBSOPRIP, msr);
+			oprofile_write_reserve(&entry, regs, msr,
+					       IBS_OP_CODE, IBS_OP_SIZE);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSOPDATA, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSOPDATA2, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSOPDATA3, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSDCLINAD, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			oprofile_write_commit(&entry);
 
 			/* reenable the IRQ */
-			oprofile_add_ibs_sample(regs,
-						(unsigned int *)&ibs_op,
-						IBS_OP_BEGIN);
-			rdmsr(MSR_AMD64_IBSOPCTL, low, high);
 			high = 0;
 			low &= ~IBS_OP_LOW_VALID_BIT;
 			low |= IBS_OP_LOW_ENABLE;
@@ -308,14 +259,14 @@ static void op_amd_start(struct op_msrs const * const msrs)
 	}
 
 #ifdef CONFIG_OPROFILE_IBS
-	if (ibs_allowed && ibs_config.fetch_enabled) {
+	if (has_ibs && ibs_config.fetch_enabled) {
 		low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
 		high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
 			+ IBS_FETCH_HIGH_ENABLE;
 		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 	}
 
-	if (ibs_allowed && ibs_config.op_enabled) {
+	if (has_ibs && ibs_config.op_enabled) {
 		low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
 			+ ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
 			+ IBS_OP_LOW_ENABLE;
@@ -331,8 +282,10 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
-	/* Subtle: stop on all counters to avoid race with
-	 * setting our pm callback */
+	/*
+	 * Subtle: stop on all counters to avoid race with setting our
+	 * pm callback
+	 */
 	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
 		if (!reset_value[i])
 			continue;
@@ -342,14 +295,16 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 	}
 
 #ifdef CONFIG_OPROFILE_IBS
-	if (ibs_allowed && ibs_config.fetch_enabled) {
-		low = 0;		/* clear max count and enable */
+	if (has_ibs && ibs_config.fetch_enabled) {
+		/* clear max count and enable */
+		low = 0;
 		high = 0;
 		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 	}
 
-	if (ibs_allowed && ibs_config.op_enabled) {
-		low = 0;		/* clear max count and enable */
+	if (has_ibs && ibs_config.op_enabled) {
+		/* clear max count and enable */
+		low = 0;
 		high = 0;
 		wrmsr(MSR_AMD64_IBSOPCTL, low, high);
 	}
@@ -370,18 +325,7 @@ static void op_amd_shutdown(struct op_msrs const * const msrs)
 	}
 }
 
-#ifndef CONFIG_OPROFILE_IBS
-
-/* no IBS support */
-
-static int op_amd_init(struct oprofile_operations *ops)
-{
-	return 0;
-}
-
-static void op_amd_exit(void) {}
-
-#else
+#ifdef CONFIG_OPROFILE_IBS
 
 static u8 ibs_eilvt_off;
 
@@ -395,7 +339,7 @@ static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
 	setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
 }
 
-static int pfm_amd64_setup_eilvt(void)
+static int init_ibs_nmi(void)
 {
 #define IBSCTL_LVTOFFSETVAL	(1 << 8)
 #define IBSCTL			0x1cc
@@ -419,6 +363,7 @@ static int pfm_amd64_setup_eilvt(void)
 				 | IBSCTL_LVTOFFSETVAL);
 		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
 		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
+			pci_dev_put(cpu_cfg);
 			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
 				"IBSCTL = 0x%08x", value);
 			return 1;
@@ -443,33 +388,35 @@ static int pfm_amd64_setup_eilvt(void)
 	return 0;
 }
 
-/*
- * initialize the APIC for the IBS interrupts
- * if available (AMD Family10h rev B0 and later)
- */
-static void setup_ibs(void)
+/* uninitialize the APIC for the IBS interrupts if needed */
+static void clear_ibs_nmi(void)
 {
-	ibs_allowed = boot_cpu_has(X86_FEATURE_IBS);
+	if (has_ibs)
+		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
+}
+
+/* initialize the APIC for the IBS interrupts if available */
+static void ibs_init(void)
+{
+	has_ibs = boot_cpu_has(X86_FEATURE_IBS);
 
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return;
 
-	if (pfm_amd64_setup_eilvt()) {
-		ibs_allowed = 0;
+	if (init_ibs_nmi()) {
+		has_ibs = 0;
		return;
 	}
 
 	printk(KERN_INFO "oprofile: AMD IBS detected\n");
 }
 
-
-/*
- * unitialize the APIC for the IBS interrupts if needed on AMD Family10h
- * rev B0 and later */
-static void clear_ibs_nmi(void)
+static void ibs_exit(void)
 {
-	if (ibs_allowed)
-		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
+	if (!has_ibs)
+		return;
+
+	clear_ibs_nmi();
 }
 
 static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
@@ -486,7 +433,7 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
 	if (ret)
 		return ret;
 
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return ret;
 
 	/* model specific files */
@@ -519,7 +466,7 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
 
 static int op_amd_init(struct oprofile_operations *ops)
 {
-	setup_ibs();
+	ibs_init();
 	create_arch_files = ops->create_files;
 	ops->create_files = setup_ibs_files;
 	return 0;
@@ -527,10 +474,21 @@ static int op_amd_init(struct oprofile_operations *ops)
 
 static void op_amd_exit(void)
 {
-	clear_ibs_nmi();
+	ibs_exit();
 }
 
-#endif
+#else
+
+/* no IBS support */
+
+static int op_amd_init(struct oprofile_operations *ops)
+{
+	return 0;
+}
+
+static void op_amd_exit(void) {}
+
+#endif /* CONFIG_OPROFILE_IBS */
 
 struct op_x86_model_spec const op_amd_spec = {
 	.init			= op_amd_init,
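
The hunks above replace the fixed-layout ibs_fetch_sample/ibs_op_sample structs with a generic reserve/add/commit sequence. For orientation, a condensed sketch of how the new write path records one IBS fetch event, assembled from the fetch hunk above; low/high hold the previously read IBSFETCHCTL halves, and this is an illustration, not an additional change:

	struct op_entry entry;
	u64 msr;

	rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr);
	/* reserve IBS_FETCH_SIZE (6) data words for this sample */
	oprofile_write_reserve(&entry, regs, msr, IBS_FETCH_CODE, IBS_FETCH_SIZE);
	oprofile_add_data(&entry, (u32)msr);		/* fetch linear address, low */
	oprofile_add_data(&entry, (u32)(msr >> 32));	/* fetch linear address, high */
	oprofile_add_data(&entry, low);			/* IBSFETCHCTL, low */
	oprofile_add_data(&entry, high);		/* IBSFETCHCTL, high */
	rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr);
	oprofile_add_data(&entry, (u32)msr);		/* fetch physical address, low */
	oprofile_add_data(&entry, (u32)(msr >> 32));	/* fetch physical address, high */
	oprofile_write_commit(&entry);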
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index b55cd23ffdef..ac014cb27915 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -1,11 +1,12 @@
 /**
  * @file buffer_sync.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
  * @author Barry Kasindorf
+ * @author Robert Richter <robert.richter@amd.com>
  *
  * This is the core of the buffer management. Each
  * CPU buffer is processed and entered into the
@@ -268,18 +269,6 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
 	return cookie;
 }
 
-static void increment_tail(struct oprofile_cpu_buffer *b)
-{
-	unsigned long new_tail = b->tail_pos + 1;
-
-	rmb(); /* be sure fifo pointers are synchromized */
-
-	if (new_tail < b->buffer_size)
-		b->tail_pos = new_tail;
-	else
-		b->tail_pos = 0;
-}
-
 static unsigned long last_cookie = INVALID_COOKIE;
 
 static void add_cpu_switch(int i)
@@ -327,84 +316,73 @@ static void add_trace_begin(void)
 	add_event_entry(TRACE_BEGIN_CODE);
 }
 
-#ifdef CONFIG_OPROFILE_IBS
-
-#define IBS_FETCH_CODE_SIZE	2
-#define IBS_OP_CODE_SIZE	5
-#define IBS_EIP(offset)				\
-	(((struct op_sample *)&cpu_buf->buffer[(offset)])->eip)
-#define IBS_EVENT(offset)			\
-	(((struct op_sample *)&cpu_buf->buffer[(offset)])->event)
-
-/*
- * Add IBS fetch and op entries to event buffer
- */
-static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
-			  struct mm_struct *mm)
+static void add_data(struct op_entry *entry, struct mm_struct *mm)
 {
-	unsigned long rip;
-	int i, count;
-	unsigned long ibs_cookie = 0;
+	unsigned long code, pc, val;
+	unsigned long cookie;
 	off_t offset;
 
-	increment_tail(cpu_buf);	/* move to RIP entry */
-
-	rip = IBS_EIP(cpu_buf->tail_pos);
-
-#ifdef __LP64__
-	rip += IBS_EVENT(cpu_buf->tail_pos) << 32;
-#endif
+	if (!op_cpu_buffer_get_data(entry, &code))
+		return;
+	if (!op_cpu_buffer_get_data(entry, &pc))
+		return;
+	if (!op_cpu_buffer_get_size(entry))
+		return;
 
 	if (mm) {
-		ibs_cookie = lookup_dcookie(mm, rip, &offset);
+		cookie = lookup_dcookie(mm, pc, &offset);
 
-		if (ibs_cookie == NO_COOKIE)
-			offset = rip;
-		if (ibs_cookie == INVALID_COOKIE) {
+		if (cookie == NO_COOKIE)
+			offset = pc;
+		if (cookie == INVALID_COOKIE) {
 			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
-			offset = rip;
+			offset = pc;
 		}
-		if (ibs_cookie != last_cookie) {
-			add_cookie_switch(ibs_cookie);
-			last_cookie = ibs_cookie;
+		if (cookie != last_cookie) {
+			add_cookie_switch(cookie);
+			last_cookie = cookie;
 		}
 	} else
-		offset = rip;
+		offset = pc;
 
 	add_event_entry(ESCAPE_CODE);
 	add_event_entry(code);
 	add_event_entry(offset);	/* Offset from Dcookie */
 
-	/* we send the Dcookie offset, but send the raw Linear Add also*/
-	add_event_entry(IBS_EIP(cpu_buf->tail_pos));
-	add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
-
-	if (code == IBS_FETCH_CODE)
-		count = IBS_FETCH_CODE_SIZE;	/*IBS FETCH is 2 int64s*/
-	else
-		count = IBS_OP_CODE_SIZE;	/*IBS OP is 5 int64s*/
-
-	for (i = 0; i < count; i++) {
-		increment_tail(cpu_buf);
-		add_event_entry(IBS_EIP(cpu_buf->tail_pos));
-		add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
-	}
+	while (op_cpu_buffer_get_data(entry, &val))
+		add_event_entry(val);
 }
 
-#endif
-
-static void add_sample_entry(unsigned long offset, unsigned long event)
+static inline void add_sample_entry(unsigned long offset, unsigned long event)
 {
 	add_event_entry(offset);
 	add_event_entry(event);
 }
 
 
-static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
+/*
+ * Add a sample to the global event buffer. If possible the
+ * sample is converted into a persistent dentry/offset pair
+ * for later lookup from userspace. Return 0 on failure.
+ */
+static int
+add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
 {
 	unsigned long cookie;
 	off_t offset;
 
+	if (in_kernel) {
+		add_sample_entry(s->eip, s->event);
+		return 1;
+	}
+
+	/* add userspace sample */
+
+	if (!mm) {
+		atomic_inc(&oprofile_stats.sample_lost_no_mm);
+		return 0;
+	}
+
 	cookie = lookup_dcookie(mm, s->eip, &offset);
 
 	if (cookie == INVALID_COOKIE) {
@@ -423,25 +401,6 @@ static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
 }
 
 
-/* Add a sample to the global event buffer. If possible the
- * sample is converted into a persistent dentry/offset pair
- * for later lookup from userspace.
- */
-static int
-add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
-{
-	if (in_kernel) {
-		add_sample_entry(s->eip, s->event);
-		return 1;
-	} else if (mm) {
-		return add_us_sample(mm, s);
-	} else {
-		atomic_inc(&oprofile_stats.sample_lost_no_mm);
-	}
-	return 0;
-}
-
-
 static void release_mm(struct mm_struct *mm)
 {
 	if (!mm)
@@ -466,33 +425,6 @@ static inline int is_code(unsigned long val)
 }
 
 
-/* "acquire" as many cpu buffer slots as we can */
-static unsigned long get_slots(struct oprofile_cpu_buffer *b)
-{
-	unsigned long head = b->head_pos;
-	unsigned long tail = b->tail_pos;
-
-	/*
-	 * Subtle. This resets the persistent last_task
-	 * and in_kernel values used for switching notes.
-	 * BUT, there is a small window between reading
-	 * head_pos, and this call, that means samples
-	 * can appear at the new head position, but not
-	 * be prefixed with the notes for switching
-	 * kernel mode or a task switch. This small hole
-	 * can lead to mis-attribution or samples where
-	 * we don't know if it's in the kernel or not,
-	 * at the start of an event buffer.
-	 */
-	cpu_buffer_reset(b);
-
-	if (head >= tail)
-		return head - tail;
-
-	return head + (b->buffer_size - tail);
-}
-
-
 /* Move tasks along towards death. Any tasks on dead_tasks
  * will definitely have no remaining references in any
  * CPU buffers at this point, because we use two lists,
@@ -559,71 +491,72 @@ typedef enum {
  */
 void sync_buffer(int cpu)
 {
-	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
 	struct mm_struct *mm = NULL;
+	struct mm_struct *oldmm;
+	unsigned long val;
 	struct task_struct *new;
 	unsigned long cookie = 0;
 	int in_kernel = 1;
 	sync_buffer_state state = sb_buffer_start;
-#ifndef CONFIG_OPROFILE_IBS
 	unsigned int i;
 	unsigned long available;
-#endif
+	unsigned long flags;
+	struct op_entry entry;
+	struct op_sample *sample;
 
 	mutex_lock(&buffer_mutex);
 
 	add_cpu_switch(cpu);
 
-	/* Remember, only we can modify tail_pos */
-
-#ifndef CONFIG_OPROFILE_IBS
-	available = get_slots(cpu_buf);
+	op_cpu_buffer_reset(cpu);
+	available = op_cpu_buffer_entries(cpu);
 
 	for (i = 0; i < available; ++i) {
-#else
-	while (get_slots(cpu_buf)) {
-#endif
-		struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
+		sample = op_cpu_buffer_read_entry(&entry, cpu);
+		if (!sample)
+			break;
 
-		if (is_code(s->eip)) {
-			if (s->event <= CPU_IS_KERNEL) {
+		if (is_code(sample->eip)) {
+			flags = sample->event;
+			if (flags & TRACE_BEGIN) {
+				state = sb_bt_start;
+				add_trace_begin();
+			}
+			if (flags & KERNEL_CTX_SWITCH) {
 				/* kernel/userspace switch */
-				in_kernel = s->event;
+				in_kernel = flags & IS_KERNEL;
 				if (state == sb_buffer_start)
 					state = sb_sample_start;
-				add_kernel_ctx_switch(s->event);
-			} else if (s->event == CPU_TRACE_BEGIN) {
-				state = sb_bt_start;
-				add_trace_begin();
-#ifdef CONFIG_OPROFILE_IBS
-			} else if (s->event == IBS_FETCH_BEGIN) {
-				state = sb_bt_start;
-				add_ibs_begin(cpu_buf, IBS_FETCH_CODE, mm);
-			} else if (s->event == IBS_OP_BEGIN) {
-				state = sb_bt_start;
-				add_ibs_begin(cpu_buf, IBS_OP_CODE, mm);
-#endif
-			} else {
-				struct mm_struct *oldmm = mm;
-
+				add_kernel_ctx_switch(flags & IS_KERNEL);
+			}
+			if (flags & USER_CTX_SWITCH
+			    && op_cpu_buffer_get_data(&entry, &val)) {
 				/* userspace context switch */
-				new = (struct task_struct *)s->event;
-
+				new = (struct task_struct *)val;
+				oldmm = mm;
 				release_mm(oldmm);
 				mm = take_tasks_mm(new);
 				if (mm != oldmm)
 					cookie = get_exec_dcookie(mm);
 				add_user_ctx_switch(new, cookie);
 			}
-		} else if (state >= sb_bt_start &&
-			   !add_sample(mm, s, in_kernel)) {
-			if (state == sb_bt_start) {
-				state = sb_bt_ignore;
-				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-			}
+			if (op_cpu_buffer_get_size(&entry))
+				add_data(&entry, mm);
+			continue;
 		}
 
-		increment_tail(cpu_buf);
+		if (state < sb_bt_start)
+			/* ignore sample */
+			continue;
+
+		if (add_sample(mm, sample, in_kernel))
+			continue;
+
+		/* ignore backtraces if failed to add a sample */
+		if (state == sb_bt_start) {
+			state = sb_bt_ignore;
+			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+		}
 	}
 	release_mm(mm);
 
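
With the IBS special cases gone, sync_buffer() drives everything off the flag bits stored in escape entries. As a reading aid, a sketch of the three record shapes the loop above distinguishes; the writers are op_add_code(), op_add_sample() and oprofile_write_reserve() in cpu_buffer.c below, and the concrete values are illustrative:

	/*
	 * Context-switch note, written by op_add_code():
	 *   eip   = ESCAPE_CODE
	 *   event = TRACE_BEGIN / KERNEL_CTX_SWITCH / IS_KERNEL / USER_CTX_SWITCH bits
	 *   data  = { (unsigned long)task }   -- only with USER_CTX_SWITCH
	 *
	 * Plain PC sample, written by op_add_sample():
	 *   eip   = pc
	 *   event = event
	 *
	 * Extended sample, written by oprofile_write_reserve():
	 *   eip   = ESCAPE_CODE
	 *   event = 0 (no flags)
	 *   data  = { code, pc, val0, val1, ... }   -- consumed by add_data() above
	 */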
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 01d38e78cde1..2e03b6d796d3 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -1,11 +1,12 @@
 /**
  * @file cpu_buffer.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
  * @author Barry Kasindorf <barry.kasindorf@amd.com>
+ * @author Robert Richter <robert.richter@amd.com>
  *
  * Each CPU has a local buffer that stores PC value/event
  * pairs. We also log context switches when we notice them.
@@ -28,6 +29,25 @@
 #include "buffer_sync.h"
 #include "oprof.h"
 
+#define OP_BUFFER_FLAGS	0
+
+/*
+ * Read and write access is using spin locking. Thus, writing to the
+ * buffer by NMI handler (x86) could occur also during critical
+ * sections when reading the buffer. To avoid this, there are 2
+ * buffers for independent read and write access. Read access is in
+ * process context only, write access only in the NMI handler. If the
+ * read buffer runs empty, both buffers are swapped atomically. There
+ * is potentially a small window during swapping where the buffers are
+ * disabled and samples could be lost.
+ *
+ * Using 2 buffers is a little bit overhead, but the solution is clear
+ * and does not require changes in the ring buffer implementation. It
+ * can be changed to a single buffer solution when the ring buffer
+ * access is implemented as non-locking atomic code.
+ */
+static struct ring_buffer *op_ring_buffer_read;
+static struct ring_buffer *op_ring_buffer_write;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
@@ -35,19 +55,9 @@ static void wq_sync_buffer(struct work_struct *work);
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
 static int work_enabled;
 
-void free_cpu_buffers(void)
-{
-	int i;
-
-	for_each_possible_cpu(i) {
-		vfree(per_cpu(cpu_buffer, i).buffer);
-		per_cpu(cpu_buffer, i).buffer = NULL;
-	}
-}
-
 unsigned long oprofile_get_cpu_buffer_size(void)
 {
-	return fs_cpu_buffer_size;
+	return oprofile_cpu_buffer_size;
 }
 
 void oprofile_cpu_buffer_inc_smpl_lost(void)
@@ -58,26 +68,36 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)
 	cpu_buf->sample_lost_overflow++;
 }
 
+void free_cpu_buffers(void)
+{
+	if (op_ring_buffer_read)
+		ring_buffer_free(op_ring_buffer_read);
+	op_ring_buffer_read = NULL;
+	if (op_ring_buffer_write)
+		ring_buffer_free(op_ring_buffer_write);
+	op_ring_buffer_write = NULL;
+}
+
 int alloc_cpu_buffers(void)
 {
 	int i;
 
-	unsigned long buffer_size = fs_cpu_buffer_size;
+	unsigned long buffer_size = oprofile_cpu_buffer_size;
+
+	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
+	if (!op_ring_buffer_read)
+		goto fail;
+	op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
+	if (!op_ring_buffer_write)
+		goto fail;
 
 	for_each_possible_cpu(i) {
 		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
-		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
-			cpu_to_node(i));
-		if (!b->buffer)
-			goto fail;
-
 		b->last_task = NULL;
 		b->last_is_kernel = -1;
 		b->tracing = 0;
 		b->buffer_size = buffer_size;
-		b->tail_pos = 0;
-		b->head_pos = 0;
 		b->sample_received = 0;
 		b->sample_lost_overflow = 0;
 		b->backtrace_aborted = 0;
@@ -124,73 +144,156 @@ void end_cpu_work(void)
 	flush_scheduled_work();
 }
 
-/* Resets the cpu buffer to a sane state. */
-void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
+/*
+ * This function prepares the cpu buffer to write a sample.
+ *
+ * Struct op_entry is used during operations on the ring buffer while
+ * struct op_sample contains the data that is stored in the ring
+ * buffer. Struct entry can be uninitialized. The function reserves a
+ * data array that is specified by size. Use
+ * op_cpu_buffer_write_commit() after preparing the sample. In case of
+ * errors a null pointer is returned, otherwise the pointer to the
+ * sample.
+ *
+ */
+struct op_sample
+*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
 {
-	/* reset these to invalid values; the next sample
-	 * collected will populate the buffer with proper
-	 * values to initialize the buffer
-	 */
-	cpu_buf->last_is_kernel = -1;
-	cpu_buf->last_task = NULL;
+	entry->event = ring_buffer_lock_reserve
+		(op_ring_buffer_write, sizeof(struct op_sample) +
+		 size * sizeof(entry->sample->data[0]), &entry->irq_flags);
+	if (entry->event)
+		entry->sample = ring_buffer_event_data(entry->event);
+	else
+		entry->sample = NULL;
+
+	if (!entry->sample)
+		return NULL;
+
+	entry->size = size;
+	entry->data = entry->sample->data;
+
+	return entry->sample;
 }
 
-/* compute number of available slots in cpu_buffer queue */
-static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
+int op_cpu_buffer_write_commit(struct op_entry *entry)
 {
-	unsigned long head = b->head_pos;
-	unsigned long tail = b->tail_pos;
+	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
+					 entry->irq_flags);
+}
 
-	if (tail > head)
-		return (tail - head) - 1;
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
+{
+	struct ring_buffer_event *e;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		goto event;
+	if (ring_buffer_swap_cpu(op_ring_buffer_read,
+				 op_ring_buffer_write,
+				 cpu))
+		return NULL;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		goto event;
+	return NULL;
+
+event:
+	entry->event = e;
+	entry->sample = ring_buffer_event_data(e);
+	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
+		/ sizeof(entry->sample->data[0]);
+	entry->data = entry->sample->data;
+	return entry->sample;
+}
 
-	return tail + (b->buffer_size - head) - 1;
+unsigned long op_cpu_buffer_entries(int cpu)
+{
+	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
+		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
 }
 
-static void increment_head(struct oprofile_cpu_buffer *b)
+static int
+op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
+	    int is_kernel, struct task_struct *task)
 {
-	unsigned long new_head = b->head_pos + 1;
+	struct op_entry entry;
+	struct op_sample *sample;
+	unsigned long flags;
+	int size;
+
+	flags = 0;
 
-	/* Ensure anything written to the slot before we
-	 * increment is visible */
-	wmb();
+	if (backtrace)
+		flags |= TRACE_BEGIN;
+
+	/* notice a switch from user->kernel or vice versa */
+	is_kernel = !!is_kernel;
+	if (cpu_buf->last_is_kernel != is_kernel) {
+		cpu_buf->last_is_kernel = is_kernel;
+		flags |= KERNEL_CTX_SWITCH;
+		if (is_kernel)
+			flags |= IS_KERNEL;
+	}
+
+	/* notice a task switch */
+	if (cpu_buf->last_task != task) {
+		cpu_buf->last_task = task;
+		flags |= USER_CTX_SWITCH;
+	}
+
+	if (!flags)
+		/* nothing to do */
+		return 0;
 
-	if (new_head < b->buffer_size)
-		b->head_pos = new_head;
+	if (flags & USER_CTX_SWITCH)
+		size = 1;
 	else
-		b->head_pos = 0;
-}
+		size = 0;
 
-static inline void
-add_sample(struct oprofile_cpu_buffer *cpu_buf,
-	   unsigned long pc, unsigned long event)
-{
-	struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
-	entry->eip = pc;
-	entry->event = event;
-	increment_head(cpu_buf);
+	sample = op_cpu_buffer_write_reserve(&entry, size);
+	if (!sample)
+		return -ENOMEM;
+
+	sample->eip = ESCAPE_CODE;
+	sample->event = flags;
+
+	if (size)
+		op_cpu_buffer_add_data(&entry, (unsigned long)task);
+
+	op_cpu_buffer_write_commit(&entry);
+
+	return 0;
 }
 
-static inline void
-add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
+static inline int
+op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
+	      unsigned long pc, unsigned long event)
 {
-	add_sample(buffer, ESCAPE_CODE, value);
+	struct op_entry entry;
+	struct op_sample *sample;
+
+	sample = op_cpu_buffer_write_reserve(&entry, 0);
+	if (!sample)
+		return -ENOMEM;
+
+	sample->eip = pc;
+	sample->event = event;
+
+	return op_cpu_buffer_write_commit(&entry);
 }
 
-/* This must be safe from any context. It's safe writing here
- * because of the head/tail separation of the writer and reader
- * of the CPU buffer.
+/*
+ * This must be safe from any context.
  *
  * is_kernel is needed because on some architectures you cannot
  * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
-static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
-		      int is_kernel, unsigned long event)
+static int
+log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
+	   unsigned long backtrace, int is_kernel, unsigned long event)
 {
-	struct task_struct *task;
-
 	cpu_buf->sample_received++;
 
 	if (pc == ESCAPE_CODE) {
@@ -198,131 +301,115 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
 		return 0;
 	}
 
-	if (nr_available_slots(cpu_buf) < 3) {
-		cpu_buf->sample_lost_overflow++;
-		return 0;
-	}
-
-	is_kernel = !!is_kernel;
+	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
+		goto fail;
 
-	task = current;
+	if (op_add_sample(cpu_buf, pc, event))
+		goto fail;
 
-	/* notice a switch from user->kernel or vice versa */
-	if (cpu_buf->last_is_kernel != is_kernel) {
-		cpu_buf->last_is_kernel = is_kernel;
-		add_code(cpu_buf, is_kernel);
-	}
-
-	/* notice a task switch */
-	if (cpu_buf->last_task != task) {
-		cpu_buf->last_task = task;
-		add_code(cpu_buf, (unsigned long)task);
-	}
-
-	add_sample(cpu_buf, pc, event);
 	return 1;
+
+fail:
+	cpu_buf->sample_lost_overflow++;
+	return 0;
 }
 
-static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
+static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
-	if (nr_available_slots(cpu_buf) < 4) {
-		cpu_buf->sample_lost_overflow++;
-		return 0;
-	}
-
-	add_code(cpu_buf, CPU_TRACE_BEGIN);
 	cpu_buf->tracing = 1;
-	return 1;
 }
 
-static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
+static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
 	cpu_buf->tracing = 0;
 }
 
-void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
-			     unsigned long event, int is_kernel)
+static inline void
+__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
+			  unsigned long event, int is_kernel)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+	unsigned long backtrace = oprofile_backtrace_depth;
 
-	if (!backtrace_depth) {
-		log_sample(cpu_buf, pc, is_kernel, event);
+	/*
+	 * if log_sample() fail we can't backtrace since we lost the
+	 * source of this event
+	 */
+	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
+		/* failed */
 		return;
-	}
 
-	if (!oprofile_begin_trace(cpu_buf))
+	if (!backtrace)
 		return;
 
-	/* if log_sample() fail we can't backtrace since we lost the source
-	 * of this event */
-	if (log_sample(cpu_buf, pc, is_kernel, event))
-		oprofile_ops.backtrace(regs, backtrace_depth);
+	oprofile_begin_trace(cpu_buf);
+	oprofile_ops.backtrace(regs, backtrace);
 	oprofile_end_trace(cpu_buf);
 }
 
+void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
+			     unsigned long event, int is_kernel)
+{
+	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
+}
+
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 {
 	int is_kernel = !user_mode(regs);
 	unsigned long pc = profile_pc(regs);
 
-	oprofile_add_ext_sample(pc, regs, event, is_kernel);
+	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
 }
 
-#ifdef CONFIG_OPROFILE_IBS
-
-#define MAX_IBS_SAMPLE_SIZE 14
-
-void oprofile_add_ibs_sample(struct pt_regs *const regs,
-			     unsigned int *const ibs_sample, int ibs_code)
+/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
+void
+oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
+		       unsigned long pc, int code, int size)
 {
+	struct op_sample *sample;
 	int is_kernel = !user_mode(regs);
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
-	struct task_struct *task;
 
 	cpu_buf->sample_received++;
 
-	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
-		/* we can't backtrace since we lost the source of this event */
-		cpu_buf->sample_lost_overflow++;
-		return;
-	}
+	/* no backtraces for samples with data */
+	if (op_add_code(cpu_buf, 0, is_kernel, current))
+		goto fail;
 
-	/* notice a switch from user->kernel or vice versa */
-	if (cpu_buf->last_is_kernel != is_kernel) {
-		cpu_buf->last_is_kernel = is_kernel;
-		add_code(cpu_buf, is_kernel);
-	}
+	sample = op_cpu_buffer_write_reserve(entry, size + 2);
+	if (!sample)
+		goto fail;
+	sample->eip = ESCAPE_CODE;
+	sample->event = 0;		/* no flags */
 
-	/* notice a task switch */
-	if (!is_kernel) {
-		task = current;
-		if (cpu_buf->last_task != task) {
-			cpu_buf->last_task = task;
-			add_code(cpu_buf, (unsigned long)task);
-		}
-	}
+	op_cpu_buffer_add_data(entry, code);
+	op_cpu_buffer_add_data(entry, pc);
 
-	add_code(cpu_buf, ibs_code);
-	add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
-	add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
-	add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
+	return;
 
-	if (ibs_code == IBS_OP_BEGIN) {
-		add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
-		add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
-		add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
-	}
+fail:
+	cpu_buf->sample_lost_overflow++;
+}
 
-	if (backtrace_depth)
-		oprofile_ops.backtrace(regs, backtrace_depth);
+int oprofile_add_data(struct op_entry *entry, unsigned long val)
+{
+	return op_cpu_buffer_add_data(entry, val);
 }
 
-#endif
+int oprofile_write_commit(struct op_entry *entry)
+{
+	return op_cpu_buffer_write_commit(entry);
+}
 
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
-	log_sample(cpu_buf, pc, is_kernel, event);
+	log_sample(cpu_buf, pc, 0, is_kernel, event);
 }
 
 void oprofile_add_trace(unsigned long pc)
@@ -332,21 +419,21 @@ void oprofile_add_trace(unsigned long pc)
 	if (!cpu_buf->tracing)
 		return;
 
-	if (nr_available_slots(cpu_buf) < 1) {
-		cpu_buf->tracing = 0;
-		cpu_buf->sample_lost_overflow++;
-		return;
-	}
+	/*
+	 * broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
+	 */
+	if (pc == ESCAPE_CODE)
+		goto fail;
 
-	/* broken frame can give an eip with the same value as an escape code,
-	 * abort the trace if we get it */
-	if (pc == ESCAPE_CODE) {
-		cpu_buf->tracing = 0;
-		cpu_buf->backtrace_aborted++;
-		return;
-	}
+	if (op_add_sample(cpu_buf, pc, 0))
+		goto fail;
 
-	add_sample(cpu_buf, pc, 0);
+	return;
+fail:
+	cpu_buf->tracing = 0;
+	cpu_buf->backtrace_aborted++;
+	return;
 }
 
 /*
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index d3cc26264db5..63f81c44846a 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -1,10 +1,11 @@
 /**
  * @file cpu_buffer.h
 *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
+ * @author Robert Richter <robert.richter@amd.com>
 */
 
 #ifndef OPROFILE_CPU_BUFFER_H
@@ -15,6 +16,7 @@
 #include <linux/workqueue.h>
 #include <linux/cache.h>
 #include <linux/sched.h>
+#include <linux/ring_buffer.h>
 
 struct task_struct;
 
@@ -30,16 +32,16 @@ void end_cpu_work(void);
 struct op_sample {
 	unsigned long eip;
 	unsigned long event;
+	unsigned long data[0];
 };
 
+struct op_entry;
+
 struct oprofile_cpu_buffer {
-	volatile unsigned long head_pos;
-	volatile unsigned long tail_pos;
 	unsigned long buffer_size;
 	struct task_struct *last_task;
 	int last_is_kernel;
 	int tracing;
-	struct op_sample *buffer;
 	unsigned long sample_received;
 	unsigned long sample_lost_overflow;
 	unsigned long backtrace_aborted;
@@ -50,12 +52,62 @@ struct oprofile_cpu_buffer {
 
 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
-void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf);
+/*
+ * Resets the cpu buffer to a sane state.
+ *
+ * reset these to invalid values; the next sample collected will
+ * populate the buffer with proper values to initialize the buffer
+ */
+static inline void op_cpu_buffer_reset(int cpu)
+{
+	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
+
+	cpu_buf->last_is_kernel = -1;
+	cpu_buf->last_task = NULL;
+}
+
+struct op_sample
+*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
+int op_cpu_buffer_write_commit(struct op_entry *entry);
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
+unsigned long op_cpu_buffer_entries(int cpu);
+
+/* returns the remaining free size of data in the entry */
+static inline
+int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
+{
+	if (!entry->size)
+		return 0;
+	*entry->data = val;
+	entry->size--;
+	entry->data++;
+	return entry->size;
+}
+
+/* returns the size of data in the entry */
+static inline
+int op_cpu_buffer_get_size(struct op_entry *entry)
+{
+	return entry->size;
+}
+
+/* returns 0 if empty or the size of data including the current value */
+static inline
+int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
+{
+	int size = entry->size;
+	if (!size)
+		return 0;
+	*val = *entry->data;
+	entry->size--;
+	entry->data++;
+	return size;
+}
 
-/* transient events for the CPU buffer -> event buffer */
-#define CPU_IS_KERNEL 1
-#define CPU_TRACE_BEGIN 2
-#define IBS_FETCH_BEGIN 3
-#define IBS_OP_BEGIN 4
+/* extra data flags */
+#define KERNEL_CTX_SWITCH	(1UL << 0)
+#define IS_KERNEL		(1UL << 1)
+#define TRACE_BEGIN		(1UL << 2)
+#define USER_CTX_SWITCH		(1UL << 3)
 
 #endif /* OPROFILE_CPU_BUFFER_H */
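
The data accessors above mirror each other one for one: every op_cpu_buffer_add_data() on the NMI write side corresponds to one op_cpu_buffer_get_data() on the read side, with op_cpu_buffer_get_size() reporting what is left. A minimal write/read pairing as a sketch; the two-word code/pc payload follows oprofile_write_reserve() in cpu_buffer.c, and code/pc stand in for real values:

	/* writer, e.g. in NMI context */
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 2);
	if (sample) {
		sample->eip = ESCAPE_CODE;
		sample->event = 0;
		op_cpu_buffer_add_data(&entry, code);
		op_cpu_buffer_add_data(&entry, pc);
		op_cpu_buffer_write_commit(&entry);
	}

	/* reader, after op_cpu_buffer_read_entry() filled the entry */
	unsigned long code, pc;

	if (op_cpu_buffer_get_data(&entry, &code) &&
	    op_cpu_buffer_get_data(&entry, &pc)) {
		/* code/pc recovered in write order */
	}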
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 191a3202cecc..2b7ae366ceb1 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -73,8 +73,8 @@ int alloc_event_buffer(void)
 	unsigned long flags;
 
 	spin_lock_irqsave(&oprofilefs_lock, flags);
-	buffer_size = fs_buffer_size;
-	buffer_watershed = fs_buffer_watershed;
+	buffer_size = oprofile_buffer_size;
+	buffer_watershed = oprofile_buffer_watershed;
 	spin_unlock_irqrestore(&oprofilefs_lock, flags);
 
 	if (buffer_watershed >= buffer_size)
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index cd375907f26f..3cffce90f82a 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -23,7 +23,7 @@
 struct oprofile_operations oprofile_ops;
 
 unsigned long oprofile_started;
-unsigned long backtrace_depth;
+unsigned long oprofile_backtrace_depth;
 static unsigned long is_setup;
 static DEFINE_MUTEX(start_mutex);
 
@@ -172,7 +172,7 @@ int oprofile_set_backtrace(unsigned long val)
 		goto out;
 	}
 
-	backtrace_depth = val;
+	oprofile_backtrace_depth = val;
 
 out:
 	mutex_unlock(&start_mutex);
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 5df0c21a608f..c288d3c24b50 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -21,12 +21,12 @@ void oprofile_stop(void);
 
 struct oprofile_operations;
 
-extern unsigned long fs_buffer_size;
-extern unsigned long fs_cpu_buffer_size;
-extern unsigned long fs_buffer_watershed;
+extern unsigned long oprofile_buffer_size;
+extern unsigned long oprofile_cpu_buffer_size;
+extern unsigned long oprofile_buffer_watershed;
 extern struct oprofile_operations oprofile_ops;
 extern unsigned long oprofile_started;
-extern unsigned long backtrace_depth;
+extern unsigned long oprofile_backtrace_depth;
 
 struct super_block;
 struct dentry;
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index cc106d503ace..5d36ffc30dd5 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -14,13 +14,18 @@
 #include "oprofile_stats.h"
 #include "oprof.h"
 
-unsigned long fs_buffer_size = 131072;
-unsigned long fs_cpu_buffer_size = 8192;
-unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
+#define BUFFER_SIZE_DEFAULT		131072
+#define CPU_BUFFER_SIZE_DEFAULT		8192
+#define BUFFER_WATERSHED_DEFAULT	32768	/* FIXME: tune */
+
+unsigned long oprofile_buffer_size;
+unsigned long oprofile_cpu_buffer_size;
+unsigned long oprofile_buffer_watershed;
 
 static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
-	return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
+	return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count,
+					offset);
 }
 
 
@@ -120,12 +125,17 @@ static const struct file_operations dump_fops = {
 
 void oprofile_create_files(struct super_block *sb, struct dentry *root)
 {
+	/* reinitialize default values */
+	oprofile_buffer_size		= BUFFER_SIZE_DEFAULT;
+	oprofile_cpu_buffer_size	= CPU_BUFFER_SIZE_DEFAULT;
+	oprofile_buffer_watershed	= BUFFER_WATERSHED_DEFAULT;
+
 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
-	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
-	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
-	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
+	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
 	oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
 	oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
 	oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 5231861f357d..1d9518bc4c58 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -86,8 +86,7 @@ int oprofile_arch_init(struct oprofile_operations * ops);
 void oprofile_arch_exit(void);
 
 /**
- * Add a sample. This may be called from any context. Pass
- * smp_processor_id() as cpu.
+ * Add a sample. This may be called from any context.
 */
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
 
@@ -165,4 +164,22 @@ void oprofile_put_buff(unsigned long *buf, unsigned int start,
 unsigned long oprofile_get_cpu_buffer_size(void);
 void oprofile_cpu_buffer_inc_smpl_lost(void);
 
+/* cpu buffer functions */
+
+struct op_sample;
+
+struct op_entry {
+	struct ring_buffer_event *event;
+	struct op_sample *sample;
+	unsigned long irq_flags;
+	unsigned long size;
+	unsigned long *data;
+};
+
+void oprofile_write_reserve(struct op_entry *entry,
+			    struct pt_regs * const regs,
+			    unsigned long pc, int code, int size);
+int oprofile_add_data(struct op_entry *entry, unsigned long val);
+int oprofile_write_commit(struct op_entry *entry);
+
 #endif /* OPROFILE_H */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index e097c2e6b6dc..de9d8c12e5ec 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -116,6 +116,8 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
 unsigned long ring_buffer_entries(struct ring_buffer *buffer);
 unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
+unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 
 u64 ring_buffer_time_stamp(int cpu);
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 668bbb5ef2bd..d42b882dfe4b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -31,6 +31,7 @@ void tracing_on(void)
31{ 31{
32 ring_buffers_off = 0; 32 ring_buffers_off = 0;
33} 33}
34EXPORT_SYMBOL_GPL(tracing_on);
34 35
35/** 36/**
36 * tracing_off - turn off all tracing buffers 37 * tracing_off - turn off all tracing buffers
@@ -44,6 +45,7 @@ void tracing_off(void)
44{ 45{
45 ring_buffers_off = 1; 46 ring_buffers_off = 1;
46} 47}
48EXPORT_SYMBOL_GPL(tracing_off);
47 49
48/* Up this if you want to test the TIME_EXTENTS and normalization */ 50/* Up this if you want to test the TIME_EXTENTS and normalization */
49#define DEBUG_SHIFT 0 51#define DEBUG_SHIFT 0
@@ -60,12 +62,14 @@ u64 ring_buffer_time_stamp(int cpu)
60 62
61 return time; 63 return time;
62} 64}
65EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
63 66
64void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) 67void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
65{ 68{
66 /* Just a stub to test the normalize function and deltas */ 69 /* Just a stub to test the normalize function and deltas */
67 *ts >>= DEBUG_SHIFT; 70 *ts >>= DEBUG_SHIFT;
68} 71}
72EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
69 73
70#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) 74#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
71#define RB_ALIGNMENT_SHIFT 2 75#define RB_ALIGNMENT_SHIFT 2
@@ -113,8 +117,15 @@ rb_event_length(struct ring_buffer_event *event)
113 */ 117 */
114unsigned ring_buffer_event_length(struct ring_buffer_event *event) 118unsigned ring_buffer_event_length(struct ring_buffer_event *event)
115{ 119{
116 return rb_event_length(event); 120 unsigned length = rb_event_length(event);
121 if (event->type != RINGBUF_TYPE_DATA)
122 return length;
123 length -= RB_EVNT_HDR_SIZE;
124 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
125 length -= sizeof(event->array[0]);
126 return length;
117} 127}
128EXPORT_SYMBOL_GPL(ring_buffer_event_length);
118 129
119/* inline for ring buffer fast paths */ 130/* inline for ring buffer fast paths */
120static inline void * 131static inline void *
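The rewritten ring_buffer_event_length() stops reporting the raw on-ring size (the event header plus payload, plus the extra length word that events larger than RB_MAX_SMALL_DATA carry) and instead returns just the data bytes, matching what ring_buffer_event_data() points at. A reader-side sketch of why that pairing matters (illustrative helper, not from this patch):

    #include <linux/string.h>
    #include <linux/ring_buffer.h>

    /* Copy one event's payload out of the ring; dst must have room.
     * Before this fix the reported length included the event header,
     * so copying that many bytes from the data pointer read past the
     * payload into the next event. */
    static void copy_event_payload(struct ring_buffer_event *event, void *dst)
    {
            unsigned length = ring_buffer_event_length(event);

            memcpy(dst, ring_buffer_event_data(event), length);
    }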
@@ -136,6 +147,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
136{ 147{
137 return rb_event_data(event); 148 return rb_event_data(event);
138} 149}
150EXPORT_SYMBOL_GPL(ring_buffer_event_data);
139 151
140#define for_each_buffer_cpu(buffer, cpu) \ 152#define for_each_buffer_cpu(buffer, cpu) \
141 for_each_cpu_mask(cpu, buffer->cpumask) 153 for_each_cpu_mask(cpu, buffer->cpumask)
@@ -381,7 +393,7 @@ extern int ring_buffer_page_too_big(void);
381 393
382/** 394/**
383 * ring_buffer_alloc - allocate a new ring_buffer 395 * ring_buffer_alloc - allocate a new ring_buffer
384 * @size: the size in bytes that is needed. 396 * @size: the size in bytes per cpu that is needed.
385 * @flags: attributes to set for the ring buffer. 397 * @flags: attributes to set for the ring buffer.
386 * 398 *
387 * Currently the only flag that is available is the RB_FL_OVERWRITE 399 * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -444,6 +456,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
444 kfree(buffer); 456 kfree(buffer);
445 return NULL; 457 return NULL;
446} 458}
459EXPORT_SYMBOL_GPL(ring_buffer_alloc);
447 460
448/** 461/**
449 * ring_buffer_free - free a ring buffer. 462 * ring_buffer_free - free a ring buffer.
@@ -459,6 +472,7 @@ ring_buffer_free(struct ring_buffer *buffer)
459 472
460 kfree(buffer); 473 kfree(buffer);
461} 474}
475EXPORT_SYMBOL_GPL(ring_buffer_free);
462 476
463static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 477static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
464 478
@@ -620,6 +634,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
620 mutex_unlock(&buffer->mutex); 634 mutex_unlock(&buffer->mutex);
621 return -ENOMEM; 635 return -ENOMEM;
622} 636}
637EXPORT_SYMBOL_GPL(ring_buffer_resize);
623 638
624static inline int rb_null_event(struct ring_buffer_event *event) 639static inline int rb_null_event(struct ring_buffer_event *event)
625{ 640{
@@ -1220,6 +1235,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
1220 preempt_enable_notrace(); 1235 preempt_enable_notrace();
1221 return NULL; 1236 return NULL;
1222} 1237}
1238EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1223 1239
1224static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, 1240static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1225 struct ring_buffer_event *event) 1241 struct ring_buffer_event *event)
@@ -1269,6 +1285,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1269 1285
1270 return 0; 1286 return 0;
1271} 1287}
1288EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
1272 1289
1273/** 1290/**
1274 * ring_buffer_write - write data to the buffer without reserving 1291 * ring_buffer_write - write data to the buffer without reserving
@@ -1334,6 +1351,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
1334 1351
1335 return ret; 1352 return ret;
1336} 1353}
1354EXPORT_SYMBOL_GPL(ring_buffer_write);
1337 1355
1338static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 1356static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1339{ 1357{
@@ -1360,6 +1378,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
1360{ 1378{
1361 atomic_inc(&buffer->record_disabled); 1379 atomic_inc(&buffer->record_disabled);
1362} 1380}
1381EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1363 1382
1364/** 1383/**
1365 * ring_buffer_record_enable - enable writes to the buffer 1384 * ring_buffer_record_enable - enable writes to the buffer
@@ -1372,6 +1391,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
1372{ 1391{
1373 atomic_dec(&buffer->record_disabled); 1392 atomic_dec(&buffer->record_disabled);
1374} 1393}
1394EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
1375 1395
1376/** 1396/**
1377 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 1397 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1393,6 +1413,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1393 cpu_buffer = buffer->buffers[cpu]; 1413 cpu_buffer = buffer->buffers[cpu];
1394 atomic_inc(&cpu_buffer->record_disabled); 1414 atomic_inc(&cpu_buffer->record_disabled);
1395} 1415}
1416EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1396 1417
1397/** 1418/**
1398 * ring_buffer_record_enable_cpu - enable writes to the buffer 1419 * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1412,6 +1433,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1412 cpu_buffer = buffer->buffers[cpu]; 1433 cpu_buffer = buffer->buffers[cpu];
1413 atomic_dec(&cpu_buffer->record_disabled); 1434 atomic_dec(&cpu_buffer->record_disabled);
1414} 1435}
1436EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1415 1437
1416/** 1438/**
1417 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 1439 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1428,6 +1450,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1428 cpu_buffer = buffer->buffers[cpu]; 1450 cpu_buffer = buffer->buffers[cpu];
1429 return cpu_buffer->entries; 1451 return cpu_buffer->entries;
1430} 1452}
1453EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1431 1454
1432/** 1455/**
1433 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer 1456 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1444,6 +1467,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1444 cpu_buffer = buffer->buffers[cpu]; 1467 cpu_buffer = buffer->buffers[cpu];
1445 return cpu_buffer->overrun; 1468 return cpu_buffer->overrun;
1446} 1469}
1470EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1447 1471
1448/** 1472/**
1449 * ring_buffer_entries - get the number of entries in a buffer 1473 * ring_buffer_entries - get the number of entries in a buffer
@@ -1466,6 +1490,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1466 1490
1467 return entries; 1491 return entries;
1468} 1492}
1493EXPORT_SYMBOL_GPL(ring_buffer_entries);
1469 1494
1470/** 1495/**
1471 * ring_buffer_overruns - get the number of overruns in a buffer 1496 * ring_buffer_overruns - get the number of overruns in a buffer
@@ -1488,6 +1513,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1488 1513
1489 return overruns; 1514 return overruns;
1490} 1515}
1516EXPORT_SYMBOL_GPL(ring_buffer_overruns);
1491 1517
1492/** 1518/**
1493 * ring_buffer_iter_reset - reset an iterator 1519 * ring_buffer_iter_reset - reset an iterator
@@ -1513,6 +1539,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1513 else 1539 else
1514 iter->read_stamp = iter->head_page->time_stamp; 1540 iter->read_stamp = iter->head_page->time_stamp;
1515} 1541}
1542EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
1516 1543
1517/** 1544/**
1518 * ring_buffer_iter_empty - check if an iterator has no more to read 1545 * ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1527,6 +1554,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1527 return iter->head_page == cpu_buffer->commit_page && 1554 return iter->head_page == cpu_buffer->commit_page &&
1528 iter->head == rb_commit_index(cpu_buffer); 1555 iter->head == rb_commit_index(cpu_buffer);
1529} 1556}
1557EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
1530 1558
1531static void 1559static void
1532rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 1560rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1797,6 +1825,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1797 1825
1798 return NULL; 1826 return NULL;
1799} 1827}
1828EXPORT_SYMBOL_GPL(ring_buffer_peek);
1800 1829
1801/** 1830/**
1802 * ring_buffer_iter_peek - peek at the next event to be read 1831 * ring_buffer_iter_peek - peek at the next event to be read
@@ -1867,6 +1896,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1867 1896
1868 return NULL; 1897 return NULL;
1869} 1898}
1899EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
1870 1900
1871/** 1901/**
1872 * ring_buffer_consume - return an event and consume it 1902 * ring_buffer_consume - return an event and consume it
@@ -1894,6 +1924,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1894 1924
1895 return event; 1925 return event;
1896} 1926}
1927EXPORT_SYMBOL_GPL(ring_buffer_consume);
1897 1928
1898/** 1929/**
1899 * ring_buffer_read_start - start a non-consuming read of the buffer 1930 * ring_buffer_read_start - start a non-consuming read of the buffer
@@ -1934,6 +1965,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1934 1965
1935 return iter; 1966 return iter;
1936} 1967}
1968EXPORT_SYMBOL_GPL(ring_buffer_read_start);
1937 1969
1938/** 1970/**
1939 * ring_buffer_read_finish - finish reading the iterator of the buffer 1971 * ring_buffer_read_finish - finish reading the iterator of the buffer
@@ -1950,6 +1982,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
1950 atomic_dec(&cpu_buffer->record_disabled); 1982 atomic_dec(&cpu_buffer->record_disabled);
1951 kfree(iter); 1983 kfree(iter);
1952} 1984}
1985EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
1953 1986
1954/** 1987/**
1955 * ring_buffer_read - read the next item in the ring buffer by the iterator 1988 * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -1971,6 +2004,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1971 2004
1972 return event; 2005 return event;
1973} 2006}
2007EXPORT_SYMBOL_GPL(ring_buffer_read);
1974 2008
1975/** 2009/**
1976 * ring_buffer_size - return the size of the ring buffer (in bytes) 2010 * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -1980,6 +2014,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
1980{ 2014{
1981 return BUF_PAGE_SIZE * buffer->pages; 2015 return BUF_PAGE_SIZE * buffer->pages;
1982} 2016}
2017EXPORT_SYMBOL_GPL(ring_buffer_size);
1983 2018
1984static void 2019static void
1985rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 2020rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2022,6 +2057,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2022 2057
2023 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 2058 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
2024} 2059}
2060EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2025 2061
2026/** 2062/**
2027 * ring_buffer_reset - reset a ring buffer 2063 * ring_buffer_reset - reset a ring buffer
@@ -2034,6 +2070,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
2034 for_each_buffer_cpu(buffer, cpu) 2070 for_each_buffer_cpu(buffer, cpu)
2035 ring_buffer_reset_cpu(buffer, cpu); 2071 ring_buffer_reset_cpu(buffer, cpu);
2036} 2072}
2073EXPORT_SYMBOL_GPL(ring_buffer_reset);
2037 2074
2038/** 2075/**
2039 * ring_buffer_empty - is the ring buffer empty? 2076 * ring_buffer_empty - is the ring buffer empty?
@@ -2052,6 +2089,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
2052 } 2089 }
2053 return 1; 2090 return 1;
2054} 2091}
2092EXPORT_SYMBOL_GPL(ring_buffer_empty);
2055 2093
2056/** 2094/**
2057 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 2095 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2068,6 +2106,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2068 cpu_buffer = buffer->buffers[cpu]; 2106 cpu_buffer = buffer->buffers[cpu];
2069 return rb_per_cpu_empty(cpu_buffer); 2107 return rb_per_cpu_empty(cpu_buffer);
2070} 2108}
2109EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2071 2110
2072/** 2111/**
2073 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 2112 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2117,6 +2156,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2117 2156
2118 return 0; 2157 return 0;
2119} 2158}
2159EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
2120 2160
2121static ssize_t 2161static ssize_t
2122rb_simple_read(struct file *filp, char __user *ubuf, 2162rb_simple_read(struct file *filp, char __user *ubuf,
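The EXPORT_SYMBOL_GPL() annotations added throughout this file are what make the ring buffer reachable from a module such as oprofile in the first place. A minimal sketch of a GPL module driving the exported alloc/reserve/commit/free cycle, with signatures as they appear in this tree (reserve and commit still carry an irq-flags word) and an arbitrary 32 KiB size:

    #include <linux/module.h>
    #include <linux/ring_buffer.h>

    static struct ring_buffer *demo_rb;

    static int __init rb_demo_init(void)
    {
            struct ring_buffer_event *event;
            unsigned long flags;
            int *payload;

            /* note: the size passed here is now documented as per cpu */
            demo_rb = ring_buffer_alloc(32 * 1024, RB_FL_OVERWRITE);
            if (!demo_rb)
                    return -ENOMEM;

            event = ring_buffer_lock_reserve(demo_rb, sizeof(*payload), &flags);
            if (event) {
                    payload = ring_buffer_event_data(event);
                    *payload = 42;
                    ring_buffer_unlock_commit(demo_rb, event, flags);
            }
            return 0;
    }

    static void __exit rb_demo_exit(void)
    {
            ring_buffer_free(demo_rb);
    }

    module_init(rb_demo_init);
    module_exit(rb_demo_exit);
    MODULE_LICENSE("GPL"); /* _GPL exports require a GPL-compatible module */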
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d86e3252f300..a96b335fe75c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -914,7 +914,7 @@ enum trace_file_type {
914 TRACE_FILE_LAT_FMT = 1, 914 TRACE_FILE_LAT_FMT = 1,
915}; 915};
916 916
917static void trace_iterator_increment(struct trace_iterator *iter, int cpu) 917static void trace_iterator_increment(struct trace_iterator *iter)
918{ 918{
919 /* Don't allow ftrace to trace into the ring buffers */ 919 /* Don't allow ftrace to trace into the ring buffers */
920 ftrace_disable_cpu(); 920 ftrace_disable_cpu();
@@ -993,7 +993,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
993 iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); 993 iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
994 994
995 if (iter->ent) 995 if (iter->ent)
996 trace_iterator_increment(iter, iter->cpu); 996 trace_iterator_increment(iter);
997 997
998 return iter->ent ? iter : NULL; 998 return iter->ent ? iter : NULL;
999} 999}