-rw-r--r--arch/Kconfig14
-rw-r--r--arch/x86/kernel/apic_32.c4
-rw-r--r--arch/x86/kernel/apic_64.c4
-rw-r--r--arch/x86/oprofile/Makefile2
-rw-r--r--arch/x86/oprofile/nmi_int.c27
-rw-r--r--arch/x86/oprofile/op_model_amd.c543
-rw-r--r--arch/x86/oprofile/op_model_athlon.c190
-rw-r--r--arch/x86/oprofile/op_x86_model.h4
-rw-r--r--drivers/oprofile/buffer_sync.c209
-rw-r--r--drivers/oprofile/cpu_buffer.c74
-rw-r--r--drivers/oprofile/cpu_buffer.h2
-rw-r--r--include/linux/oprofile.h2
12 files changed, 809 insertions, 266 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 364c6dadde0a..0267babe5eb9 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -13,6 +13,20 @@ config OPROFILE
13 13
14 If unsure, say N. 14 If unsure, say N.
15 15
16config OPROFILE_IBS
17 bool "OProfile AMD IBS support (EXPERIMENTAL)"
18 default n
19 depends on OPROFILE && SMP && X86
20 help
21 Instruction-Based Sampling (IBS) is a new profiling
22 technique that provides rich, precise program performance
23 information. IBS is introduced by AMD Family10h processors
24 (AMD Opteron Quad-Core processor “Barcelona”) to overcome
25 the limitations of conventional performance counter
26 sampling.
27
28 If unsure, say N.
29
16config HAVE_OPROFILE 30config HAVE_OPROFILE
17 def_bool n 31 def_bool n
18 32
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index a91c57cb666a..21c831d96af3 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -295,6 +295,9 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
295 * 295 *
296 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and 296 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
297 * MCE interrupts are supported. Thus MCE offset must be set to 0. 297 * MCE interrupts are supported. Thus MCE offset must be set to 0.
298 *
299 * If mask=1, the LVT entry does not generate interrupts while mask=0
300 * enables the vector. See also the BKDGs.
298 */ 301 */
299 302
300#define APIC_EILVT_LVTOFF_MCE 0 303#define APIC_EILVT_LVTOFF_MCE 0
@@ -319,6 +322,7 @@ u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
319 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); 322 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
320 return APIC_EILVT_LVTOFF_IBS; 323 return APIC_EILVT_LVTOFF_IBS;
321} 324}
325EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
322 326
323/* 327/*
324 * Program the next event, relative to now 328 * Program the next event, relative to now
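The helper exported here, setup_APIC_eilvt_ibs(), is what the new AMD model code later in this patch drives on every CPU. A minimal sketch of that usage, assuming the prototype and the APIC_EILVT_MSG_* constants are in scope the same way op_model_amd.c gets them through its includes; per the comment above, mask = 0 arms the LVT entry and mask = 1 silences it again:

    #include <asm/nmi.h>    /* assumed to bring in setup_APIC_eilvt_ibs() and APIC_EILVT_MSG_*,
                               as op_model_amd.c below relies on */

    /* Arm the IBS extended-LVT entry on this CPU as an NMI source. */
    static void ibs_lvt_arm(void *unused)
    {
            setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);   /* mask = 0: enabled */
    }

    /* Mask the entry so it stops generating interrupts. */
    static void ibs_lvt_disarm(void *unused)
    {
            setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);   /* mask = 1: silenced */
    }

These are the same calls op_amd_init()/op_amd_exit() issue via on_each_cpu() in op_model_amd.c further down.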
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 53898b65a6ae..94ddb69ae15e 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -307,6 +307,9 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
307 * 307 *
308 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and 308 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
309 * MCE interrupts are supported. Thus MCE offset must be set to 0. 309 * MCE interrupts are supported. Thus MCE offset must be set to 0.
310 *
311 * If mask=1, the LVT entry does not generate interrupts while mask=0
312 * enables the vector. See also the BKDGs.
310 */ 313 */
311 314
312#define APIC_EILVT_LVTOFF_MCE 0 315#define APIC_EILVT_LVTOFF_MCE 0
@@ -331,6 +334,7 @@ u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
331 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); 334 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
332 return APIC_EILVT_LVTOFF_IBS; 335 return APIC_EILVT_LVTOFF_IBS;
333} 336}
337EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
334 338
335/* 339/*
336 * Program the next event, relative to now 340 * Program the next event, relative to now
diff --git a/arch/x86/oprofile/Makefile b/arch/x86/oprofile/Makefile
index 30f3eb366667..446902b2a6b6 100644
--- a/arch/x86/oprofile/Makefile
+++ b/arch/x86/oprofile/Makefile
@@ -7,6 +7,6 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
7 timer_int.o ) 7 timer_int.o )
8 8
9oprofile-y := $(DRIVER_OBJS) init.o backtrace.o 9oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
10oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o \ 10oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \
11 op_model_ppro.o op_model_p4.o 11 op_model_ppro.o op_model_p4.o
12oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o 12oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 8a5f1614a3d5..57f6c9088081 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -1,10 +1,11 @@
1/** 1/**
2 * @file nmi_int.c 2 * @file nmi_int.c
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002-2008 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 * @author Robert Richter <robert.richter@amd.com>
8 */ 9 */
9 10
10#include <linux/init.h> 11#include <linux/init.h>
@@ -439,6 +440,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
439 __u8 vendor = boot_cpu_data.x86_vendor; 440 __u8 vendor = boot_cpu_data.x86_vendor;
440 __u8 family = boot_cpu_data.x86; 441 __u8 family = boot_cpu_data.x86;
441 char *cpu_type; 442 char *cpu_type;
443 int ret = 0;
442 444
443 if (!cpu_has_apic) 445 if (!cpu_has_apic)
444 return -ENODEV; 446 return -ENODEV;
@@ -451,19 +453,23 @@ int __init op_nmi_init(struct oprofile_operations *ops)
451 default: 453 default:
452 return -ENODEV; 454 return -ENODEV;
453 case 6: 455 case 6:
454 model = &op_athlon_spec; 456 model = &op_amd_spec;
455 cpu_type = "i386/athlon"; 457 cpu_type = "i386/athlon";
456 break; 458 break;
457 case 0xf: 459 case 0xf:
458 model = &op_athlon_spec; 460 model = &op_amd_spec;
459 /* Actually it could be i386/hammer too, but give 461 /* Actually it could be i386/hammer too, but give
460 user space an consistent name. */ 462 user space an consistent name. */
461 cpu_type = "x86-64/hammer"; 463 cpu_type = "x86-64/hammer";
462 break; 464 break;
463 case 0x10: 465 case 0x10:
464 model = &op_athlon_spec; 466 model = &op_amd_spec;
465 cpu_type = "x86-64/family10"; 467 cpu_type = "x86-64/family10";
466 break; 468 break;
469 case 0x11:
470 model = &op_amd_spec;
471 cpu_type = "x86-64/family11h";
472 break;
467 } 473 }
468 break; 474 break;
469 475
@@ -490,17 +496,24 @@ int __init op_nmi_init(struct oprofile_operations *ops)
490 return -ENODEV; 496 return -ENODEV;
491 } 497 }
492 498
493 init_sysfs();
494#ifdef CONFIG_SMP 499#ifdef CONFIG_SMP
495 register_cpu_notifier(&oprofile_cpu_nb); 500 register_cpu_notifier(&oprofile_cpu_nb);
496#endif 501#endif
497 using_nmi = 1; 502 /* default values, can be overwritten by model */
498 ops->create_files = nmi_create_files; 503 ops->create_files = nmi_create_files;
499 ops->setup = nmi_setup; 504 ops->setup = nmi_setup;
500 ops->shutdown = nmi_shutdown; 505 ops->shutdown = nmi_shutdown;
501 ops->start = nmi_start; 506 ops->start = nmi_start;
502 ops->stop = nmi_stop; 507 ops->stop = nmi_stop;
503 ops->cpu_type = cpu_type; 508 ops->cpu_type = cpu_type;
509
510 if (model->init)
511 ret = model->init(ops);
512 if (ret)
513 return ret;
514
515 init_sysfs();
516 using_nmi = 1;
504 printk(KERN_INFO "oprofile: using NMI interrupt.\n"); 517 printk(KERN_INFO "oprofile: using NMI interrupt.\n");
505 return 0; 518 return 0;
506} 519}
@@ -513,4 +526,6 @@ void op_nmi_exit(void)
513 unregister_cpu_notifier(&oprofile_cpu_nb); 526 unregister_cpu_notifier(&oprofile_cpu_nb);
514#endif 527#endif
515 } 528 }
529 if (model->exit)
530 model->exit();
516} 531}
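With this change op_nmi_init() installs the generic defaults into oprofile_operations first and only afterwards calls the new, optional model->init(ops) hook, so a model may override individual entries (op_amd_init() in the new op_model_amd.c replaces ->create_files to add its IBS control files); op_nmi_exit() calls the matching model->exit(). A sketch of a hypothetical model wiring up the two hooks (the op_dummy_* names are invented for illustration, not part of the patch):

    #include <linux/oprofile.h>
    #include "op_x86_model.h"

    /* Runs after the default ops are filled in; may override entries.
     * Returning non-zero makes op_nmi_init() fail with that value. */
    static int op_dummy_init(struct oprofile_operations *ops)
    {
            /* e.g. wrap or replace ops->create_files here */
            return 0;
    }

    /* Undo whatever init() set up; invoked from op_nmi_exit(). */
    static void op_dummy_exit(void)
    {
    }

    struct op_x86_model_spec const op_dummy_spec = {
            .init          = op_dummy_init,   /* both hooks are optional */
            .exit          = op_dummy_exit,
            .num_counters  = 4,
            .num_controls  = 4,
            /* .fill_in_addresses, .setup_ctrs, .check_ctrs, .start, .stop,
             * .shutdown as before */
    };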
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
new file mode 100644
index 000000000000..d9faf607b3a6
--- /dev/null
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -0,0 +1,543 @@
1/*
2 * @file op_model_amd.c
3 * athlon / K7 / K8 / Family 10h model-specific MSR operations
4 *
5 * @remark Copyright 2002-2008 OProfile authors
6 * @remark Read the file COPYING
7 *
8 * @author John Levon
9 * @author Philippe Elie
10 * @author Graydon Hoare
11 * @author Robert Richter <robert.richter@amd.com>
12 * @author Barry Kasindorf
13*/
14
15#include <linux/oprofile.h>
16#include <linux/device.h>
17#include <linux/pci.h>
18
19#include <asm/ptrace.h>
20#include <asm/msr.h>
21#include <asm/nmi.h>
22
23#include "op_x86_model.h"
24#include "op_counter.h"
25
26#define NUM_COUNTERS 4
27#define NUM_CONTROLS 4
28
29#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
30#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
31#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0)
32#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
33
34#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
35#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
36#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
37#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
38#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
39#define CTRL_CLEAR_LO(x) (x &= (1<<21))
40#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
41#define CTRL_SET_ENABLE(val) (val |= 1<<20)
42#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
43#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
44#define CTRL_SET_UM(val, m) (val |= (m << 8))
45#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
46#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
47#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
48#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
49
50static unsigned long reset_value[NUM_COUNTERS];
51
52#ifdef CONFIG_OPROFILE_IBS
53
54/* IbsFetchCtl bits/masks */
55#define IBS_FETCH_HIGH_VALID_BIT (1UL << 17) /* bit 49 */
56#define IBS_FETCH_HIGH_ENABLE (1UL << 16) /* bit 48 */
57#define IBS_FETCH_LOW_MAX_CNT_MASK 0x0000FFFFUL /* MaxCnt mask */
58
59/*IbsOpCtl bits */
60#define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */
61#define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */
62
63/* Codes used in cpu_buffer.c */
 64/* This produces duplicate code, needs to be fixed */
65#define IBS_FETCH_BEGIN 3
66#define IBS_OP_BEGIN 4
67
68/* The function interface needs to be fixed, something like add
69 data. Should then be added to linux/oprofile.h. */
70extern void oprofile_add_ibs_sample(struct pt_regs *const regs,
71 unsigned int * const ibs_sample, u8 code);
72
73struct ibs_fetch_sample {
74 /* MSRC001_1031 IBS Fetch Linear Address Register */
75 unsigned int ibs_fetch_lin_addr_low;
76 unsigned int ibs_fetch_lin_addr_high;
77 /* MSRC001_1030 IBS Fetch Control Register */
78 unsigned int ibs_fetch_ctl_low;
79 unsigned int ibs_fetch_ctl_high;
80 /* MSRC001_1032 IBS Fetch Physical Address Register */
81 unsigned int ibs_fetch_phys_addr_low;
82 unsigned int ibs_fetch_phys_addr_high;
83};
84
85struct ibs_op_sample {
86 /* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */
87 unsigned int ibs_op_rip_low;
88 unsigned int ibs_op_rip_high;
89 /* MSRC001_1035 IBS Op Data Register */
90 unsigned int ibs_op_data1_low;
91 unsigned int ibs_op_data1_high;
92 /* MSRC001_1036 IBS Op Data 2 Register */
93 unsigned int ibs_op_data2_low;
94 unsigned int ibs_op_data2_high;
95 /* MSRC001_1037 IBS Op Data 3 Register */
96 unsigned int ibs_op_data3_low;
97 unsigned int ibs_op_data3_high;
98 /* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */
99 unsigned int ibs_dc_linear_low;
100 unsigned int ibs_dc_linear_high;
101 /* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */
102 unsigned int ibs_dc_phys_low;
103 unsigned int ibs_dc_phys_high;
104};
105
106/*
107 * uninitialize the APIC for the IBS interrupts if needed on AMD Family10h+
108*/
109static void clear_ibs_nmi(void);
110
111static int ibs_allowed; /* AMD Family10h and later */
112
113struct op_ibs_config {
114 unsigned long op_enabled;
115 unsigned long fetch_enabled;
116 unsigned long max_cnt_fetch;
117 unsigned long max_cnt_op;
118 unsigned long rand_en;
119 unsigned long dispatched_ops;
120};
121
122static struct op_ibs_config ibs_config;
123
124#endif
125
126/* functions for op_amd_spec */
127
128static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
129{
130 int i;
131
132 for (i = 0; i < NUM_COUNTERS; i++) {
133 if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
134 msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
135 else
136 msrs->counters[i].addr = 0;
137 }
138
139 for (i = 0; i < NUM_CONTROLS; i++) {
140 if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
141 msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
142 else
143 msrs->controls[i].addr = 0;
144 }
145}
146
147
148static void op_amd_setup_ctrs(struct op_msrs const * const msrs)
149{
150 unsigned int low, high;
151 int i;
152
153 /* clear all counters */
154 for (i = 0 ; i < NUM_CONTROLS; ++i) {
155 if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
156 continue;
157 CTRL_READ(low, high, msrs, i);
158 CTRL_CLEAR_LO(low);
159 CTRL_CLEAR_HI(high);
160 CTRL_WRITE(low, high, msrs, i);
161 }
162
163 /* avoid a false detection of ctr overflows in NMI handler */
164 for (i = 0; i < NUM_COUNTERS; ++i) {
165 if (unlikely(!CTR_IS_RESERVED(msrs, i)))
166 continue;
167 CTR_WRITE(1, msrs, i);
168 }
169
170 /* enable active counters */
171 for (i = 0; i < NUM_COUNTERS; ++i) {
172 if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
173 reset_value[i] = counter_config[i].count;
174
175 CTR_WRITE(counter_config[i].count, msrs, i);
176
177 CTRL_READ(low, high, msrs, i);
178 CTRL_CLEAR_LO(low);
179 CTRL_CLEAR_HI(high);
180 CTRL_SET_ENABLE(low);
181 CTRL_SET_USR(low, counter_config[i].user);
182 CTRL_SET_KERN(low, counter_config[i].kernel);
183 CTRL_SET_UM(low, counter_config[i].unit_mask);
184 CTRL_SET_EVENT_LOW(low, counter_config[i].event);
185 CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
186 CTRL_SET_HOST_ONLY(high, 0);
187 CTRL_SET_GUEST_ONLY(high, 0);
188
189 CTRL_WRITE(low, high, msrs, i);
190 } else {
191 reset_value[i] = 0;
192 }
193 }
194}
195
196#ifdef CONFIG_OPROFILE_IBS
197
198static inline int
199op_amd_handle_ibs(struct pt_regs * const regs,
200 struct op_msrs const * const msrs)
201{
202 unsigned int low, high;
203 struct ibs_fetch_sample ibs_fetch;
204 struct ibs_op_sample ibs_op;
205
206 if (!ibs_allowed)
207 return 1;
208
209 if (ibs_config.fetch_enabled) {
210 rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
211 if (high & IBS_FETCH_HIGH_VALID_BIT) {
212 ibs_fetch.ibs_fetch_ctl_high = high;
213 ibs_fetch.ibs_fetch_ctl_low = low;
214 rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high);
215 ibs_fetch.ibs_fetch_lin_addr_high = high;
216 ibs_fetch.ibs_fetch_lin_addr_low = low;
217 rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high);
218 ibs_fetch.ibs_fetch_phys_addr_high = high;
219 ibs_fetch.ibs_fetch_phys_addr_low = low;
220
221 oprofile_add_ibs_sample(regs,
222 (unsigned int *)&ibs_fetch,
223 IBS_FETCH_BEGIN);
224
225 /*reenable the IRQ */
226 rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
227 high &= ~IBS_FETCH_HIGH_VALID_BIT;
228 high |= IBS_FETCH_HIGH_ENABLE;
229 low &= IBS_FETCH_LOW_MAX_CNT_MASK;
230 wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
231 }
232 }
233
234 if (ibs_config.op_enabled) {
235 rdmsr(MSR_AMD64_IBSOPCTL, low, high);
236 if (low & IBS_OP_LOW_VALID_BIT) {
237 rdmsr(MSR_AMD64_IBSOPRIP, low, high);
238 ibs_op.ibs_op_rip_low = low;
239 ibs_op.ibs_op_rip_high = high;
240 rdmsr(MSR_AMD64_IBSOPDATA, low, high);
241 ibs_op.ibs_op_data1_low = low;
242 ibs_op.ibs_op_data1_high = high;
243 rdmsr(MSR_AMD64_IBSOPDATA2, low, high);
244 ibs_op.ibs_op_data2_low = low;
245 ibs_op.ibs_op_data2_high = high;
246 rdmsr(MSR_AMD64_IBSOPDATA3, low, high);
247 ibs_op.ibs_op_data3_low = low;
248 ibs_op.ibs_op_data3_high = high;
249 rdmsr(MSR_AMD64_IBSDCLINAD, low, high);
250 ibs_op.ibs_dc_linear_low = low;
251 ibs_op.ibs_dc_linear_high = high;
252 rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high);
253 ibs_op.ibs_dc_phys_low = low;
254 ibs_op.ibs_dc_phys_high = high;
255
256 /* reenable the IRQ */
257 oprofile_add_ibs_sample(regs,
258 (unsigned int *)&ibs_op,
259 IBS_OP_BEGIN);
260 rdmsr(MSR_AMD64_IBSOPCTL, low, high);
261 high = 0;
262 low &= ~IBS_OP_LOW_VALID_BIT;
263 low |= IBS_OP_LOW_ENABLE;
264 wrmsr(MSR_AMD64_IBSOPCTL, low, high);
265 }
266 }
267
268 return 1;
269}
270
271#endif
272
273static int op_amd_check_ctrs(struct pt_regs * const regs,
274 struct op_msrs const * const msrs)
275{
276 unsigned int low, high;
277 int i;
278
279 for (i = 0 ; i < NUM_COUNTERS; ++i) {
280 if (!reset_value[i])
281 continue;
282 CTR_READ(low, high, msrs, i);
283 if (CTR_OVERFLOWED(low)) {
284 oprofile_add_sample(regs, i);
285 CTR_WRITE(reset_value[i], msrs, i);
286 }
287 }
288
289#ifdef CONFIG_OPROFILE_IBS
290 op_amd_handle_ibs(regs, msrs);
291#endif
292
293 /* See op_model_ppro.c */
294 return 1;
295}
296
297static void op_amd_start(struct op_msrs const * const msrs)
298{
299 unsigned int low, high;
300 int i;
301 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
302 if (reset_value[i]) {
303 CTRL_READ(low, high, msrs, i);
304 CTRL_SET_ACTIVE(low);
305 CTRL_WRITE(low, high, msrs, i);
306 }
307 }
308
309#ifdef CONFIG_OPROFILE_IBS
310 if (ibs_allowed && ibs_config.fetch_enabled) {
311 low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
312 high = IBS_FETCH_HIGH_ENABLE;
313 wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
314 }
315
316 if (ibs_allowed && ibs_config.op_enabled) {
317 low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) + IBS_OP_LOW_ENABLE;
318 high = 0;
319 wrmsr(MSR_AMD64_IBSOPCTL, low, high);
320 }
321#endif
322}
323
324
325static void op_amd_stop(struct op_msrs const * const msrs)
326{
327 unsigned int low, high;
328 int i;
329
330 /* Subtle: stop on all counters to avoid race with
331 * setting our pm callback */
332 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
333 if (!reset_value[i])
334 continue;
335 CTRL_READ(low, high, msrs, i);
336 CTRL_SET_INACTIVE(low);
337 CTRL_WRITE(low, high, msrs, i);
338 }
339
340#ifdef CONFIG_OPROFILE_IBS
341 if (ibs_allowed && ibs_config.fetch_enabled) {
342 low = 0; /* clear max count and enable */
343 high = 0;
344 wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
345 }
346
347 if (ibs_allowed && ibs_config.op_enabled) {
348 low = 0; /* clear max count and enable */
349 high = 0;
350 wrmsr(MSR_AMD64_IBSOPCTL, low, high);
351 }
352#endif
353}
354
355static void op_amd_shutdown(struct op_msrs const * const msrs)
356{
357 int i;
358
359 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
360 if (CTR_IS_RESERVED(msrs, i))
361 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
362 }
363 for (i = 0 ; i < NUM_CONTROLS ; ++i) {
364 if (CTRL_IS_RESERVED(msrs, i))
365 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
366 }
367}
368
369#ifndef CONFIG_OPROFILE_IBS
370
371/* no IBS support */
372
373static int op_amd_init(struct oprofile_operations *ops)
374{
375 return 0;
376}
377
378static void op_amd_exit(void) {}
379
380#else
381
382static u8 ibs_eilvt_off;
383
384static inline void apic_init_ibs_nmi_per_cpu(void *arg)
385{
386 ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
387}
388
389static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
390{
391 setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
392}
393
394static int pfm_amd64_setup_eilvt(void)
395{
396#define IBSCTL_LVTOFFSETVAL (1 << 8)
397#define IBSCTL 0x1cc
398 struct pci_dev *cpu_cfg;
399 int nodes;
400 u32 value = 0;
401
402 /* per CPU setup */
403 on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);
404
405 nodes = 0;
406 cpu_cfg = NULL;
407 do {
408 cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
409 PCI_DEVICE_ID_AMD_10H_NB_MISC,
410 cpu_cfg);
411 if (!cpu_cfg)
412 break;
413 ++nodes;
414 pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
415 | IBSCTL_LVTOFFSETVAL);
416 pci_read_config_dword(cpu_cfg, IBSCTL, &value);
417 if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
418 printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
419 "IBSCTL = 0x%08x", value);
420 return 1;
421 }
422 } while (1);
423
424 if (!nodes) {
425 printk(KERN_DEBUG "No CPU node configured for IBS");
426 return 1;
427 }
428
429#ifdef CONFIG_NUMA
430 /* Sanity check */
431 /* Works only for 64bit with proper numa implementation. */
432 if (nodes != num_possible_nodes()) {
433 printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
434 "found: %d, expected %d",
435 nodes, num_possible_nodes());
436 return 1;
437 }
438#endif
439 return 0;
440}
441
442/*
443 * initialize the APIC for the IBS interrupts
444 * if available (AMD Family10h rev B0 and later)
445 */
446static void setup_ibs(void)
447{
448 ibs_allowed = boot_cpu_has(X86_FEATURE_IBS);
449
450 if (!ibs_allowed)
451 return;
452
453 if (pfm_amd64_setup_eilvt()) {
454 ibs_allowed = 0;
455 return;
456 }
457
458 printk(KERN_INFO "oprofile: AMD IBS detected\n");
459}
460
461
462/*
463 * uninitialize the APIC for the IBS interrupts if needed on AMD Family10h
464 * rev B0 and later */
465static void clear_ibs_nmi(void)
466{
467 if (ibs_allowed)
468 on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
469}
470
471static int (*create_arch_files)(struct super_block * sb, struct dentry * root);
472
473static int setup_ibs_files(struct super_block * sb, struct dentry * root)
474{
475 char buf[12];
476 struct dentry *dir;
477 int ret = 0;
478
479 /* architecture specific files */
480 if (create_arch_files)
481 ret = create_arch_files(sb, root);
482
483 if (ret)
484 return ret;
485
486 if (!ibs_allowed)
487 return ret;
488
489 /* model specific files */
490
491 /* setup some reasonable defaults */
492 ibs_config.max_cnt_fetch = 250000;
493 ibs_config.fetch_enabled = 0;
494 ibs_config.max_cnt_op = 250000;
495 ibs_config.op_enabled = 0;
496 ibs_config.dispatched_ops = 1;
497 snprintf(buf, sizeof(buf), "ibs_fetch");
498 dir = oprofilefs_mkdir(sb, root, buf);
499 oprofilefs_create_ulong(sb, dir, "rand_enable",
500 &ibs_config.rand_en);
501 oprofilefs_create_ulong(sb, dir, "enable",
502 &ibs_config.fetch_enabled);
503 oprofilefs_create_ulong(sb, dir, "max_count",
504 &ibs_config.max_cnt_fetch);
505 snprintf(buf, sizeof(buf), "ibs_uops");
506 dir = oprofilefs_mkdir(sb, root, buf);
507 oprofilefs_create_ulong(sb, dir, "enable",
508 &ibs_config.op_enabled);
509 oprofilefs_create_ulong(sb, dir, "max_count",
510 &ibs_config.max_cnt_op);
511 oprofilefs_create_ulong(sb, dir, "dispatched_ops",
512 &ibs_config.dispatched_ops);
513
514 return 0;
515}
516
517static int op_amd_init(struct oprofile_operations *ops)
518{
519 setup_ibs();
520 create_arch_files = ops->create_files;
521 ops->create_files = setup_ibs_files;
522 return 0;
523}
524
525static void op_amd_exit(void)
526{
527 clear_ibs_nmi();
528}
529
530#endif
531
532struct op_x86_model_spec const op_amd_spec = {
533 .init = op_amd_init,
534 .exit = op_amd_exit,
535 .num_counters = NUM_COUNTERS,
536 .num_controls = NUM_CONTROLS,
537 .fill_in_addresses = &op_amd_fill_in_addresses,
538 .setup_ctrs = &op_amd_setup_ctrs,
539 .check_ctrs = &op_amd_check_ctrs,
540 .start = &op_amd_start,
541 .stop = &op_amd_stop,
542 .shutdown = &op_amd_shutdown
543};
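A detail of the counter macros at the top of this file that is easy to miss: CTR_WRITE() stores the two's complement of the requested count, the hardware then counts upward, and CTR_OVERFLOWED() in op_amd_check_ctrs() treats a cleared bit 31 of the low half as "sample period expired" before re-arming with the same reset_value. A standalone, userspace-runnable sketch of just that arithmetic (no kernel interfaces; the count is made up):

    #include <stdio.h>

    /* Same test the NMI handler applies to the low 32 bits of the perfctr. */
    #define CTR_OVERFLOWED(n) (!((n) & (1U << 31)))

    int main(void)
    {
            unsigned long reset_value = 100000;               /* events per sample */
            unsigned int low = -(unsigned int)reset_value;    /* what CTR_WRITE() stores */

            printf("armed:   0x%08x overflowed=%d\n", low, CTR_OVERFLOWED(low));

            low += reset_value;   /* counter counted up and wrapped past zero */
            printf("expired: 0x%08x overflowed=%d\n", low, CTR_OVERFLOWED(low));
            return 0;
    }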
diff --git a/arch/x86/oprofile/op_model_athlon.c b/arch/x86/oprofile/op_model_athlon.c
deleted file mode 100644
index 3d534879a9dc..000000000000
--- a/arch/x86/oprofile/op_model_athlon.c
+++ /dev/null
@@ -1,190 +0,0 @@
1/*
2 * @file op_model_athlon.h
3 * athlon / K7 / K8 / Family 10h model-specific MSR operations
4 *
5 * @remark Copyright 2002 OProfile authors
6 * @remark Read the file COPYING
7 *
8 * @author John Levon
9 * @author Philippe Elie
10 * @author Graydon Hoare
11 */
12
13#include <linux/oprofile.h>
14#include <asm/ptrace.h>
15#include <asm/msr.h>
16#include <asm/nmi.h>
17
18#include "op_x86_model.h"
19#include "op_counter.h"
20
21#define NUM_COUNTERS 4
22#define NUM_CONTROLS 4
23
24#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
25#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
26#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0)
27#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
28
29#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
30#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
31#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
32#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
33#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
34#define CTRL_CLEAR_LO(x) (x &= (1<<21))
35#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
36#define CTRL_SET_ENABLE(val) (val |= 1<<20)
37#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
38#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
39#define CTRL_SET_UM(val, m) (val |= (m << 8))
40#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
41#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
42#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
43#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
44
45static unsigned long reset_value[NUM_COUNTERS];
46
47static void athlon_fill_in_addresses(struct op_msrs * const msrs)
48{
49 int i;
50
51 for (i = 0; i < NUM_COUNTERS; i++) {
52 if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
53 msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
54 else
55 msrs->counters[i].addr = 0;
56 }
57
58 for (i = 0; i < NUM_CONTROLS; i++) {
59 if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
60 msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
61 else
62 msrs->controls[i].addr = 0;
63 }
64}
65
66
67static void athlon_setup_ctrs(struct op_msrs const * const msrs)
68{
69 unsigned int low, high;
70 int i;
71
72 /* clear all counters */
73 for (i = 0 ; i < NUM_CONTROLS; ++i) {
74 if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
75 continue;
76 CTRL_READ(low, high, msrs, i);
77 CTRL_CLEAR_LO(low);
78 CTRL_CLEAR_HI(high);
79 CTRL_WRITE(low, high, msrs, i);
80 }
81
82 /* avoid a false detection of ctr overflows in NMI handler */
83 for (i = 0; i < NUM_COUNTERS; ++i) {
84 if (unlikely(!CTR_IS_RESERVED(msrs, i)))
85 continue;
86 CTR_WRITE(1, msrs, i);
87 }
88
89 /* enable active counters */
90 for (i = 0; i < NUM_COUNTERS; ++i) {
91 if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
92 reset_value[i] = counter_config[i].count;
93
94 CTR_WRITE(counter_config[i].count, msrs, i);
95
96 CTRL_READ(low, high, msrs, i);
97 CTRL_CLEAR_LO(low);
98 CTRL_CLEAR_HI(high);
99 CTRL_SET_ENABLE(low);
100 CTRL_SET_USR(low, counter_config[i].user);
101 CTRL_SET_KERN(low, counter_config[i].kernel);
102 CTRL_SET_UM(low, counter_config[i].unit_mask);
103 CTRL_SET_EVENT_LOW(low, counter_config[i].event);
104 CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
105 CTRL_SET_HOST_ONLY(high, 0);
106 CTRL_SET_GUEST_ONLY(high, 0);
107
108 CTRL_WRITE(low, high, msrs, i);
109 } else {
110 reset_value[i] = 0;
111 }
112 }
113}
114
115
116static int athlon_check_ctrs(struct pt_regs * const regs,
117 struct op_msrs const * const msrs)
118{
119 unsigned int low, high;
120 int i;
121
122 for (i = 0 ; i < NUM_COUNTERS; ++i) {
123 if (!reset_value[i])
124 continue;
125 CTR_READ(low, high, msrs, i);
126 if (CTR_OVERFLOWED(low)) {
127 oprofile_add_sample(regs, i);
128 CTR_WRITE(reset_value[i], msrs, i);
129 }
130 }
131
132 /* See op_model_ppro.c */
133 return 1;
134}
135
136
137static void athlon_start(struct op_msrs const * const msrs)
138{
139 unsigned int low, high;
140 int i;
141 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
142 if (reset_value[i]) {
143 CTRL_READ(low, high, msrs, i);
144 CTRL_SET_ACTIVE(low);
145 CTRL_WRITE(low, high, msrs, i);
146 }
147 }
148}
149
150
151static void athlon_stop(struct op_msrs const * const msrs)
152{
153 unsigned int low, high;
154 int i;
155
156 /* Subtle: stop on all counters to avoid race with
157 * setting our pm callback */
158 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
159 if (!reset_value[i])
160 continue;
161 CTRL_READ(low, high, msrs, i);
162 CTRL_SET_INACTIVE(low);
163 CTRL_WRITE(low, high, msrs, i);
164 }
165}
166
167static void athlon_shutdown(struct op_msrs const * const msrs)
168{
169 int i;
170
171 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
172 if (CTR_IS_RESERVED(msrs, i))
173 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
174 }
175 for (i = 0 ; i < NUM_CONTROLS ; ++i) {
176 if (CTRL_IS_RESERVED(msrs, i))
177 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
178 }
179}
180
181struct op_x86_model_spec const op_athlon_spec = {
182 .num_counters = NUM_COUNTERS,
183 .num_controls = NUM_CONTROLS,
184 .fill_in_addresses = &athlon_fill_in_addresses,
185 .setup_ctrs = &athlon_setup_ctrs,
186 .check_ctrs = &athlon_check_ctrs,
187 .start = &athlon_start,
188 .stop = &athlon_stop,
189 .shutdown = &athlon_shutdown
190};
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 45b605fa71d0..05a0261ba0c3 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -32,6 +32,8 @@ struct pt_regs;
32 * various x86 CPU models' perfctr support. 32 * various x86 CPU models' perfctr support.
33 */ 33 */
34struct op_x86_model_spec { 34struct op_x86_model_spec {
35 int (*init)(struct oprofile_operations *ops);
36 void (*exit)(void);
35 unsigned int const num_counters; 37 unsigned int const num_counters;
36 unsigned int const num_controls; 38 unsigned int const num_controls;
37 void (*fill_in_addresses)(struct op_msrs * const msrs); 39 void (*fill_in_addresses)(struct op_msrs * const msrs);
@@ -46,6 +48,6 @@ struct op_x86_model_spec {
46extern struct op_x86_model_spec const op_ppro_spec; 48extern struct op_x86_model_spec const op_ppro_spec;
47extern struct op_x86_model_spec const op_p4_spec; 49extern struct op_x86_model_spec const op_p4_spec;
48extern struct op_x86_model_spec const op_p4_ht2_spec; 50extern struct op_x86_model_spec const op_p4_ht2_spec;
49extern struct op_x86_model_spec const op_athlon_spec; 51extern struct op_x86_model_spec const op_amd_spec;
50 52
51#endif /* OP_X86_MODEL_H */ 53#endif /* OP_X86_MODEL_H */
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 9304c4555079..ed982273fb8b 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -5,6 +5,7 @@
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 * @author Barry Kasindorf
8 * 9 *
9 * This is the core of the buffer management. Each 10 * This is the core of the buffer management. Each
10 * CPU buffer is processed and entered into the 11 * CPU buffer is processed and entered into the
@@ -33,7 +34,7 @@
33#include "event_buffer.h" 34#include "event_buffer.h"
34#include "cpu_buffer.h" 35#include "cpu_buffer.h"
35#include "buffer_sync.h" 36#include "buffer_sync.h"
36 37
37static LIST_HEAD(dying_tasks); 38static LIST_HEAD(dying_tasks);
38static LIST_HEAD(dead_tasks); 39static LIST_HEAD(dead_tasks);
39static cpumask_t marked_cpus = CPU_MASK_NONE; 40static cpumask_t marked_cpus = CPU_MASK_NONE;
@@ -48,10 +49,11 @@ static void process_task_mortuary(void);
48 * Can be invoked from softirq via RCU callback due to 49 * Can be invoked from softirq via RCU callback due to
49 * call_rcu() of the task struct, hence the _irqsave. 50 * call_rcu() of the task struct, hence the _irqsave.
50 */ 51 */
51static int task_free_notify(struct notifier_block * self, unsigned long val, void * data) 52static int
53task_free_notify(struct notifier_block *self, unsigned long val, void *data)
52{ 54{
53 unsigned long flags; 55 unsigned long flags;
54 struct task_struct * task = data; 56 struct task_struct *task = data;
55 spin_lock_irqsave(&task_mortuary, flags); 57 spin_lock_irqsave(&task_mortuary, flags);
56 list_add(&task->tasks, &dying_tasks); 58 list_add(&task->tasks, &dying_tasks);
57 spin_unlock_irqrestore(&task_mortuary, flags); 59 spin_unlock_irqrestore(&task_mortuary, flags);
@@ -62,13 +64,14 @@ static int task_free_notify(struct notifier_block * self, unsigned long val, voi
62/* The task is on its way out. A sync of the buffer means we can catch 64/* The task is on its way out. A sync of the buffer means we can catch
63 * any remaining samples for this task. 65 * any remaining samples for this task.
64 */ 66 */
65static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data) 67static int
68task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
66{ 69{
67 /* To avoid latency problems, we only process the current CPU, 70 /* To avoid latency problems, we only process the current CPU,
68 * hoping that most samples for the task are on this CPU 71 * hoping that most samples for the task are on this CPU
69 */ 72 */
70 sync_buffer(raw_smp_processor_id()); 73 sync_buffer(raw_smp_processor_id());
71 return 0; 74 return 0;
72} 75}
73 76
74 77
@@ -77,11 +80,12 @@ static int task_exit_notify(struct notifier_block * self, unsigned long val, voi
77 * we don't lose any. This does not have to be exact, it's a QoI issue 80 * we don't lose any. This does not have to be exact, it's a QoI issue
78 * only. 81 * only.
79 */ 82 */
80static int munmap_notify(struct notifier_block * self, unsigned long val, void * data) 83static int
84munmap_notify(struct notifier_block *self, unsigned long val, void *data)
81{ 85{
82 unsigned long addr = (unsigned long)data; 86 unsigned long addr = (unsigned long)data;
83 struct mm_struct * mm = current->mm; 87 struct mm_struct *mm = current->mm;
84 struct vm_area_struct * mpnt; 88 struct vm_area_struct *mpnt;
85 89
86 down_read(&mm->mmap_sem); 90 down_read(&mm->mmap_sem);
87 91
@@ -99,11 +103,12 @@ static int munmap_notify(struct notifier_block * self, unsigned long val, void *
99 return 0; 103 return 0;
100} 104}
101 105
102 106
103/* We need to be told about new modules so we don't attribute to a previously 107/* We need to be told about new modules so we don't attribute to a previously
104 * loaded module, or drop the samples on the floor. 108 * loaded module, or drop the samples on the floor.
105 */ 109 */
106static int module_load_notify(struct notifier_block * self, unsigned long val, void * data) 110static int
111module_load_notify(struct notifier_block *self, unsigned long val, void *data)
107{ 112{
108#ifdef CONFIG_MODULES 113#ifdef CONFIG_MODULES
109 if (val != MODULE_STATE_COMING) 114 if (val != MODULE_STATE_COMING)
@@ -118,7 +123,7 @@ static int module_load_notify(struct notifier_block * self, unsigned long val, v
118 return 0; 123 return 0;
119} 124}
120 125
121 126
122static struct notifier_block task_free_nb = { 127static struct notifier_block task_free_nb = {
123 .notifier_call = task_free_notify, 128 .notifier_call = task_free_notify,
124}; 129};
@@ -135,7 +140,7 @@ static struct notifier_block module_load_nb = {
135 .notifier_call = module_load_notify, 140 .notifier_call = module_load_notify,
136}; 141};
137 142
138 143
139static void end_sync(void) 144static void end_sync(void)
140{ 145{
141 end_cpu_work(); 146 end_cpu_work();
@@ -208,14 +213,14 @@ static inline unsigned long fast_get_dcookie(struct path *path)
208 * not strictly necessary but allows oprofile to associate 213 * not strictly necessary but allows oprofile to associate
209 * shared-library samples with particular applications 214 * shared-library samples with particular applications
210 */ 215 */
211static unsigned long get_exec_dcookie(struct mm_struct * mm) 216static unsigned long get_exec_dcookie(struct mm_struct *mm)
212{ 217{
213 unsigned long cookie = NO_COOKIE; 218 unsigned long cookie = NO_COOKIE;
214 struct vm_area_struct * vma; 219 struct vm_area_struct *vma;
215 220
216 if (!mm) 221 if (!mm)
217 goto out; 222 goto out;
218 223
219 for (vma = mm->mmap; vma; vma = vma->vm_next) { 224 for (vma = mm->mmap; vma; vma = vma->vm_next) {
220 if (!vma->vm_file) 225 if (!vma->vm_file)
221 continue; 226 continue;
@@ -235,13 +240,14 @@ out:
235 * sure to do this lookup before a mm->mmap modification happens so 240 * sure to do this lookup before a mm->mmap modification happens so
236 * we don't lose track. 241 * we don't lose track.
237 */ 242 */
238static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset) 243static unsigned long
244lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
239{ 245{
240 unsigned long cookie = NO_COOKIE; 246 unsigned long cookie = NO_COOKIE;
241 struct vm_area_struct * vma; 247 struct vm_area_struct *vma;
242 248
243 for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) { 249 for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
244 250
245 if (addr < vma->vm_start || addr >= vma->vm_end) 251 if (addr < vma->vm_start || addr >= vma->vm_end)
246 continue; 252 continue;
247 253
@@ -263,9 +269,20 @@ static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, o
263 return cookie; 269 return cookie;
264} 270}
265 271
272static void increment_tail(struct oprofile_cpu_buffer *b)
273{
274 unsigned long new_tail = b->tail_pos + 1;
275
 276	rmb(); /* be sure FIFO pointers are synchronized */
277
278 if (new_tail < b->buffer_size)
279 b->tail_pos = new_tail;
280 else
281 b->tail_pos = 0;
282}
266 283
267static unsigned long last_cookie = INVALID_COOKIE; 284static unsigned long last_cookie = INVALID_COOKIE;
268 285
269static void add_cpu_switch(int i) 286static void add_cpu_switch(int i)
270{ 287{
271 add_event_entry(ESCAPE_CODE); 288 add_event_entry(ESCAPE_CODE);
@@ -278,16 +295,16 @@ static void add_kernel_ctx_switch(unsigned int in_kernel)
278{ 295{
279 add_event_entry(ESCAPE_CODE); 296 add_event_entry(ESCAPE_CODE);
280 if (in_kernel) 297 if (in_kernel)
281 add_event_entry(KERNEL_ENTER_SWITCH_CODE); 298 add_event_entry(KERNEL_ENTER_SWITCH_CODE);
282 else 299 else
283 add_event_entry(KERNEL_EXIT_SWITCH_CODE); 300 add_event_entry(KERNEL_EXIT_SWITCH_CODE);
284} 301}
285 302
286static void 303static void
287add_user_ctx_switch(struct task_struct const * task, unsigned long cookie) 304add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
288{ 305{
289 add_event_entry(ESCAPE_CODE); 306 add_event_entry(ESCAPE_CODE);
290 add_event_entry(CTX_SWITCH_CODE); 307 add_event_entry(CTX_SWITCH_CODE);
291 add_event_entry(task->pid); 308 add_event_entry(task->pid);
292 add_event_entry(cookie); 309 add_event_entry(cookie);
293 /* Another code for daemon back-compat */ 310 /* Another code for daemon back-compat */
@@ -296,7 +313,7 @@ add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
296 add_event_entry(task->tgid); 313 add_event_entry(task->tgid);
297} 314}
298 315
299 316
300static void add_cookie_switch(unsigned long cookie) 317static void add_cookie_switch(unsigned long cookie)
301{ 318{
302 add_event_entry(ESCAPE_CODE); 319 add_event_entry(ESCAPE_CODE);
@@ -304,13 +321,78 @@ static void add_cookie_switch(unsigned long cookie)
304 add_event_entry(cookie); 321 add_event_entry(cookie);
305} 322}
306 323
307 324
308static void add_trace_begin(void) 325static void add_trace_begin(void)
309{ 326{
310 add_event_entry(ESCAPE_CODE); 327 add_event_entry(ESCAPE_CODE);
311 add_event_entry(TRACE_BEGIN_CODE); 328 add_event_entry(TRACE_BEGIN_CODE);
312} 329}
313 330
331#ifdef CONFIG_OPROFILE_IBS
332
333#define IBS_FETCH_CODE_SIZE 2
334#define IBS_OP_CODE_SIZE 5
335#define IBS_EIP(offset) \
336 (((struct op_sample *)&cpu_buf->buffer[(offset)])->eip)
337#define IBS_EVENT(offset) \
338 (((struct op_sample *)&cpu_buf->buffer[(offset)])->event)
339
340/*
341 * Add IBS fetch and op entries to event buffer
342 */
343static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
344 int in_kernel, struct mm_struct *mm)
345{
346 unsigned long rip;
347 int i, count;
348 unsigned long ibs_cookie = 0;
349 off_t offset;
350
351 increment_tail(cpu_buf); /* move to RIP entry */
352
353 rip = IBS_EIP(cpu_buf->tail_pos);
354
355#ifdef __LP64__
356 rip += IBS_EVENT(cpu_buf->tail_pos) << 32;
357#endif
358
359 if (mm) {
360 ibs_cookie = lookup_dcookie(mm, rip, &offset);
361
362 if (ibs_cookie == NO_COOKIE)
363 offset = rip;
364 if (ibs_cookie == INVALID_COOKIE) {
365 atomic_inc(&oprofile_stats.sample_lost_no_mapping);
366 offset = rip;
367 }
368 if (ibs_cookie != last_cookie) {
369 add_cookie_switch(ibs_cookie);
370 last_cookie = ibs_cookie;
371 }
372 } else
373 offset = rip;
374
375 add_event_entry(ESCAPE_CODE);
376 add_event_entry(code);
377 add_event_entry(offset); /* Offset from Dcookie */
378
379 /* we send the Dcookie offset, but send the raw Linear Add also*/
380 add_event_entry(IBS_EIP(cpu_buf->tail_pos));
381 add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
382
383 if (code == IBS_FETCH_CODE)
384 count = IBS_FETCH_CODE_SIZE; /*IBS FETCH is 2 int64s*/
385 else
386 count = IBS_OP_CODE_SIZE; /*IBS OP is 5 int64s*/
387
388 for (i = 0; i < count; i++) {
389 increment_tail(cpu_buf);
390 add_event_entry(IBS_EIP(cpu_buf->tail_pos));
391 add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
392 }
393}
394
395#endif
314 396
315static void add_sample_entry(unsigned long offset, unsigned long event) 397static void add_sample_entry(unsigned long offset, unsigned long event)
316{ 398{
@@ -319,13 +401,13 @@ static void add_sample_entry(unsigned long offset, unsigned long event)
319} 401}
320 402
321 403
322static int add_us_sample(struct mm_struct * mm, struct op_sample * s) 404static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
323{ 405{
324 unsigned long cookie; 406 unsigned long cookie;
325 off_t offset; 407 off_t offset;
326 408
327 cookie = lookup_dcookie(mm, s->eip, &offset); 409 cookie = lookup_dcookie(mm, s->eip, &offset);
328 410
329 if (cookie == INVALID_COOKIE) { 411 if (cookie == INVALID_COOKIE) {
330 atomic_inc(&oprofile_stats.sample_lost_no_mapping); 412 atomic_inc(&oprofile_stats.sample_lost_no_mapping);
331 return 0; 413 return 0;
@@ -341,13 +423,13 @@ static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
341 return 1; 423 return 1;
342} 424}
343 425
344 426
345/* Add a sample to the global event buffer. If possible the 427/* Add a sample to the global event buffer. If possible the
346 * sample is converted into a persistent dentry/offset pair 428 * sample is converted into a persistent dentry/offset pair
347 * for later lookup from userspace. 429 * for later lookup from userspace.
348 */ 430 */
349static int 431static int
350add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel) 432add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
351{ 433{
352 if (in_kernel) { 434 if (in_kernel) {
353 add_sample_entry(s->eip, s->event); 435 add_sample_entry(s->eip, s->event);
@@ -359,9 +441,9 @@ add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
359 } 441 }
360 return 0; 442 return 0;
361} 443}
362
363 444
364static void release_mm(struct mm_struct * mm) 445
446static void release_mm(struct mm_struct *mm)
365{ 447{
366 if (!mm) 448 if (!mm)
367 return; 449 return;
@@ -370,9 +452,9 @@ static void release_mm(struct mm_struct * mm)
370} 452}
371 453
372 454
373static struct mm_struct * take_tasks_mm(struct task_struct * task) 455static struct mm_struct *take_tasks_mm(struct task_struct *task)
374{ 456{
375 struct mm_struct * mm = get_task_mm(task); 457 struct mm_struct *mm = get_task_mm(task);
376 if (mm) 458 if (mm)
377 down_read(&mm->mmap_sem); 459 down_read(&mm->mmap_sem);
378 return mm; 460 return mm;
@@ -383,10 +465,10 @@ static inline int is_code(unsigned long val)
383{ 465{
384 return val == ESCAPE_CODE; 466 return val == ESCAPE_CODE;
385} 467}
386 468
387 469
388/* "acquire" as many cpu buffer slots as we can */ 470/* "acquire" as many cpu buffer slots as we can */
389static unsigned long get_slots(struct oprofile_cpu_buffer * b) 471static unsigned long get_slots(struct oprofile_cpu_buffer *b)
390{ 472{
391 unsigned long head = b->head_pos; 473 unsigned long head = b->head_pos;
392 unsigned long tail = b->tail_pos; 474 unsigned long tail = b->tail_pos;
@@ -412,19 +494,6 @@ static unsigned long get_slots(struct oprofile_cpu_buffer * b)
412} 494}
413 495
414 496
415static void increment_tail(struct oprofile_cpu_buffer * b)
416{
417 unsigned long new_tail = b->tail_pos + 1;
418
419 rmb();
420
421 if (new_tail < b->buffer_size)
422 b->tail_pos = new_tail;
423 else
424 b->tail_pos = 0;
425}
426
427
428/* Move tasks along towards death. Any tasks on dead_tasks 497/* Move tasks along towards death. Any tasks on dead_tasks
429 * will definitely have no remaining references in any 498 * will definitely have no remaining references in any
430 * CPU buffers at this point, because we use two lists, 499 * CPU buffers at this point, because we use two lists,
@@ -435,8 +504,8 @@ static void process_task_mortuary(void)
435{ 504{
436 unsigned long flags; 505 unsigned long flags;
437 LIST_HEAD(local_dead_tasks); 506 LIST_HEAD(local_dead_tasks);
438 struct task_struct * task; 507 struct task_struct *task;
439 struct task_struct * ttask; 508 struct task_struct *ttask;
440 509
441 spin_lock_irqsave(&task_mortuary, flags); 510 spin_lock_irqsave(&task_mortuary, flags);
442 511
@@ -493,7 +562,7 @@ void sync_buffer(int cpu)
493{ 562{
494 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu); 563 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
495 struct mm_struct *mm = NULL; 564 struct mm_struct *mm = NULL;
496 struct task_struct * new; 565 struct task_struct *new;
497 unsigned long cookie = 0; 566 unsigned long cookie = 0;
498 int in_kernel = 1; 567 int in_kernel = 1;
499 unsigned int i; 568 unsigned int i;
@@ -501,7 +570,7 @@ void sync_buffer(int cpu)
501 unsigned long available; 570 unsigned long available;
502 571
503 mutex_lock(&buffer_mutex); 572 mutex_lock(&buffer_mutex);
504 573
505 add_cpu_switch(cpu); 574 add_cpu_switch(cpu);
506 575
507 /* Remember, only we can modify tail_pos */ 576 /* Remember, only we can modify tail_pos */
@@ -509,8 +578,8 @@ void sync_buffer(int cpu)
509 available = get_slots(cpu_buf); 578 available = get_slots(cpu_buf);
510 579
511 for (i = 0; i < available; ++i) { 580 for (i = 0; i < available; ++i) {
512 struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos]; 581 struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
513 582
514 if (is_code(s->eip)) { 583 if (is_code(s->eip)) {
515 if (s->event <= CPU_IS_KERNEL) { 584 if (s->event <= CPU_IS_KERNEL) {
516 /* kernel/userspace switch */ 585 /* kernel/userspace switch */
@@ -521,8 +590,18 @@ void sync_buffer(int cpu)
521 } else if (s->event == CPU_TRACE_BEGIN) { 590 } else if (s->event == CPU_TRACE_BEGIN) {
522 state = sb_bt_start; 591 state = sb_bt_start;
523 add_trace_begin(); 592 add_trace_begin();
593#ifdef CONFIG_OPROFILE_IBS
594 } else if (s->event == IBS_FETCH_BEGIN) {
595 state = sb_bt_start;
596 add_ibs_begin(cpu_buf,
597 IBS_FETCH_CODE, in_kernel, mm);
598 } else if (s->event == IBS_OP_BEGIN) {
599 state = sb_bt_start;
600 add_ibs_begin(cpu_buf,
601 IBS_OP_CODE, in_kernel, mm);
602#endif
524 } else { 603 } else {
525 struct mm_struct * oldmm = mm; 604 struct mm_struct *oldmm = mm;
526 605
527 /* userspace context switch */ 606 /* userspace context switch */
528 new = (struct task_struct *)s->event; 607 new = (struct task_struct *)s->event;
@@ -533,13 +612,11 @@ void sync_buffer(int cpu)
533 cookie = get_exec_dcookie(mm); 612 cookie = get_exec_dcookie(mm);
534 add_user_ctx_switch(new, cookie); 613 add_user_ctx_switch(new, cookie);
535 } 614 }
536 } else { 615 } else if (state >= sb_bt_start &&
537 if (state >= sb_bt_start && 616 !add_sample(mm, s, in_kernel)) {
538 !add_sample(mm, s, in_kernel)) { 617 if (state == sb_bt_start) {
539 if (state == sb_bt_start) { 618 state = sb_bt_ignore;
540 state = sb_bt_ignore; 619 atomic_inc(&oprofile_stats.bt_lost_no_mapping);
541 atomic_inc(&oprofile_stats.bt_lost_no_mapping);
542 }
543 } 620 }
544 } 621 }
545 622
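Reading add_ibs_begin() together with log_ibs_sample() in cpu_buffer.c (next file), one IBS fetch sample reaches the event buffer as a fixed run of words: the escape code, the IBS code, the dcookie offset (or the raw address when no mapping is found), the raw linear-address pair, and then IBS_FETCH_CODE_SIZE further register pairs. A hypothetical struct spelling that layout out, for illustration only -- the daemon really consumes these as a flat stream of unsigned longs:

    /* Event-buffer words emitted by add_ibs_begin() for one IBS fetch sample. */
    struct ibs_fetch_record {
            unsigned long escape;          /* ESCAPE_CODE */
            unsigned long code;            /* IBS_FETCH_CODE (13) */
            unsigned long offset;          /* dcookie offset, or raw linear address */
            unsigned long lin_addr_low;    /* raw IbsFetchLinAd, low/high halves */
            unsigned long lin_addr_high;
            unsigned long fetch_ctl_low;   /* IBS_FETCH_CODE_SIZE == 2 extra pairs: */
            unsigned long fetch_ctl_high;  /* IbsFetchCtl ... */
            unsigned long phys_addr_low;   /* ... and IbsFetchPhysAd */
            unsigned long phys_addr_high;
    };

An op sample follows the same pattern with IBS_OP_CODE and IBS_OP_CODE_SIZE (five) trailing pairs.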
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 7ba78e6d210e..e1bd5a937f6c 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -5,6 +5,7 @@
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 * @author Barry Kasindorf <barry.kasindorf@amd.com>
8 * 9 *
9 * Each CPU has a local buffer that stores PC value/event 10 * Each CPU has a local buffer that stores PC value/event
10 * pairs. We also log context switches when we notice them. 11 * pairs. We also log context switches when we notice them.
@@ -209,7 +210,7 @@ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
209 return 1; 210 return 1;
210} 211}
211 212
212static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf) 213static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
213{ 214{
214 if (nr_available_slots(cpu_buf) < 4) { 215 if (nr_available_slots(cpu_buf) < 4) {
215 cpu_buf->sample_lost_overflow++; 216 cpu_buf->sample_lost_overflow++;
@@ -254,6 +255,75 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
254 oprofile_add_ext_sample(pc, regs, event, is_kernel); 255 oprofile_add_ext_sample(pc, regs, event, is_kernel);
255} 256}
256 257
258#ifdef CONFIG_OPROFILE_IBS
259
260#define MAX_IBS_SAMPLE_SIZE 14
261static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
262 unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code)
263{
264 struct task_struct *task;
265
266 cpu_buf->sample_received++;
267
268 if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
269 cpu_buf->sample_lost_overflow++;
270 return 0;
271 }
272
273 is_kernel = !!is_kernel;
274
275 /* notice a switch from user->kernel or vice versa */
276 if (cpu_buf->last_is_kernel != is_kernel) {
277 cpu_buf->last_is_kernel = is_kernel;
278 add_code(cpu_buf, is_kernel);
279 }
280
281 /* notice a task switch */
282 if (!is_kernel) {
283 task = current;
284
285 if (cpu_buf->last_task != task) {
286 cpu_buf->last_task = task;
287 add_code(cpu_buf, (unsigned long)task);
288 }
289 }
290
291 add_code(cpu_buf, ibs_code);
292 add_sample(cpu_buf, ibs[0], ibs[1]);
293 add_sample(cpu_buf, ibs[2], ibs[3]);
294 add_sample(cpu_buf, ibs[4], ibs[5]);
295
296 if (ibs_code == IBS_OP_BEGIN) {
297 add_sample(cpu_buf, ibs[6], ibs[7]);
298 add_sample(cpu_buf, ibs[8], ibs[9]);
299 add_sample(cpu_buf, ibs[10], ibs[11]);
300 }
301
302 return 1;
303}
304
305void oprofile_add_ibs_sample(struct pt_regs *const regs,
306 unsigned int * const ibs_sample, u8 code)
307{
308 int is_kernel = !user_mode(regs);
309 unsigned long pc = profile_pc(regs);
310
311 struct oprofile_cpu_buffer *cpu_buf =
312 &per_cpu(cpu_buffer, smp_processor_id());
313
314 if (!backtrace_depth) {
315 log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
316 return;
317 }
318
319 /* if log_sample() fails we can't backtrace since we lost the source
320 * of this event */
321 if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
322 oprofile_ops.backtrace(regs, backtrace_depth);
323}
324
325#endif
326
257void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) 327void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
258{ 328{
259 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 329 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
@@ -296,7 +366,7 @@ static void wq_sync_buffer(struct work_struct *work)
296 struct oprofile_cpu_buffer * b = 366 struct oprofile_cpu_buffer * b =
297 container_of(work, struct oprofile_cpu_buffer, work.work); 367 container_of(work, struct oprofile_cpu_buffer, work.work);
298 if (b->cpu != smp_processor_id()) { 368 if (b->cpu != smp_processor_id()) {
299 printk("WQ on CPU%d, prefer CPU%d\n", 369 printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
300 smp_processor_id(), b->cpu); 370 smp_processor_id(), b->cpu);
301 } 371 }
302 sync_buffer(b->cpu); 372 sync_buffer(b->cpu);
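On the producer side, log_ibs_sample() packs one IBS sample into ordinary struct op_sample slots: optional kernel/user and task switch notes when those change, then an escape slot carrying the IBS code, then one slot per register pair in the order the NMI handler read the MSRs (three pairs for a fetch sample, six for an op sample). A rough, illustration-only picture of the op case, assuming the definitions from cpu_buffer.h and <linux/oprofile.h>:

    #include <linux/oprofile.h>     /* ESCAPE_CODE */
    #include "cpu_buffer.h"         /* struct op_sample, IBS_OP_BEGIN */

    /* Slot sequence written by log_ibs_sample() for one IBS op sample;
     * sync_buffer() spots the escape slot and feeds the rest to add_ibs_begin(). */
    static const struct op_sample example_ibs_op_slots[] = {
            { .eip = ESCAPE_CODE, .event = IBS_OP_BEGIN },    /* add_code() */
            /* six add_sample() pairs follow, each { low, high }:
             *   IbsOpRip, IbsOpData, IbsOpData2, IbsOpData3,
             *   IbsDcLinAd, IbsDcPhysAd */
    };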
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index c3e366b52261..9c44d004da69 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -55,5 +55,7 @@ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
55/* transient events for the CPU buffer -> event buffer */ 55/* transient events for the CPU buffer -> event buffer */
56#define CPU_IS_KERNEL 1 56#define CPU_IS_KERNEL 1
57#define CPU_TRACE_BEGIN 2 57#define CPU_TRACE_BEGIN 2
58#define IBS_FETCH_BEGIN 3
59#define IBS_OP_BEGIN 4
58 60
59#endif /* OPROFILE_CPU_BUFFER_H */ 61#endif /* OPROFILE_CPU_BUFFER_H */
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 041bb31100f4..bcb8f725427c 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -36,6 +36,8 @@
36#define XEN_ENTER_SWITCH_CODE 10 36#define XEN_ENTER_SWITCH_CODE 10
37#define SPU_PROFILING_CODE 11 37#define SPU_PROFILING_CODE 11
38#define SPU_CTX_SWITCH_CODE 12 38#define SPU_CTX_SWITCH_CODE 12
39#define IBS_FETCH_CODE 13
40#define IBS_OP_CODE 14
39 41
40struct super_block; 42struct super_block;
41struct dentry; 43struct dentry;