author     Linus Torvalds <torvalds@linux-foundation.org>  2009-01-09 15:43:06 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-01-09 15:43:06 -0500
commit     4ce5f24193cef2e26f182ce708e94ba1f5fafc0c (patch)
tree       300373440be70af7c8ce662d4b30d8103e7c6026
parent     7c51d57e9d7fbce89f79c41dc8da383101dbe9c6 (diff)
parent     a076aa4f96f40fc75451ae835a1a665ce1faf951 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile: (31 commits)
powerpc/oprofile: fix whitespaces in op_model_cell.c
powerpc/oprofile: IBM CELL: add SPU event profiling support
powerpc/oprofile: fix cell/pr_util.h
powerpc/oprofile: IBM CELL: cleanup and restructuring
oprofile: make new cpu buffer functions part of the api
oprofile: remove #ifdef CONFIG_OPROFILE_IBS in non-ibs code
ring_buffer: fix ring_buffer_event_length()
oprofile: use new data sample format for ibs
oprofile: add op_cpu_buffer_get_data()
oprofile: add op_cpu_buffer_add_data()
oprofile: rework implementation of cpu buffer events
oprofile: modify op_cpu_buffer_read_entry()
oprofile: add op_cpu_buffer_write_reserve()
oprofile: rename variables in add_ibs_begin()
oprofile: rename add_sample() in cpu_buffer.c
oprofile: rename variable ibs_allowed to has_ibs in op_model_amd.c
oprofile: making add_sample_entry() inline
oprofile: remove backtrace code for ibs
oprofile: remove unused ibs macro
oprofile: remove unused components in struct oprofile_cpu_buffer
...
-rw-r--r--  arch/powerpc/include/asm/cell-pmu.h        |   2
-rw-r--r--  arch/powerpc/include/asm/oprofile_impl.h   |   6
-rw-r--r--  arch/powerpc/oprofile/cell/pr_util.h       |  11
-rw-r--r--  arch/powerpc/oprofile/cell/spu_profiler.c  |  56
-rw-r--r--  arch/powerpc/oprofile/common.c             |  22
-rw-r--r--  arch/powerpc/oprofile/op_model_cell.c      | 748
-rw-r--r--  arch/x86/oprofile/op_model_amd.c           | 149
-rw-r--r--  drivers/oprofile/buffer_sync.c             | 188
-rw-r--r--  drivers/oprofile/cpu_buffer.c              | 316
-rw-r--r--  drivers/oprofile/cpu_buffer.h              |  89
-rw-r--r--  drivers/oprofile/event_buffer.c            |   4
-rw-r--r--  drivers/oprofile/oprof.c                   |   4
-rw-r--r--  drivers/oprofile/oprof.h                   |   8
-rw-r--r--  drivers/oprofile/oprofile_files.c          |  27
-rw-r--r--  include/linux/oprofile.h                   |  18
-rw-r--r--  kernel/trace/ring_buffer.c                 |   8
16 files changed, 1122 insertions(+), 534 deletions(-)
diff --git a/arch/powerpc/include/asm/cell-pmu.h b/arch/powerpc/include/asm/cell-pmu.h
index 8066eede3a0c..b4b7338ad79e 100644
--- a/arch/powerpc/include/asm/cell-pmu.h
+++ b/arch/powerpc/include/asm/cell-pmu.h
@@ -37,9 +37,11 @@ | |||
37 | #define CBE_PM_STOP_AT_MAX 0x40000000 | 37 | #define CBE_PM_STOP_AT_MAX 0x40000000 |
38 | #define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3) | 38 | #define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3) |
39 | #define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28) | 39 | #define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28) |
40 | #define CBE_PM_TRACE_BUF_OVFLW(bit) (((bit) & 0x1) << 17) | ||
40 | #define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18) | 41 | #define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18) |
41 | #define CBE_PM_FREEZE_ALL_CTRS 0x00100000 | 42 | #define CBE_PM_FREEZE_ALL_CTRS 0x00100000 |
42 | #define CBE_PM_ENABLE_EXT_TRACE 0x00008000 | 43 | #define CBE_PM_ENABLE_EXT_TRACE 0x00008000 |
44 | #define CBE_PM_SPU_ADDR_TRACE_SET(msk) (((msk) & 0x3) << 9) | ||
43 | 45 | ||
44 | /* Macros for the trace_address register. */ | 46 | /* Macros for the trace_address register. */ |
45 | #define CBE_PM_TRACE_BUF_FULL 0x00000800 | 47 | #define CBE_PM_TRACE_BUF_FULL 0x00000800 |
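The two defines added above (CBE_PM_TRACE_BUF_OVFLW and CBE_PM_SPU_ADDR_TRACE_SET) are consumed later in this series by write_pm_cntrl() in op_model_cell.c. A minimal sketch of how they compose with the existing pm_control helpers; the function name is illustrative and the field values match the SPU event-profiling setup further down:

```c
/* Sketch only: build a pm_control word for SPU event profiling using the
 * macros from cell-pmu.h. Mirrors what write_pm_cntrl() does below.
 */
static u32 build_spu_event_pm_control(void)
{
	u32 val = 0;

	val |= CBE_PM_TRACE_MODE_SET(2);	/* occurrence trace mode */
	val |= CBE_PM_TRACE_BUF_OVFLW(1);	/* new: trace buffer overflow bit (bit 17) */
	val |= CBE_PM_SPU_ADDR_TRACE_SET(0x1);	/* new: SPU address trace field (bits 9-10) */
	val |= CBE_PM_FREEZE_ALL_CTRS;		/* start with the counters frozen */

	return val;
}
```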
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
index 95035c602ba6..639dc96077ab 100644
--- a/arch/powerpc/include/asm/oprofile_impl.h
+++ b/arch/powerpc/include/asm/oprofile_impl.h
@@ -32,6 +32,12 @@ struct op_system_config { | |||
32 | unsigned long mmcr0; | 32 | unsigned long mmcr0; |
33 | unsigned long mmcr1; | 33 | unsigned long mmcr1; |
34 | unsigned long mmcra; | 34 | unsigned long mmcra; |
35 | #ifdef CONFIG_OPROFILE_CELL | ||
36 | /* Register for oprofile user tool to check cell kernel profiling | ||
37 | * support. | ||
38 | */ | ||
39 | unsigned long cell_support; | ||
40 | #endif | ||
35 | #endif | 41 | #endif |
36 | unsigned long enable_kernel; | 42 | unsigned long enable_kernel; |
37 | unsigned long enable_user; | 43 | unsigned long enable_user; |
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
index dfdbffa06818..964b93974d89 100644
--- a/arch/powerpc/oprofile/cell/pr_util.h
+++ b/arch/powerpc/oprofile/cell/pr_util.h
@@ -30,6 +30,10 @@ | |||
30 | extern struct delayed_work spu_work; | 30 | extern struct delayed_work spu_work; |
31 | extern int spu_prof_running; | 31 | extern int spu_prof_running; |
32 | 32 | ||
33 | #define TRACE_ARRAY_SIZE 1024 | ||
34 | |||
35 | extern spinlock_t oprof_spu_smpl_arry_lck; | ||
36 | |||
33 | struct spu_overlay_info { /* map of sections within an SPU overlay */ | 37 | struct spu_overlay_info { /* map of sections within an SPU overlay */ |
34 | unsigned int vma; /* SPU virtual memory address from elf */ | 38 | unsigned int vma; /* SPU virtual memory address from elf */ |
35 | unsigned int size; /* size of section from elf */ | 39 | unsigned int size; /* size of section from elf */ |
@@ -89,10 +93,11 @@ void vma_map_free(struct vma_to_fileoffset_map *map); | |||
89 | * Entry point for SPU profiling. | 93 | * Entry point for SPU profiling. |
90 | * cycles_reset is the SPU_CYCLES count value specified by the user. | 94 | * cycles_reset is the SPU_CYCLES count value specified by the user. |
91 | */ | 95 | */ |
92 | int start_spu_profiling(unsigned int cycles_reset); | 96 | int start_spu_profiling_cycles(unsigned int cycles_reset); |
93 | 97 | void start_spu_profiling_events(void); | |
94 | void stop_spu_profiling(void); | ||
95 | 98 | ||
99 | void stop_spu_profiling_cycles(void); | ||
100 | void stop_spu_profiling_events(void); | ||
96 | 101 | ||
97 | /* add the necessary profiling hooks */ | 102 | /* add the necessary profiling hooks */ |
98 | int spu_sync_start(void); | 103 | int spu_sync_start(void); |
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index 83faa958b9d4..9305ddaac512 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -18,11 +18,21 @@ | |||
18 | #include <asm/cell-pmu.h> | 18 | #include <asm/cell-pmu.h> |
19 | #include "pr_util.h" | 19 | #include "pr_util.h" |
20 | 20 | ||
21 | #define TRACE_ARRAY_SIZE 1024 | ||
22 | #define SCALE_SHIFT 14 | 21 | #define SCALE_SHIFT 14 |
23 | 22 | ||
24 | static u32 *samples; | 23 | static u32 *samples; |
25 | 24 | ||
25 | /* spu_prof_running is a flag used to indicate if spu profiling is enabled | ||
26 | * or not. It is set by the routines start_spu_profiling_cycles() and | ||
27 | * start_spu_profiling_events(). The flag is cleared by the routines | ||
28 | * stop_spu_profiling_cycles() and stop_spu_profiling_events(). These | ||
29 | * routines are called via global_start() and global_stop() which are called in | ||
30 | * op_powerpc_start() and op_powerpc_stop(). These routines are called once | ||
31 | * per system as a result of the user starting/stopping oprofile. Hence, only | ||
32 | * one CPU per user at a time will be changing the value of spu_prof_running. | ||
33 | * In general, OProfile does not protect against multiple users trying to run | ||
34 | * OProfile at a time. | ||
35 | */ | ||
26 | int spu_prof_running; | 36 | int spu_prof_running; |
27 | static unsigned int profiling_interval; | 37 | static unsigned int profiling_interval; |
28 | 38 | ||
@@ -31,8 +41,8 @@ static unsigned int profiling_interval; | |||
31 | 41 | ||
32 | #define SPU_PC_MASK 0xFFFF | 42 | #define SPU_PC_MASK 0xFFFF |
33 | 43 | ||
34 | static DEFINE_SPINLOCK(sample_array_lock); | 44 | DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck); |
35 | unsigned long sample_array_lock_flags; | 45 | unsigned long oprof_spu_smpl_arry_lck_flags; |
36 | 46 | ||
37 | void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset) | 47 | void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset) |
38 | { | 48 | { |
@@ -145,13 +155,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |||
145 | * sample array must be loaded and then processed for a given | 155 | * sample array must be loaded and then processed for a given |
146 | * cpu. The sample array is not per cpu. | 156 | * cpu. The sample array is not per cpu. |
147 | */ | 157 | */ |
148 | spin_lock_irqsave(&sample_array_lock, | 158 | spin_lock_irqsave(&oprof_spu_smpl_arry_lck, |
149 | sample_array_lock_flags); | 159 | oprof_spu_smpl_arry_lck_flags); |
150 | num_samples = cell_spu_pc_collection(cpu); | 160 | num_samples = cell_spu_pc_collection(cpu); |
151 | 161 | ||
152 | if (num_samples == 0) { | 162 | if (num_samples == 0) { |
153 | spin_unlock_irqrestore(&sample_array_lock, | 163 | spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck, |
154 | sample_array_lock_flags); | 164 | oprof_spu_smpl_arry_lck_flags); |
155 | continue; | 165 | continue; |
156 | } | 166 | } |
157 | 167 | ||
@@ -162,8 +172,8 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |||
162 | num_samples); | 172 | num_samples); |
163 | } | 173 | } |
164 | 174 | ||
165 | spin_unlock_irqrestore(&sample_array_lock, | 175 | spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck, |
166 | sample_array_lock_flags); | 176 | oprof_spu_smpl_arry_lck_flags); |
167 | 177 | ||
168 | } | 178 | } |
169 | smp_wmb(); /* insure spu event buffer updates are written */ | 179 | smp_wmb(); /* insure spu event buffer updates are written */ |
@@ -182,13 +192,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |||
182 | 192 | ||
183 | static struct hrtimer timer; | 193 | static struct hrtimer timer; |
184 | /* | 194 | /* |
185 | * Entry point for SPU profiling. | 195 | * Entry point for SPU cycle profiling. |
186 | * NOTE: SPU profiling is done system-wide, not per-CPU. | 196 | * NOTE: SPU profiling is done system-wide, not per-CPU. |
187 | * | 197 | * |
188 | * cycles_reset is the count value specified by the user when | 198 | * cycles_reset is the count value specified by the user when |
189 | * setting up OProfile to count SPU_CYCLES. | 199 | * setting up OProfile to count SPU_CYCLES. |
190 | */ | 200 | */ |
191 | int start_spu_profiling(unsigned int cycles_reset) | 201 | int start_spu_profiling_cycles(unsigned int cycles_reset) |
192 | { | 202 | { |
193 | ktime_t kt; | 203 | ktime_t kt; |
194 | 204 | ||
@@ -212,10 +222,30 @@ int start_spu_profiling(unsigned int cycles_reset) | |||
212 | return 0; | 222 | return 0; |
213 | } | 223 | } |
214 | 224 | ||
215 | void stop_spu_profiling(void) | 225 | /* |
226 | * Entry point for SPU event profiling. | ||
227 | * NOTE: SPU profiling is done system-wide, not per-CPU. | ||
228 | * | ||
229 | * cycles_reset is the count value specified by the user when | ||
230 | * setting up OProfile to count SPU_CYCLES. | ||
231 | */ | ||
232 | void start_spu_profiling_events(void) | ||
233 | { | ||
234 | spu_prof_running = 1; | ||
235 | schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE); | ||
236 | |||
237 | return; | ||
238 | } | ||
239 | |||
240 | void stop_spu_profiling_cycles(void) | ||
216 | { | 241 | { |
217 | spu_prof_running = 0; | 242 | spu_prof_running = 0; |
218 | hrtimer_cancel(&timer); | 243 | hrtimer_cancel(&timer); |
219 | kfree(samples); | 244 | kfree(samples); |
220 | pr_debug("SPU_PROF: stop_spu_profiling issued\n"); | 245 | pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n"); |
246 | } | ||
247 | |||
248 | void stop_spu_profiling_events(void) | ||
249 | { | ||
250 | spu_prof_running = 0; | ||
221 | } | 251 | } |
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 17807acb05d9..21f16edf6c8d 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -132,6 +132,28 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root) | |||
132 | oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); | 132 | oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); |
133 | oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); | 133 | oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); |
134 | oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); | 134 | oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); |
135 | #ifdef CONFIG_OPROFILE_CELL | ||
136 | /* create a file the user tool can check to see what level of profiling | ||
137 | * support exists with this kernel. Initialize the bit mask to indicate | ||
138 | * what support the kernel has: | ||
139 | * bit 0 - Supports SPU event profiling in addition to PPU | ||
140 | * event and cycles; and SPU cycle profiling | ||
141 | * bits 1-31 - Currently unused. | ||
142 | * | ||
143 | * If the file does not exist, then the kernel only supports SPU | ||
144 | * cycle profiling, PPU event and cycle profiling. | ||
145 | */ | ||
146 | oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support); | ||
147 | sys.cell_support = 0x1; /* Note, the user OProfile tool must check | ||
148 | * that this bit is set before attempting to | ||
149 | * use SPU event profiling. Older kernels | ||
150 | * will not have this file, hence the user | ||
151 | * tool is not allowed to do SPU event | ||
152 | * profiling on older kernels. Older kernels | ||
153 | * will accept SPU events but collected data | ||
154 | * is garbage. | ||
155 | */ | ||
156 | #endif | ||
135 | #endif | 157 | #endif |
136 | 158 | ||
137 | for (i = 0; i < model->num_counters; ++i) { | 159 | for (i = 0; i < model->num_counters; ++i) { |
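The cell_support bitmask created above is meant to be read by the user-space OProfile tool before it requests SPU event profiling. A hedged user-space sketch of that check; the /dev/oprofile mount point, the decimal file format, and the helper name are assumptions, not part of this patch:

```c
#include <stdio.h>

/* Returns non-zero if the running kernel advertises SPU event profiling
 * (bit 0 of cell_support). If the file is missing we are on an older
 * kernel that only supports SPU cycle and PPU profiling.
 */
static int kernel_has_spu_event_profiling(void)
{
	unsigned long mask = 0;
	FILE *f = fopen("/dev/oprofile/cell_support", "r");	/* assumed mount point */

	if (!f)
		return 0;
	if (fscanf(f, "%lu", &mask) != 1)
		mask = 0;
	fclose(f);
	return (int)(mask & 0x1);	/* bit 0: SPU event profiling supported */
}
```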
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 25a4ec2514a3..ae06c6236d9c 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -40,14 +40,15 @@ | |||
40 | #include "../platforms/cell/interrupt.h" | 40 | #include "../platforms/cell/interrupt.h" |
41 | #include "cell/pr_util.h" | 41 | #include "cell/pr_util.h" |
42 | 42 | ||
43 | static void cell_global_stop_spu(void); | 43 | #define PPU_PROFILING 0 |
44 | #define SPU_PROFILING_CYCLES 1 | ||
45 | #define SPU_PROFILING_EVENTS 2 | ||
44 | 46 | ||
45 | /* | 47 | #define SPU_EVENT_NUM_START 4100 |
46 | * spu_cycle_reset is the number of cycles between samples. | 48 | #define SPU_EVENT_NUM_STOP 4399 |
47 | * This variable is used for SPU profiling and should ONLY be set | 49 | #define SPU_PROFILE_EVENT_ADDR 4363 /* spu, address trace, decimal */ |
48 | * at the beginning of cell_reg_setup; otherwise, it's read-only. | 50 | #define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146 /* sub unit set to zero */ |
49 | */ | 51 | #define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186 /* sub unit set to zero */ |
50 | static unsigned int spu_cycle_reset; | ||
51 | 52 | ||
52 | #define NUM_SPUS_PER_NODE 8 | 53 | #define NUM_SPUS_PER_NODE 8 |
53 | #define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */ | 54 | #define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */ |
@@ -66,6 +67,21 @@ static unsigned int spu_cycle_reset; | |||
66 | 67 | ||
67 | #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */ | 68 | #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */ |
68 | 69 | ||
70 | /* Minimum HW interval timer setting to send a value to the trace buffer is 10 cycles. | ||
71 | * To configure counter to send value every N cycles set counter to | ||
72 | * 2^32 - 1 - N. | ||
73 | */ | ||
74 | #define NUM_INTERVAL_CYC 0xFFFFFFFF - 10 | ||
75 | |||
76 | /* | ||
77 | * spu_cycle_reset is the number of cycles between samples. | ||
78 | * This variable is used for SPU profiling and should ONLY be set | ||
79 | * at the beginning of cell_reg_setup; otherwise, it's read-only. | ||
80 | */ | ||
81 | static unsigned int spu_cycle_reset; | ||
82 | static unsigned int profiling_mode; | ||
83 | static int spu_evnt_phys_spu_indx; | ||
84 | |||
69 | struct pmc_cntrl_data { | 85 | struct pmc_cntrl_data { |
70 | unsigned long vcntr; | 86 | unsigned long vcntr; |
71 | unsigned long evnts; | 87 | unsigned long evnts; |
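The NUM_INTERVAL_CYC comment above gives the encoding: to have the hardware push a value to the trace buffer every N cycles, the interval timer is programmed with 2^32 - 1 - N, so the define works out to 0xFFFFFFF5 for N = 10. A small sketch of that arithmetic (the helper name is illustrative, not part of the patch):

```c
#include <stdint.h>

/* Per the comment in op_model_cell.c: program pm_interval with
 * (2^32 - 1) - N to store a value to the trace buffer every N cycles.
 */
static inline uint32_t pm_interval_for(uint32_t n_cycles)
{
	return UINT32_MAX - n_cycles;	/* N = 10 -> 0xFFFFFFF5 == NUM_INTERVAL_CYC */
}
```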
@@ -105,6 +121,8 @@ struct pm_cntrl { | |||
105 | u16 trace_mode; | 121 | u16 trace_mode; |
106 | u16 freeze; | 122 | u16 freeze; |
107 | u16 count_mode; | 123 | u16 count_mode; |
124 | u16 spu_addr_trace; | ||
125 | u8 trace_buf_ovflw; | ||
108 | }; | 126 | }; |
109 | 127 | ||
110 | static struct { | 128 | static struct { |
@@ -122,7 +140,7 @@ static struct { | |||
122 | #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2) | 140 | #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2) |
123 | 141 | ||
124 | static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values); | 142 | static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values); |
125 | 143 | static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE]; | |
126 | static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS]; | 144 | static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS]; |
127 | 145 | ||
128 | /* | 146 | /* |
@@ -152,6 +170,7 @@ static u32 hdw_thread; | |||
152 | 170 | ||
153 | static u32 virt_cntr_inter_mask; | 171 | static u32 virt_cntr_inter_mask; |
154 | static struct timer_list timer_virt_cntr; | 172 | static struct timer_list timer_virt_cntr; |
173 | static struct timer_list timer_spu_event_swap; | ||
155 | 174 | ||
156 | /* | 175 | /* |
157 | * pm_signal needs to be global since it is initialized in | 176 | * pm_signal needs to be global since it is initialized in |
@@ -165,7 +184,7 @@ static int spu_rtas_token; /* token for SPU cycle profiling */ | |||
165 | static u32 reset_value[NR_PHYS_CTRS]; | 184 | static u32 reset_value[NR_PHYS_CTRS]; |
166 | static int num_counters; | 185 | static int num_counters; |
167 | static int oprofile_running; | 186 | static int oprofile_running; |
168 | static DEFINE_SPINLOCK(virt_cntr_lock); | 187 | static DEFINE_SPINLOCK(cntr_lock); |
169 | 188 | ||
170 | static u32 ctr_enabled; | 189 | static u32 ctr_enabled; |
171 | 190 | ||
@@ -336,13 +355,13 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask) | |||
336 | for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) { | 355 | for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) { |
337 | if (bus_word & (1 << i)) { | 356 | if (bus_word & (1 << i)) { |
338 | pm_regs.debug_bus_control |= | 357 | pm_regs.debug_bus_control |= |
339 | (bus_type << (30 - (2 * i))); | 358 | (bus_type << (30 - (2 * i))); |
340 | 359 | ||
341 | for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) { | 360 | for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) { |
342 | if (input_bus[j] == 0xff) { | 361 | if (input_bus[j] == 0xff) { |
343 | input_bus[j] = i; | 362 | input_bus[j] = i; |
344 | pm_regs.group_control |= | 363 | pm_regs.group_control |= |
345 | (i << (30 - (2 * j))); | 364 | (i << (30 - (2 * j))); |
346 | 365 | ||
347 | break; | 366 | break; |
348 | } | 367 | } |
@@ -367,12 +386,16 @@ static void write_pm_cntrl(int cpu) | |||
367 | if (pm_regs.pm_cntrl.stop_at_max == 1) | 386 | if (pm_regs.pm_cntrl.stop_at_max == 1) |
368 | val |= CBE_PM_STOP_AT_MAX; | 387 | val |= CBE_PM_STOP_AT_MAX; |
369 | 388 | ||
370 | if (pm_regs.pm_cntrl.trace_mode == 1) | 389 | if (pm_regs.pm_cntrl.trace_mode != 0) |
371 | val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode); | 390 | val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode); |
372 | 391 | ||
392 | if (pm_regs.pm_cntrl.trace_buf_ovflw == 1) | ||
393 | val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw); | ||
373 | if (pm_regs.pm_cntrl.freeze == 1) | 394 | if (pm_regs.pm_cntrl.freeze == 1) |
374 | val |= CBE_PM_FREEZE_ALL_CTRS; | 395 | val |= CBE_PM_FREEZE_ALL_CTRS; |
375 | 396 | ||
397 | val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace); | ||
398 | |||
376 | /* | 399 | /* |
377 | * Routine set_count_mode must be called previously to set | 400 | * Routine set_count_mode must be called previously to set |
378 | * the count mode based on the user selection of user and kernel. | 401 | * the count mode based on the user selection of user and kernel. |
@@ -441,7 +464,7 @@ static void cell_virtual_cntr(unsigned long data) | |||
441 | * not both playing with the counters on the same node. | 464 | * not both playing with the counters on the same node. |
442 | */ | 465 | */ |
443 | 466 | ||
444 | spin_lock_irqsave(&virt_cntr_lock, flags); | 467 | spin_lock_irqsave(&cntr_lock, flags); |
445 | 468 | ||
446 | prev_hdw_thread = hdw_thread; | 469 | prev_hdw_thread = hdw_thread; |
447 | 470 | ||
@@ -480,7 +503,7 @@ static void cell_virtual_cntr(unsigned long data) | |||
480 | cbe_disable_pm_interrupts(cpu); | 503 | cbe_disable_pm_interrupts(cpu); |
481 | for (i = 0; i < num_counters; i++) { | 504 | for (i = 0; i < num_counters; i++) { |
482 | per_cpu(pmc_values, cpu + prev_hdw_thread)[i] | 505 | per_cpu(pmc_values, cpu + prev_hdw_thread)[i] |
483 | = cbe_read_ctr(cpu, i); | 506 | = cbe_read_ctr(cpu, i); |
484 | 507 | ||
485 | if (per_cpu(pmc_values, cpu + next_hdw_thread)[i] | 508 | if (per_cpu(pmc_values, cpu + next_hdw_thread)[i] |
486 | == 0xFFFFFFFF) | 509 | == 0xFFFFFFFF) |
@@ -527,7 +550,7 @@ static void cell_virtual_cntr(unsigned long data) | |||
527 | cbe_enable_pm(cpu); | 550 | cbe_enable_pm(cpu); |
528 | } | 551 | } |
529 | 552 | ||
530 | spin_unlock_irqrestore(&virt_cntr_lock, flags); | 553 | spin_unlock_irqrestore(&cntr_lock, flags); |
531 | 554 | ||
532 | mod_timer(&timer_virt_cntr, jiffies + HZ / 10); | 555 | mod_timer(&timer_virt_cntr, jiffies + HZ / 10); |
533 | } | 556 | } |
@@ -541,38 +564,146 @@ static void start_virt_cntrs(void) | |||
541 | add_timer(&timer_virt_cntr); | 564 | add_timer(&timer_virt_cntr); |
542 | } | 565 | } |
543 | 566 | ||
544 | /* This function is called once for all cpus combined */ | 567 | static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr, |
545 | static int cell_reg_setup(struct op_counter_config *ctr, | ||
546 | struct op_system_config *sys, int num_ctrs) | 568 | struct op_system_config *sys, int num_ctrs) |
547 | { | 569 | { |
548 | int i, j, cpu; | 570 | spu_cycle_reset = ctr[0].count; |
549 | spu_cycle_reset = 0; | ||
550 | 571 | ||
551 | if (ctr[0].event == SPU_CYCLES_EVENT_NUM) { | 572 | /* |
552 | spu_cycle_reset = ctr[0].count; | 573 | * Each node will need to make the rtas call to start |
574 | * and stop SPU profiling. Get the token once and store it. | ||
575 | */ | ||
576 | spu_rtas_token = rtas_token("ibm,cbe-spu-perftools"); | ||
577 | |||
578 | if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) { | ||
579 | printk(KERN_ERR | ||
580 | "%s: rtas token ibm,cbe-spu-perftools unknown\n", | ||
581 | __func__); | ||
582 | return -EIO; | ||
583 | } | ||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | /* Unfortunately, the hardware will only support event profiling | ||
588 | * on one SPU per node at a time. Therefore, we must time slice | ||
589 | * the profiling across all SPUs in the node. Note, we do this | ||
590 | * in parallel for each node. The following routine is called | ||
591 | * periodically based on kernel timer to switch which SPU is | ||
592 | * being monitored in a round robbin fashion. | ||
593 | */ | ||
594 | static void spu_evnt_swap(unsigned long data) | ||
595 | { | ||
596 | int node; | ||
597 | int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx; | ||
598 | unsigned long flags; | ||
599 | int cpu; | ||
600 | int ret; | ||
601 | u32 interrupt_mask; | ||
602 | |||
603 | |||
604 | /* enable interrupts on cntr 0 */ | ||
605 | interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0); | ||
606 | |||
607 | hdw_thread = 0; | ||
608 | |||
609 | /* Make sure spu event interrupt handler and spu event swap | ||
610 | * don't access the counters simultaneously. | ||
611 | */ | ||
612 | spin_lock_irqsave(&cntr_lock, flags); | ||
613 | |||
614 | cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx; | ||
615 | |||
616 | if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE) | ||
617 | spu_evnt_phys_spu_indx = 0; | ||
618 | |||
619 | pm_signal[0].sub_unit = spu_evnt_phys_spu_indx; | ||
620 | pm_signal[1].sub_unit = spu_evnt_phys_spu_indx; | ||
621 | pm_signal[2].sub_unit = spu_evnt_phys_spu_indx; | ||
622 | |||
623 | /* switch the SPU being profiled on each node */ | ||
624 | for_each_online_cpu(cpu) { | ||
625 | if (cbe_get_hw_thread_id(cpu)) | ||
626 | continue; | ||
627 | |||
628 | node = cbe_cpu_to_node(cpu); | ||
629 | cur_phys_spu = (node * NUM_SPUS_PER_NODE) | ||
630 | + cur_spu_evnt_phys_spu_indx; | ||
631 | nxt_phys_spu = (node * NUM_SPUS_PER_NODE) | ||
632 | + spu_evnt_phys_spu_indx; | ||
553 | 633 | ||
554 | /* | 634 | /* |
555 | * Each node will need to make the rtas call to start | 635 | * stop counters, save counter values, restore counts |
556 | * and stop SPU profiling. Get the token once and store it. | 636 | * for previous physical SPU |
557 | */ | 637 | */ |
558 | spu_rtas_token = rtas_token("ibm,cbe-spu-perftools"); | 638 | cbe_disable_pm(cpu); |
639 | cbe_disable_pm_interrupts(cpu); | ||
559 | 640 | ||
560 | if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) { | 641 | spu_pm_cnt[cur_phys_spu] |
561 | printk(KERN_ERR | 642 | = cbe_read_ctr(cpu, 0); |
562 | "%s: rtas token ibm,cbe-spu-perftools unknown\n", | 643 | |
563 | __func__); | 644 | /* restore previous count for the next spu to sample */ |
564 | return -EIO; | 645 | /* NOTE, hardware issue, counter will not start if the |
565 | } | 646 | * counter value is at max (0xFFFFFFFF). |
647 | */ | ||
648 | if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF) | ||
649 | cbe_write_ctr(cpu, 0, 0xFFFFFFF0); | ||
650 | else | ||
651 | cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]); | ||
652 | |||
653 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | ||
654 | |||
655 | /* setup the debug bus measure the one event and | ||
656 | * the two events to route the next SPU's PC on | ||
657 | * the debug bus | ||
658 | */ | ||
659 | ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3); | ||
660 | if (ret) | ||
661 | printk(KERN_ERR "%s: pm_rtas_activate_signals failed, " | ||
662 | "SPU event swap\n", __func__); | ||
663 | |||
664 | /* clear the trace buffer, don't want to take PC for | ||
665 | * previous SPU*/ | ||
666 | cbe_write_pm(cpu, trace_address, 0); | ||
667 | |||
668 | enable_ctr(cpu, 0, pm_regs.pm07_cntrl); | ||
669 | |||
670 | /* Enable interrupts on the CPU thread that is starting */ | ||
671 | cbe_enable_pm_interrupts(cpu, hdw_thread, | ||
672 | interrupt_mask); | ||
673 | cbe_enable_pm(cpu); | ||
566 | } | 674 | } |
567 | 675 | ||
568 | pm_rtas_token = rtas_token("ibm,cbe-perftools"); | 676 | spin_unlock_irqrestore(&cntr_lock, flags); |
569 | 677 | ||
678 | /* swap approximately every 40 ms (HZ / 25) */ | ||
679 | mod_timer(&timer_spu_event_swap, jiffies + HZ / 25); | ||
680 | } | ||
681 | |||
682 | static void start_spu_event_swap(void) | ||
683 | { | ||
684 | init_timer(&timer_spu_event_swap); | ||
685 | timer_spu_event_swap.function = spu_evnt_swap; | ||
686 | timer_spu_event_swap.data = 0UL; | ||
687 | timer_spu_event_swap.expires = jiffies + HZ / 25; | ||
688 | add_timer(&timer_spu_event_swap); | ||
689 | } | ||
690 | |||
691 | static int cell_reg_setup_spu_events(struct op_counter_config *ctr, | ||
692 | struct op_system_config *sys, int num_ctrs) | ||
693 | { | ||
694 | int i; | ||
695 | |||
696 | /* routine is called once for all nodes */ | ||
697 | |||
698 | spu_evnt_phys_spu_indx = 0; | ||
570 | /* | 699 | /* |
571 | * For all events excetp PPU CYCLEs, each node will need to make | 700 | * For all events except PPU CYCLEs, each node will need to make |
572 | * the rtas cbe-perftools call to setup and reset the debug bus. | 701 | * the rtas cbe-perftools call to setup and reset the debug bus. |
573 | * Make the token lookup call once and store it in the global | 702 | * Make the token lookup call once and store it in the global |
574 | * variable pm_rtas_token. | 703 | * variable pm_rtas_token. |
575 | */ | 704 | */ |
705 | pm_rtas_token = rtas_token("ibm,cbe-perftools"); | ||
706 | |||
576 | if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) { | 707 | if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) { |
577 | printk(KERN_ERR | 708 | printk(KERN_ERR |
578 | "%s: rtas token ibm,cbe-perftools unknown\n", | 709 | "%s: rtas token ibm,cbe-perftools unknown\n", |
@@ -580,6 +711,58 @@ static int cell_reg_setup(struct op_counter_config *ctr, | |||
580 | return -EIO; | 711 | return -EIO; |
581 | } | 712 | } |
582 | 713 | ||
714 | /* setup the pm_control register settings, | ||
715 | * settings will be written per node by the | ||
716 | * cell_cpu_setup() function. | ||
717 | */ | ||
718 | pm_regs.pm_cntrl.trace_buf_ovflw = 1; | ||
719 | |||
720 | /* Use the occurrence trace mode to have SPU PC saved | ||
721 | * to the trace buffer. Occurrence data in trace buffer | ||
722 | * is not used. Bit 2 must be set to store SPU addresses. | ||
723 | */ | ||
724 | pm_regs.pm_cntrl.trace_mode = 2; | ||
725 | |||
726 | pm_regs.pm_cntrl.spu_addr_trace = 0x1; /* using debug bus | ||
727 | event 2 & 3 */ | ||
728 | |||
729 | /* setup the debug bus event array with the SPU PC routing events. | ||
730 | * Note, pm_signal[0] will be filled in by set_pm_event() call below. | ||
731 | */ | ||
732 | pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100; | ||
733 | pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A); | ||
734 | pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100; | ||
735 | pm_signal[1].sub_unit = spu_evnt_phys_spu_indx; | ||
736 | |||
737 | pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100; | ||
738 | pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B); | ||
739 | pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100; | ||
740 | pm_signal[2].sub_unit = spu_evnt_phys_spu_indx; | ||
741 | |||
742 | /* Set the user selected spu event to profile on, | ||
743 | * note, only one SPU profiling event is supported | ||
744 | */ | ||
745 | num_counters = 1; /* Only support one SPU event at a time */ | ||
746 | set_pm_event(0, ctr[0].event, ctr[0].unit_mask); | ||
747 | |||
748 | reset_value[0] = 0xFFFFFFFF - ctr[0].count; | ||
749 | |||
750 | /* global, used by cell_cpu_setup */ | ||
751 | ctr_enabled |= 1; | ||
752 | |||
753 | /* Initialize the count for each SPU to the reset value */ | ||
754 | for (i=0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++) | ||
755 | spu_pm_cnt[i] = reset_value[0]; | ||
756 | |||
757 | return 0; | ||
758 | } | ||
759 | |||
760 | static int cell_reg_setup_ppu(struct op_counter_config *ctr, | ||
761 | struct op_system_config *sys, int num_ctrs) | ||
762 | { | ||
763 | /* routine is called once for all nodes */ | ||
764 | int i, j, cpu; | ||
765 | |||
583 | num_counters = num_ctrs; | 766 | num_counters = num_ctrs; |
584 | 767 | ||
585 | if (unlikely(num_ctrs > NR_PHYS_CTRS)) { | 768 | if (unlikely(num_ctrs > NR_PHYS_CTRS)) { |
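The pm_signal setup above encodes a debug-bus event number as signal_group * 100 + bit, so the constants are easy to verify by hand: 4363 / 100 = 43 and 4363 % 100 = 63. A small sketch of that decode (helper name is illustrative, not part of the patch):

```c
/* Decode a Cell debug-bus event number of the form group*100 + bit,
 * as used for SPU_PROFILE_EVENT_ADDR (4363 -> group 43, bit 63).
 */
static void decode_debug_bus_event(int event, int *group, int *bit)
{
	*group = event / 100;
	*bit = event % 100;
}
```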
@@ -589,14 +772,6 @@ static int cell_reg_setup(struct op_counter_config *ctr, | |||
589 | __func__); | 772 | __func__); |
590 | return -EIO; | 773 | return -EIO; |
591 | } | 774 | } |
592 | pm_regs.group_control = 0; | ||
593 | pm_regs.debug_bus_control = 0; | ||
594 | |||
595 | /* setup the pm_control register */ | ||
596 | memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl)); | ||
597 | pm_regs.pm_cntrl.stop_at_max = 1; | ||
598 | pm_regs.pm_cntrl.trace_mode = 0; | ||
599 | pm_regs.pm_cntrl.freeze = 1; | ||
600 | 775 | ||
601 | set_count_mode(sys->enable_kernel, sys->enable_user); | 776 | set_count_mode(sys->enable_kernel, sys->enable_user); |
602 | 777 | ||
@@ -665,6 +840,63 @@ static int cell_reg_setup(struct op_counter_config *ctr, | |||
665 | } | 840 | } |
666 | 841 | ||
667 | 842 | ||
843 | /* This function is called once for all cpus combined */ | ||
844 | static int cell_reg_setup(struct op_counter_config *ctr, | ||
845 | struct op_system_config *sys, int num_ctrs) | ||
846 | { | ||
847 | int ret=0; | ||
848 | spu_cycle_reset = 0; | ||
849 | |||
850 | /* initialize the spu_addr_trace value, will be reset if | ||
851 | * doing spu event profiling. | ||
852 | */ | ||
853 | pm_regs.group_control = 0; | ||
854 | pm_regs.debug_bus_control = 0; | ||
855 | pm_regs.pm_cntrl.stop_at_max = 1; | ||
856 | pm_regs.pm_cntrl.trace_mode = 0; | ||
857 | pm_regs.pm_cntrl.freeze = 1; | ||
858 | pm_regs.pm_cntrl.trace_buf_ovflw = 0; | ||
859 | pm_regs.pm_cntrl.spu_addr_trace = 0; | ||
860 | |||
861 | /* | ||
862 | * For all events except PPU CYCLEs, each node will need to make | ||
863 | * the rtas cbe-perftools call to setup and reset the debug bus. | ||
864 | * Make the token lookup call once and store it in the global | ||
865 | * variable pm_rtas_token. | ||
866 | */ | ||
867 | pm_rtas_token = rtas_token("ibm,cbe-perftools"); | ||
868 | |||
869 | if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) { | ||
870 | printk(KERN_ERR | ||
871 | "%s: rtas token ibm,cbe-perftools unknown\n", | ||
872 | __func__); | ||
873 | return -EIO; | ||
874 | } | ||
875 | |||
876 | if (ctr[0].event == SPU_CYCLES_EVENT_NUM) { | ||
877 | profiling_mode = SPU_PROFILING_CYCLES; | ||
878 | ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs); | ||
879 | } else if ((ctr[0].event >= SPU_EVENT_NUM_START) && | ||
880 | (ctr[0].event <= SPU_EVENT_NUM_STOP)) { | ||
881 | profiling_mode = SPU_PROFILING_EVENTS; | ||
882 | spu_cycle_reset = ctr[0].count; | ||
883 | |||
884 | /* for SPU event profiling, need to setup the | ||
885 | * pm_signal array with the events to route the | ||
886 | * SPU PC before making the FW call. Note, only | ||
887 | * one SPU event for profiling can be specified | ||
888 | * at a time. | ||
889 | */ | ||
890 | cell_reg_setup_spu_events(ctr, sys, num_ctrs); | ||
891 | } else { | ||
892 | profiling_mode = PPU_PROFILING; | ||
893 | ret = cell_reg_setup_ppu(ctr, sys, num_ctrs); | ||
894 | } | ||
895 | |||
896 | return ret; | ||
897 | } | ||
898 | |||
899 | |||
668 | 900 | ||
669 | /* This function is called once for each cpu */ | 901 | /* This function is called once for each cpu */ |
670 | static int cell_cpu_setup(struct op_counter_config *cntr) | 902 | static int cell_cpu_setup(struct op_counter_config *cntr) |
@@ -672,8 +904,13 @@ static int cell_cpu_setup(struct op_counter_config *cntr) | |||
672 | u32 cpu = smp_processor_id(); | 904 | u32 cpu = smp_processor_id(); |
673 | u32 num_enabled = 0; | 905 | u32 num_enabled = 0; |
674 | int i; | 906 | int i; |
907 | int ret; | ||
675 | 908 | ||
676 | if (spu_cycle_reset) | 909 | /* Cycle based SPU profiling does not use the performance |
910 | * counters. The trace array is configured to collect | ||
911 | * the data. | ||
912 | */ | ||
913 | if (profiling_mode == SPU_PROFILING_CYCLES) | ||
677 | return 0; | 914 | return 0; |
678 | 915 | ||
679 | /* There is one performance monitor per processor chip (i.e. node), | 916 | /* There is one performance monitor per processor chip (i.e. node), |
@@ -686,7 +923,6 @@ static int cell_cpu_setup(struct op_counter_config *cntr) | |||
686 | cbe_disable_pm(cpu); | 923 | cbe_disable_pm(cpu); |
687 | cbe_disable_pm_interrupts(cpu); | 924 | cbe_disable_pm_interrupts(cpu); |
688 | 925 | ||
689 | cbe_write_pm(cpu, pm_interval, 0); | ||
690 | cbe_write_pm(cpu, pm_start_stop, 0); | 926 | cbe_write_pm(cpu, pm_start_stop, 0); |
691 | cbe_write_pm(cpu, group_control, pm_regs.group_control); | 927 | cbe_write_pm(cpu, group_control, pm_regs.group_control); |
692 | cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control); | 928 | cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control); |
@@ -703,7 +939,20 @@ static int cell_cpu_setup(struct op_counter_config *cntr) | |||
703 | * The pm_rtas_activate_signals will return -EIO if the FW | 939 | * The pm_rtas_activate_signals will return -EIO if the FW |
704 | * call failed. | 940 | * call failed. |
705 | */ | 941 | */ |
706 | return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled); | 942 | if (profiling_mode == SPU_PROFILING_EVENTS) { |
943 | /* For SPU event profiling also need to setup the | ||
944 | * pm interval timer | ||
945 | */ | ||
946 | ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), | ||
947 | num_enabled+2); | ||
948 | /* store PC from debug bus to Trace buffer as often | ||
949 | * as possible (every 10 cycles) | ||
950 | */ | ||
951 | cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC); | ||
952 | return ret; | ||
953 | } else | ||
954 | return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), | ||
955 | num_enabled); | ||
707 | } | 956 | } |
708 | 957 | ||
709 | #define ENTRIES 303 | 958 | #define ENTRIES 303 |
@@ -885,7 +1134,122 @@ static struct notifier_block cpu_freq_notifier_block = { | |||
885 | }; | 1134 | }; |
886 | #endif | 1135 | #endif |
887 | 1136 | ||
888 | static int cell_global_start_spu(struct op_counter_config *ctr) | 1137 | /* |
1138 | * Note the generic OProfile stop calls do not support returning | ||
1139 | * an error on stop. Hence, will not return an error if the FW | ||
1140 | * calls fail on stop. Failure to reset the debug bus is not an issue. | ||
1141 | * Failure to disable the SPU profiling is not an issue. The FW calls | ||
1142 | * to enable the performance counters and debug bus will work even if | ||
1143 | * the hardware was not cleanly reset. | ||
1144 | */ | ||
1145 | static void cell_global_stop_spu_cycles(void) | ||
1146 | { | ||
1147 | int subfunc, rtn_value; | ||
1148 | unsigned int lfsr_value; | ||
1149 | int cpu; | ||
1150 | |||
1151 | oprofile_running = 0; | ||
1152 | smp_wmb(); | ||
1153 | |||
1154 | #ifdef CONFIG_CPU_FREQ | ||
1155 | cpufreq_unregister_notifier(&cpu_freq_notifier_block, | ||
1156 | CPUFREQ_TRANSITION_NOTIFIER); | ||
1157 | #endif | ||
1158 | |||
1159 | for_each_online_cpu(cpu) { | ||
1160 | if (cbe_get_hw_thread_id(cpu)) | ||
1161 | continue; | ||
1162 | |||
1163 | subfunc = 3; /* | ||
1164 | * 2 - activate SPU tracing, | ||
1165 | * 3 - deactivate | ||
1166 | */ | ||
1167 | lfsr_value = 0x8f100000; | ||
1168 | |||
1169 | rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL, | ||
1170 | subfunc, cbe_cpu_to_node(cpu), | ||
1171 | lfsr_value); | ||
1172 | |||
1173 | if (unlikely(rtn_value != 0)) { | ||
1174 | printk(KERN_ERR | ||
1175 | "%s: rtas call ibm,cbe-spu-perftools " \ | ||
1176 | "failed, return = %d\n", | ||
1177 | __func__, rtn_value); | ||
1178 | } | ||
1179 | |||
1180 | /* Deactivate the signals */ | ||
1181 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | ||
1182 | } | ||
1183 | |||
1184 | stop_spu_profiling_cycles(); | ||
1185 | } | ||
1186 | |||
1187 | static void cell_global_stop_spu_events(void) | ||
1188 | { | ||
1189 | int cpu; | ||
1190 | oprofile_running = 0; | ||
1191 | |||
1192 | stop_spu_profiling_events(); | ||
1193 | smp_wmb(); | ||
1194 | |||
1195 | for_each_online_cpu(cpu) { | ||
1196 | if (cbe_get_hw_thread_id(cpu)) | ||
1197 | continue; | ||
1198 | |||
1199 | cbe_sync_irq(cbe_cpu_to_node(cpu)); | ||
1200 | /* Stop the counters */ | ||
1201 | cbe_disable_pm(cpu); | ||
1202 | cbe_write_pm07_control(cpu, 0, 0); | ||
1203 | |||
1204 | /* Deactivate the signals */ | ||
1205 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | ||
1206 | |||
1207 | /* Deactivate interrupts */ | ||
1208 | cbe_disable_pm_interrupts(cpu); | ||
1209 | } | ||
1210 | del_timer_sync(&timer_spu_event_swap); | ||
1211 | } | ||
1212 | |||
1213 | static void cell_global_stop_ppu(void) | ||
1214 | { | ||
1215 | int cpu; | ||
1216 | |||
1217 | /* | ||
1218 | * This routine will be called once for the system. | ||
1219 | * There is one performance monitor per node, so we | ||
1220 | * only need to perform this function once per node. | ||
1221 | */ | ||
1222 | del_timer_sync(&timer_virt_cntr); | ||
1223 | oprofile_running = 0; | ||
1224 | smp_wmb(); | ||
1225 | |||
1226 | for_each_online_cpu(cpu) { | ||
1227 | if (cbe_get_hw_thread_id(cpu)) | ||
1228 | continue; | ||
1229 | |||
1230 | cbe_sync_irq(cbe_cpu_to_node(cpu)); | ||
1231 | /* Stop the counters */ | ||
1232 | cbe_disable_pm(cpu); | ||
1233 | |||
1234 | /* Deactivate the signals */ | ||
1235 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | ||
1236 | |||
1237 | /* Deactivate interrupts */ | ||
1238 | cbe_disable_pm_interrupts(cpu); | ||
1239 | } | ||
1240 | } | ||
1241 | |||
1242 | static void cell_global_stop(void) | ||
1243 | { | ||
1244 | if (profiling_mode == PPU_PROFILING) | ||
1245 | cell_global_stop_ppu(); | ||
1246 | else if (profiling_mode == SPU_PROFILING_EVENTS) | ||
1247 | cell_global_stop_spu_events(); | ||
1248 | else | ||
1249 | cell_global_stop_spu_cycles(); | ||
1250 | } | ||
1251 | |||
1252 | static int cell_global_start_spu_cycles(struct op_counter_config *ctr) | ||
889 | { | 1253 | { |
890 | int subfunc; | 1254 | int subfunc; |
891 | unsigned int lfsr_value; | 1255 | unsigned int lfsr_value; |
@@ -951,18 +1315,18 @@ static int cell_global_start_spu(struct op_counter_config *ctr) | |||
951 | 1315 | ||
952 | /* start profiling */ | 1316 | /* start profiling */ |
953 | ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc, | 1317 | ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc, |
954 | cbe_cpu_to_node(cpu), lfsr_value); | 1318 | cbe_cpu_to_node(cpu), lfsr_value); |
955 | 1319 | ||
956 | if (unlikely(ret != 0)) { | 1320 | if (unlikely(ret != 0)) { |
957 | printk(KERN_ERR | 1321 | printk(KERN_ERR |
958 | "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n", | 1322 | "%s: rtas call ibm,cbe-spu-perftools failed, " \ |
959 | __func__, ret); | 1323 | "return = %d\n", __func__, ret); |
960 | rtas_error = -EIO; | 1324 | rtas_error = -EIO; |
961 | goto out; | 1325 | goto out; |
962 | } | 1326 | } |
963 | } | 1327 | } |
964 | 1328 | ||
965 | rtas_error = start_spu_profiling(spu_cycle_reset); | 1329 | rtas_error = start_spu_profiling_cycles(spu_cycle_reset); |
966 | if (rtas_error) | 1330 | if (rtas_error) |
967 | goto out_stop; | 1331 | goto out_stop; |
968 | 1332 | ||
@@ -970,11 +1334,74 @@ static int cell_global_start_spu(struct op_counter_config *ctr) | |||
970 | return 0; | 1334 | return 0; |
971 | 1335 | ||
972 | out_stop: | 1336 | out_stop: |
973 | cell_global_stop_spu(); /* clean up the PMU/debug bus */ | 1337 | cell_global_stop_spu_cycles(); /* clean up the PMU/debug bus */ |
974 | out: | 1338 | out: |
975 | return rtas_error; | 1339 | return rtas_error; |
976 | } | 1340 | } |
977 | 1341 | ||
1342 | static int cell_global_start_spu_events(struct op_counter_config *ctr) | ||
1343 | { | ||
1344 | int cpu; | ||
1345 | u32 interrupt_mask = 0; | ||
1346 | int rtn = 0; | ||
1347 | |||
1348 | hdw_thread = 0; | ||
1349 | |||
1350 | /* spu event profiling, uses the performance counters to generate | ||
1351 | * an interrupt. The hardware is setup to store the SPU program | ||
1352 | * counter into the trace array. The occurrence mode is used to | ||
1353 | * enable storing data to the trace buffer. The bits are set | ||
1354 | * to send/store the SPU address in the trace buffer. The debug | ||
1355 | * bus must be setup to route the SPU program counter onto the | ||
1356 | * debug bus. The occurrence data in the trace buffer is not used. | ||
1357 | */ | ||
1358 | |||
1359 | /* This routine gets called once for the system. | ||
1360 | * There is one performance monitor per node, so we | ||
1361 | * only need to perform this function once per node. | ||
1362 | */ | ||
1363 | |||
1364 | for_each_online_cpu(cpu) { | ||
1365 | if (cbe_get_hw_thread_id(cpu)) | ||
1366 | continue; | ||
1367 | |||
1368 | /* | ||
1369 | * Setup SPU event-based profiling. | ||
1370 | * Set perf_mon_control bit 0 to a zero before | ||
1371 | * enabling spu collection hardware. | ||
1372 | * | ||
1373 | * Only support one SPU event on one SPU per node. | ||
1374 | */ | ||
1375 | if (ctr_enabled & 1) { | ||
1376 | cbe_write_ctr(cpu, 0, reset_value[0]); | ||
1377 | enable_ctr(cpu, 0, pm_regs.pm07_cntrl); | ||
1378 | interrupt_mask |= | ||
1379 | CBE_PM_CTR_OVERFLOW_INTR(0); | ||
1380 | } else { | ||
1381 | /* Disable counter */ | ||
1382 | cbe_write_pm07_control(cpu, 0, 0); | ||
1383 | } | ||
1384 | |||
1385 | cbe_get_and_clear_pm_interrupts(cpu); | ||
1386 | cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask); | ||
1387 | cbe_enable_pm(cpu); | ||
1388 | |||
1389 | /* clear the trace buffer */ | ||
1390 | cbe_write_pm(cpu, trace_address, 0); | ||
1391 | } | ||
1392 | |||
1393 | /* Start the timer to time slice collecting the event profile | ||
1394 | * on each of the SPUs. Note, can collect profile on one SPU | ||
1395 | * per node at a time. | ||
1396 | */ | ||
1397 | start_spu_event_swap(); | ||
1398 | start_spu_profiling_events(); | ||
1399 | oprofile_running = 1; | ||
1400 | smp_wmb(); | ||
1401 | |||
1402 | return rtn; | ||
1403 | } | ||
1404 | |||
978 | static int cell_global_start_ppu(struct op_counter_config *ctr) | 1405 | static int cell_global_start_ppu(struct op_counter_config *ctr) |
979 | { | 1406 | { |
980 | u32 cpu, i; | 1407 | u32 cpu, i; |
@@ -994,8 +1421,7 @@ static int cell_global_start_ppu(struct op_counter_config *ctr) | |||
994 | if (ctr_enabled & (1 << i)) { | 1421 | if (ctr_enabled & (1 << i)) { |
995 | cbe_write_ctr(cpu, i, reset_value[i]); | 1422 | cbe_write_ctr(cpu, i, reset_value[i]); |
996 | enable_ctr(cpu, i, pm_regs.pm07_cntrl); | 1423 | enable_ctr(cpu, i, pm_regs.pm07_cntrl); |
997 | interrupt_mask |= | 1424 | interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i); |
998 | CBE_PM_CTR_OVERFLOW_INTR(i); | ||
999 | } else { | 1425 | } else { |
1000 | /* Disable counter */ | 1426 | /* Disable counter */ |
1001 | cbe_write_pm07_control(cpu, i, 0); | 1427 | cbe_write_pm07_control(cpu, i, 0); |
@@ -1024,99 +1450,162 @@ static int cell_global_start_ppu(struct op_counter_config *ctr) | |||
1024 | 1450 | ||
1025 | static int cell_global_start(struct op_counter_config *ctr) | 1451 | static int cell_global_start(struct op_counter_config *ctr) |
1026 | { | 1452 | { |
1027 | if (spu_cycle_reset) | 1453 | if (profiling_mode == SPU_PROFILING_CYCLES) |
1028 | return cell_global_start_spu(ctr); | 1454 | return cell_global_start_spu_cycles(ctr); |
1455 | else if (profiling_mode == SPU_PROFILING_EVENTS) | ||
1456 | return cell_global_start_spu_events(ctr); | ||
1029 | else | 1457 | else |
1030 | return cell_global_start_ppu(ctr); | 1458 | return cell_global_start_ppu(ctr); |
1031 | } | 1459 | } |
1032 | 1460 | ||
1033 | /* | 1461 | |
1034 | * Note the generic OProfile stop calls do not support returning | 1462 | /* The SPU interrupt handler |
1035 | * an error on stop. Hence, will not return an error if the FW | 1463 | * |
1036 | * calls fail on stop. Failure to reset the debug bus is not an issue. | 1464 | * SPU event profiling works as follows: |
1037 | * Failure to disable the SPU profiling is not an issue. The FW calls | 1465 | * The pm_signal[0] holds the one SPU event to be measured. It is routed on |
1038 | * to enable the performance counters and debug bus will work even if | 1466 | * the debug bus using word 0 or 1. The value of pm_signal[1] and |
1039 | * the hardware was not cleanly reset. | 1467 | * pm_signal[2] contain the necessary events to route the SPU program |
1468 | * counter for the selected SPU onto the debug bus using words 2 and 3. | ||
1469 | * The pm_interval register is setup to write the SPU PC value into the | ||
1470 | * trace buffer at the maximum rate possible. The trace buffer is configured | ||
1471 | * to store the PCs, wrapping when it is full. The performance counter is | ||
1472 | * initialized to the max hardware count minus the number of events, N, between | ||
1473 | * samples. Once the N events have occurred, a HW counter overflow occurs | ||
1474 | * causing the generation of a HW counter interrupt which also stops the | ||
1475 | * writing of the SPU PC values to the trace buffer. Hence the last PC | ||
1476 | * written to the trace buffer is the SPU PC that we want. Unfortunately, | ||
1477 | * we have to read from the beginning of the trace buffer to get to the | ||
1478 | * last value written. We just hope the PPU has nothing better to do than | ||
1479 | * service this interrupt. The PC for the specific SPU being profiled is | ||
1480 | * extracted from the trace buffer processed and stored. The trace buffer | ||
1481 | * is cleared, interrupts are cleared, the counter is reset to max - N. | ||
1482 | * A kernel timer is used to periodically call the routine spu_evnt_swap() | ||
1483 | * to switch to the next physical SPU in the node to profile in round-robin | ||
1484 | * order. This way data is collected for all SPUs on the node. It does mean | ||
1485 | * that we need to use a relatively small value of N to ensure enough samples | ||
1486 | * on each SPU are collected, since each SPU is only being profiled 1/8 of the time. | ||
1487 | * It may also be necessary to use a longer sample collection period. | ||
1040 | */ | 1488 | */ |
1041 | static void cell_global_stop_spu(void) | 1489 | static void cell_handle_interrupt_spu(struct pt_regs *regs, |
1490 | struct op_counter_config *ctr) | ||
1042 | { | 1491 | { |
1043 | int subfunc, rtn_value; | 1492 | u32 cpu, cpu_tmp; |
1044 | unsigned int lfsr_value; | 1493 | u64 trace_entry; |
1045 | int cpu; | 1494 | u32 interrupt_mask; |
1495 | u64 trace_buffer[2]; | ||
1496 | u64 last_trace_buffer; | ||
1497 | u32 sample; | ||
1498 | u32 trace_addr; | ||
1499 | unsigned long sample_array_lock_flags; | ||
1500 | int spu_num; | ||
1501 | unsigned long flags; | ||
1046 | 1502 | ||
1047 | oprofile_running = 0; | 1503 | /* Make sure spu event interrupt handler and spu event swap |
1504 | * don't access the counters simultaneously. | ||
1505 | */ | ||
1506 | cpu = smp_processor_id(); | ||
1507 | spin_lock_irqsave(&cntr_lock, flags); | ||
1048 | 1508 | ||
1049 | #ifdef CONFIG_CPU_FREQ | 1509 | cpu_tmp = cpu; |
1050 | cpufreq_unregister_notifier(&cpu_freq_notifier_block, | 1510 | cbe_disable_pm(cpu); |
1051 | CPUFREQ_TRANSITION_NOTIFIER); | ||
1052 | #endif | ||
1053 | 1511 | ||
1054 | for_each_online_cpu(cpu) { | 1512 | interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu); |
1055 | if (cbe_get_hw_thread_id(cpu)) | ||
1056 | continue; | ||
1057 | 1513 | ||
1058 | subfunc = 3; /* | 1514 | sample = 0xABCDEF; |
1059 | * 2 - activate SPU tracing, | 1515 | trace_entry = 0xfedcba; |
1060 | * 3 - deactivate | 1516 | last_trace_buffer = 0xdeadbeaf; |
1061 | */ | ||
1062 | lfsr_value = 0x8f100000; | ||
1063 | 1517 | ||
1064 | rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL, | 1518 | if ((oprofile_running == 1) && (interrupt_mask != 0)) { |
1065 | subfunc, cbe_cpu_to_node(cpu), | 1519 | /* disable writes to trace buff */ |
1066 | lfsr_value); | 1520 | cbe_write_pm(cpu, pm_interval, 0); |
1067 | 1521 | ||
1068 | if (unlikely(rtn_value != 0)) { | 1522 | /* only have one perf cntr being used, cntr 0 */ |
1069 | printk(KERN_ERR | 1523 | if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0)) |
1070 | "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n", | 1524 | && ctr[0].enabled) |
1071 | __func__, rtn_value); | 1525 | /* The SPU PC values will be read |
1526 | * from the trace buffer, reset counter | ||
1527 | */ | ||
1528 | |||
1529 | cbe_write_ctr(cpu, 0, reset_value[0]); | ||
1530 | |||
1531 | trace_addr = cbe_read_pm(cpu, trace_address); | ||
1532 | |||
1533 | while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) { | ||
1534 | /* There is data in the trace buffer to process | ||
1535 | * Read the buffer until you get to the last | ||
1536 | * entry. This is the value we want. | ||
1537 | */ | ||
1538 | |||
1539 | cbe_read_trace_buffer(cpu, trace_buffer); | ||
1540 | trace_addr = cbe_read_pm(cpu, trace_address); | ||
1072 | } | 1541 | } |
1073 | 1542 | ||
1074 | /* Deactivate the signals */ | 1543 | /* SPU Address 16 bit count format for 128 bit |
1075 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | 1544 | * HW trace buffer is used for the SPU PC storage |
1076 | } | 1545 | * HDR bits 0:15 |
1546 | * SPU Addr 0 bits 16:31 | ||
1547 | * SPU Addr 1 bits 32:47 | ||
1548 | * unused bits 48:127 | ||
1549 | * | ||
1550 | * HDR: bit4 = 1 SPU Address 0 valid | ||
1551 | * HDR: bit5 = 1 SPU Address 1 valid | ||
1552 | * - unfortunately, the valid bits don't seem to work | ||
1553 | * | ||
1554 | * Note trace_buffer[0] holds bits 0:63 of the HW | ||
1555 | * trace buffer, trace_buffer[1] holds bits 64:127 | ||
1556 | */ | ||
1077 | 1557 | ||
1078 | stop_spu_profiling(); | 1558 | trace_entry = trace_buffer[0] |
1079 | } | 1559 | & 0x00000000FFFF0000; |
1080 | 1560 | ||
1081 | static void cell_global_stop_ppu(void) | 1561 | /* only top 16 of the 18 bit SPU PC address |
1082 | { | 1562 | * is stored in trace buffer, hence shift right |
1083 | int cpu; | 1563 | * by 16 -2 bits */ |
1564 | sample = trace_entry >> 14; | ||
1565 | last_trace_buffer = trace_buffer[0]; | ||
1084 | 1566 | ||
1085 | /* | 1567 | spu_num = spu_evnt_phys_spu_indx |
1086 | * This routine will be called once for the system. | 1568 | + (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE); |
1087 | * There is one performance monitor per node, so we | ||
1088 | * only need to perform this function once per node. | ||
1089 | */ | ||
1090 | del_timer_sync(&timer_virt_cntr); | ||
1091 | oprofile_running = 0; | ||
1092 | smp_wmb(); | ||
1093 | 1569 | ||
1094 | for_each_online_cpu(cpu) { | 1570 | /* make sure only one process at a time is calling |
1095 | if (cbe_get_hw_thread_id(cpu)) | 1571 | * spu_sync_buffer() |
1096 | continue; | 1572 | */ |
1573 | spin_lock_irqsave(&oprof_spu_smpl_arry_lck, | ||
1574 | sample_array_lock_flags); | ||
1575 | spu_sync_buffer(spu_num, &sample, 1); | ||
1576 | spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck, | ||
1577 | sample_array_lock_flags); | ||
1097 | 1578 | ||
1098 | cbe_sync_irq(cbe_cpu_to_node(cpu)); | 1579 | smp_wmb(); /* ensure spu event buffer updates are written |
1099 | /* Stop the counters */ | 1580 | * don't want events intermingled... */ |
1100 | cbe_disable_pm(cpu); | ||
1101 | 1581 | ||
1102 | /* Deactivate the signals */ | 1582 | /* The counters were frozen by the interrupt. |
1103 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | 1583 | * Reenable the interrupt and restart the counters. |
1584 | */ | ||
1585 | cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC); | ||
1586 | cbe_enable_pm_interrupts(cpu, hdw_thread, | ||
1587 | virt_cntr_inter_mask); | ||
1104 | 1588 | ||
1105 | /* Deactivate interrupts */ | 1589 | /* clear the trace buffer, re-enable writes to trace buff */ |
1106 | cbe_disable_pm_interrupts(cpu); | 1590 | cbe_write_pm(cpu, trace_address, 0); |
1107 | } | 1591 | cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC); |
1108 | } | ||
1109 | 1592 | ||
1110 | static void cell_global_stop(void) | 1593 | /* The writes to the various performance counters only writes |
1111 | { | 1594 | * to a latch. The new values (interrupt setting bits, reset |
1112 | if (spu_cycle_reset) | 1595 | * counter value etc.) are not copied to the actual registers |
1113 | cell_global_stop_spu(); | 1596 | * until the performance monitor is enabled. In order to get |
1114 | else | 1597 | * this to work as desired, the performance monitor needs to |
1115 | cell_global_stop_ppu(); | 1598 | * be disabled while writing to the latches. This is a |
1599 | * HW design issue. | ||
1600 | */ | ||
1601 | write_pm_cntrl(cpu); | ||
1602 | cbe_enable_pm(cpu); | ||
1603 | } | ||
1604 | spin_unlock_irqrestore(&cntr_lock, flags); | ||
1116 | } | 1605 | } |
1117 | 1606 | ||
1118 | static void cell_handle_interrupt(struct pt_regs *regs, | 1607 | static void cell_handle_interrupt_ppu(struct pt_regs *regs, |
1119 | struct op_counter_config *ctr) | 1608 | struct op_counter_config *ctr) |
1120 | { | 1609 | { |
1121 | u32 cpu; | 1610 | u32 cpu; |
1122 | u64 pc; | 1611 | u64 pc; |
@@ -1132,7 +1621,7 @@ static void cell_handle_interrupt(struct pt_regs *regs, | |||
1132 | * routine are not running at the same time. See the | 1621 | * routine are not running at the same time. See the |
1133 | * cell_virtual_cntr() routine for additional comments. | 1622 | * cell_virtual_cntr() routine for additional comments. |
1134 | */ | 1623 | */ |
1135 | spin_lock_irqsave(&virt_cntr_lock, flags); | 1624 | spin_lock_irqsave(&cntr_lock, flags); |
1136 | 1625 | ||
1137 | /* | 1626 | /* |
1138 | * Need to disable and reenable the performance counters | 1627 | * Need to disable and reenable the performance counters |
@@ -1185,7 +1674,16 @@ static void cell_handle_interrupt(struct pt_regs *regs, | |||
1185 | */ | 1674 | */ |
1186 | cbe_enable_pm(cpu); | 1675 | cbe_enable_pm(cpu); |
1187 | } | 1676 | } |
1188 | spin_unlock_irqrestore(&virt_cntr_lock, flags); | 1677 | spin_unlock_irqrestore(&cntr_lock, flags); |
1678 | } | ||
1679 | |||
1680 | static void cell_handle_interrupt(struct pt_regs *regs, | ||
1681 | struct op_counter_config *ctr) | ||
1682 | { | ||
1683 | if (profiling_mode == PPU_PROFILING) | ||
1684 | cell_handle_interrupt_ppu(regs, ctr); | ||
1685 | else | ||
1686 | cell_handle_interrupt_spu(regs, ctr); | ||
1189 | } | 1687 | } |
1190 | 1688 | ||
1191 | /* | 1689 | /* |
@@ -1195,7 +1693,8 @@ static void cell_handle_interrupt(struct pt_regs *regs, | |||
1195 | */ | 1693 | */ |
1196 | static int cell_sync_start(void) | 1694 | static int cell_sync_start(void) |
1197 | { | 1695 | { |
1198 | if (spu_cycle_reset) | 1696 | if ((profiling_mode == SPU_PROFILING_CYCLES) || |
1697 | (profiling_mode == SPU_PROFILING_EVENTS)) | ||
1199 | return spu_sync_start(); | 1698 | return spu_sync_start(); |
1200 | else | 1699 | else |
1201 | return DO_GENERIC_SYNC; | 1700 | return DO_GENERIC_SYNC; |
@@ -1203,7 +1702,8 @@ static int cell_sync_start(void) | |||
1203 | 1702 | ||
1204 | static int cell_sync_stop(void) | 1703 | static int cell_sync_stop(void) |
1205 | { | 1704 | { |
1206 | if (spu_cycle_reset) | 1705 | if ((profiling_mode == SPU_PROFILING_CYCLES) || |
1706 | (profiling_mode == SPU_PROFILING_EVENTS)) | ||
1207 | return spu_sync_stop(); | 1707 | return spu_sync_stop(); |
1208 | else | 1708 | else |
1209 | return 1; | 1709 | return 1; |
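
The interrupt-handler hunk above carries a comment explaining that writes to the Cell PMU control registers only land in a latch and take effect once the monitor is re-enabled. A condensed sketch of the ordering that implies, using the helpers that appear in this file's diff (rearm_pmu_after_interrupt() is a hypothetical name, not a function in the patch, and the surrounding signal setup is elided):

/* Sketch: reprogram the Cell PMU after its interrupt froze the counters.
 * The control writes only reach a latch, so enabling the PMU must come
 * last for the new values to take effect. */
static void rearm_pmu_after_interrupt(u32 cpu)
{
        cbe_write_pm(cpu, trace_address, 0);              /* clear trace buffer */
        cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC); /* rearm interval     */
        cbe_enable_pm_interrupts(cpu, hdw_thread, virt_cntr_inter_mask);
        write_pm_cntrl(cpu);    /* still only latched ...            */
        cbe_enable_pm(cpu);     /* ... until the PMU is enabled here */
}
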
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 98658f25f542..8fdf06e4edf9 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * @file op_model_amd.c | 2 | * @file op_model_amd.c |
3 | * athlon / K7 / K8 / Family 10h model-specific MSR operations | 3 | * athlon / K7 / K8 / Family 10h model-specific MSR operations |
4 | * | 4 | * |
5 | * @remark Copyright 2002-2008 OProfile authors | 5 | * @remark Copyright 2002-2009 OProfile authors |
6 | * @remark Read the file COPYING | 6 | * @remark Read the file COPYING |
7 | * | 7 | * |
8 | * @author John Levon | 8 | * @author John Levon |
@@ -10,7 +10,7 @@ | |||
10 | * @author Graydon Hoare | 10 | * @author Graydon Hoare |
11 | * @author Robert Richter <robert.richter@amd.com> | 11 | * @author Robert Richter <robert.richter@amd.com> |
12 | * @author Barry Kasindorf | 12 | * @author Barry Kasindorf |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/oprofile.h> | 15 | #include <linux/oprofile.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
@@ -60,53 +60,10 @@ static unsigned long reset_value[NUM_COUNTERS]; | |||
60 | #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ | 60 | #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ |
61 | #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ | 61 | #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ |
62 | 62 | ||
63 | /* Codes used in cpu_buffer.c */ | 63 | #define IBS_FETCH_SIZE 6 |
64 | /* This produces duplicate code, need to be fixed */ | 64 | #define IBS_OP_SIZE 12 |
65 | #define IBS_FETCH_BEGIN 3 | ||
66 | #define IBS_OP_BEGIN 4 | ||
67 | |||
68 | /* | ||
69 | * The function interface needs to be fixed, something like add | ||
70 | * data. Should then be added to linux/oprofile.h. | ||
71 | */ | ||
72 | extern void | ||
73 | oprofile_add_ibs_sample(struct pt_regs * const regs, | ||
74 | unsigned int * const ibs_sample, int ibs_code); | ||
75 | |||
76 | struct ibs_fetch_sample { | ||
77 | /* MSRC001_1031 IBS Fetch Linear Address Register */ | ||
78 | unsigned int ibs_fetch_lin_addr_low; | ||
79 | unsigned int ibs_fetch_lin_addr_high; | ||
80 | /* MSRC001_1030 IBS Fetch Control Register */ | ||
81 | unsigned int ibs_fetch_ctl_low; | ||
82 | unsigned int ibs_fetch_ctl_high; | ||
83 | /* MSRC001_1032 IBS Fetch Physical Address Register */ | ||
84 | unsigned int ibs_fetch_phys_addr_low; | ||
85 | unsigned int ibs_fetch_phys_addr_high; | ||
86 | }; | ||
87 | |||
88 | struct ibs_op_sample { | ||
89 | /* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */ | ||
90 | unsigned int ibs_op_rip_low; | ||
91 | unsigned int ibs_op_rip_high; | ||
92 | /* MSRC001_1035 IBS Op Data Register */ | ||
93 | unsigned int ibs_op_data1_low; | ||
94 | unsigned int ibs_op_data1_high; | ||
95 | /* MSRC001_1036 IBS Op Data 2 Register */ | ||
96 | unsigned int ibs_op_data2_low; | ||
97 | unsigned int ibs_op_data2_high; | ||
98 | /* MSRC001_1037 IBS Op Data 3 Register */ | ||
99 | unsigned int ibs_op_data3_low; | ||
100 | unsigned int ibs_op_data3_high; | ||
101 | /* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */ | ||
102 | unsigned int ibs_dc_linear_low; | ||
103 | unsigned int ibs_dc_linear_high; | ||
104 | /* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */ | ||
105 | unsigned int ibs_dc_phys_low; | ||
106 | unsigned int ibs_dc_phys_high; | ||
107 | }; | ||
108 | 65 | ||
109 | static int ibs_allowed; /* AMD Family10h and later */ | 66 | static int has_ibs; /* AMD Family10h and later */ |
110 | 67 | ||
111 | struct op_ibs_config { | 68 | struct op_ibs_config { |
112 | unsigned long op_enabled; | 69 | unsigned long op_enabled; |
@@ -197,31 +154,29 @@ static inline int | |||
197 | op_amd_handle_ibs(struct pt_regs * const regs, | 154 | op_amd_handle_ibs(struct pt_regs * const regs, |
198 | struct op_msrs const * const msrs) | 155 | struct op_msrs const * const msrs) |
199 | { | 156 | { |
200 | unsigned int low, high; | 157 | u32 low, high; |
201 | struct ibs_fetch_sample ibs_fetch; | 158 | u64 msr; |
202 | struct ibs_op_sample ibs_op; | 159 | struct op_entry entry; |
203 | 160 | ||
204 | if (!ibs_allowed) | 161 | if (!has_ibs) |
205 | return 1; | 162 | return 1; |
206 | 163 | ||
207 | if (ibs_config.fetch_enabled) { | 164 | if (ibs_config.fetch_enabled) { |
208 | rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 165 | rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); |
209 | if (high & IBS_FETCH_HIGH_VALID_BIT) { | 166 | if (high & IBS_FETCH_HIGH_VALID_BIT) { |
210 | ibs_fetch.ibs_fetch_ctl_high = high; | 167 | rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr); |
211 | ibs_fetch.ibs_fetch_ctl_low = low; | 168 | oprofile_write_reserve(&entry, regs, msr, |
212 | rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high); | 169 | IBS_FETCH_CODE, IBS_FETCH_SIZE); |
213 | ibs_fetch.ibs_fetch_lin_addr_high = high; | 170 | oprofile_add_data(&entry, (u32)msr); |
214 | ibs_fetch.ibs_fetch_lin_addr_low = low; | 171 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
215 | rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high); | 172 | oprofile_add_data(&entry, low); |
216 | ibs_fetch.ibs_fetch_phys_addr_high = high; | 173 | oprofile_add_data(&entry, high); |
217 | ibs_fetch.ibs_fetch_phys_addr_low = low; | 174 | rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr); |
218 | 175 | oprofile_add_data(&entry, (u32)msr); | |
219 | oprofile_add_ibs_sample(regs, | 176 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
220 | (unsigned int *)&ibs_fetch, | 177 | oprofile_write_commit(&entry); |
221 | IBS_FETCH_BEGIN); | ||
222 | 178 | ||
223 | /* reenable the IRQ */ | 179 | /* reenable the IRQ */ |
224 | rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); | ||
225 | high &= ~IBS_FETCH_HIGH_VALID_BIT; | 180 | high &= ~IBS_FETCH_HIGH_VALID_BIT; |
226 | high |= IBS_FETCH_HIGH_ENABLE; | 181 | high |= IBS_FETCH_HIGH_ENABLE; |
227 | low &= IBS_FETCH_LOW_MAX_CNT_MASK; | 182 | low &= IBS_FETCH_LOW_MAX_CNT_MASK; |
@@ -232,30 +187,29 @@ op_amd_handle_ibs(struct pt_regs * const regs, | |||
232 | if (ibs_config.op_enabled) { | 187 | if (ibs_config.op_enabled) { |
233 | rdmsr(MSR_AMD64_IBSOPCTL, low, high); | 188 | rdmsr(MSR_AMD64_IBSOPCTL, low, high); |
234 | if (low & IBS_OP_LOW_VALID_BIT) { | 189 | if (low & IBS_OP_LOW_VALID_BIT) { |
235 | rdmsr(MSR_AMD64_IBSOPRIP, low, high); | 190 | rdmsrl(MSR_AMD64_IBSOPRIP, msr); |
236 | ibs_op.ibs_op_rip_low = low; | 191 | oprofile_write_reserve(&entry, regs, msr, |
237 | ibs_op.ibs_op_rip_high = high; | 192 | IBS_OP_CODE, IBS_OP_SIZE); |
238 | rdmsr(MSR_AMD64_IBSOPDATA, low, high); | 193 | oprofile_add_data(&entry, (u32)msr); |
239 | ibs_op.ibs_op_data1_low = low; | 194 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
240 | ibs_op.ibs_op_data1_high = high; | 195 | rdmsrl(MSR_AMD64_IBSOPDATA, msr); |
241 | rdmsr(MSR_AMD64_IBSOPDATA2, low, high); | 196 | oprofile_add_data(&entry, (u32)msr); |
242 | ibs_op.ibs_op_data2_low = low; | 197 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
243 | ibs_op.ibs_op_data2_high = high; | 198 | rdmsrl(MSR_AMD64_IBSOPDATA2, msr); |
244 | rdmsr(MSR_AMD64_IBSOPDATA3, low, high); | 199 | oprofile_add_data(&entry, (u32)msr); |
245 | ibs_op.ibs_op_data3_low = low; | 200 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
246 | ibs_op.ibs_op_data3_high = high; | 201 | rdmsrl(MSR_AMD64_IBSOPDATA3, msr); |
247 | rdmsr(MSR_AMD64_IBSDCLINAD, low, high); | 202 | oprofile_add_data(&entry, (u32)msr); |
248 | ibs_op.ibs_dc_linear_low = low; | 203 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
249 | ibs_op.ibs_dc_linear_high = high; | 204 | rdmsrl(MSR_AMD64_IBSDCLINAD, msr); |
250 | rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high); | 205 | oprofile_add_data(&entry, (u32)msr); |
251 | ibs_op.ibs_dc_phys_low = low; | 206 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
252 | ibs_op.ibs_dc_phys_high = high; | 207 | rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr); |
208 | oprofile_add_data(&entry, (u32)msr); | ||
209 | oprofile_add_data(&entry, (u32)(msr >> 32)); | ||
210 | oprofile_write_commit(&entry); | ||
253 | 211 | ||
254 | /* reenable the IRQ */ | 212 | /* reenable the IRQ */ |
255 | oprofile_add_ibs_sample(regs, | ||
256 | (unsigned int *)&ibs_op, | ||
257 | IBS_OP_BEGIN); | ||
258 | rdmsr(MSR_AMD64_IBSOPCTL, low, high); | ||
259 | high = 0; | 213 | high = 0; |
260 | low &= ~IBS_OP_LOW_VALID_BIT; | 214 | low &= ~IBS_OP_LOW_VALID_BIT; |
261 | low |= IBS_OP_LOW_ENABLE; | 215 | low |= IBS_OP_LOW_ENABLE; |
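
The two new size constants simply count 32-bit data words: an IBS fetch record carries three 64-bit MSR values (control, linear address, physical address), so 3 × 2 = 6 words, and an op record carries six MSRs, so 6 × 2 = 12. The handler splits each MSR inline; the same split, restated as a hypothetical helper (msr_to_words() is not part of the patch):

#include <linux/types.h>
#include <linux/oprofile.h>

/* Hypothetical helper: push one 64-bit MSR value into a reserved sample
 * as two 32-bit data words, low half first, as op_amd_handle_ibs() does
 * inline above. */
static void msr_to_words(struct op_entry *entry, u64 msr)
{
        oprofile_add_data(entry, (u32)msr);
        oprofile_add_data(entry, (u32)(msr >> 32));
}
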
@@ -305,14 +259,14 @@ static void op_amd_start(struct op_msrs const * const msrs) | |||
305 | } | 259 | } |
306 | 260 | ||
307 | #ifdef CONFIG_OPROFILE_IBS | 261 | #ifdef CONFIG_OPROFILE_IBS |
308 | if (ibs_allowed && ibs_config.fetch_enabled) { | 262 | if (has_ibs && ibs_config.fetch_enabled) { |
309 | low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; | 263 | low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; |
310 | high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ | 264 | high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ |
311 | + IBS_FETCH_HIGH_ENABLE; | 265 | + IBS_FETCH_HIGH_ENABLE; |
312 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 266 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); |
313 | } | 267 | } |
314 | 268 | ||
315 | if (ibs_allowed && ibs_config.op_enabled) { | 269 | if (has_ibs && ibs_config.op_enabled) { |
316 | low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) | 270 | low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) |
317 | + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ | 271 | + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ |
318 | + IBS_OP_LOW_ENABLE; | 272 | + IBS_OP_LOW_ENABLE; |
@@ -341,14 +295,14 @@ static void op_amd_stop(struct op_msrs const * const msrs) | |||
341 | } | 295 | } |
342 | 296 | ||
343 | #ifdef CONFIG_OPROFILE_IBS | 297 | #ifdef CONFIG_OPROFILE_IBS |
344 | if (ibs_allowed && ibs_config.fetch_enabled) { | 298 | if (has_ibs && ibs_config.fetch_enabled) { |
345 | /* clear max count and enable */ | 299 | /* clear max count and enable */ |
346 | low = 0; | 300 | low = 0; |
347 | high = 0; | 301 | high = 0; |
348 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 302 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); |
349 | } | 303 | } |
350 | 304 | ||
351 | if (ibs_allowed && ibs_config.op_enabled) { | 305 | if (has_ibs && ibs_config.op_enabled) { |
352 | /* clear max count and enable */ | 306 | /* clear max count and enable */ |
353 | low = 0; | 307 | low = 0; |
354 | high = 0; | 308 | high = 0; |
@@ -409,6 +363,7 @@ static int init_ibs_nmi(void) | |||
409 | | IBSCTL_LVTOFFSETVAL); | 363 | | IBSCTL_LVTOFFSETVAL); |
410 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); | 364 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); |
411 | if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { | 365 | if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { |
366 | pci_dev_put(cpu_cfg); | ||
412 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " | 367 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " |
413 | "IBSCTL = 0x%08x", value); | 368 | "IBSCTL = 0x%08x", value); |
414 | return 1; | 369 | return 1; |
@@ -436,20 +391,20 @@ static int init_ibs_nmi(void) | |||
436 | /* uninitialize the APIC for the IBS interrupts if needed */ | 391 | /* uninitialize the APIC for the IBS interrupts if needed */ |
437 | static void clear_ibs_nmi(void) | 392 | static void clear_ibs_nmi(void) |
438 | { | 393 | { |
439 | if (ibs_allowed) | 394 | if (has_ibs) |
440 | on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1); | 395 | on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1); |
441 | } | 396 | } |
442 | 397 | ||
443 | /* initialize the APIC for the IBS interrupts if available */ | 398 | /* initialize the APIC for the IBS interrupts if available */ |
444 | static void ibs_init(void) | 399 | static void ibs_init(void) |
445 | { | 400 | { |
446 | ibs_allowed = boot_cpu_has(X86_FEATURE_IBS); | 401 | has_ibs = boot_cpu_has(X86_FEATURE_IBS); |
447 | 402 | ||
448 | if (!ibs_allowed) | 403 | if (!has_ibs) |
449 | return; | 404 | return; |
450 | 405 | ||
451 | if (init_ibs_nmi()) { | 406 | if (init_ibs_nmi()) { |
452 | ibs_allowed = 0; | 407 | has_ibs = 0; |
453 | return; | 408 | return; |
454 | } | 409 | } |
455 | 410 | ||
@@ -458,7 +413,7 @@ static void ibs_init(void) | |||
458 | 413 | ||
459 | static void ibs_exit(void) | 414 | static void ibs_exit(void) |
460 | { | 415 | { |
461 | if (!ibs_allowed) | 416 | if (!has_ibs) |
462 | return; | 417 | return; |
463 | 418 | ||
464 | clear_ibs_nmi(); | 419 | clear_ibs_nmi(); |
@@ -478,7 +433,7 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root) | |||
478 | if (ret) | 433 | if (ret) |
479 | return ret; | 434 | return ret; |
480 | 435 | ||
481 | if (!ibs_allowed) | 436 | if (!has_ibs) |
482 | return ret; | 437 | return ret; |
483 | 438 | ||
484 | /* model specific files */ | 439 | /* model specific files */ |
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index 65e8294a9e29..9da5a4b81133 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c | |||
@@ -1,11 +1,12 @@ | |||
1 | /** | 1 | /** |
2 | * @file buffer_sync.c | 2 | * @file buffer_sync.c |
3 | * | 3 | * |
4 | * @remark Copyright 2002 OProfile authors | 4 | * @remark Copyright 2002-2009 OProfile authors |
5 | * @remark Read the file COPYING | 5 | * @remark Read the file COPYING |
6 | * | 6 | * |
7 | * @author John Levon <levon@movementarian.org> | 7 | * @author John Levon <levon@movementarian.org> |
8 | * @author Barry Kasindorf | 8 | * @author Barry Kasindorf |
9 | * @author Robert Richter <robert.richter@amd.com> | ||
9 | * | 10 | * |
10 | * This is the core of the buffer management. Each | 11 | * This is the core of the buffer management. Each |
11 | * CPU buffer is processed and entered into the | 12 | * CPU buffer is processed and entered into the |
@@ -315,88 +316,73 @@ static void add_trace_begin(void) | |||
315 | add_event_entry(TRACE_BEGIN_CODE); | 316 | add_event_entry(TRACE_BEGIN_CODE); |
316 | } | 317 | } |
317 | 318 | ||
318 | #ifdef CONFIG_OPROFILE_IBS | 319 | static void add_data(struct op_entry *entry, struct mm_struct *mm) |
319 | |||
320 | #define IBS_FETCH_CODE_SIZE 2 | ||
321 | #define IBS_OP_CODE_SIZE 5 | ||
322 | |||
323 | /* | ||
324 | * Add IBS fetch and op entries to event buffer | ||
325 | */ | ||
326 | static void add_ibs_begin(int cpu, int code, struct mm_struct *mm) | ||
327 | { | 320 | { |
328 | unsigned long rip; | 321 | unsigned long code, pc, val; |
329 | int i, count; | 322 | unsigned long cookie; |
330 | unsigned long ibs_cookie = 0; | ||
331 | off_t offset; | 323 | off_t offset; |
332 | struct op_sample *sample; | ||
333 | |||
334 | sample = cpu_buffer_read_entry(cpu); | ||
335 | if (!sample) | ||
336 | goto Error; | ||
337 | rip = sample->eip; | ||
338 | 324 | ||
339 | #ifdef __LP64__ | 325 | if (!op_cpu_buffer_get_data(entry, &code)) |
340 | rip += sample->event << 32; | 326 | return; |
341 | #endif | 327 | if (!op_cpu_buffer_get_data(entry, &pc)) |
328 | return; | ||
329 | if (!op_cpu_buffer_get_size(entry)) | ||
330 | return; | ||
342 | 331 | ||
343 | if (mm) { | 332 | if (mm) { |
344 | ibs_cookie = lookup_dcookie(mm, rip, &offset); | 333 | cookie = lookup_dcookie(mm, pc, &offset); |
345 | 334 | ||
346 | if (ibs_cookie == NO_COOKIE) | 335 | if (cookie == NO_COOKIE) |
347 | offset = rip; | 336 | offset = pc; |
348 | if (ibs_cookie == INVALID_COOKIE) { | 337 | if (cookie == INVALID_COOKIE) { |
349 | atomic_inc(&oprofile_stats.sample_lost_no_mapping); | 338 | atomic_inc(&oprofile_stats.sample_lost_no_mapping); |
350 | offset = rip; | 339 | offset = pc; |
351 | } | 340 | } |
352 | if (ibs_cookie != last_cookie) { | 341 | if (cookie != last_cookie) { |
353 | add_cookie_switch(ibs_cookie); | 342 | add_cookie_switch(cookie); |
354 | last_cookie = ibs_cookie; | 343 | last_cookie = cookie; |
355 | } | 344 | } |
356 | } else | 345 | } else |
357 | offset = rip; | 346 | offset = pc; |
358 | 347 | ||
359 | add_event_entry(ESCAPE_CODE); | 348 | add_event_entry(ESCAPE_CODE); |
360 | add_event_entry(code); | 349 | add_event_entry(code); |
361 | add_event_entry(offset); /* Offset from Dcookie */ | 350 | add_event_entry(offset); /* Offset from Dcookie */ |
362 | 351 | ||
363 | /* we send the Dcookie offset, but send the raw Linear Add also*/ | 352 | while (op_cpu_buffer_get_data(entry, &val)) |
364 | add_event_entry(sample->eip); | 353 | add_event_entry(val); |
365 | add_event_entry(sample->event); | ||
366 | |||
367 | if (code == IBS_FETCH_CODE) | ||
368 | count = IBS_FETCH_CODE_SIZE; /*IBS FETCH is 2 int64s*/ | ||
369 | else | ||
370 | count = IBS_OP_CODE_SIZE; /*IBS OP is 5 int64s*/ | ||
371 | |||
372 | for (i = 0; i < count; i++) { | ||
373 | sample = cpu_buffer_read_entry(cpu); | ||
374 | if (!sample) | ||
375 | goto Error; | ||
376 | add_event_entry(sample->eip); | ||
377 | add_event_entry(sample->event); | ||
378 | } | ||
379 | |||
380 | return; | ||
381 | |||
382 | Error: | ||
383 | return; | ||
384 | } | 354 | } |
385 | 355 | ||
386 | #endif | 356 | static inline void add_sample_entry(unsigned long offset, unsigned long event) |
387 | |||
388 | static void add_sample_entry(unsigned long offset, unsigned long event) | ||
389 | { | 357 | { |
390 | add_event_entry(offset); | 358 | add_event_entry(offset); |
391 | add_event_entry(event); | 359 | add_event_entry(event); |
392 | } | 360 | } |
393 | 361 | ||
394 | 362 | ||
395 | static int add_us_sample(struct mm_struct *mm, struct op_sample *s) | 363 | /* |
364 | * Add a sample to the global event buffer. If possible the | ||
365 | * sample is converted into a persistent dentry/offset pair | ||
366 | * for later lookup from userspace. Return 0 on failure. | ||
367 | */ | ||
368 | static int | ||
369 | add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) | ||
396 | { | 370 | { |
397 | unsigned long cookie; | 371 | unsigned long cookie; |
398 | off_t offset; | 372 | off_t offset; |
399 | 373 | ||
374 | if (in_kernel) { | ||
375 | add_sample_entry(s->eip, s->event); | ||
376 | return 1; | ||
377 | } | ||
378 | |||
379 | /* add userspace sample */ | ||
380 | |||
381 | if (!mm) { | ||
382 | atomic_inc(&oprofile_stats.sample_lost_no_mm); | ||
383 | return 0; | ||
384 | } | ||
385 | |||
400 | cookie = lookup_dcookie(mm, s->eip, &offset); | 386 | cookie = lookup_dcookie(mm, s->eip, &offset); |
401 | 387 | ||
402 | if (cookie == INVALID_COOKIE) { | 388 | if (cookie == INVALID_COOKIE) { |
@@ -415,25 +401,6 @@ static int add_us_sample(struct mm_struct *mm, struct op_sample *s) | |||
415 | } | 401 | } |
416 | 402 | ||
417 | 403 | ||
418 | /* Add a sample to the global event buffer. If possible the | ||
419 | * sample is converted into a persistent dentry/offset pair | ||
420 | * for later lookup from userspace. | ||
421 | */ | ||
422 | static int | ||
423 | add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) | ||
424 | { | ||
425 | if (in_kernel) { | ||
426 | add_sample_entry(s->eip, s->event); | ||
427 | return 1; | ||
428 | } else if (mm) { | ||
429 | return add_us_sample(mm, s); | ||
430 | } else { | ||
431 | atomic_inc(&oprofile_stats.sample_lost_no_mm); | ||
432 | } | ||
433 | return 0; | ||
434 | } | ||
435 | |||
436 | |||
437 | static void release_mm(struct mm_struct *mm) | 404 | static void release_mm(struct mm_struct *mm) |
438 | { | 405 | { |
439 | if (!mm) | 406 | if (!mm) |
@@ -526,66 +493,69 @@ void sync_buffer(int cpu) | |||
526 | { | 493 | { |
527 | struct mm_struct *mm = NULL; | 494 | struct mm_struct *mm = NULL; |
528 | struct mm_struct *oldmm; | 495 | struct mm_struct *oldmm; |
496 | unsigned long val; | ||
529 | struct task_struct *new; | 497 | struct task_struct *new; |
530 | unsigned long cookie = 0; | 498 | unsigned long cookie = 0; |
531 | int in_kernel = 1; | 499 | int in_kernel = 1; |
532 | sync_buffer_state state = sb_buffer_start; | 500 | sync_buffer_state state = sb_buffer_start; |
533 | unsigned int i; | 501 | unsigned int i; |
534 | unsigned long available; | 502 | unsigned long available; |
503 | unsigned long flags; | ||
504 | struct op_entry entry; | ||
505 | struct op_sample *sample; | ||
535 | 506 | ||
536 | mutex_lock(&buffer_mutex); | 507 | mutex_lock(&buffer_mutex); |
537 | 508 | ||
538 | add_cpu_switch(cpu); | 509 | add_cpu_switch(cpu); |
539 | 510 | ||
540 | cpu_buffer_reset(cpu); | 511 | op_cpu_buffer_reset(cpu); |
541 | available = cpu_buffer_entries(cpu); | 512 | available = op_cpu_buffer_entries(cpu); |
542 | 513 | ||
543 | for (i = 0; i < available; ++i) { | 514 | for (i = 0; i < available; ++i) { |
544 | struct op_sample *s = cpu_buffer_read_entry(cpu); | 515 | sample = op_cpu_buffer_read_entry(&entry, cpu); |
545 | if (!s) | 516 | if (!sample) |
546 | break; | 517 | break; |
547 | 518 | ||
548 | if (is_code(s->eip)) { | 519 | if (is_code(sample->eip)) { |
549 | switch (s->event) { | 520 | flags = sample->event; |
550 | case 0: | 521 | if (flags & TRACE_BEGIN) { |
551 | case CPU_IS_KERNEL: | 522 | state = sb_bt_start; |
523 | add_trace_begin(); | ||
524 | } | ||
525 | if (flags & KERNEL_CTX_SWITCH) { | ||
552 | /* kernel/userspace switch */ | 526 | /* kernel/userspace switch */ |
553 | in_kernel = s->event; | 527 | in_kernel = flags & IS_KERNEL; |
554 | if (state == sb_buffer_start) | 528 | if (state == sb_buffer_start) |
555 | state = sb_sample_start; | 529 | state = sb_sample_start; |
556 | add_kernel_ctx_switch(s->event); | 530 | add_kernel_ctx_switch(flags & IS_KERNEL); |
557 | break; | 531 | } |
558 | case CPU_TRACE_BEGIN: | 532 | if (flags & USER_CTX_SWITCH |
559 | state = sb_bt_start; | 533 | && op_cpu_buffer_get_data(&entry, &val)) { |
560 | add_trace_begin(); | ||
561 | break; | ||
562 | #ifdef CONFIG_OPROFILE_IBS | ||
563 | case IBS_FETCH_BEGIN: | ||
564 | state = sb_bt_start; | ||
565 | add_ibs_begin(cpu, IBS_FETCH_CODE, mm); | ||
566 | break; | ||
567 | case IBS_OP_BEGIN: | ||
568 | state = sb_bt_start; | ||
569 | add_ibs_begin(cpu, IBS_OP_CODE, mm); | ||
570 | break; | ||
571 | #endif | ||
572 | default: | ||
573 | /* userspace context switch */ | 534 | /* userspace context switch */ |
535 | new = (struct task_struct *)val; | ||
574 | oldmm = mm; | 536 | oldmm = mm; |
575 | new = (struct task_struct *)s->event; | ||
576 | release_mm(oldmm); | 537 | release_mm(oldmm); |
577 | mm = take_tasks_mm(new); | 538 | mm = take_tasks_mm(new); |
578 | if (mm != oldmm) | 539 | if (mm != oldmm) |
579 | cookie = get_exec_dcookie(mm); | 540 | cookie = get_exec_dcookie(mm); |
580 | add_user_ctx_switch(new, cookie); | 541 | add_user_ctx_switch(new, cookie); |
581 | break; | ||
582 | } | ||
583 | } else if (state >= sb_bt_start && | ||
584 | !add_sample(mm, s, in_kernel)) { | ||
585 | if (state == sb_bt_start) { | ||
586 | state = sb_bt_ignore; | ||
587 | atomic_inc(&oprofile_stats.bt_lost_no_mapping); | ||
588 | } | 542 | } |
543 | if (op_cpu_buffer_get_size(&entry)) | ||
544 | add_data(&entry, mm); | ||
545 | continue; | ||
546 | } | ||
547 | |||
548 | if (state < sb_bt_start) | ||
549 | /* ignore sample */ | ||
550 | continue; | ||
551 | |||
552 | if (add_sample(mm, sample, in_kernel)) | ||
553 | continue; | ||
554 | |||
555 | /* ignore backtraces if failed to add a sample */ | ||
556 | if (state == sb_bt_start) { | ||
557 | state = sb_bt_ignore; | ||
558 | atomic_inc(&oprofile_stats.bt_lost_no_mapping); | ||
589 | } | 559 | } |
590 | } | 560 | } |
591 | release_mm(mm); | 561 | release_mm(mm); |
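
For orientation, this is the record layout the new add_data() path above emits into the global event buffer for one cpu-buffer entry that carries extra data (reconstructed from the code in this hunk; entries whose payload is exhausted after code and pc are silently dropped):

/* Global event-buffer stream for one data sample:
 *
 *      ESCAPE_CODE
 *      code            (e.g. IBS_FETCH_CODE or IBS_OP_CODE)
 *      offset          (dcookie offset, or the raw pc when no mapping is found)
 *      data word 0
 *      ...
 *      data word n-1   (remaining driver payload, copied verbatim)
 *
 * A cookie-switch record precedes this whenever the dcookie changed,
 * just as for ordinary user-space samples.
 */
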
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 61090969158f..2e03b6d796d3 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
@@ -1,11 +1,12 @@ | |||
1 | /** | 1 | /** |
2 | * @file cpu_buffer.c | 2 | * @file cpu_buffer.c |
3 | * | 3 | * |
4 | * @remark Copyright 2002 OProfile authors | 4 | * @remark Copyright 2002-2009 OProfile authors |
5 | * @remark Read the file COPYING | 5 | * @remark Read the file COPYING |
6 | * | 6 | * |
7 | * @author John Levon <levon@movementarian.org> | 7 | * @author John Levon <levon@movementarian.org> |
8 | * @author Barry Kasindorf <barry.kasindorf@amd.com> | 8 | * @author Barry Kasindorf <barry.kasindorf@amd.com> |
9 | * @author Robert Richter <robert.richter@amd.com> | ||
9 | * | 10 | * |
10 | * Each CPU has a local buffer that stores PC value/event | 11 | * Each CPU has a local buffer that stores PC value/event |
11 | * pairs. We also log context switches when we notice them. | 12 | * pairs. We also log context switches when we notice them. |
@@ -45,8 +46,8 @@ | |||
45 | * can be changed to a single buffer solution when the ring buffer | 46 | * can be changed to a single buffer solution when the ring buffer |
46 | * access is implemented as non-locking atomic code. | 47 | * access is implemented as non-locking atomic code. |
47 | */ | 48 | */ |
48 | struct ring_buffer *op_ring_buffer_read; | 49 | static struct ring_buffer *op_ring_buffer_read; |
49 | struct ring_buffer *op_ring_buffer_write; | 50 | static struct ring_buffer *op_ring_buffer_write; |
50 | DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); | 51 | DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); |
51 | 52 | ||
52 | static void wq_sync_buffer(struct work_struct *work); | 53 | static void wq_sync_buffer(struct work_struct *work); |
@@ -54,19 +55,9 @@ static void wq_sync_buffer(struct work_struct *work); | |||
54 | #define DEFAULT_TIMER_EXPIRE (HZ / 10) | 55 | #define DEFAULT_TIMER_EXPIRE (HZ / 10) |
55 | static int work_enabled; | 56 | static int work_enabled; |
56 | 57 | ||
57 | void free_cpu_buffers(void) | ||
58 | { | ||
59 | if (op_ring_buffer_read) | ||
60 | ring_buffer_free(op_ring_buffer_read); | ||
61 | op_ring_buffer_read = NULL; | ||
62 | if (op_ring_buffer_write) | ||
63 | ring_buffer_free(op_ring_buffer_write); | ||
64 | op_ring_buffer_write = NULL; | ||
65 | } | ||
66 | |||
67 | unsigned long oprofile_get_cpu_buffer_size(void) | 58 | unsigned long oprofile_get_cpu_buffer_size(void) |
68 | { | 59 | { |
69 | return fs_cpu_buffer_size; | 60 | return oprofile_cpu_buffer_size; |
70 | } | 61 | } |
71 | 62 | ||
72 | void oprofile_cpu_buffer_inc_smpl_lost(void) | 63 | void oprofile_cpu_buffer_inc_smpl_lost(void) |
@@ -77,11 +68,21 @@ void oprofile_cpu_buffer_inc_smpl_lost(void) | |||
77 | cpu_buf->sample_lost_overflow++; | 68 | cpu_buf->sample_lost_overflow++; |
78 | } | 69 | } |
79 | 70 | ||
71 | void free_cpu_buffers(void) | ||
72 | { | ||
73 | if (op_ring_buffer_read) | ||
74 | ring_buffer_free(op_ring_buffer_read); | ||
75 | op_ring_buffer_read = NULL; | ||
76 | if (op_ring_buffer_write) | ||
77 | ring_buffer_free(op_ring_buffer_write); | ||
78 | op_ring_buffer_write = NULL; | ||
79 | } | ||
80 | |||
80 | int alloc_cpu_buffers(void) | 81 | int alloc_cpu_buffers(void) |
81 | { | 82 | { |
82 | int i; | 83 | int i; |
83 | 84 | ||
84 | unsigned long buffer_size = fs_cpu_buffer_size; | 85 | unsigned long buffer_size = oprofile_cpu_buffer_size; |
85 | 86 | ||
86 | op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS); | 87 | op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS); |
87 | if (!op_ring_buffer_read) | 88 | if (!op_ring_buffer_read) |
@@ -97,8 +98,6 @@ int alloc_cpu_buffers(void) | |||
97 | b->last_is_kernel = -1; | 98 | b->last_is_kernel = -1; |
98 | b->tracing = 0; | 99 | b->tracing = 0; |
99 | b->buffer_size = buffer_size; | 100 | b->buffer_size = buffer_size; |
100 | b->tail_pos = 0; | ||
101 | b->head_pos = 0; | ||
102 | b->sample_received = 0; | 101 | b->sample_received = 0; |
103 | b->sample_lost_overflow = 0; | 102 | b->sample_lost_overflow = 0; |
104 | b->backtrace_aborted = 0; | 103 | b->backtrace_aborted = 0; |
@@ -145,47 +144,156 @@ void end_cpu_work(void) | |||
145 | flush_scheduled_work(); | 144 | flush_scheduled_work(); |
146 | } | 145 | } |
147 | 146 | ||
148 | static inline int | 147 | /* |
149 | add_sample(struct oprofile_cpu_buffer *cpu_buf, | 148 | * This function prepares the cpu buffer to write a sample. |
150 | unsigned long pc, unsigned long event) | 149 | * |
150 | * Struct op_entry is used during operations on the ring buffer while | ||
151 | * struct op_sample contains the data that is stored in the ring | ||
152 | * buffer. Struct entry can be uninitialized. The function reserves a | ||
153 | * data array that is specified by size. Use | ||
154 | * op_cpu_buffer_write_commit() after preparing the sample. In case of | ||
155 | * errors a null pointer is returned, otherwise the pointer to the | ||
156 | * sample. | ||
157 | * | ||
158 | */ | ||
159 | struct op_sample | ||
160 | *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) | ||
161 | { | ||
162 | entry->event = ring_buffer_lock_reserve | ||
163 | (op_ring_buffer_write, sizeof(struct op_sample) + | ||
164 | size * sizeof(entry->sample->data[0]), &entry->irq_flags); | ||
165 | if (entry->event) | ||
166 | entry->sample = ring_buffer_event_data(entry->event); | ||
167 | else | ||
168 | entry->sample = NULL; | ||
169 | |||
170 | if (!entry->sample) | ||
171 | return NULL; | ||
172 | |||
173 | entry->size = size; | ||
174 | entry->data = entry->sample->data; | ||
175 | |||
176 | return entry->sample; | ||
177 | } | ||
178 | |||
179 | int op_cpu_buffer_write_commit(struct op_entry *entry) | ||
180 | { | ||
181 | return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event, | ||
182 | entry->irq_flags); | ||
183 | } | ||
184 | |||
185 | struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu) | ||
186 | { | ||
187 | struct ring_buffer_event *e; | ||
188 | e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); | ||
189 | if (e) | ||
190 | goto event; | ||
191 | if (ring_buffer_swap_cpu(op_ring_buffer_read, | ||
192 | op_ring_buffer_write, | ||
193 | cpu)) | ||
194 | return NULL; | ||
195 | e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); | ||
196 | if (e) | ||
197 | goto event; | ||
198 | return NULL; | ||
199 | |||
200 | event: | ||
201 | entry->event = e; | ||
202 | entry->sample = ring_buffer_event_data(e); | ||
203 | entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample)) | ||
204 | / sizeof(entry->sample->data[0]); | ||
205 | entry->data = entry->sample->data; | ||
206 | return entry->sample; | ||
207 | } | ||
208 | |||
209 | unsigned long op_cpu_buffer_entries(int cpu) | ||
210 | { | ||
211 | return ring_buffer_entries_cpu(op_ring_buffer_read, cpu) | ||
212 | + ring_buffer_entries_cpu(op_ring_buffer_write, cpu); | ||
213 | } | ||
214 | |||
215 | static int | ||
216 | op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace, | ||
217 | int is_kernel, struct task_struct *task) | ||
151 | { | 218 | { |
152 | struct op_entry entry; | 219 | struct op_entry entry; |
153 | int ret; | 220 | struct op_sample *sample; |
221 | unsigned long flags; | ||
222 | int size; | ||
223 | |||
224 | flags = 0; | ||
225 | |||
226 | if (backtrace) | ||
227 | flags |= TRACE_BEGIN; | ||
228 | |||
229 | /* notice a switch from user->kernel or vice versa */ | ||
230 | is_kernel = !!is_kernel; | ||
231 | if (cpu_buf->last_is_kernel != is_kernel) { | ||
232 | cpu_buf->last_is_kernel = is_kernel; | ||
233 | flags |= KERNEL_CTX_SWITCH; | ||
234 | if (is_kernel) | ||
235 | flags |= IS_KERNEL; | ||
236 | } | ||
237 | |||
238 | /* notice a task switch */ | ||
239 | if (cpu_buf->last_task != task) { | ||
240 | cpu_buf->last_task = task; | ||
241 | flags |= USER_CTX_SWITCH; | ||
242 | } | ||
243 | |||
244 | if (!flags) | ||
245 | /* nothing to do */ | ||
246 | return 0; | ||
247 | |||
248 | if (flags & USER_CTX_SWITCH) | ||
249 | size = 1; | ||
250 | else | ||
251 | size = 0; | ||
252 | |||
253 | sample = op_cpu_buffer_write_reserve(&entry, size); | ||
254 | if (!sample) | ||
255 | return -ENOMEM; | ||
154 | 256 | ||
155 | ret = cpu_buffer_write_entry(&entry); | 257 | sample->eip = ESCAPE_CODE; |
156 | if (ret) | 258 | sample->event = flags; |
157 | return ret; | ||
158 | 259 | ||
159 | entry.sample->eip = pc; | 260 | if (size) |
160 | entry.sample->event = event; | 261 | op_cpu_buffer_add_data(&entry, (unsigned long)task); |
161 | 262 | ||
162 | ret = cpu_buffer_write_commit(&entry); | 263 | op_cpu_buffer_write_commit(&entry); |
163 | if (ret) | ||
164 | return ret; | ||
165 | 264 | ||
166 | return 0; | 265 | return 0; |
167 | } | 266 | } |
168 | 267 | ||
169 | static inline int | 268 | static inline int |
170 | add_code(struct oprofile_cpu_buffer *buffer, unsigned long value) | 269 | op_add_sample(struct oprofile_cpu_buffer *cpu_buf, |
270 | unsigned long pc, unsigned long event) | ||
171 | { | 271 | { |
172 | return add_sample(buffer, ESCAPE_CODE, value); | 272 | struct op_entry entry; |
273 | struct op_sample *sample; | ||
274 | |||
275 | sample = op_cpu_buffer_write_reserve(&entry, 0); | ||
276 | if (!sample) | ||
277 | return -ENOMEM; | ||
278 | |||
279 | sample->eip = pc; | ||
280 | sample->event = event; | ||
281 | |||
282 | return op_cpu_buffer_write_commit(&entry); | ||
173 | } | 283 | } |
174 | 284 | ||
175 | /* This must be safe from any context. It's safe writing here | 285 | /* |
176 | * because of the head/tail separation of the writer and reader | 286 | * This must be safe from any context. |
177 | * of the CPU buffer. | ||
178 | * | 287 | * |
179 | * is_kernel is needed because on some architectures you cannot | 288 | * is_kernel is needed because on some architectures you cannot |
180 | * tell if you are in kernel or user space simply by looking at | 289 | * tell if you are in kernel or user space simply by looking at |
181 | * pc. We tag this in the buffer by generating kernel enter/exit | 290 | * pc. We tag this in the buffer by generating kernel enter/exit |
182 | * events whenever is_kernel changes | 291 | * events whenever is_kernel changes |
183 | */ | 292 | */ |
184 | static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, | 293 | static int |
185 | int is_kernel, unsigned long event) | 294 | log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, |
295 | unsigned long backtrace, int is_kernel, unsigned long event) | ||
186 | { | 296 | { |
187 | struct task_struct *task; | ||
188 | |||
189 | cpu_buf->sample_received++; | 297 | cpu_buf->sample_received++; |
190 | 298 | ||
191 | if (pc == ESCAPE_CODE) { | 299 | if (pc == ESCAPE_CODE) { |
@@ -193,25 +301,10 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, | |||
193 | return 0; | 301 | return 0; |
194 | } | 302 | } |
195 | 303 | ||
196 | is_kernel = !!is_kernel; | 304 | if (op_add_code(cpu_buf, backtrace, is_kernel, current)) |
197 | 305 | goto fail; | |
198 | task = current; | ||
199 | |||
200 | /* notice a switch from user->kernel or vice versa */ | ||
201 | if (cpu_buf->last_is_kernel != is_kernel) { | ||
202 | cpu_buf->last_is_kernel = is_kernel; | ||
203 | if (add_code(cpu_buf, is_kernel)) | ||
204 | goto fail; | ||
205 | } | ||
206 | |||
207 | /* notice a task switch */ | ||
208 | if (cpu_buf->last_task != task) { | ||
209 | cpu_buf->last_task = task; | ||
210 | if (add_code(cpu_buf, (unsigned long)task)) | ||
211 | goto fail; | ||
212 | } | ||
213 | 306 | ||
214 | if (add_sample(cpu_buf, pc, event)) | 307 | if (op_add_sample(cpu_buf, pc, event)) |
215 | goto fail; | 308 | goto fail; |
216 | 309 | ||
217 | return 1; | 310 | return 1; |
@@ -221,109 +314,102 @@ fail: | |||
221 | return 0; | 314 | return 0; |
222 | } | 315 | } |
223 | 316 | ||
224 | static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) | 317 | static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) |
225 | { | 318 | { |
226 | add_code(cpu_buf, CPU_TRACE_BEGIN); | ||
227 | cpu_buf->tracing = 1; | 319 | cpu_buf->tracing = 1; |
228 | return 1; | ||
229 | } | 320 | } |
230 | 321 | ||
231 | static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf) | 322 | static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf) |
232 | { | 323 | { |
233 | cpu_buf->tracing = 0; | 324 | cpu_buf->tracing = 0; |
234 | } | 325 | } |
235 | 326 | ||
236 | void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, | 327 | static inline void |
237 | unsigned long event, int is_kernel) | 328 | __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, |
329 | unsigned long event, int is_kernel) | ||
238 | { | 330 | { |
239 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | 331 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); |
240 | 332 | unsigned long backtrace = oprofile_backtrace_depth; | |
241 | if (!backtrace_depth) { | ||
242 | log_sample(cpu_buf, pc, is_kernel, event); | ||
243 | return; | ||
244 | } | ||
245 | |||
246 | if (!oprofile_begin_trace(cpu_buf)) | ||
247 | return; | ||
248 | 333 | ||
249 | /* | 334 | /* |
250 | * if log_sample() fails we can't backtrace since we lost the | 335 | * if log_sample() fails we can't backtrace since we lost the |
251 | * source of this event | 336 | * source of this event |
252 | */ | 337 | */ |
253 | if (log_sample(cpu_buf, pc, is_kernel, event)) | 338 | if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event)) |
254 | oprofile_ops.backtrace(regs, backtrace_depth); | 339 | /* failed */ |
340 | return; | ||
341 | |||
342 | if (!backtrace) | ||
343 | return; | ||
344 | |||
345 | oprofile_begin_trace(cpu_buf); | ||
346 | oprofile_ops.backtrace(regs, backtrace); | ||
255 | oprofile_end_trace(cpu_buf); | 347 | oprofile_end_trace(cpu_buf); |
256 | } | 348 | } |
257 | 349 | ||
350 | void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, | ||
351 | unsigned long event, int is_kernel) | ||
352 | { | ||
353 | __oprofile_add_ext_sample(pc, regs, event, is_kernel); | ||
354 | } | ||
355 | |||
258 | void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) | 356 | void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) |
259 | { | 357 | { |
260 | int is_kernel = !user_mode(regs); | 358 | int is_kernel = !user_mode(regs); |
261 | unsigned long pc = profile_pc(regs); | 359 | unsigned long pc = profile_pc(regs); |
262 | 360 | ||
263 | oprofile_add_ext_sample(pc, regs, event, is_kernel); | 361 | __oprofile_add_ext_sample(pc, regs, event, is_kernel); |
264 | } | 362 | } |
265 | 363 | ||
266 | #ifdef CONFIG_OPROFILE_IBS | 364 | /* |
267 | 365 | * Add samples with data to the ring buffer. | |
268 | #define MAX_IBS_SAMPLE_SIZE 14 | 366 | * |
269 | 367 | * Use oprofile_add_data(&entry, val) to add data and | |
270 | void oprofile_add_ibs_sample(struct pt_regs * const regs, | 368 | * oprofile_write_commit(&entry) to commit the sample. |
271 | unsigned int * const ibs_sample, int ibs_code) | 369 | */ |
370 | void | ||
371 | oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, | ||
372 | unsigned long pc, int code, int size) | ||
272 | { | 373 | { |
374 | struct op_sample *sample; | ||
273 | int is_kernel = !user_mode(regs); | 375 | int is_kernel = !user_mode(regs); |
274 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | 376 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); |
275 | struct task_struct *task; | ||
276 | int fail = 0; | ||
277 | 377 | ||
278 | cpu_buf->sample_received++; | 378 | cpu_buf->sample_received++; |
279 | 379 | ||
280 | /* notice a switch from user->kernel or vice versa */ | 380 | /* no backtraces for samples with data */ |
281 | if (cpu_buf->last_is_kernel != is_kernel) { | 381 | if (op_add_code(cpu_buf, 0, is_kernel, current)) |
282 | if (add_code(cpu_buf, is_kernel)) | 382 | goto fail; |
283 | goto fail; | ||
284 | cpu_buf->last_is_kernel = is_kernel; | ||
285 | } | ||
286 | |||
287 | /* notice a task switch */ | ||
288 | if (!is_kernel) { | ||
289 | task = current; | ||
290 | if (cpu_buf->last_task != task) { | ||
291 | if (add_code(cpu_buf, (unsigned long)task)) | ||
292 | goto fail; | ||
293 | cpu_buf->last_task = task; | ||
294 | } | ||
295 | } | ||
296 | |||
297 | fail = fail || add_code(cpu_buf, ibs_code); | ||
298 | fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]); | ||
299 | fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]); | ||
300 | fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]); | ||
301 | |||
302 | if (ibs_code == IBS_OP_BEGIN) { | ||
303 | fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]); | ||
304 | fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]); | ||
305 | fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]); | ||
306 | } | ||
307 | 383 | ||
308 | if (fail) | 384 | sample = op_cpu_buffer_write_reserve(entry, size + 2); |
385 | if (!sample) | ||
309 | goto fail; | 386 | goto fail; |
387 | sample->eip = ESCAPE_CODE; | ||
388 | sample->event = 0; /* no flags */ | ||
310 | 389 | ||
311 | if (backtrace_depth) | 390 | op_cpu_buffer_add_data(entry, code); |
312 | oprofile_ops.backtrace(regs, backtrace_depth); | 391 | op_cpu_buffer_add_data(entry, pc); |
313 | 392 | ||
314 | return; | 393 | return; |
315 | 394 | ||
316 | fail: | 395 | fail: |
317 | cpu_buf->sample_lost_overflow++; | 396 | cpu_buf->sample_lost_overflow++; |
318 | return; | ||
319 | } | 397 | } |
320 | 398 | ||
321 | #endif | 399 | int oprofile_add_data(struct op_entry *entry, unsigned long val) |
400 | { | ||
401 | return op_cpu_buffer_add_data(entry, val); | ||
402 | } | ||
403 | |||
404 | int oprofile_write_commit(struct op_entry *entry) | ||
405 | { | ||
406 | return op_cpu_buffer_write_commit(entry); | ||
407 | } | ||
322 | 408 | ||
323 | void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) | 409 | void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) |
324 | { | 410 | { |
325 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | 411 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); |
326 | log_sample(cpu_buf, pc, is_kernel, event); | 412 | log_sample(cpu_buf, pc, 0, is_kernel, event); |
327 | } | 413 | } |
328 | 414 | ||
329 | void oprofile_add_trace(unsigned long pc) | 415 | void oprofile_add_trace(unsigned long pc) |
@@ -340,7 +426,7 @@ void oprofile_add_trace(unsigned long pc) | |||
340 | if (pc == ESCAPE_CODE) | 426 | if (pc == ESCAPE_CODE) |
341 | goto fail; | 427 | goto fail; |
342 | 428 | ||
343 | if (add_sample(cpu_buf, pc, 0)) | 429 | if (op_add_sample(cpu_buf, pc, 0)) |
344 | goto fail; | 430 | goto fail; |
345 | 431 | ||
346 | return; | 432 | return; |
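
The separate CPU_IS_KERNEL/CPU_TRACE_BEGIN codes are now a single flags word stored in an ESCAPE_CODE sample, with the task pointer attached as a data word only on user context switches. A minimal sketch of such a record built by hand through the new helpers (emit_ctx_switch_record() is a hypothetical name; the real op_add_code() above additionally tracks last_is_kernel and last_task, and this assumes kernel context with cpu_buffer.h in scope):

#include "cpu_buffer.h"         /* op_cpu_buffer_*() and the flag bits */

/* Hypothetical helper: the record op_add_code() emits for a kernel->user
 * task switch that also starts a backtrace.  IS_KERNEL stays clear
 * because the new context is user mode; the task pointer rides along as
 * the single data word. */
static int emit_ctx_switch_record(struct task_struct *task)
{
        struct op_entry entry;
        struct op_sample *sample;

        sample = op_cpu_buffer_write_reserve(&entry, 1);
        if (!sample)
                return -ENOMEM;

        sample->eip   = ESCAPE_CODE;
        sample->event = TRACE_BEGIN | KERNEL_CTX_SWITCH | USER_CTX_SWITCH;
        op_cpu_buffer_add_data(&entry, (unsigned long)task);

        return op_cpu_buffer_write_commit(&entry);
}
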
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h index aacb0f0bc566..63f81c44846a 100644 --- a/drivers/oprofile/cpu_buffer.h +++ b/drivers/oprofile/cpu_buffer.h | |||
@@ -1,10 +1,11 @@ | |||
1 | /** | 1 | /** |
2 | * @file cpu_buffer.h | 2 | * @file cpu_buffer.h |
3 | * | 3 | * |
4 | * @remark Copyright 2002 OProfile authors | 4 | * @remark Copyright 2002-2009 OProfile authors |
5 | * @remark Read the file COPYING | 5 | * @remark Read the file COPYING |
6 | * | 6 | * |
7 | * @author John Levon <levon@movementarian.org> | 7 | * @author John Levon <levon@movementarian.org> |
8 | * @author Robert Richter <robert.richter@amd.com> | ||
8 | */ | 9 | */ |
9 | 10 | ||
10 | #ifndef OPROFILE_CPU_BUFFER_H | 11 | #ifndef OPROFILE_CPU_BUFFER_H |
@@ -31,17 +32,12 @@ void end_cpu_work(void); | |||
31 | struct op_sample { | 32 | struct op_sample { |
32 | unsigned long eip; | 33 | unsigned long eip; |
33 | unsigned long event; | 34 | unsigned long event; |
35 | unsigned long data[0]; | ||
34 | }; | 36 | }; |
35 | 37 | ||
36 | struct op_entry { | 38 | struct op_entry; |
37 | struct ring_buffer_event *event; | ||
38 | struct op_sample *sample; | ||
39 | unsigned long irq_flags; | ||
40 | }; | ||
41 | 39 | ||
42 | struct oprofile_cpu_buffer { | 40 | struct oprofile_cpu_buffer { |
43 | volatile unsigned long head_pos; | ||
44 | volatile unsigned long tail_pos; | ||
45 | unsigned long buffer_size; | 41 | unsigned long buffer_size; |
46 | struct task_struct *last_task; | 42 | struct task_struct *last_task; |
47 | int last_is_kernel; | 43 | int last_is_kernel; |
@@ -54,8 +50,6 @@ struct oprofile_cpu_buffer { | |||
54 | struct delayed_work work; | 50 | struct delayed_work work; |
55 | }; | 51 | }; |
56 | 52 | ||
57 | extern struct ring_buffer *op_ring_buffer_read; | ||
58 | extern struct ring_buffer *op_ring_buffer_write; | ||
59 | DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); | 53 | DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); |
60 | 54 | ||
61 | /* | 55 | /* |
@@ -64,7 +58,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); | |||
64 | * reset these to invalid values; the next sample collected will | 58 | * reset these to invalid values; the next sample collected will |
65 | * populate the buffer with proper values to initialize the buffer | 59 | * populate the buffer with proper values to initialize the buffer |
66 | */ | 60 | */ |
67 | static inline void cpu_buffer_reset(int cpu) | 61 | static inline void op_cpu_buffer_reset(int cpu) |
68 | { | 62 | { |
69 | struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu); | 63 | struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu); |
70 | 64 | ||
@@ -72,55 +66,48 @@ static inline void cpu_buffer_reset(int cpu) | |||
72 | cpu_buf->last_task = NULL; | 66 | cpu_buf->last_task = NULL; |
73 | } | 67 | } |
74 | 68 | ||
75 | static inline int cpu_buffer_write_entry(struct op_entry *entry) | 69 | struct op_sample |
76 | { | 70 | *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size); |
77 | entry->event = ring_buffer_lock_reserve(op_ring_buffer_write, | 71 | int op_cpu_buffer_write_commit(struct op_entry *entry); |
78 | sizeof(struct op_sample), | 72 | struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu); |
79 | &entry->irq_flags); | 73 | unsigned long op_cpu_buffer_entries(int cpu); |
80 | if (entry->event) | ||
81 | entry->sample = ring_buffer_event_data(entry->event); | ||
82 | else | ||
83 | entry->sample = NULL; | ||
84 | |||
85 | if (!entry->sample) | ||
86 | return -ENOMEM; | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | 74 | ||
91 | static inline int cpu_buffer_write_commit(struct op_entry *entry) | 75 | /* returns the remaining free size of data in the entry */ |
76 | static inline | ||
77 | int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val) | ||
92 | { | 78 | { |
93 | return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event, | 79 | if (!entry->size) |
94 | entry->irq_flags); | 80 | return 0; |
81 | *entry->data = val; | ||
82 | entry->size--; | ||
83 | entry->data++; | ||
84 | return entry->size; | ||
95 | } | 85 | } |
96 | 86 | ||
97 | static inline struct op_sample *cpu_buffer_read_entry(int cpu) | 87 | /* returns the size of data in the entry */ |
88 | static inline | ||
89 | int op_cpu_buffer_get_size(struct op_entry *entry) | ||
98 | { | 90 | { |
99 | struct ring_buffer_event *e; | 91 | return entry->size; |
100 | e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); | ||
101 | if (e) | ||
102 | return ring_buffer_event_data(e); | ||
103 | if (ring_buffer_swap_cpu(op_ring_buffer_read, | ||
104 | op_ring_buffer_write, | ||
105 | cpu)) | ||
106 | return NULL; | ||
107 | e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); | ||
108 | if (e) | ||
109 | return ring_buffer_event_data(e); | ||
110 | return NULL; | ||
111 | } | 92 | } |
112 | 93 | ||
113 | /* "acquire" as many cpu buffer slots as we can */ | 94 | /* returns 0 if empty or the size of data including the current value */ |
114 | static inline unsigned long cpu_buffer_entries(int cpu) | 95 | static inline |
96 | int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val) | ||
115 | { | 97 | { |
116 | return ring_buffer_entries_cpu(op_ring_buffer_read, cpu) | 98 | int size = entry->size; |
117 | + ring_buffer_entries_cpu(op_ring_buffer_write, cpu); | 99 | if (!size) |
100 | return 0; | ||
101 | *val = *entry->data; | ||
102 | entry->size--; | ||
103 | entry->data++; | ||
104 | return size; | ||
118 | } | 105 | } |
119 | 106 | ||
120 | /* transient events for the CPU buffer -> event buffer */ | 107 | /* extra data flags */ |
121 | #define CPU_IS_KERNEL 1 | 108 | #define KERNEL_CTX_SWITCH (1UL << 0) |
122 | #define CPU_TRACE_BEGIN 2 | 109 | #define IS_KERNEL (1UL << 1) |
123 | #define IBS_FETCH_BEGIN 3 | 110 | #define TRACE_BEGIN (1UL << 2) |
124 | #define IBS_OP_BEGIN 4 | 111 | #define USER_CTX_SWITCH (1UL << 3) |
125 | 112 | ||
126 | #endif /* OPROFILE_CPU_BUFFER_H */ | 113 | #endif /* OPROFILE_CPU_BUFFER_H */ |
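
The return-value conventions of these inline helpers are easy to misread: entry->size counts free slots on the write side and unread words on the read side, op_cpu_buffer_add_data() returns the space left after the write (0 once full), and op_cpu_buffer_get_data() returns the size including the value just read (0 when empty). A small producer/consumer sketch under those rules (fill_entry() and drain_entry() are hypothetical helpers, assuming kernel context):

#include "cpu_buffer.h"         /* the inline helpers declared above */

/* Hypothetical producer: fill a freshly reserved entry from vals[],
 * which must hold at least as many words as were reserved. */
static void fill_entry(struct op_entry *entry, const unsigned long *vals)
{
        int i = 0;

        /* on the write side get_size() reports the free slots left */
        while (op_cpu_buffer_get_size(entry))
                op_cpu_buffer_add_data(entry, vals[i++]);
}

/* Hypothetical consumer: drain an entry returned by
 * op_cpu_buffer_read_entry(); get_data() yields 0 once nothing is left. */
static int drain_entry(struct op_entry *entry)
{
        unsigned long val;
        int n = 0;

        while (op_cpu_buffer_get_data(entry, &val))
                n++;

        return n;       /* number of payload words consumed */
}
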
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c index 191a3202cecc..2b7ae366ceb1 100644 --- a/drivers/oprofile/event_buffer.c +++ b/drivers/oprofile/event_buffer.c | |||
@@ -73,8 +73,8 @@ int alloc_event_buffer(void) | |||
73 | unsigned long flags; | 73 | unsigned long flags; |
74 | 74 | ||
75 | spin_lock_irqsave(&oprofilefs_lock, flags); | 75 | spin_lock_irqsave(&oprofilefs_lock, flags); |
76 | buffer_size = fs_buffer_size; | 76 | buffer_size = oprofile_buffer_size; |
77 | buffer_watershed = fs_buffer_watershed; | 77 | buffer_watershed = oprofile_buffer_watershed; |
78 | spin_unlock_irqrestore(&oprofilefs_lock, flags); | 78 | spin_unlock_irqrestore(&oprofilefs_lock, flags); |
79 | 79 | ||
80 | if (buffer_watershed >= buffer_size) | 80 | if (buffer_watershed >= buffer_size) |
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c index cd375907f26f..3cffce90f82a 100644 --- a/drivers/oprofile/oprof.c +++ b/drivers/oprofile/oprof.c | |||
@@ -23,7 +23,7 @@ | |||
23 | struct oprofile_operations oprofile_ops; | 23 | struct oprofile_operations oprofile_ops; |
24 | 24 | ||
25 | unsigned long oprofile_started; | 25 | unsigned long oprofile_started; |
26 | unsigned long backtrace_depth; | 26 | unsigned long oprofile_backtrace_depth; |
27 | static unsigned long is_setup; | 27 | static unsigned long is_setup; |
28 | static DEFINE_MUTEX(start_mutex); | 28 | static DEFINE_MUTEX(start_mutex); |
29 | 29 | ||
@@ -172,7 +172,7 @@ int oprofile_set_backtrace(unsigned long val) | |||
172 | goto out; | 172 | goto out; |
173 | } | 173 | } |
174 | 174 | ||
175 | backtrace_depth = val; | 175 | oprofile_backtrace_depth = val; |
176 | 176 | ||
177 | out: | 177 | out: |
178 | mutex_unlock(&start_mutex); | 178 | mutex_unlock(&start_mutex); |
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h index 5df0c21a608f..c288d3c24b50 100644 --- a/drivers/oprofile/oprof.h +++ b/drivers/oprofile/oprof.h | |||
@@ -21,12 +21,12 @@ void oprofile_stop(void); | |||
21 | 21 | ||
22 | struct oprofile_operations; | 22 | struct oprofile_operations; |
23 | 23 | ||
24 | extern unsigned long fs_buffer_size; | 24 | extern unsigned long oprofile_buffer_size; |
25 | extern unsigned long fs_cpu_buffer_size; | 25 | extern unsigned long oprofile_cpu_buffer_size; |
26 | extern unsigned long fs_buffer_watershed; | 26 | extern unsigned long oprofile_buffer_watershed; |
27 | extern struct oprofile_operations oprofile_ops; | 27 | extern struct oprofile_operations oprofile_ops; |
28 | extern unsigned long oprofile_started; | 28 | extern unsigned long oprofile_started; |
29 | extern unsigned long backtrace_depth; | 29 | extern unsigned long oprofile_backtrace_depth; |
30 | 30 | ||
31 | struct super_block; | 31 | struct super_block; |
32 | struct dentry; | 32 | struct dentry; |
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c index d8201998b0b7..5d36ffc30dd5 100644 --- a/drivers/oprofile/oprofile_files.c +++ b/drivers/oprofile/oprofile_files.c | |||
@@ -14,17 +14,18 @@ | |||
14 | #include "oprofile_stats.h" | 14 | #include "oprofile_stats.h" |
15 | #include "oprof.h" | 15 | #include "oprof.h" |
16 | 16 | ||
17 | #define FS_BUFFER_SIZE_DEFAULT 131072 | 17 | #define BUFFER_SIZE_DEFAULT 131072 |
18 | #define FS_CPU_BUFFER_SIZE_DEFAULT 8192 | 18 | #define CPU_BUFFER_SIZE_DEFAULT 8192 |
19 | #define FS_BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */ | 19 | #define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */ |
20 | 20 | ||
21 | unsigned long fs_buffer_size; | 21 | unsigned long oprofile_buffer_size; |
22 | unsigned long fs_cpu_buffer_size; | 22 | unsigned long oprofile_cpu_buffer_size; |
23 | unsigned long fs_buffer_watershed; | 23 | unsigned long oprofile_buffer_watershed; |
24 | 24 | ||
25 | static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) | 25 | static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) |
26 | { | 26 | { |
27 | return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset); | 27 | return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count, |
28 | offset); | ||
28 | } | 29 | } |
29 | 30 | ||
30 | 31 | ||
@@ -125,16 +126,16 @@ static const struct file_operations dump_fops = { | |||
125 | void oprofile_create_files(struct super_block *sb, struct dentry *root) | 126 | void oprofile_create_files(struct super_block *sb, struct dentry *root) |
126 | { | 127 | { |
127 | /* reinitialize default values */ | 128 | /* reinitialize default values */ |
128 | fs_buffer_size = FS_BUFFER_SIZE_DEFAULT; | 129 | oprofile_buffer_size = BUFFER_SIZE_DEFAULT; |
129 | fs_cpu_buffer_size = FS_CPU_BUFFER_SIZE_DEFAULT; | 130 | oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT; |
130 | fs_buffer_watershed = FS_BUFFER_WATERSHED_DEFAULT; | 131 | oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT; |
131 | 132 | ||
132 | oprofilefs_create_file(sb, root, "enable", &enable_fops); | 133 | oprofilefs_create_file(sb, root, "enable", &enable_fops); |
133 | oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); | 134 | oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); |
134 | oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops); | 135 | oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops); |
135 | oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size); | 136 | oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size); |
136 | oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed); | 137 | oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed); |
137 | oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size); | 138 | oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size); |
138 | oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); | 139 | oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); |
139 | oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); | 140 | oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); |
140 | oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); | 141 | oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); |
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 1ce9fe572e51..1d9518bc4c58 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h | |||
@@ -164,4 +164,22 @@ void oprofile_put_buff(unsigned long *buf, unsigned int start, | |||
164 | unsigned long oprofile_get_cpu_buffer_size(void); | 164 | unsigned long oprofile_get_cpu_buffer_size(void); |
165 | void oprofile_cpu_buffer_inc_smpl_lost(void); | 165 | void oprofile_cpu_buffer_inc_smpl_lost(void); |
166 | 166 | ||
167 | /* cpu buffer functions */ | ||
168 | |||
169 | struct op_sample; | ||
170 | |||
171 | struct op_entry { | ||
172 | struct ring_buffer_event *event; | ||
173 | struct op_sample *sample; | ||
174 | unsigned long irq_flags; | ||
175 | unsigned long size; | ||
176 | unsigned long *data; | ||
177 | }; | ||
178 | |||
179 | void oprofile_write_reserve(struct op_entry *entry, | ||
180 | struct pt_regs * const regs, | ||
181 | unsigned long pc, int code, int size); | ||
182 | int oprofile_add_data(struct op_entry *entry, unsigned long val); | ||
183 | int oprofile_write_commit(struct op_entry *entry); | ||
184 | |||
167 | #endif /* OPROFILE_H */ | 185 | #endif /* OPROFILE_H */ |
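
Taken together, the new exports give model drivers a three-step interface for samples that carry extra payload: reserve, add data words, commit. A minimal sketch of a driver using it from its interrupt handler, mirroring how op_amd_handle_ibs() in this merge drives it (MY_SAMPLE_CODE, the helper name and the two payload words are made up for illustration):

#include <linux/oprofile.h>

#define MY_SAMPLE_CODE  42      /* hypothetical escape code */

static void my_log_extended_sample(struct pt_regs *regs, unsigned long pc,
                                   unsigned long val0, unsigned long val1)
{
        struct op_entry entry;

        /* reserve room for two extra data words behind code and pc */
        oprofile_write_reserve(&entry, regs, pc, MY_SAMPLE_CODE, 2);
        oprofile_add_data(&entry, val0);
        oprofile_add_data(&entry, val1);
        oprofile_write_commit(&entry);
}
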
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index a9d9760dc7b6..8b0daf0662ef 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -168,7 +168,13 @@ rb_event_length(struct ring_buffer_event *event) | |||
168 | */ | 168 | */ |
169 | unsigned ring_buffer_event_length(struct ring_buffer_event *event) | 169 | unsigned ring_buffer_event_length(struct ring_buffer_event *event) |
170 | { | 170 | { |
171 | return rb_event_length(event); | 171 | unsigned length = rb_event_length(event); |
172 | if (event->type != RINGBUF_TYPE_DATA) | ||
173 | return length; | ||
174 | length -= RB_EVNT_HDR_SIZE; | ||
175 | if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) | ||
176 | length -= sizeof(event->array[0]); | ||
177 | return length; | ||
172 | } | 178 | } |
173 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); | 179 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); |
174 | 180 | ||
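
The immediate user of this ring_buffer_event_length() fix is op_cpu_buffer_read_entry() above, which derives the payload word count from the event length. A worked example under assumed sizes (64-bit kernel, so unsigned long is 8 bytes and sizeof(struct op_sample) is 16 with its flexible data[] array; the exact header size is deliberately left symbolic, since only its exclusion matters):

/* Worked example for a sample reserved with two extra data words:
 *
 *      reserved payload:  sizeof(struct op_sample) + 2 * 8  = 32 bytes
 *      ring_buffer_event_length() after the fix             = 32 bytes
 *      entry->size = (32 - 16) / 8                          =  2 data words
 *
 * Before the fix the returned length still included the ring-buffer
 * event header, so the same division reported phantom data words and
 * the oprofile reader would have walked past the real payload.
 */
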