diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-09 15:43:06 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-09 15:43:06 -0500 |
commit | 4ce5f24193cef2e26f182ce708e94ba1f5fafc0c (patch) | |
tree | 300373440be70af7c8ce662d4b30d8103e7c6026 /arch/powerpc/oprofile/cell | |
parent | 7c51d57e9d7fbce89f79c41dc8da383101dbe9c6 (diff) | |
parent | a076aa4f96f40fc75451ae835a1a665ce1faf951 (diff) |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile: (31 commits)
powerpc/oprofile: fix whitespaces in op_model_cell.c
powerpc/oprofile: IBM CELL: add SPU event profiling support
powerpc/oprofile: fix cell/pr_util.h
powerpc/oprofile: IBM CELL: cleanup and restructuring
oprofile: make new cpu buffer functions part of the api
oprofile: remove #ifdef CONFIG_OPROFILE_IBS in non-ibs code
ring_buffer: fix ring_buffer_event_length()
oprofile: use new data sample format for ibs
oprofile: add op_cpu_buffer_get_data()
oprofile: add op_cpu_buffer_add_data()
oprofile: rework implementation of cpu buffer events
oprofile: modify op_cpu_buffer_read_entry()
oprofile: add op_cpu_buffer_write_reserve()
oprofile: rename variables in add_ibs_begin()
oprofile: rename add_sample() in cpu_buffer.c
oprofile: rename variable ibs_allowed to has_ibs in op_model_amd.c
oprofile: making add_sample_entry() inline
oprofile: remove backtrace code for ibs
oprofile: remove unused ibs macro
oprofile: remove unused components in struct oprofile_cpu_buffer
...
Diffstat (limited to 'arch/powerpc/oprofile/cell')
-rw-r--r-- | arch/powerpc/oprofile/cell/pr_util.h | 11 | ||||
-rw-r--r-- | arch/powerpc/oprofile/cell/spu_profiler.c | 56 |
2 files changed, 51 insertions, 16 deletions
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h index dfdbffa06818..964b93974d89 100644 --- a/arch/powerpc/oprofile/cell/pr_util.h +++ b/arch/powerpc/oprofile/cell/pr_util.h | |||
@@ -30,6 +30,10 @@ | |||
30 | extern struct delayed_work spu_work; | 30 | extern struct delayed_work spu_work; |
31 | extern int spu_prof_running; | 31 | extern int spu_prof_running; |
32 | 32 | ||
33 | #define TRACE_ARRAY_SIZE 1024 | ||
34 | |||
35 | extern spinlock_t oprof_spu_smpl_arry_lck; | ||
36 | |||
33 | struct spu_overlay_info { /* map of sections within an SPU overlay */ | 37 | struct spu_overlay_info { /* map of sections within an SPU overlay */ |
34 | unsigned int vma; /* SPU virtual memory address from elf */ | 38 | unsigned int vma; /* SPU virtual memory address from elf */ |
35 | unsigned int size; /* size of section from elf */ | 39 | unsigned int size; /* size of section from elf */ |
@@ -89,10 +93,11 @@ void vma_map_free(struct vma_to_fileoffset_map *map); | |||
89 | * Entry point for SPU profiling. | 93 | * Entry point for SPU profiling. |
90 | * cycles_reset is the SPU_CYCLES count value specified by the user. | 94 | * cycles_reset is the SPU_CYCLES count value specified by the user. |
91 | */ | 95 | */ |
92 | int start_spu_profiling(unsigned int cycles_reset); | 96 | int start_spu_profiling_cycles(unsigned int cycles_reset); |
93 | 97 | void start_spu_profiling_events(void); | |
94 | void stop_spu_profiling(void); | ||
95 | 98 | ||
99 | void stop_spu_profiling_cycles(void); | ||
100 | void stop_spu_profiling_events(void); | ||
96 | 101 | ||
97 | /* add the necessary profiling hooks */ | 102 | /* add the necessary profiling hooks */ |
98 | int spu_sync_start(void); | 103 | int spu_sync_start(void); |
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c index 83faa958b9d4..9305ddaac512 100644 --- a/arch/powerpc/oprofile/cell/spu_profiler.c +++ b/arch/powerpc/oprofile/cell/spu_profiler.c | |||
@@ -18,11 +18,21 @@ | |||
18 | #include <asm/cell-pmu.h> | 18 | #include <asm/cell-pmu.h> |
19 | #include "pr_util.h" | 19 | #include "pr_util.h" |
20 | 20 | ||
21 | #define TRACE_ARRAY_SIZE 1024 | ||
22 | #define SCALE_SHIFT 14 | 21 | #define SCALE_SHIFT 14 |
23 | 22 | ||
24 | static u32 *samples; | 23 | static u32 *samples; |
25 | 24 | ||
25 | /* spu_prof_running is a flag used to indicate if spu profiling is enabled | ||
26 | * or not. It is set by the routines start_spu_profiling_cycles() and | ||
27 | * start_spu_profiling_events(). The flag is cleared by the routines | ||
28 | * stop_spu_profiling_cycles() and stop_spu_profiling_events(). These | ||
29 | * routines are called via global_start() and global_stop() which are called in | ||
30 | * op_powerpc_start() and op_powerpc_stop(). These routines are called once | ||
31 | * per system as a result of the user starting/stopping oprofile. Hence, only | ||
32 | * one CPU per user at a time will be changing the value of spu_prof_running. | ||
33 | * In general, OProfile does not protect against multiple users trying to run | ||
34 | * OProfile at a time. | ||
35 | */ | ||
26 | int spu_prof_running; | 36 | int spu_prof_running; |
27 | static unsigned int profiling_interval; | 37 | static unsigned int profiling_interval; |
28 | 38 | ||
@@ -31,8 +41,8 @@ static unsigned int profiling_interval; | |||
31 | 41 | ||
32 | #define SPU_PC_MASK 0xFFFF | 42 | #define SPU_PC_MASK 0xFFFF |
33 | 43 | ||
34 | static DEFINE_SPINLOCK(sample_array_lock); | 44 | DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck); |
35 | unsigned long sample_array_lock_flags; | 45 | unsigned long oprof_spu_smpl_arry_lck_flags; |
36 | 46 | ||
37 | void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset) | 47 | void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset) |
38 | { | 48 | { |
@@ -145,13 +155,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |||
145 | * sample array must be loaded and then processed for a given | 155 | * sample array must be loaded and then processed for a given |
146 | * cpu. The sample array is not per cpu. | 156 | * cpu. The sample array is not per cpu. |
147 | */ | 157 | */ |
148 | spin_lock_irqsave(&sample_array_lock, | 158 | spin_lock_irqsave(&oprof_spu_smpl_arry_lck, |
149 | sample_array_lock_flags); | 159 | oprof_spu_smpl_arry_lck_flags); |
150 | num_samples = cell_spu_pc_collection(cpu); | 160 | num_samples = cell_spu_pc_collection(cpu); |
151 | 161 | ||
152 | if (num_samples == 0) { | 162 | if (num_samples == 0) { |
153 | spin_unlock_irqrestore(&sample_array_lock, | 163 | spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck, |
154 | sample_array_lock_flags); | 164 | oprof_spu_smpl_arry_lck_flags); |
155 | continue; | 165 | continue; |
156 | } | 166 | } |
157 | 167 | ||
@@ -162,8 +172,8 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |||
162 | num_samples); | 172 | num_samples); |
163 | } | 173 | } |
164 | 174 | ||
165 | spin_unlock_irqrestore(&sample_array_lock, | 175 | spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck, |
166 | sample_array_lock_flags); | 176 | oprof_spu_smpl_arry_lck_flags); |
167 | 177 | ||
168 | } | 178 | } |
169 | smp_wmb(); /* ensure spu event buffer updates are written */ | 179 | smp_wmb(); /* ensure spu event buffer updates are written */ |
@@ -182,13 +192,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |||
182 | 192 | ||
183 | static struct hrtimer timer; | 193 | static struct hrtimer timer; |
184 | /* | 194 | /* |
185 | * Entry point for SPU profiling. | 195 | * Entry point for SPU cycle profiling. |
186 | * NOTE: SPU profiling is done system-wide, not per-CPU. | 196 | * NOTE: SPU profiling is done system-wide, not per-CPU. |
187 | * | 197 | * |
188 | * cycles_reset is the count value specified by the user when | 198 | * cycles_reset is the count value specified by the user when |
189 | * setting up OProfile to count SPU_CYCLES. | 199 | * setting up OProfile to count SPU_CYCLES. |
190 | */ | 200 | */ |
191 | int start_spu_profiling(unsigned int cycles_reset) | 201 | int start_spu_profiling_cycles(unsigned int cycles_reset) |
192 | { | 202 | { |
193 | ktime_t kt; | 203 | ktime_t kt; |
194 | 204 | ||
@@ -212,10 +222,30 @@ int start_spu_profiling(unsigned int cycles_reset) | |||
212 | return 0; | 222 | return 0; |
213 | } | 223 | } |
214 | 224 | ||
215 | void stop_spu_profiling(void) | 225 | /* |
226 | * Entry point for SPU event profiling. | ||
227 | * NOTE: SPU profiling is done system-wide, not per-CPU. | ||
228 | * | ||
229 | * This variant profiles SPU events rather than cycles and | ||
230 | * therefore takes no cycles_reset argument. | ||
231 | */ | ||
232 | void start_spu_profiling_events(void) | ||
233 | { | ||
234 | spu_prof_running = 1; | ||
235 | schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE); | ||
236 | |||
237 | return; | ||
238 | } | ||
239 | |||
240 | void stop_spu_profiling_cycles(void) | ||
216 | { | 241 | { |
217 | spu_prof_running = 0; | 242 | spu_prof_running = 0; |
218 | hrtimer_cancel(&timer); | 243 | hrtimer_cancel(&timer); |
219 | kfree(samples); | 244 | kfree(samples); |
220 | pr_debug("SPU_PROF: stop_spu_profiling issued\n"); | 245 | pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n"); |
246 | } | ||
247 | |||
248 | void stop_spu_profiling_events(void) | ||
249 | { | ||
250 | spu_prof_running = 0; | ||
221 | } | 251 | } |