author		Carl Love <cel@us.ibm.com>	2008-12-01 19:18:34 -0500
committer	Robert Richter <robert.richter@amd.com>	2009-01-08 09:49:39 -0500
commit		9b93418e7ee59dbc96d44cfde7f65f886e54dba9 (patch)
tree		39f2e913f17b3a9dc50b6af39a32489a735ce3a6 /arch/powerpc/oprofile/cell
parent		4a6908a3a050aacc9c3a2f36b276b46c0629ad91 (diff)
powerpc/oprofile: IBM CELL: cleanup and restructuring
This patch restructures and cleans up the code a bit to make it easier
to add new functionality later.  The patch makes no functional changes
to the existing code.

Signed-off-by: Carl Love <carll@us.ibm.com>
Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'arch/powerpc/oprofile/cell')
-rw-r--r--	arch/powerpc/oprofile/cell/spu_profiler.c | 24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index dd499c3e9da7..8b1b9ccaff9f 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -31,8 +31,8 @@ static unsigned int profiling_interval;
 
 #define SPU_PC_MASK 0xFFFF
 
-static DEFINE_SPINLOCK(sample_array_lock);
-unsigned long sample_array_lock_flags;
+static DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
+unsigned long oprof_spu_smpl_arry_lck_flags;
 
 void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
 {
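Aside (not part of the patch): DEFINE_SPINLOCK() declares and statically initializes a spinlock, and the companion flags variable must be an unsigned long because spin_lock_irqsave() saves the local interrupt state into it. A minimal sketch of the idiom being renamed here, using hypothetical names (example_lck, example_count):

	#include <linux/spinlock.h>

	/* Statically initialized lock plus the word that holds the saved
	 * IRQ state across the critical section (names are hypothetical). */
	static DEFINE_SPINLOCK(example_lck);
	static unsigned long example_lck_flags;
	static unsigned int example_count;

	static void example_bump(void)
	{
		/* Disable local interrupts, take the lock, touch the shared
		 * data, then restore both in reverse order. */
		spin_lock_irqsave(&example_lck, example_lck_flags);
		example_count++;
		spin_unlock_irqrestore(&example_lck, example_lck_flags);
	}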
@@ -145,13 +145,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
 		 * sample array must be loaded and then processed for a given
 		 * cpu.  The sample array is not per cpu.
 		 */
-		spin_lock_irqsave(&sample_array_lock,
-				  sample_array_lock_flags);
+		spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+				  oprof_spu_smpl_arry_lck_flags);
 		num_samples = cell_spu_pc_collection(cpu);
 
 		if (num_samples == 0) {
-			spin_unlock_irqrestore(&sample_array_lock,
-					       sample_array_lock_flags);
+			spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+					       oprof_spu_smpl_arry_lck_flags);
 			continue;
 		}
 
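For context, a compact sketch of the pattern this rename touches (hypothetical names; the per-CPU loop structure is assumed from the context lines above): the lock is taken for each CPU's pass over the shared sample array and must be dropped on the "no samples" path before continue.

	#include <linux/cpumask.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(sketch_lck);
	static unsigned long sketch_lck_flags;

	/* Hypothetical stand-ins for the driver's collect/record steps. */
	static int sketch_collect(int cpu) { return cpu & 1; }
	static void sketch_record(int cpu, int n) { (void)cpu; (void)n; }

	static void sketch_collect_all(void)
	{
		int cpu, num_samples;

		for_each_online_cpu(cpu) {
			spin_lock_irqsave(&sketch_lck, sketch_lck_flags);
			num_samples = sketch_collect(cpu);
			if (num_samples == 0) {
				/* Nothing collected: release the lock before
				 * moving on to the next CPU. */
				spin_unlock_irqrestore(&sketch_lck,
						       sketch_lck_flags);
				continue;
			}
			sketch_record(cpu, num_samples);
			spin_unlock_irqrestore(&sketch_lck, sketch_lck_flags);
		}
	}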
@@ -162,8 +162,8 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
 					num_samples);
 		}
 
-		spin_unlock_irqrestore(&sample_array_lock,
-				       sample_array_lock_flags);
+		spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+				       oprof_spu_smpl_arry_lck_flags);
 
 	}
 	smp_wmb();	/* insure spu event buffer updates are written */
@@ -182,13 +182,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
 
 static struct hrtimer timer;
 /*
- * Entry point for SPU profiling.
+ * Entry point for SPU cycle profiling.
  * NOTE: SPU profiling is done system-wide, not per-CPU.
  *
  * cycles_reset is the count value specified by the user when
  * setting up OProfile to count SPU_CYCLES.
  */
-int start_spu_profiling(unsigned int cycles_reset)
+int start_spu_profiling_cycles(unsigned int cycles_reset)
 {
 	ktime_t kt;
 
@@ -212,10 +212,10 @@ int start_spu_profiling(unsigned int cycles_reset)
 	return 0;
 }
 
-void stop_spu_profiling(void)
+void stop_spu_profiling_cycles(void)
 {
 	spu_prof_running = 0;
 	hrtimer_cancel(&timer);
 	kfree(samples);
-	pr_debug("SPU_PROF: stop_spu_profiling issued\n");
+	pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
 }
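Since the patch only renames the entry points, callers keep the same signatures. A hypothetical caller sketch (not the actual OProfile model code; the prototypes would normally come from the driver's local header, here declared inline):

	/* Prototypes as shown in the hunks above. */
	extern int start_spu_profiling_cycles(unsigned int cycles_reset);
	extern void stop_spu_profiling_cycles(void);

	/* Hypothetical: the SPU_CYCLES reset count configured by the user. */
	static unsigned int example_spu_cycle_reset;

	static int example_start(void)
	{
		/* Propagate the return value; the hunk above shows the
		 * success path returning 0. */
		return start_spu_profiling_cycles(example_spu_cycle_reset);
	}

	static void example_stop(void)
	{
		stop_spu_profiling_cycles();
	}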