| author | Robert Richter <robert.richter@amd.com> | 2009-01-08 09:54:04 -0500 |
|---|---|---|
| committer | Robert Richter <robert.richter@amd.com> | 2009-01-08 09:54:04 -0500 |
| commit | a076aa4f96f40fc75451ae835a1a665ce1faf951 | (patch) |
| tree | 348be8bb538f9f47da8ec237f7bbad63088af929 | |
| parent | d2852b932f0bb5e89177aa27c7bcf07f4167e129 | (diff) |
| parent | 25006644e6042aab4bb7cdc4bfc5777cd3141df7 | (diff) |
Merge branch 'oprofile/cell' into oprofile/oprofile-for-tip
| -rw-r--r-- | arch/powerpc/include/asm/cell-pmu.h | 2 |
| -rw-r--r-- | arch/powerpc/include/asm/oprofile_impl.h | 6 |
| -rw-r--r-- | arch/powerpc/oprofile/cell/pr_util.h | 11 |
| -rw-r--r-- | arch/powerpc/oprofile/cell/spu_profiler.c | 56 |
| -rw-r--r-- | arch/powerpc/oprofile/common.c | 22 |
| -rw-r--r-- | arch/powerpc/oprofile/op_model_cell.c | 748 |

6 files changed, 705 insertions, 140 deletions
diff --git a/arch/powerpc/include/asm/cell-pmu.h b/arch/powerpc/include/asm/cell-pmu.h
index 8066eede3a0c..b4b7338ad79e 100644
--- a/arch/powerpc/include/asm/cell-pmu.h
+++ b/arch/powerpc/include/asm/cell-pmu.h
| @@ -37,9 +37,11 @@ | |||
| 37 | #define CBE_PM_STOP_AT_MAX 0x40000000 | 37 | #define CBE_PM_STOP_AT_MAX 0x40000000 |
| 38 | #define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3) | 38 | #define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3) |
| 39 | #define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28) | 39 | #define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28) |
| 40 | #define CBE_PM_TRACE_BUF_OVFLW(bit) (((bit) & 0x1) << 17) | ||
| 40 | #define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18) | 41 | #define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18) |
| 41 | #define CBE_PM_FREEZE_ALL_CTRS 0x00100000 | 42 | #define CBE_PM_FREEZE_ALL_CTRS 0x00100000 |
| 42 | #define CBE_PM_ENABLE_EXT_TRACE 0x00008000 | 43 | #define CBE_PM_ENABLE_EXT_TRACE 0x00008000 |
| 44 | #define CBE_PM_SPU_ADDR_TRACE_SET(msk) (((msk) & 0x3) << 9) | ||
| 43 | 45 | ||
| 44 | /* Macros for the trace_address register. */ | 46 | /* Macros for the trace_address register. */ |
| 45 | #define CBE_PM_TRACE_BUF_FULL 0x00000800 | 47 | #define CBE_PM_TRACE_BUF_FULL 0x00000800 |
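Editor's note: the two new macros slot into the existing pm_control bit layout (trace-buffer overflow flag at bit 17, SPU address-trace mask at bits 9-10). As a quick sanity check, here is a minimal stand-alone sketch that composes a pm_control word the way write_pm_cntrl() later in this merge does; the macro definitions are copied from this hunk, but the field values are illustrative only:

```c
#include <stdio.h>
#include <stdint.h>

/* Macros copied from cell-pmu.h, including the two added here */
#define CBE_PM_STOP_AT_MAX		0x40000000
#define CBE_PM_TRACE_MODE_SET(mode)	(((mode) & 0x3) << 28)
#define CBE_PM_TRACE_BUF_OVFLW(bit)	(((bit) & 0x1) << 17)	/* new */
#define CBE_PM_COUNT_MODE_SET(count)	(((count) & 0x3) << 18)
#define CBE_PM_FREEZE_ALL_CTRS		0x00100000
#define CBE_PM_SPU_ADDR_TRACE_SET(msk)	(((msk) & 0x3) << 9)	/* new */

int main(void)
{
	uint32_t val = 0;

	/* roughly the SPU event-profiling configuration set up below */
	val |= CBE_PM_FREEZE_ALL_CTRS;
	val |= CBE_PM_STOP_AT_MAX;
	val |= CBE_PM_TRACE_MODE_SET(2);	/* occurrence trace mode */
	val |= CBE_PM_TRACE_BUF_OVFLW(1);	/* flag trace-buffer wrap */
	val |= CBE_PM_SPU_ADDR_TRACE_SET(0x1);	/* debug bus events 2 & 3 */

	printf("pm_control = 0x%08x\n", val);	/* prints 0x60120200 */
	return 0;
}
```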
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
index 95035c602ba6..639dc96077ab 100644
--- a/arch/powerpc/include/asm/oprofile_impl.h
+++ b/arch/powerpc/include/asm/oprofile_impl.h
| @@ -32,6 +32,12 @@ struct op_system_config { | |||
| 32 | unsigned long mmcr0; | 32 | unsigned long mmcr0; |
| 33 | unsigned long mmcr1; | 33 | unsigned long mmcr1; |
| 34 | unsigned long mmcra; | 34 | unsigned long mmcra; |
| 35 | #ifdef CONFIG_OPROFILE_CELL | ||
| 36 | /* Register for oprofile user tool to check cell kernel profiling | ||
| 37 | * support. | ||
| 38 | */ | ||
| 39 | unsigned long cell_support; | ||
| 40 | #endif | ||
| 35 | #endif | 41 | #endif |
| 36 | unsigned long enable_kernel; | 42 | unsigned long enable_kernel; |
| 37 | unsigned long enable_user; | 43 | unsigned long enable_user; |
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
index 628009c01958..a048b0b72be3 100644
--- a/arch/powerpc/oprofile/cell/pr_util.h
+++ b/arch/powerpc/oprofile/cell/pr_util.h
| @@ -30,6 +30,10 @@ | |||
| 30 | extern struct delayed_work spu_work; | 30 | extern struct delayed_work spu_work; |
| 31 | extern int spu_prof_running; | 31 | extern int spu_prof_running; |
| 32 | 32 | ||
| 33 | #define TRACE_ARRAY_SIZE 1024 | ||
| 34 | |||
| 35 | extern spinlock_t oprof_spu_smpl_arry_lck; | ||
| 36 | |||
| 33 | struct spu_overlay_info { /* map of sections within an SPU overlay */ | 37 | struct spu_overlay_info { /* map of sections within an SPU overlay */ |
| 34 | unsigned int vma; /* SPU virtual memory address from elf */ | 38 | unsigned int vma; /* SPU virtual memory address from elf */ |
| 35 | unsigned int size; /* size of section from elf */ | 39 | unsigned int size; /* size of section from elf */ |
| @@ -89,10 +93,11 @@ void vma_map_free(struct vma_to_fileoffset_map *map); | |||
| 89 | * Entry point for SPU profiling. | 93 | * Entry point for SPU profiling. |
| 90 | * cycles_reset is the SPU_CYCLES count value specified by the user. | 94 | * cycles_reset is the SPU_CYCLES count value specified by the user. |
| 91 | */ | 95 | */ |
| 92 | int start_spu_profiling(unsigned int cycles_reset); | 96 | int start_spu_profiling_cycles(unsigned int cycles_reset); |
| 93 | 97 | void start_spu_profiling_events(void); | |
| 94 | void stop_spu_profiling(void); | ||
| 95 | 98 | ||
| 99 | void stop_spu_profiling_cycles(void); | ||
| 100 | void stop_spu_profiling_events(void); | ||
| 96 | 101 | ||
| 97 | /* add the necessary profiling hooks */ | 102 | /* add the necessary profiling hooks */ |
| 98 | int spu_sync_start(void); | 103 | int spu_sync_start(void); |
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index dd499c3e9da7..de170b7ae71b 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
| @@ -18,11 +18,21 @@ | |||
| 18 | #include <asm/cell-pmu.h> | 18 | #include <asm/cell-pmu.h> |
| 19 | #include "pr_util.h" | 19 | #include "pr_util.h" |
| 20 | 20 | ||
| 21 | #define TRACE_ARRAY_SIZE 1024 | ||
| 22 | #define SCALE_SHIFT 14 | 21 | #define SCALE_SHIFT 14 |
| 23 | 22 | ||
| 24 | static u32 *samples; | 23 | static u32 *samples; |
| 25 | 24 | ||
| 25 | /* spu_prof_running is a flag used to indicate if spu profiling is enabled | ||
| 26 | * or not. It is set by the routines start_spu_profiling_cycles() and | ||
| 27 | * start_spu_profiling_events(). The flag is cleared by the routines | ||
| 28 | * stop_spu_profiling_cycles() and stop_spu_profiling_events(). These | ||
| 29 | * routines are called via global_start() and global_stop() which are called in | ||
| 30 | * op_powerpc_start() and op_powerpc_stop(). These routines are called once | ||
| 31 | * per system as a result of the user starting/stopping oprofile. Hence, only | ||
| 32 | * one CPU per user at a time will be changing the value of spu_prof_running. | ||
| 33 | * In general, OProfile does not protect against multiple users trying to run | ||
| 34 | * OProfile at a time. | ||
| 35 | */ | ||
| 26 | int spu_prof_running; | 36 | int spu_prof_running; |
| 27 | static unsigned int profiling_interval; | 37 | static unsigned int profiling_interval; |
| 28 | 38 | ||
| @@ -31,8 +41,8 @@ static unsigned int profiling_interval; | |||
| 31 | 41 | ||
| 32 | #define SPU_PC_MASK 0xFFFF | 42 | #define SPU_PC_MASK 0xFFFF |
| 33 | 43 | ||
| 34 | static DEFINE_SPINLOCK(sample_array_lock); | 44 | DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck); |
| 35 | unsigned long sample_array_lock_flags; | 45 | unsigned long oprof_spu_smpl_arry_lck_flags; |
| 36 | 46 | ||
| 37 | void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset) | 47 | void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset) |
| 38 | { | 48 | { |
| @@ -145,13 +155,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |||
| 145 | * sample array must be loaded and then processed for a given | 155 | * sample array must be loaded and then processed for a given |
| 146 | * cpu. The sample array is not per cpu. | 156 | * cpu. The sample array is not per cpu. |
| 147 | */ | 157 | */ |
| 148 | spin_lock_irqsave(&sample_array_lock, | 158 | spin_lock_irqsave(&oprof_spu_smpl_arry_lck, |
| 149 | sample_array_lock_flags); | 159 | oprof_spu_smpl_arry_lck_flags); |
| 150 | num_samples = cell_spu_pc_collection(cpu); | 160 | num_samples = cell_spu_pc_collection(cpu); |
| 151 | 161 | ||
| 152 | if (num_samples == 0) { | 162 | if (num_samples == 0) { |
| 153 | spin_unlock_irqrestore(&sample_array_lock, | 163 | spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck, |
| 154 | sample_array_lock_flags); | 164 | oprof_spu_smpl_arry_lck_flags); |
| 155 | continue; | 165 | continue; |
| 156 | } | 166 | } |
| 157 | 167 | ||
| @@ -162,8 +172,8 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |||
| 162 | num_samples); | 172 | num_samples); |
| 163 | } | 173 | } |
| 164 | 174 | ||
| 165 | spin_unlock_irqrestore(&sample_array_lock, | 175 | spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck, |
| 166 | sample_array_lock_flags); | 176 | oprof_spu_smpl_arry_lck_flags); |
| 167 | 177 | ||
| 168 | } | 178 | } |
| 169 | smp_wmb(); /* ensure spu event buffer updates are written */ | 179 | smp_wmb(); /* ensure spu event buffer updates are written */ |
| @@ -182,13 +192,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |||
| 182 | 192 | ||
| 183 | static struct hrtimer timer; | 193 | static struct hrtimer timer; |
| 184 | /* | 194 | /* |
| 185 | * Entry point for SPU profiling. | 195 | * Entry point for SPU cycle profiling. |
| 186 | * NOTE: SPU profiling is done system-wide, not per-CPU. | 196 | * NOTE: SPU profiling is done system-wide, not per-CPU. |
| 187 | * | 197 | * |
| 188 | * cycles_reset is the count value specified by the user when | 198 | * cycles_reset is the count value specified by the user when |
| 189 | * setting up OProfile to count SPU_CYCLES. | 199 | * setting up OProfile to count SPU_CYCLES. |
| 190 | */ | 200 | */ |
| 191 | int start_spu_profiling(unsigned int cycles_reset) | 201 | int start_spu_profiling_cycles(unsigned int cycles_reset) |
| 192 | { | 202 | { |
| 193 | ktime_t kt; | 203 | ktime_t kt; |
| 194 | 204 | ||
| @@ -212,10 +222,30 @@ int start_spu_profiling(unsigned int cycles_reset) | |||
| 212 | return 0; | 222 | return 0; |
| 213 | } | 223 | } |
| 214 | 224 | ||
| 215 | void stop_spu_profiling(void) | 225 | /* |
| 226 | * Entry point for SPU event profiling. | ||
| 227 | * NOTE: SPU profiling is done system-wide, not per-CPU. | ||
| 228 | * | ||
| 229 | * This routine simply marks SPU event profiling as running and | ||
| 230 | * schedules the spu_work delayed work to process the samples. | ||
| 231 | */ | ||
| 232 | void start_spu_profiling_events(void) | ||
| 233 | { | ||
| 234 | spu_prof_running = 1; | ||
| 235 | schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE); | ||
| 236 | |||
| 237 | return; | ||
| 238 | } | ||
| 239 | |||
| 240 | void stop_spu_profiling_cycles(void) | ||
| 216 | { | 241 | { |
| 217 | spu_prof_running = 0; | 242 | spu_prof_running = 0; |
| 218 | hrtimer_cancel(&timer); | 243 | hrtimer_cancel(&timer); |
| 219 | kfree(samples); | 244 | kfree(samples); |
| 220 | pr_debug("SPU_PROF: stop_spu_profiling issued\n"); | 245 | pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n"); |
| 246 | } | ||
| 247 | |||
| 248 | void stop_spu_profiling_events(void) | ||
| 249 | { | ||
| 250 | spu_prof_running = 0; | ||
| 221 | } | 251 | } |
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 17807acb05d9..21f16edf6c8d 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
| @@ -132,6 +132,28 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root) | |||
| 132 | oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); | 132 | oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); |
| 133 | oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); | 133 | oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); |
| 134 | oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); | 134 | oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); |
| 135 | #ifdef CONFIG_OPROFILE_CELL | ||
| 136 | /* create a file the user tool can check to see what level of profiling | ||
| 137 | * support exists with this kernel. Initialize the bit mask to indicate | ||
| 138 | * what support the kernel has: | ||
| 139 | * bit 0 - Supports SPU event profiling in addition to PPU | ||
| 140 | * event and cycles; and SPU cycle profiling | ||
| 141 | * bits 1-31 - Currently unused. | ||
| 142 | * | ||
| 143 | * If the file does not exist, then the kernel only supports SPU | ||
| 144 | * cycle profiling, PPU event and cycle profiling. | ||
| 145 | */ | ||
| 146 | oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support); | ||
| 147 | sys.cell_support = 0x1; /* Note, the user OProfile tool must check | ||
| 148 | * that this bit is set before attempting to | ||
| 149 | * use SPU event profiling. Older kernels | ||
| 150 | * will not have this file, hence the user | ||
| 151 | * tool is not allowed to do SPU event | ||
| 152 | * profiling on older kernels. Older kernels | ||
| 153 | * will accept SPU events but collected data | ||
| 154 | * is garbage. | ||
| 155 | */ | ||
| 156 | #endif | ||
| 135 | #endif | 157 | #endif |
| 136 | 158 | ||
| 137 | for (i = 0; i < model->num_counters; ++i) { | 159 | for (i = 0; i < model->num_counters; ++i) { |
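Editor's note: for illustration, a user tool might check the new file roughly as follows. This is a hypothetical sketch; the oprofilefs mount point (/dev/oprofile here) and the file's decimal text format are assumptions, not confirmed by this commit:

```c
#include <stdio.h>

/* Hypothetical check for SPU event profiling support; assumes
 * oprofilefs is mounted at /dev/oprofile. A missing file means an
 * older kernel that supports only SPU cycle and PPU profiling. */
static int kernel_supports_spu_events(void)
{
	unsigned long mask = 0;
	FILE *f = fopen("/dev/oprofile/cell_support", "r");

	if (!f)
		return 0;
	if (fscanf(f, "%lu", &mask) != 1)
		mask = 0;
	fclose(f);

	return (mask & 0x1) != 0;	/* bit 0: SPU event profiling */
}

int main(void)
{
	printf("SPU event profiling %s\n",
	       kernel_supports_spu_events() ? "supported" : "not supported");
	return 0;
}
```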
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 25a4ec2514a3..ae06c6236d9c 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
| @@ -40,14 +40,15 @@ | |||
| 40 | #include "../platforms/cell/interrupt.h" | 40 | #include "../platforms/cell/interrupt.h" |
| 41 | #include "cell/pr_util.h" | 41 | #include "cell/pr_util.h" |
| 42 | 42 | ||
| 43 | static void cell_global_stop_spu(void); | 43 | #define PPU_PROFILING 0 |
| 44 | #define SPU_PROFILING_CYCLES 1 | ||
| 45 | #define SPU_PROFILING_EVENTS 2 | ||
| 44 | 46 | ||
| 45 | /* | 47 | #define SPU_EVENT_NUM_START 4100 |
| 46 | * spu_cycle_reset is the number of cycles between samples. | 48 | #define SPU_EVENT_NUM_STOP 4399 |
| 47 | * This variable is used for SPU profiling and should ONLY be set | 49 | #define SPU_PROFILE_EVENT_ADDR 4363 /* spu, address trace, decimal */ |
| 48 | * at the beginning of cell_reg_setup; otherwise, it's read-only. | 50 | #define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146 /* sub unit set to zero */ |
| 49 | */ | 51 | #define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186 /* sub unit set to zero */ |
| 50 | static unsigned int spu_cycle_reset; | ||
| 51 | 52 | ||
| 52 | #define NUM_SPUS_PER_NODE 8 | 53 | #define NUM_SPUS_PER_NODE 8 |
| 53 | #define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */ | 54 | #define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */ |
| @@ -66,6 +67,21 @@ static unsigned int spu_cycle_reset; | |||
| 66 | 67 | ||
| 67 | #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */ | 68 | #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */ |
| 68 | 69 | ||
| 70 | /* Minimum HW interval timer setting to send a value to the trace buffer is 10 cycles. | ||
| 71 | * To configure the counter to send a value every N cycles, set the counter to | ||
| 72 | * 2^32 - 1 - N. | ||
| 73 | */ | ||
| 74 | #define NUM_INTERVAL_CYC (0xFFFFFFFF - 10) | ||
| 75 | |||
| 76 | /* | ||
| 77 | * spu_cycle_reset is the number of cycles between samples. | ||
| 78 | * This variable is used for SPU profiling and should ONLY be set | ||
| 79 | * at the beginning of cell_reg_setup; otherwise, it's read-only. | ||
| 80 | */ | ||
| 81 | static unsigned int spu_cycle_reset; | ||
| 82 | static unsigned int profiling_mode; | ||
| 83 | static int spu_evnt_phys_spu_indx; | ||
| 84 | |||
| 69 | struct pmc_cntrl_data { | 85 | struct pmc_cntrl_data { |
| 70 | unsigned long vcntr; | 86 | unsigned long vcntr; |
| 71 | unsigned long evnts; | 87 | unsigned long evnts; |
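Editor's note: the interval-timer preload deserves a worked example. The timer counts up and emits a trace-buffer write when it wraps, so a preload of 2^32 - 1 - N yields one write roughly every N cycles; with the hardware minimum N = 10 this gives NUM_INTERVAL_CYC = 0xFFFFFFF5. A stand-alone sketch of the same arithmetic:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Preload 2^32 - 1 - N so the up-counting interval timer wraps
	 * (and stores a trace value) every N cycles; N = 10 is the
	 * documented hardware minimum. */
	uint32_t n = 10;
	uint32_t preload = 0xFFFFFFFFu - n;	/* NUM_INTERVAL_CYC */

	printf("N = %u -> preload = 0x%08X\n", n, preload);	/* 0xFFFFFFF5 */
	return 0;
}
```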
| @@ -105,6 +121,8 @@ struct pm_cntrl { | |||
| 105 | u16 trace_mode; | 121 | u16 trace_mode; |
| 106 | u16 freeze; | 122 | u16 freeze; |
| 107 | u16 count_mode; | 123 | u16 count_mode; |
| 124 | u16 spu_addr_trace; | ||
| 125 | u8 trace_buf_ovflw; | ||
| 108 | }; | 126 | }; |
| 109 | 127 | ||
| 110 | static struct { | 128 | static struct { |
| @@ -122,7 +140,7 @@ static struct { | |||
| 122 | #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2) | 140 | #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2) |
| 123 | 141 | ||
| 124 | static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values); | 142 | static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values); |
| 125 | 143 | static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE]; | |
| 126 | static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS]; | 144 | static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS]; |
| 127 | 145 | ||
| 128 | /* | 146 | /* |
| @@ -152,6 +170,7 @@ static u32 hdw_thread; | |||
| 152 | 170 | ||
| 153 | static u32 virt_cntr_inter_mask; | 171 | static u32 virt_cntr_inter_mask; |
| 154 | static struct timer_list timer_virt_cntr; | 172 | static struct timer_list timer_virt_cntr; |
| 173 | static struct timer_list timer_spu_event_swap; | ||
| 155 | 174 | ||
| 156 | /* | 175 | /* |
| 157 | * pm_signal needs to be global since it is initialized in | 176 | * pm_signal needs to be global since it is initialized in |
| @@ -165,7 +184,7 @@ static int spu_rtas_token; /* token for SPU cycle profiling */ | |||
| 165 | static u32 reset_value[NR_PHYS_CTRS]; | 184 | static u32 reset_value[NR_PHYS_CTRS]; |
| 166 | static int num_counters; | 185 | static int num_counters; |
| 167 | static int oprofile_running; | 186 | static int oprofile_running; |
| 168 | static DEFINE_SPINLOCK(virt_cntr_lock); | 187 | static DEFINE_SPINLOCK(cntr_lock); |
| 169 | 188 | ||
| 170 | static u32 ctr_enabled; | 189 | static u32 ctr_enabled; |
| 171 | 190 | ||
| @@ -336,13 +355,13 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask) | |||
| 336 | for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) { | 355 | for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) { |
| 337 | if (bus_word & (1 << i)) { | 356 | if (bus_word & (1 << i)) { |
| 338 | pm_regs.debug_bus_control |= | 357 | pm_regs.debug_bus_control |= |
| 339 | (bus_type << (30 - (2 * i))); | 358 | (bus_type << (30 - (2 * i))); |
| 340 | 359 | ||
| 341 | for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) { | 360 | for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) { |
| 342 | if (input_bus[j] == 0xff) { | 361 | if (input_bus[j] == 0xff) { |
| 343 | input_bus[j] = i; | 362 | input_bus[j] = i; |
| 344 | pm_regs.group_control |= | 363 | pm_regs.group_control |= |
| 345 | (i << (30 - (2 * j))); | 364 | (i << (30 - (2 * j))); |
| 346 | 365 | ||
| 347 | break; | 366 | break; |
| 348 | } | 367 | } |
| @@ -367,12 +386,16 @@ static void write_pm_cntrl(int cpu) | |||
| 367 | if (pm_regs.pm_cntrl.stop_at_max == 1) | 386 | if (pm_regs.pm_cntrl.stop_at_max == 1) |
| 368 | val |= CBE_PM_STOP_AT_MAX; | 387 | val |= CBE_PM_STOP_AT_MAX; |
| 369 | 388 | ||
| 370 | if (pm_regs.pm_cntrl.trace_mode == 1) | 389 | if (pm_regs.pm_cntrl.trace_mode != 0) |
| 371 | val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode); | 390 | val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode); |
| 372 | 391 | ||
| 392 | if (pm_regs.pm_cntrl.trace_buf_ovflw == 1) | ||
| 393 | val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw); | ||
| 373 | if (pm_regs.pm_cntrl.freeze == 1) | 394 | if (pm_regs.pm_cntrl.freeze == 1) |
| 374 | val |= CBE_PM_FREEZE_ALL_CTRS; | 395 | val |= CBE_PM_FREEZE_ALL_CTRS; |
| 375 | 396 | ||
| 397 | val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace); | ||
| 398 | |||
| 376 | /* | 399 | /* |
| 377 | * Routine set_count_mode must be called previously to set | 400 | * Routine set_count_mode must be called previously to set |
| 378 | * the count mode based on the user selection of user and kernel. | 401 | * the count mode based on the user selection of user and kernel. |
| @@ -441,7 +464,7 @@ static void cell_virtual_cntr(unsigned long data) | |||
| 441 | * not both playing with the counters on the same node. | 464 | * not both playing with the counters on the same node. |
| 442 | */ | 465 | */ |
| 443 | 466 | ||
| 444 | spin_lock_irqsave(&virt_cntr_lock, flags); | 467 | spin_lock_irqsave(&cntr_lock, flags); |
| 445 | 468 | ||
| 446 | prev_hdw_thread = hdw_thread; | 469 | prev_hdw_thread = hdw_thread; |
| 447 | 470 | ||
| @@ -480,7 +503,7 @@ static void cell_virtual_cntr(unsigned long data) | |||
| 480 | cbe_disable_pm_interrupts(cpu); | 503 | cbe_disable_pm_interrupts(cpu); |
| 481 | for (i = 0; i < num_counters; i++) { | 504 | for (i = 0; i < num_counters; i++) { |
| 482 | per_cpu(pmc_values, cpu + prev_hdw_thread)[i] | 505 | per_cpu(pmc_values, cpu + prev_hdw_thread)[i] |
| 483 | = cbe_read_ctr(cpu, i); | 506 | = cbe_read_ctr(cpu, i); |
| 484 | 507 | ||
| 485 | if (per_cpu(pmc_values, cpu + next_hdw_thread)[i] | 508 | if (per_cpu(pmc_values, cpu + next_hdw_thread)[i] |
| 486 | == 0xFFFFFFFF) | 509 | == 0xFFFFFFFF) |
| @@ -527,7 +550,7 @@ static void cell_virtual_cntr(unsigned long data) | |||
| 527 | cbe_enable_pm(cpu); | 550 | cbe_enable_pm(cpu); |
| 528 | } | 551 | } |
| 529 | 552 | ||
| 530 | spin_unlock_irqrestore(&virt_cntr_lock, flags); | 553 | spin_unlock_irqrestore(&cntr_lock, flags); |
| 531 | 554 | ||
| 532 | mod_timer(&timer_virt_cntr, jiffies + HZ / 10); | 555 | mod_timer(&timer_virt_cntr, jiffies + HZ / 10); |
| 533 | } | 556 | } |
| @@ -541,38 +564,146 @@ static void start_virt_cntrs(void) | |||
| 541 | add_timer(&timer_virt_cntr); | 564 | add_timer(&timer_virt_cntr); |
| 542 | } | 565 | } |
| 543 | 566 | ||
| 544 | /* This function is called once for all cpus combined */ | 567 | static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr, |
| 545 | static int cell_reg_setup(struct op_counter_config *ctr, | ||
| 546 | struct op_system_config *sys, int num_ctrs) | 568 | struct op_system_config *sys, int num_ctrs) |
| 547 | { | 569 | { |
| 548 | int i, j, cpu; | 570 | spu_cycle_reset = ctr[0].count; |
| 549 | spu_cycle_reset = 0; | ||
| 550 | 571 | ||
| 551 | if (ctr[0].event == SPU_CYCLES_EVENT_NUM) { | 572 | /* |
| 552 | spu_cycle_reset = ctr[0].count; | 573 | * Each node will need to make the rtas call to start |
| 574 | * and stop SPU profiling. Get the token once and store it. | ||
| 575 | */ | ||
| 576 | spu_rtas_token = rtas_token("ibm,cbe-spu-perftools"); | ||
| 577 | |||
| 578 | if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) { | ||
| 579 | printk(KERN_ERR | ||
| 580 | "%s: rtas token ibm,cbe-spu-perftools unknown\n", | ||
| 581 | __func__); | ||
| 582 | return -EIO; | ||
| 583 | } | ||
| 584 | return 0; | ||
| 585 | } | ||
| 586 | |||
| 587 | /* Unfortunately, the hardware will only support event profiling | ||
| 588 | * on one SPU per node at a time. Therefore, we must time-slice | ||
| 589 | * the profiling across all SPUs in the node. Note, we do this | ||
| 590 | * in parallel for each node. The following routine is called | ||
| 591 | * periodically, based on a kernel timer, to switch which SPU is | ||
| 592 | * being monitored, in round-robin fashion. | ||
| 593 | */ | ||
| 594 | static void spu_evnt_swap(unsigned long data) | ||
| 595 | { | ||
| 596 | int node; | ||
| 597 | int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx; | ||
| 598 | unsigned long flags; | ||
| 599 | int cpu; | ||
| 600 | int ret; | ||
| 601 | u32 interrupt_mask; | ||
| 602 | |||
| 603 | |||
| 604 | /* enable interrupts on cntr 0 */ | ||
| 605 | interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0); | ||
| 606 | |||
| 607 | hdw_thread = 0; | ||
| 608 | |||
| 609 | /* Make sure spu event interrupt handler and spu event swap | ||
| 610 | * don't access the counters simultaneously. | ||
| 611 | */ | ||
| 612 | spin_lock_irqsave(&cntr_lock, flags); | ||
| 613 | |||
| 614 | cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx; | ||
| 615 | |||
| 616 | if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE) | ||
| 617 | spu_evnt_phys_spu_indx = 0; | ||
| 618 | |||
| 619 | pm_signal[0].sub_unit = spu_evnt_phys_spu_indx; | ||
| 620 | pm_signal[1].sub_unit = spu_evnt_phys_spu_indx; | ||
| 621 | pm_signal[2].sub_unit = spu_evnt_phys_spu_indx; | ||
| 622 | |||
| 623 | /* switch the SPU being profiled on each node */ | ||
| 624 | for_each_online_cpu(cpu) { | ||
| 625 | if (cbe_get_hw_thread_id(cpu)) | ||
| 626 | continue; | ||
| 627 | |||
| 628 | node = cbe_cpu_to_node(cpu); | ||
| 629 | cur_phys_spu = (node * NUM_SPUS_PER_NODE) | ||
| 630 | + cur_spu_evnt_phys_spu_indx; | ||
| 631 | nxt_phys_spu = (node * NUM_SPUS_PER_NODE) | ||
| 632 | + spu_evnt_phys_spu_indx; | ||
| 553 | 633 | ||
| 554 | /* | 634 | /* |
| 555 | * Each node will need to make the rtas call to start | 635 | * stop counters, save counter values, restore counts |
| 556 | * and stop SPU profiling. Get the token once and store it. | 636 | * for previous physical SPU |
| 557 | */ | 637 | */ |
| 558 | spu_rtas_token = rtas_token("ibm,cbe-spu-perftools"); | 638 | cbe_disable_pm(cpu); |
| 639 | cbe_disable_pm_interrupts(cpu); | ||
| 559 | 640 | ||
| 560 | if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) { | 641 | spu_pm_cnt[cur_phys_spu] |
| 561 | printk(KERN_ERR | 642 | = cbe_read_ctr(cpu, 0); |
| 562 | "%s: rtas token ibm,cbe-spu-perftools unknown\n", | 643 | |
| 563 | __func__); | 644 | /* restore previous count for the next spu to sample */ |
| 564 | return -EIO; | 645 | /* NOTE, hardware issue, counter will not start if the |
| 565 | } | 646 | * counter value is at max (0xFFFFFFFF). |
| 647 | */ | ||
| 648 | if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF) | ||
| 649 | cbe_write_ctr(cpu, 0, 0xFFFFFFF0); | ||
| 650 | else | ||
| 651 | cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]); | ||
| 652 | |||
| 653 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | ||
| 654 | |||
| 655 | /* set up the debug bus to measure the one event and | ||
| 656 | * the two events to route the next SPU's PC on | ||
| 657 | * the debug bus | ||
| 658 | */ | ||
| 659 | ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3); | ||
| 660 | if (ret) | ||
| 661 | printk(KERN_ERR "%s: pm_rtas_activate_signals failed, " | ||
| 662 | "SPU event swap\n", __func__); | ||
| 663 | |||
| 664 | /* clear the trace buffer; don't want to take the PC for | ||
| 665 | * the previous SPU */ | ||
| 666 | cbe_write_pm(cpu, trace_address, 0); | ||
| 667 | |||
| 668 | enable_ctr(cpu, 0, pm_regs.pm07_cntrl); | ||
| 669 | |||
| 670 | /* Enable interrupts on the CPU thread that is starting */ | ||
| 671 | cbe_enable_pm_interrupts(cpu, hdw_thread, | ||
| 672 | interrupt_mask); | ||
| 673 | cbe_enable_pm(cpu); | ||
| 566 | } | 674 | } |
| 567 | 675 | ||
| 568 | pm_rtas_token = rtas_token("ibm,cbe-perftools"); | 676 | spin_unlock_irqrestore(&cntr_lock, flags); |
| 569 | 677 | ||
| 678 | /* swap SPUs approximately every 1/25th of a second (HZ / 25) */ | ||
| 679 | mod_timer(&timer_spu_event_swap, jiffies + HZ / 25); | ||
| 680 | } | ||
| 681 | |||
| 682 | static void start_spu_event_swap(void) | ||
| 683 | { | ||
| 684 | init_timer(&timer_spu_event_swap); | ||
| 685 | timer_spu_event_swap.function = spu_evnt_swap; | ||
| 686 | timer_spu_event_swap.data = 0UL; | ||
| 687 | timer_spu_event_swap.expires = jiffies + HZ / 25; | ||
| 688 | add_timer(&timer_spu_event_swap); | ||
| 689 | } | ||
| 690 | |||
| 691 | static int cell_reg_setup_spu_events(struct op_counter_config *ctr, | ||
| 692 | struct op_system_config *sys, int num_ctrs) | ||
| 693 | { | ||
| 694 | int i; | ||
| 695 | |||
| 696 | /* routine is called once for all nodes */ | ||
| 697 | |||
| 698 | spu_evnt_phys_spu_indx = 0; | ||
| 570 | /* | 699 | /* |
| 571 | * For all events excetp PPU CYCLEs, each node will need to make | 700 | * For all events except PPU CYCLEs, each node will need to make |
| 572 | * the rtas cbe-perftools call to setup and reset the debug bus. | 701 | * the rtas cbe-perftools call to setup and reset the debug bus. |
| 573 | * Make the token lookup call once and store it in the global | 702 | * Make the token lookup call once and store it in the global |
| 574 | * variable pm_rtas_token. | 703 | * variable pm_rtas_token. |
| 575 | */ | 704 | */ |
| 705 | pm_rtas_token = rtas_token("ibm,cbe-perftools"); | ||
| 706 | |||
| 576 | if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) { | 707 | if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) { |
| 577 | printk(KERN_ERR | 708 | printk(KERN_ERR |
| 578 | "%s: rtas token ibm,cbe-perftools unknown\n", | 709 | "%s: rtas token ibm,cbe-perftools unknown\n", |
| @@ -580,6 +711,58 @@ static int cell_reg_setup(struct op_counter_config *ctr, | |||
| 580 | return -EIO; | 711 | return -EIO; |
| 581 | } | 712 | } |
| 582 | 713 | ||
| 714 | /* setup the pm_control register settings, | ||
| 715 | * settings will be written per node by the | ||
| 716 | * cell_cpu_setup() function. | ||
| 717 | */ | ||
| 718 | pm_regs.pm_cntrl.trace_buf_ovflw = 1; | ||
| 719 | |||
| 720 | /* Use the occurrence trace mode to have SPU PC saved | ||
| 721 | * to the trace buffer. Occurrence data in trace buffer | ||
| 722 | * is not used. Bit 2 must be set to store SPU addresses. | ||
| 723 | */ | ||
| 724 | pm_regs.pm_cntrl.trace_mode = 2; | ||
| 725 | |||
| 726 | pm_regs.pm_cntrl.spu_addr_trace = 0x1; /* using debug bus | ||
| 727 | event 2 & 3 */ | ||
| 728 | |||
| 729 | /* setup the debug bus event array with the SPU PC routing events. | ||
| 730 | * Note, pm_signal[0] will be filled in by set_pm_event() call below. | ||
| 731 | */ | ||
| 732 | pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100; | ||
| 733 | pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A); | ||
| 734 | pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100; | ||
| 735 | pm_signal[1].sub_unit = spu_evnt_phys_spu_indx; | ||
| 736 | |||
| 737 | pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100; | ||
| 738 | pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B); | ||
| 739 | pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100; | ||
| 740 | pm_signal[2].sub_unit = spu_evnt_phys_spu_indx; | ||
| 741 | |||
| 742 | /* Set the user-selected SPU event to profile. | ||
| 743 | * Note, only one SPU profiling event is supported at a time. | ||
| 744 | */ | ||
| 745 | num_counters = 1; /* Only support one SPU event at a time */ | ||
| 746 | set_pm_event(0, ctr[0].event, ctr[0].unit_mask); | ||
| 747 | |||
| 748 | reset_value[0] = 0xFFFFFFFF - ctr[0].count; | ||
| 749 | |||
| 750 | /* global, used by cell_cpu_setup */ | ||
| 751 | ctr_enabled |= 1; | ||
| 752 | |||
| 753 | /* Initialize the count for each SPU to the reset value */ | ||
| 754 | for (i = 0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++) | ||
| 755 | spu_pm_cnt[i] = reset_value[0]; | ||
| 756 | |||
| 757 | return 0; | ||
| 758 | } | ||
| 759 | |||
| 760 | static int cell_reg_setup_ppu(struct op_counter_config *ctr, | ||
| 761 | struct op_system_config *sys, int num_ctrs) | ||
| 762 | { | ||
| 763 | /* routine is called once for all nodes */ | ||
| 764 | int i, j, cpu; | ||
| 765 | |||
| 583 | num_counters = num_ctrs; | 766 | num_counters = num_ctrs; |
| 584 | 767 | ||
| 585 | if (unlikely(num_ctrs > NR_PHYS_CTRS)) { | 768 | if (unlikely(num_ctrs > NR_PHYS_CTRS)) { |
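Editor's note: the SPU event numbers use a decimal encoding, which is why SPU_PROFILE_EVENT_ADDR is annotated "decimal" above: dividing by 100 yields the debug-bus signal group and the remainder yields the bit, exactly as assigned to pm_signal[1] and pm_signal[2] in cell_reg_setup_spu_events(). A quick stand-alone check:

```c
#include <assert.h>

#define SPU_PROFILE_EVENT_ADDR 4363	/* spu, address trace, decimal */

int main(void)
{
	/* group = event / 100, bit = event % 100 */
	assert(SPU_PROFILE_EVENT_ADDR / 100 == 43);
	assert(SPU_PROFILE_EVENT_ADDR % 100 == 63);
	return 0;
}
```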
| @@ -589,14 +772,6 @@ static int cell_reg_setup(struct op_counter_config *ctr, | |||
| 589 | __func__); | 772 | __func__); |
| 590 | return -EIO; | 773 | return -EIO; |
| 591 | } | 774 | } |
| 592 | pm_regs.group_control = 0; | ||
| 593 | pm_regs.debug_bus_control = 0; | ||
| 594 | |||
| 595 | /* setup the pm_control register */ | ||
| 596 | memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl)); | ||
| 597 | pm_regs.pm_cntrl.stop_at_max = 1; | ||
| 598 | pm_regs.pm_cntrl.trace_mode = 0; | ||
| 599 | pm_regs.pm_cntrl.freeze = 1; | ||
| 600 | 775 | ||
| 601 | set_count_mode(sys->enable_kernel, sys->enable_user); | 776 | set_count_mode(sys->enable_kernel, sys->enable_user); |
| 602 | 777 | ||
| @@ -665,6 +840,63 @@ static int cell_reg_setup(struct op_counter_config *ctr, | |||
| 665 | } | 840 | } |
| 666 | 841 | ||
| 667 | 842 | ||
| 843 | /* This function is called once for all cpus combined */ | ||
| 844 | static int cell_reg_setup(struct op_counter_config *ctr, | ||
| 845 | struct op_system_config *sys, int num_ctrs) | ||
| 846 | { | ||
| 847 | int ret = 0; | ||
| 848 | spu_cycle_reset = 0; | ||
| 849 | |||
| 850 | /* initialize the spu_addr_trace value; it will be reset if | ||
| 851 | * doing spu event profiling. | ||
| 852 | */ | ||
| 853 | pm_regs.group_control = 0; | ||
| 854 | pm_regs.debug_bus_control = 0; | ||
| 855 | pm_regs.pm_cntrl.stop_at_max = 1; | ||
| 856 | pm_regs.pm_cntrl.trace_mode = 0; | ||
| 857 | pm_regs.pm_cntrl.freeze = 1; | ||
| 858 | pm_regs.pm_cntrl.trace_buf_ovflw = 0; | ||
| 859 | pm_regs.pm_cntrl.spu_addr_trace = 0; | ||
| 860 | |||
| 861 | /* | ||
| 862 | * For all events except PPU CYCLEs, each node will need to make | ||
| 863 | * the rtas cbe-perftools call to setup and reset the debug bus. | ||
| 864 | * Make the token lookup call once and store it in the global | ||
| 865 | * variable pm_rtas_token. | ||
| 866 | */ | ||
| 867 | pm_rtas_token = rtas_token("ibm,cbe-perftools"); | ||
| 868 | |||
| 869 | if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) { | ||
| 870 | printk(KERN_ERR | ||
| 871 | "%s: rtas token ibm,cbe-perftools unknown\n", | ||
| 872 | __func__); | ||
| 873 | return -EIO; | ||
| 874 | } | ||
| 875 | |||
| 876 | if (ctr[0].event == SPU_CYCLES_EVENT_NUM) { | ||
| 877 | profiling_mode = SPU_PROFILING_CYCLES; | ||
| 878 | ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs); | ||
| 879 | } else if ((ctr[0].event >= SPU_EVENT_NUM_START) && | ||
| 880 | (ctr[0].event <= SPU_EVENT_NUM_STOP)) { | ||
| 881 | profiling_mode = SPU_PROFILING_EVENTS; | ||
| 882 | spu_cycle_reset = ctr[0].count; | ||
| 883 | |||
| 884 | /* for SPU event profiling, need to setup the | ||
| 885 | * pm_signal array with the events to route the | ||
| 886 | * SPU PC before making the FW call. Note, only | ||
| 887 | * one SPU event for profiling can be specified | ||
| 888 | * at a time. | ||
| 889 | */ | ||
| 890 | cell_reg_setup_spu_events(ctr, sys, num_ctrs); | ||
| 891 | } else { | ||
| 892 | profiling_mode = PPU_PROFILING; | ||
| 893 | ret = cell_reg_setup_ppu(ctr, sys, num_ctrs); | ||
| 894 | } | ||
| 895 | |||
| 896 | return ret; | ||
| 897 | } | ||
| 898 | |||
| 899 | |||
| 668 | 900 | ||
| 669 | /* This function is called once for each cpu */ | 901 | /* This function is called once for each cpu */ |
| 670 | static int cell_cpu_setup(struct op_counter_config *cntr) | 902 | static int cell_cpu_setup(struct op_counter_config *cntr) |
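Editor's note: the new cell_reg_setup() dispatch keys entirely off ctr[0].event. A stand-alone sketch of the same classification, with the constants copied from the top of this file:

```c
#include <assert.h>

#define PPU_PROFILING		0
#define SPU_PROFILING_CYCLES	1
#define SPU_PROFILING_EVENTS	2

#define SPU_CYCLES_EVENT_NUM	2
#define SPU_EVENT_NUM_START	4100
#define SPU_EVENT_NUM_STOP	4399

/* mirrors the profiling_mode selection in cell_reg_setup() above */
static int classify(int event)
{
	if (event == SPU_CYCLES_EVENT_NUM)
		return SPU_PROFILING_CYCLES;
	if (event >= SPU_EVENT_NUM_START && event <= SPU_EVENT_NUM_STOP)
		return SPU_PROFILING_EVENTS;
	return PPU_PROFILING;
}

int main(void)
{
	assert(classify(2) == SPU_PROFILING_CYCLES);
	assert(classify(4363) == SPU_PROFILING_EVENTS);	/* addr trace */
	assert(classify(1) == PPU_PROFILING);
	return 0;
}
```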
| @@ -672,8 +904,13 @@ static int cell_cpu_setup(struct op_counter_config *cntr) | |||
| 672 | u32 cpu = smp_processor_id(); | 904 | u32 cpu = smp_processor_id(); |
| 673 | u32 num_enabled = 0; | 905 | u32 num_enabled = 0; |
| 674 | int i; | 906 | int i; |
| 907 | int ret; | ||
| 675 | 908 | ||
| 676 | if (spu_cycle_reset) | 909 | /* Cycle based SPU profiling does not use the performance |
| 910 | * counters. The trace array is configured to collect | ||
| 911 | * the data. | ||
| 912 | */ | ||
| 913 | if (profiling_mode == SPU_PROFILING_CYCLES) | ||
| 677 | return 0; | 914 | return 0; |
| 678 | 915 | ||
| 679 | /* There is one performance monitor per processor chip (i.e. node), | 916 | /* There is one performance monitor per processor chip (i.e. node), |
| @@ -686,7 +923,6 @@ static int cell_cpu_setup(struct op_counter_config *cntr) | |||
| 686 | cbe_disable_pm(cpu); | 923 | cbe_disable_pm(cpu); |
| 687 | cbe_disable_pm_interrupts(cpu); | 924 | cbe_disable_pm_interrupts(cpu); |
| 688 | 925 | ||
| 689 | cbe_write_pm(cpu, pm_interval, 0); | ||
| 690 | cbe_write_pm(cpu, pm_start_stop, 0); | 926 | cbe_write_pm(cpu, pm_start_stop, 0); |
| 691 | cbe_write_pm(cpu, group_control, pm_regs.group_control); | 927 | cbe_write_pm(cpu, group_control, pm_regs.group_control); |
| 692 | cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control); | 928 | cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control); |
| @@ -703,7 +939,20 @@ static int cell_cpu_setup(struct op_counter_config *cntr) | |||
| 703 | * The pm_rtas_activate_signals will return -EIO if the FW | 939 | * The pm_rtas_activate_signals will return -EIO if the FW |
| 704 | * call failed. | 940 | * call failed. |
| 705 | */ | 941 | */ |
| 706 | return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled); | 942 | if (profiling_mode == SPU_PROFILING_EVENTS) { |
| 943 | /* For SPU event profiling also need to setup the | ||
| 944 | * pm interval timer | ||
| 945 | */ | ||
| 946 | ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), | ||
| 947 | num_enabled+2); | ||
| 948 | /* store PC from debug bus to Trace buffer as often | ||
| 949 | * as possible (every 10 cycles) | ||
| 950 | */ | ||
| 951 | cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC); | ||
| 952 | return ret; | ||
| 953 | } else | ||
| 954 | return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), | ||
| 955 | num_enabled); | ||
| 707 | } | 956 | } |
| 708 | 957 | ||
| 709 | #define ENTRIES 303 | 958 | #define ENTRIES 303 |
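Editor's note on the signal count: in event mode cell_cpu_setup() activates num_enabled + 2 signals because, besides the one counted SPU event, pm_signal[1] and pm_signal[2] route the SPU program counter onto the debug bus; this matches the hard-coded 3 that spu_evnt_swap() passes to pm_rtas_activate_signals(). A trivial stand-alone check:

```c
#include <assert.h>

int main(void)
{
	/* one counted SPU event + two PC-routing debug-bus signals */
	int num_enabled = 1;	/* only one SPU event is supported */
	assert(num_enabled + 2 == 3);	/* count used by spu_evnt_swap() */
	return 0;
}
```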
| @@ -885,7 +1134,122 @@ static struct notifier_block cpu_freq_notifier_block = { | |||
| 885 | }; | 1134 | }; |
| 886 | #endif | 1135 | #endif |
| 887 | 1136 | ||
| 888 | static int cell_global_start_spu(struct op_counter_config *ctr) | 1137 | /* |
| 1138 | * Note the generic OProfile stop calls do not support returning | ||
| 1139 | * an error on stop. Hence, will not return an error if the FW | ||
| 1140 | * calls fail on stop. Failure to reset the debug bus is not an issue. | ||
| 1141 | * Failure to disable the SPU profiling is not an issue. The FW calls | ||
| 1142 | * to enable the performance counters and debug bus will work even if | ||
| 1143 | * the hardware was not cleanly reset. | ||
| 1144 | */ | ||
| 1145 | static void cell_global_stop_spu_cycles(void) | ||
| 1146 | { | ||
| 1147 | int subfunc, rtn_value; | ||
| 1148 | unsigned int lfsr_value; | ||
| 1149 | int cpu; | ||
| 1150 | |||
| 1151 | oprofile_running = 0; | ||
| 1152 | smp_wmb(); | ||
| 1153 | |||
| 1154 | #ifdef CONFIG_CPU_FREQ | ||
| 1155 | cpufreq_unregister_notifier(&cpu_freq_notifier_block, | ||
| 1156 | CPUFREQ_TRANSITION_NOTIFIER); | ||
| 1157 | #endif | ||
| 1158 | |||
| 1159 | for_each_online_cpu(cpu) { | ||
| 1160 | if (cbe_get_hw_thread_id(cpu)) | ||
| 1161 | continue; | ||
| 1162 | |||
| 1163 | subfunc = 3; /* | ||
| 1164 | * 2 - activate SPU tracing, | ||
| 1165 | * 3 - deactivate | ||
| 1166 | */ | ||
| 1167 | lfsr_value = 0x8f100000; | ||
| 1168 | |||
| 1169 | rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL, | ||
| 1170 | subfunc, cbe_cpu_to_node(cpu), | ||
| 1171 | lfsr_value); | ||
| 1172 | |||
| 1173 | if (unlikely(rtn_value != 0)) { | ||
| 1174 | printk(KERN_ERR | ||
| 1175 | "%s: rtas call ibm,cbe-spu-perftools " \ | ||
| 1176 | "failed, return = %d\n", | ||
| 1177 | __func__, rtn_value); | ||
| 1178 | } | ||
| 1179 | |||
| 1180 | /* Deactivate the signals */ | ||
| 1181 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | ||
| 1182 | } | ||
| 1183 | |||
| 1184 | stop_spu_profiling_cycles(); | ||
| 1185 | } | ||
| 1186 | |||
| 1187 | static void cell_global_stop_spu_events(void) | ||
| 1188 | { | ||
| 1189 | int cpu; | ||
| 1190 | oprofile_running = 0; | ||
| 1191 | |||
| 1192 | stop_spu_profiling_events(); | ||
| 1193 | smp_wmb(); | ||
| 1194 | |||
| 1195 | for_each_online_cpu(cpu) { | ||
| 1196 | if (cbe_get_hw_thread_id(cpu)) | ||
| 1197 | continue; | ||
| 1198 | |||
| 1199 | cbe_sync_irq(cbe_cpu_to_node(cpu)); | ||
| 1200 | /* Stop the counters */ | ||
| 1201 | cbe_disable_pm(cpu); | ||
| 1202 | cbe_write_pm07_control(cpu, 0, 0); | ||
| 1203 | |||
| 1204 | /* Deactivate the signals */ | ||
| 1205 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | ||
| 1206 | |||
| 1207 | /* Deactivate interrupts */ | ||
| 1208 | cbe_disable_pm_interrupts(cpu); | ||
| 1209 | } | ||
| 1210 | del_timer_sync(&timer_spu_event_swap); | ||
| 1211 | } | ||
| 1212 | |||
| 1213 | static void cell_global_stop_ppu(void) | ||
| 1214 | { | ||
| 1215 | int cpu; | ||
| 1216 | |||
| 1217 | /* | ||
| 1218 | * This routine will be called once for the system. | ||
| 1219 | * There is one performance monitor per node, so we | ||
| 1220 | * only need to perform this function once per node. | ||
| 1221 | */ | ||
| 1222 | del_timer_sync(&timer_virt_cntr); | ||
| 1223 | oprofile_running = 0; | ||
| 1224 | smp_wmb(); | ||
| 1225 | |||
| 1226 | for_each_online_cpu(cpu) { | ||
| 1227 | if (cbe_get_hw_thread_id(cpu)) | ||
| 1228 | continue; | ||
| 1229 | |||
| 1230 | cbe_sync_irq(cbe_cpu_to_node(cpu)); | ||
| 1231 | /* Stop the counters */ | ||
| 1232 | cbe_disable_pm(cpu); | ||
| 1233 | |||
| 1234 | /* Deactivate the signals */ | ||
| 1235 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | ||
| 1236 | |||
| 1237 | /* Deactivate interrupts */ | ||
| 1238 | cbe_disable_pm_interrupts(cpu); | ||
| 1239 | } | ||
| 1240 | } | ||
| 1241 | |||
| 1242 | static void cell_global_stop(void) | ||
| 1243 | { | ||
| 1244 | if (profiling_mode == PPU_PROFILING) | ||
| 1245 | cell_global_stop_ppu(); | ||
| 1246 | else if (profiling_mode == SPU_PROFILING_EVENTS) | ||
| 1247 | cell_global_stop_spu_events(); | ||
| 1248 | else | ||
| 1249 | cell_global_stop_spu_cycles(); | ||
| 1250 | } | ||
| 1251 | |||
| 1252 | static int cell_global_start_spu_cycles(struct op_counter_config *ctr) | ||
| 889 | { | 1253 | { |
| 890 | int subfunc; | 1254 | int subfunc; |
| 891 | unsigned int lfsr_value; | 1255 | unsigned int lfsr_value; |
| @@ -951,18 +1315,18 @@ static int cell_global_start_spu(struct op_counter_config *ctr) | |||
| 951 | 1315 | ||
| 952 | /* start profiling */ | 1316 | /* start profiling */ |
| 953 | ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc, | 1317 | ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc, |
| 954 | cbe_cpu_to_node(cpu), lfsr_value); | 1318 | cbe_cpu_to_node(cpu), lfsr_value); |
| 955 | 1319 | ||
| 956 | if (unlikely(ret != 0)) { | 1320 | if (unlikely(ret != 0)) { |
| 957 | printk(KERN_ERR | 1321 | printk(KERN_ERR |
| 958 | "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n", | 1322 | "%s: rtas call ibm,cbe-spu-perftools failed, " \ |
| 959 | __func__, ret); | 1323 | "return = %d\n", __func__, ret); |
| 960 | rtas_error = -EIO; | 1324 | rtas_error = -EIO; |
| 961 | goto out; | 1325 | goto out; |
| 962 | } | 1326 | } |
| 963 | } | 1327 | } |
| 964 | 1328 | ||
| 965 | rtas_error = start_spu_profiling(spu_cycle_reset); | 1329 | rtas_error = start_spu_profiling_cycles(spu_cycle_reset); |
| 966 | if (rtas_error) | 1330 | if (rtas_error) |
| 967 | goto out_stop; | 1331 | goto out_stop; |
| 968 | 1332 | ||
| @@ -970,11 +1334,74 @@ static int cell_global_start_spu(struct op_counter_config *ctr) | |||
| 970 | return 0; | 1334 | return 0; |
| 971 | 1335 | ||
| 972 | out_stop: | 1336 | out_stop: |
| 973 | cell_global_stop_spu(); /* clean up the PMU/debug bus */ | 1337 | cell_global_stop_spu_cycles(); /* clean up the PMU/debug bus */ |
| 974 | out: | 1338 | out: |
| 975 | return rtas_error; | 1339 | return rtas_error; |
| 976 | } | 1340 | } |
| 977 | 1341 | ||
| 1342 | static int cell_global_start_spu_events(struct op_counter_config *ctr) | ||
| 1343 | { | ||
| 1344 | int cpu; | ||
| 1345 | u32 interrupt_mask = 0; | ||
| 1346 | int rtn = 0; | ||
| 1347 | |||
| 1348 | hdw_thread = 0; | ||
| 1349 | |||
| 1350 | /* SPU event profiling uses the performance counters to generate | ||
| 1351 | * an interrupt. The hardware is set up to store the SPU program | ||
| 1352 | * counter into the trace array. The occurrence mode is used to | ||
| 1353 | * enable storing data to the trace buffer. The bits are set | ||
| 1354 | * to send/store the SPU address in the trace buffer. The debug | ||
| 1355 | * bus must be set up to route the SPU program counter onto the | ||
| 1356 | * debug bus. The occurrence data in the trace buffer is not used. | ||
| 1357 | */ | ||
| 1358 | |||
| 1359 | /* This routine gets called once for the system. | ||
| 1360 | * There is one performance monitor per node, so we | ||
| 1361 | * only need to perform this function once per node. | ||
| 1362 | */ | ||
| 1363 | |||
| 1364 | for_each_online_cpu(cpu) { | ||
| 1365 | if (cbe_get_hw_thread_id(cpu)) | ||
| 1366 | continue; | ||
| 1367 | |||
| 1368 | /* | ||
| 1369 | * Setup SPU event-based profiling. | ||
| 1370 | * Set perf_mon_control bit 0 to a zero before | ||
| 1371 | * enabling spu collection hardware. | ||
| 1372 | * | ||
| 1373 | * Only support one SPU event on one SPU per node. | ||
| 1374 | */ | ||
| 1375 | if (ctr_enabled & 1) { | ||
| 1376 | cbe_write_ctr(cpu, 0, reset_value[0]); | ||
| 1377 | enable_ctr(cpu, 0, pm_regs.pm07_cntrl); | ||
| 1378 | interrupt_mask |= | ||
| 1379 | CBE_PM_CTR_OVERFLOW_INTR(0); | ||
| 1380 | } else { | ||
| 1381 | /* Disable counter */ | ||
| 1382 | cbe_write_pm07_control(cpu, 0, 0); | ||
| 1383 | } | ||
| 1384 | |||
| 1385 | cbe_get_and_clear_pm_interrupts(cpu); | ||
| 1386 | cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask); | ||
| 1387 | cbe_enable_pm(cpu); | ||
| 1388 | |||
| 1389 | /* clear the trace buffer */ | ||
| 1390 | cbe_write_pm(cpu, trace_address, 0); | ||
| 1391 | } | ||
| 1392 | |||
| 1393 | /* Start the timer to time-slice collecting the event profile | ||
| 1394 | * on each of the SPUs. Note, the profile can be collected on | ||
| 1395 | * only one SPU per node at a time. | ||
| 1396 | */ | ||
| 1397 | start_spu_event_swap(); | ||
| 1398 | start_spu_profiling_events(); | ||
| 1399 | oprofile_running = 1; | ||
| 1400 | smp_wmb(); | ||
| 1401 | |||
| 1402 | return rtn; | ||
| 1403 | } | ||
| 1404 | |||
| 978 | static int cell_global_start_ppu(struct op_counter_config *ctr) | 1405 | static int cell_global_start_ppu(struct op_counter_config *ctr) |
| 979 | { | 1406 | { |
| 980 | u32 cpu, i; | 1407 | u32 cpu, i; |
| @@ -994,8 +1421,7 @@ static int cell_global_start_ppu(struct op_counter_config *ctr) | |||
| 994 | if (ctr_enabled & (1 << i)) { | 1421 | if (ctr_enabled & (1 << i)) { |
| 995 | cbe_write_ctr(cpu, i, reset_value[i]); | 1422 | cbe_write_ctr(cpu, i, reset_value[i]); |
| 996 | enable_ctr(cpu, i, pm_regs.pm07_cntrl); | 1423 | enable_ctr(cpu, i, pm_regs.pm07_cntrl); |
| 997 | interrupt_mask |= | 1424 | interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i); |
| 998 | CBE_PM_CTR_OVERFLOW_INTR(i); | ||
| 999 | } else { | 1425 | } else { |
| 1000 | /* Disable counter */ | 1426 | /* Disable counter */ |
| 1001 | cbe_write_pm07_control(cpu, i, 0); | 1427 | cbe_write_pm07_control(cpu, i, 0); |
| @@ -1024,99 +1450,162 @@ static int cell_global_start_ppu(struct op_counter_config *ctr) | |||
| 1024 | 1450 | ||
| 1025 | static int cell_global_start(struct op_counter_config *ctr) | 1451 | static int cell_global_start(struct op_counter_config *ctr) |
| 1026 | { | 1452 | { |
| 1027 | if (spu_cycle_reset) | 1453 | if (profiling_mode == SPU_PROFILING_CYCLES) |
| 1028 | return cell_global_start_spu(ctr); | 1454 | return cell_global_start_spu_cycles(ctr); |
| 1455 | else if (profiling_mode == SPU_PROFILING_EVENTS) | ||
| 1456 | return cell_global_start_spu_events(ctr); | ||
| 1029 | else | 1457 | else |
| 1030 | return cell_global_start_ppu(ctr); | 1458 | return cell_global_start_ppu(ctr); |
| 1031 | } | 1459 | } |
| 1032 | 1460 | ||
| 1033 | /* | 1461 | |
| 1034 | * Note the generic OProfile stop calls do not support returning | 1462 | /* The SPU interrupt handler |
| 1035 | * an error on stop. Hence, will not return an error if the FW | 1463 | * |
| 1036 | * calls fail on stop. Failure to reset the debug bus is not an issue. | 1464 | * SPU event profiling works as follows: |
| 1037 | * Failure to disable the SPU profiling is not an issue. The FW calls | 1465 | * The pm_signal[0] holds the one SPU event to be measured. It is routed on |
| 1038 | * to enable the performance counters and debug bus will work even if | 1466 | * the debug bus using word 0 or 1. The value of pm_signal[1] and |
| 1039 | * the hardware was not cleanly reset. | 1467 | * pm_signal[2] contain the necessary events to route the SPU program |
| 1468 | * counter for the selected SPU onto the debug bus using words 2 and 3. | ||
| 1469 | * The pm_interval register is set up to write the SPU PC value into the | ||
| 1470 | * trace buffer at the maximum rate possible. The trace buffer is configured | ||
| 1471 | * to store the PCs, wrapping when it is full. The performance counter is | ||
| 1472 | * initialized to the max hardware count minus the number of events, N, between | ||
| 1473 | * samples. Once the N events have occurred, a HW counter overflow occurs, | ||
| 1474 | * causing the generation of a HW counter interrupt which also stops the | ||
| 1475 | * writing of the SPU PC values to the trace buffer. Hence the last PC | ||
| 1476 | * written to the trace buffer is the SPU PC that we want. Unfortunately, | ||
| 1477 | * we have to read from the beginning of the trace buffer to get to the | ||
| 1478 | * last value written. We just hope the PPU has nothing better to do than | ||
| 1479 | * service this interrupt. The PC for the specific SPU being profiled is | ||
| 1480 | * extracted from the trace buffer, processed, and stored. The trace buffer | ||
| 1481 | * is cleared, interrupts are cleared, and the counter is reset to max - N. | ||
| 1482 | * A kernel timer is used to periodically call the routine spu_evnt_swap() | ||
| 1483 | * to switch to the next physical SPU in the node to profile in round-robin | ||
| 1484 | * order. This way data is collected for all SPUs on the node. It does mean | ||
| 1485 | * that we need to use a relatively small value of N to ensure enough samples | ||
| 1486 | * on each SPU are collected; each SPU is being profiled only 1/8 of the time. | ||
| 1487 | * It may also be necessary to use a longer sample collection period. | ||
| 1040 | */ | 1488 | */ |
| 1041 | static void cell_global_stop_spu(void) | 1489 | static void cell_handle_interrupt_spu(struct pt_regs *regs, |
| 1490 | struct op_counter_config *ctr) | ||
| 1042 | { | 1491 | { |
| 1043 | int subfunc, rtn_value; | 1492 | u32 cpu, cpu_tmp; |
| 1044 | unsigned int lfsr_value; | 1493 | u64 trace_entry; |
| 1045 | int cpu; | 1494 | u32 interrupt_mask; |
| 1495 | u64 trace_buffer[2]; | ||
| 1496 | u64 last_trace_buffer; | ||
| 1497 | u32 sample; | ||
| 1498 | u32 trace_addr; | ||
| 1499 | unsigned long sample_array_lock_flags; | ||
| 1500 | int spu_num; | ||
| 1501 | unsigned long flags; | ||
| 1046 | 1502 | ||
| 1047 | oprofile_running = 0; | 1503 | /* Make sure spu event interrupt handler and spu event swap |
| 1504 | * don't access the counters simultaneously. | ||
| 1505 | */ | ||
| 1506 | cpu = smp_processor_id(); | ||
| 1507 | spin_lock_irqsave(&cntr_lock, flags); | ||
| 1048 | 1508 | ||
| 1049 | #ifdef CONFIG_CPU_FREQ | 1509 | cpu_tmp = cpu; |
| 1050 | cpufreq_unregister_notifier(&cpu_freq_notifier_block, | 1510 | cbe_disable_pm(cpu); |
| 1051 | CPUFREQ_TRANSITION_NOTIFIER); | ||
| 1052 | #endif | ||
| 1053 | 1511 | ||
| 1054 | for_each_online_cpu(cpu) { | 1512 | interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu); |
| 1055 | if (cbe_get_hw_thread_id(cpu)) | ||
| 1056 | continue; | ||
| 1057 | 1513 | ||
| 1058 | subfunc = 3; /* | 1514 | sample = 0xABCDEF; |
| 1059 | * 2 - activate SPU tracing, | 1515 | trace_entry = 0xfedcba; |
| 1060 | * 3 - deactivate | 1516 | last_trace_buffer = 0xdeadbeaf; |
| 1061 | */ | ||
| 1062 | lfsr_value = 0x8f100000; | ||
| 1063 | 1517 | ||
| 1064 | rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL, | 1518 | if ((oprofile_running == 1) && (interrupt_mask != 0)) { |
| 1065 | subfunc, cbe_cpu_to_node(cpu), | 1519 | /* disable writes to trace buff */ |
| 1066 | lfsr_value); | 1520 | cbe_write_pm(cpu, pm_interval, 0); |
| 1067 | 1521 | ||
| 1068 | if (unlikely(rtn_value != 0)) { | 1522 | /* only have one perf cntr being used, cntr 0 */ |
| 1069 | printk(KERN_ERR | 1523 | if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0)) |
| 1070 | "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n", | 1524 | && ctr[0].enabled) |
| 1071 | __func__, rtn_value); | 1525 | /* The SPU PC values will be read |
| 1526 | * from the trace buffer, reset counter | ||
| 1527 | */ | ||
| 1528 | |||
| 1529 | cbe_write_ctr(cpu, 0, reset_value[0]); | ||
| 1530 | |||
| 1531 | trace_addr = cbe_read_pm(cpu, trace_address); | ||
| 1532 | |||
| 1533 | while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) { | ||
| 1534 | /* There is data in the trace buffer to process | ||
| 1535 | * Read the buffer until you get to the last | ||
| 1536 | * entry. This is the value we want. | ||
| 1537 | */ | ||
| 1538 | |||
| 1539 | cbe_read_trace_buffer(cpu, trace_buffer); | ||
| 1540 | trace_addr = cbe_read_pm(cpu, trace_address); | ||
| 1072 | } | 1541 | } |
| 1073 | 1542 | ||
| 1074 | /* Deactivate the signals */ | 1543 | /* SPU Address 16 bit count format for 128 bit |
| 1075 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | 1544 | * HW trace buffer is used for the SPU PC storage |
| 1076 | } | 1545 | * HDR bits 0:15 |
| 1546 | * SPU Addr 0 bits 16:31 | ||
| 1547 | * SPU Addr 1 bits 32:47 | ||
| 1548 | * unused bits 48:127 | ||
| 1549 | * | ||
| 1550 | * HDR: bit4 = 1 SPU Address 0 valid | ||
| 1551 | * HDR: bit5 = 1 SPU Address 1 valid | ||
| 1552 | * - unfortunately, the valid bits don't seem to work | ||
| 1553 | * | ||
| 1554 | * Note trace_buffer[0] holds bits 0:63 of the HW | ||
| 1555 | * trace buffer, trace_buffer[1] holds bits 64:127 | ||
| 1556 | */ | ||
| 1077 | 1557 | ||
| 1078 | stop_spu_profiling(); | 1558 | trace_entry = trace_buffer[0] |
| 1079 | } | 1559 | & 0x00000000FFFF0000; |
| 1080 | 1560 | ||
| 1081 | static void cell_global_stop_ppu(void) | 1561 | /* only top 16 of the 18 bit SPU PC address |
| 1082 | { | 1562 | * is stored in the trace buffer, hence shift right |
| 1083 | int cpu; | 1563 | * by 16 - 2 = 14 bits */ |
| 1564 | sample = trace_entry >> 14; | ||
| 1565 | last_trace_buffer = trace_buffer[0]; | ||
| 1084 | 1566 | ||
| 1085 | /* | 1567 | spu_num = spu_evnt_phys_spu_indx |
| 1086 | * This routine will be called once for the system. | 1568 | + (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE); |
| 1087 | * There is one performance monitor per node, so we | ||
| 1088 | * only need to perform this function once per node. | ||
| 1089 | */ | ||
| 1090 | del_timer_sync(&timer_virt_cntr); | ||
| 1091 | oprofile_running = 0; | ||
| 1092 | smp_wmb(); | ||
| 1093 | 1569 | ||
| 1094 | for_each_online_cpu(cpu) { | 1570 | /* make sure only one process at a time is calling |
| 1095 | if (cbe_get_hw_thread_id(cpu)) | 1571 | * spu_sync_buffer() |
| 1096 | continue; | 1572 | */ |
| 1573 | spin_lock_irqsave(&oprof_spu_smpl_arry_lck, | ||
| 1574 | sample_array_lock_flags); | ||
| 1575 | spu_sync_buffer(spu_num, &sample, 1); | ||
| 1576 | spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck, | ||
| 1577 | sample_array_lock_flags); | ||
| 1097 | 1578 | ||
| 1098 | cbe_sync_irq(cbe_cpu_to_node(cpu)); | 1579 | smp_wmb(); /* ensure spu event buffer updates are written |
| 1099 | /* Stop the counters */ | 1580 | * don't want events intermingled... */ |
| 1100 | cbe_disable_pm(cpu); | ||
| 1101 | 1581 | ||
| 1102 | /* Deactivate the signals */ | 1582 | /* The counters were frozen by the interrupt. |
| 1103 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | 1583 | * Reenable the interrupt and restart the counters. |
| 1584 | */ | ||
| 1585 | cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC); | ||
| 1586 | cbe_enable_pm_interrupts(cpu, hdw_thread, | ||
| 1587 | virt_cntr_inter_mask); | ||
| 1104 | 1588 | ||
| 1105 | /* Deactivate interrupts */ | 1589 | /* clear the trace buffer, re-enable writes to trace buff */ |
| 1106 | cbe_disable_pm_interrupts(cpu); | 1590 | cbe_write_pm(cpu, trace_address, 0); |
| 1107 | } | 1591 | cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC); |
| 1108 | } | ||
| 1109 | 1592 | ||
| 1110 | static void cell_global_stop(void) | 1593 | /* The writes to the various performance counters only write |
| 1111 | { | 1594 | * to a latch. The new values (interrupt setting bits, reset |
| 1112 | if (spu_cycle_reset) | 1595 | * counter value etc.) are not copied to the actual registers |
| 1113 | cell_global_stop_spu(); | 1596 | * until the performance monitor is enabled. In order to get |
| 1114 | else | 1597 | * this to work as desired, the performance monitor needs to |
| 1115 | cell_global_stop_ppu(); | 1598 | * be disabled while writing to the latches. This is a |
| 1599 | * HW design issue. | ||
| 1600 | */ | ||
| 1601 | write_pm_cntrl(cpu); | ||
| 1602 | cbe_enable_pm(cpu); | ||
| 1603 | } | ||
| 1604 | spin_unlock_irqrestore(&cntr_lock, flags); | ||
| 1116 | } | 1605 | } |
| 1117 | 1606 | ||
| 1118 | static void cell_handle_interrupt(struct pt_regs *regs, | 1607 | static void cell_handle_interrupt_ppu(struct pt_regs *regs, |
| 1119 | struct op_counter_config *ctr) | 1608 | struct op_counter_config *ctr) |
| 1120 | { | 1609 | { |
| 1121 | u32 cpu; | 1610 | u32 cpu; |
| 1122 | u64 pc; | 1611 | u64 pc; |
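Editor's note: the PC extraction in cell_handle_interrupt_spu() packs two steps into one shift. Masking with 0x00000000FFFF0000 isolates the 16 stored PC bits, and shifting right by 14 (i.e. 16 - 2) both strips the field offset and restores the two low, always-zero bits of the word-aligned 18-bit SPU address. A stand-alone sketch with a hypothetical trace word:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical trace-buffer word whose stored 16 PC bits are 0x1234 */
	uint64_t trace_word = 0x0000000012340000ULL;

	/* same mask and shift as cell_handle_interrupt_spu() */
	uint64_t trace_entry = trace_word & 0x00000000FFFF0000ULL;
	uint32_t sample = (uint32_t)(trace_entry >> 14);

	/* 0x1234 << 2 == 0x48D0: the word-aligned 18-bit SPU PC */
	printf("SPU PC sample = 0x%05X\n", sample);
	return 0;
}
```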
| @@ -1132,7 +1621,7 @@ static void cell_handle_interrupt(struct pt_regs *regs, | |||
| 1132 | * routine are not running at the same time. See the | 1621 | * routine are not running at the same time. See the |
| 1133 | * cell_virtual_cntr() routine for additional comments. | 1622 | * cell_virtual_cntr() routine for additional comments. |
| 1134 | */ | 1623 | */ |
| 1135 | spin_lock_irqsave(&virt_cntr_lock, flags); | 1624 | spin_lock_irqsave(&cntr_lock, flags); |
| 1136 | 1625 | ||
| 1137 | /* | 1626 | /* |
| 1138 | * Need to disable and reenable the performance counters | 1627 | * Need to disable and reenable the performance counters |
| @@ -1185,7 +1674,16 @@ static void cell_handle_interrupt(struct pt_regs *regs, | |||
| 1185 | */ | 1674 | */ |
| 1186 | cbe_enable_pm(cpu); | 1675 | cbe_enable_pm(cpu); |
| 1187 | } | 1676 | } |
| 1188 | spin_unlock_irqrestore(&virt_cntr_lock, flags); | 1677 | spin_unlock_irqrestore(&cntr_lock, flags); |
| 1678 | } | ||
| 1679 | |||
| 1680 | static void cell_handle_interrupt(struct pt_regs *regs, | ||
| 1681 | struct op_counter_config *ctr) | ||
| 1682 | { | ||
| 1683 | if (profiling_mode == PPU_PROFILING) | ||
| 1684 | cell_handle_interrupt_ppu(regs, ctr); | ||
| 1685 | else | ||
| 1686 | cell_handle_interrupt_spu(regs, ctr); | ||
| 1189 | } | 1687 | } |
| 1190 | 1688 | ||
| 1191 | /* | 1689 | /* |
| @@ -1195,7 +1693,8 @@ static void cell_handle_interrupt(struct pt_regs *regs, | |||
| 1195 | */ | 1693 | */ |
| 1196 | static int cell_sync_start(void) | 1694 | static int cell_sync_start(void) |
| 1197 | { | 1695 | { |
| 1198 | if (spu_cycle_reset) | 1696 | if ((profiling_mode == SPU_PROFILING_CYCLES) || |
| 1697 | (profiling_mode == SPU_PROFILING_EVENTS)) | ||
| 1199 | return spu_sync_start(); | 1698 | return spu_sync_start(); |
| 1200 | else | 1699 | else |
| 1201 | return DO_GENERIC_SYNC; | 1700 | return DO_GENERIC_SYNC; |
| @@ -1203,7 +1702,8 @@ static int cell_sync_start(void) | |||
| 1203 | 1702 | ||
| 1204 | static int cell_sync_stop(void) | 1703 | static int cell_sync_stop(void) |
| 1205 | { | 1704 | { |
| 1206 | if (spu_cycle_reset) | 1705 | if ((profiling_mode == SPU_PROFILING_CYCLES) || |
| 1706 | (profiling_mode == SPU_PROFILING_EVENTS)) | ||
| 1207 | return spu_sync_stop(); | 1707 | return spu_sync_stop(); |
| 1208 | else | 1708 | else |
| 1209 | return 1; | 1709 | return 1; |
