Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--  arch/sparc64/kernel/smp.c  116
1 file changed, 5 insertions(+), 111 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index fc99f7b8012f..d4f0a70f4845 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -45,7 +45,7 @@
 extern void calibrate_delay(void);
 
 /* Please don't make this stuff initdata!!! --DaveM */
-static unsigned char boot_cpu_id;
+unsigned char boot_cpu_id;
 
 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
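
Note on this hunk: dropping `static` gives boot_cpu_id external linkage, presumably because the tick handling this patch moves out of smp.c still needs to know which CPU booted. A consumer elsewhere would pick the symbol up with a plain extern declaration; the helper below is a hypothetical illustration, not part of the patch:

    /* Sketch only: how another compilation unit would now see the
     * symbol.  The helper is illustrative, not from this patch. */
    extern unsigned char boot_cpu_id;

    static inline int on_boot_cpu(int cpu)
    {
            return cpu == boot_cpu_id;
    }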
@@ -81,8 +81,6 @@ void __init smp_store_cpu_info(int id)
 	struct device_node *dp;
 	int def;
 
-	/* multiplier and counter set by
-	   smp_setup_percpu_timer()  */
 	cpu_data(id).udelay_val = loops_per_jiffy;
 
 	cpu_find_by_mid(id, &dp);
@@ -125,7 +123,7 @@ void __init smp_store_cpu_info(int id)
 	       cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
 }
 
-static void smp_setup_percpu_timer(void);
+extern void setup_sparc64_timer(void);
 
 static volatile unsigned long callin_flag = 0;
 
@@ -140,7 +138,7 @@ void __init smp_callin(void)
 
 	__flush_tlb_all();
 
-	smp_setup_percpu_timer();
+	setup_sparc64_timer();
 
 	if (cheetah_pcache_forced_on)
 		cheetah_enable_pcache();
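
With this switch, per-CPU tick initialization is owned by the timer code rather than by smp.c. The body of setup_sparc64_timer() is not part of this diff; judging from the deleted smp_setup_percpu_timer() further down, a minimal shape would be the following (an assumed sketch, not the real implementation):

    /* Assumed shape, inferred from the deleted smp_setup_percpu_timer()
     * later in this diff; the real body lives in the sparc64 timer code. */
    void setup_sparc64_timer(void)
    {
            unsigned long pstate;

            /* Mask interrupts by toggling PSTATE_IE, as the removed
             * code did, so tick register setup cannot be interrupted. */
            __asm__ __volatile__("rdpr %%pstate, %0\n\t"
                                 "wrpr %0, %1, %%pstate"
                                 : "=r" (pstate)
                                 : "i" (PSTATE_IE));

            /* offset argument assumed dropped along with current_tick_offset */
            tick_ops->init_tick();

            /* Restore the previous PSTATE_IE state. */
            __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                 : /* no outputs */
                                 : "r" (pstate));
    }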
@@ -177,8 +175,6 @@ void cpu_panic(void)
 	panic("SMP bolixed\n");
 }
 
-static unsigned long current_tick_offset __read_mostly;
-
 /* This tick register synchronization scheme is taken entirely from
  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
  *
@@ -261,7 +257,7 @@ void smp_synchronize_tick_client(void)
 		} else
 			adj = -delta;
 
-		tick_ops->add_tick(adj, current_tick_offset);
+		tick_ops->add_tick(adj);
 	}
 #if DEBUG_TICK_SYNC
 	t[i].rt = rt;
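
This hunk, together with the removal of current_tick_offset above, narrows the add_tick operation: the caller no longer threads the tick offset through, so the timer code must track any offset itself. A sketch of the implied interface change (the operations table lives outside this file; the struct names here are illustrative assumptions, only the call sites appear in this diff):

    /* before: caller supplies both the adjustment and the offset */
    struct tick_ops_before {
            unsigned long (*add_tick)(unsigned long adj, unsigned long offset);
    };

    /* after: only the synchronization adjustment crosses the boundary */
    struct tick_ops_after {
            unsigned long (*add_tick)(unsigned long adj);
    };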
@@ -1180,117 +1176,15 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 	preempt_enable();
 }
 
-#define prof_multiplier(__cpu)	cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)	cpu_data(__cpu).counter
-
-void smp_percpu_timer_interrupt(struct pt_regs *regs)
-{
-	unsigned long compare, tick, pstate;
-	int cpu = smp_processor_id();
-	int user = user_mode(regs);
-	struct pt_regs *old_regs;
-
-	/*
-	 * Check for level 14 softint.
-	 */
-	{
-		unsigned long tick_mask = tick_ops->softint_mask;
-
-		if (!(get_softint() & tick_mask)) {
-			extern void handler_irq(int, struct pt_regs *);
-
-			handler_irq(14, regs);
-			return;
-		}
-		clear_softint(tick_mask);
-	}
-
-	old_regs = set_irq_regs(regs);
-	do {
-		profile_tick(CPU_PROFILING);
-		if (!--prof_counter(cpu)) {
-			irq_enter();
-
-			if (cpu == boot_cpu_id) {
-				kstat_this_cpu.irqs[0]++;
-				timer_tick_interrupt(regs);
-			}
-
-			update_process_times(user);
-
-			irq_exit();
-
-			prof_counter(cpu) = prof_multiplier(cpu);
-		}
-
-		/* Guarantee that the following sequences execute
-		 * uninterrupted.
-		 */
-		__asm__ __volatile__("rdpr %%pstate, %0\n\t"
-				     "wrpr %0, %1, %%pstate"
-				     : "=r" (pstate)
-				     : "i" (PSTATE_IE));
-
-		compare = tick_ops->add_compare(current_tick_offset);
-		tick = tick_ops->get_tick();
-
-		/* Restore PSTATE_IE. */
-		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
-				     : /* no outputs */
-				     : "r" (pstate));
-	} while (time_after_eq(tick, compare));
-	set_irq_regs(old_regs);
-}
-
-static void __init smp_setup_percpu_timer(void)
-{
-	int cpu = smp_processor_id();
-	unsigned long pstate;
-
-	prof_counter(cpu) = prof_multiplier(cpu) = 1;
-
-	/* Guarantee that the following sequences execute
-	 * uninterrupted.
-	 */
-	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
-			     "wrpr %0, %1, %%pstate"
-			     : "=r" (pstate)
-			     : "i" (PSTATE_IE));
-
-	tick_ops->init_tick(current_tick_offset);
-
-	/* Restore PSTATE_IE. */
-	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
-			     : /* no outputs */
-			     : "r" (pstate));
-}
-
 void __init smp_tick_init(void)
 {
 	boot_cpu_id = hard_smp_processor_id();
-	current_tick_offset = timer_tick_offset;
-
-	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
 
 /* /proc/profile writes can call this, don't __init it please. */
-static DEFINE_SPINLOCK(prof_setup_lock);
-
 int setup_profiling_timer(unsigned int multiplier)
 {
-	unsigned long flags;
-	int i;
-
-	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
-		return -EINVAL;
-
-	spin_lock_irqsave(&prof_setup_lock, flags);
-	for_each_possible_cpu(i)
-		prof_multiplier(i) = multiplier;
-	current_tick_offset = (timer_tick_offset / multiplier);
-	spin_unlock_irqrestore(&prof_setup_lock, flags);
-
-	return 0;
+	return -EINVAL;
 }
 
 static void __init smp_tune_scheduling(void)
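
Both deleted functions bracketed their tick/compare register accesses with the same PSTATE_IE idiom: read %pstate, write it back with the IE bit toggled off (a two-operand wrpr writes the XOR of its operands, which clears IE when the caller runs with interrupts enabled), do the register work, then restore the saved value. The tick work itself, profile_tick(), update_process_times(), and the compare re-arm loop, presumably moves into the timer code alongside setup_sparc64_timer(). Isolated as a helper pair, the masking idiom would look like this (a sketch; PSTATE_IE comes from the sparc64 pstate definitions):

    static inline unsigned long pstate_ie_disable(void)
    {
            unsigned long pstate;

            /* rdpr/wrpr pair: save %pstate, then XOR out PSTATE_IE so
             * the following tick register accesses cannot be interrupted. */
            __asm__ __volatile__("rdpr %%pstate, %0\n\t"
                                 "wrpr %0, %1, %%pstate"
                                 : "=r" (pstate)
                                 : "i" (PSTATE_IE));
            return pstate;
    }

    static inline void pstate_ie_restore(unsigned long pstate)
    {
            /* Write the saved value back, restoring interrupt enable. */
            __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                 : /* no outputs */
                                 : "r" (pstate));
    }

Also worth noting: with the profiling multiplier machinery gone, setup_profiling_timer() now rejects every request with -EINVAL, matching architectures whose tick frequency cannot be changed from /proc/profile.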