author    David S. Miller <davem@sunset.davemloft.net>  2007-02-22 09:24:10 -0500
committer David S. Miller <davem@sunset.davemloft.net>  2007-04-26 04:54:11 -0400
commit    777a447529ad138f5fceb9c9ad28bab19848f277
tree      d6fa75027b26d0b2d436cb0f8dc97c72f411b970 /arch/sparc64/kernel/smp.c
parent    a58c9f3c1e929c3c323c26dbdafef46373a719d4
[SPARC64]: Unify timer interrupt handler.
Things were scattered all over the place, split between SMP and
non-SMP.  Unify it all so that dyntick support is easier to add.

Signed-off-by: David S. Miller <davem@davemloft.net>
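[Editor's note] The core of what this patch removes from smp.c is a loop that reprograms the %tick compare register with interrupts disabled and replays any tick periods that were missed while the handler ran. The unified handler named in the subject lives outside this file (the diff below only shows the smp.c side), so here is a minimal sketch of that reprogramming sequence, reconstructed purely from the removed SMP path; the function name and its standalone framing are illustrative, not the actual code the patch adds elsewhere:

/* Illustrative sketch only: reconstructed from the SMP-path code
 * deleted in the diff below.  The real unified handler is in
 * arch/sparc64/kernel/time.c and is not part of this diff; the
 * function name here is hypothetical.
 */
static void sketch_tick_reprogram(void)
{
        unsigned long compare, tick, pstate;

        do {
                /* Clear PSTATE_IE so that bumping the compare
                 * register and sampling %tick cannot be split by
                 * an interrupt.
                 */
                __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
                                     "wrpr      %0, %1, %%pstate"
                                     : "=r" (pstate)
                                     : "i" (PSTATE_IE));

                compare = tick_ops->add_compare(current_tick_offset);
                tick = tick_ops->get_tick();

                /* Restore the previous PSTATE_IE setting. */
                __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
                                     : /* no outputs */
                                     : "r" (pstate));

                /* If %tick has already passed the new compare value,
                 * a period was missed while we were processing; loop
                 * and account for it rather than waiting a full
                 * period for the next interrupt.
                 */
        } while (time_after_eq(tick, compare));
}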
Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--  arch/sparc64/kernel/smp.c  87
1 file changed, 2 insertions(+), 85 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index fc99f7b8012f..39deb0346eb5 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -45,7 +45,7 @@
 extern void calibrate_delay(void);
 
 /* Please don't make this stuff initdata!!! --DaveM */
-static unsigned char boot_cpu_id;
+unsigned char boot_cpu_id;
 
 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
@@ -81,8 +81,6 @@ void __init smp_store_cpu_info(int id)
 	struct device_node *dp;
 	int def;
 
-	/* multiplier and counter set by
-	   smp_setup_percpu_timer()  */
 	cpu_data(id).udelay_val = loops_per_jiffy;
 
 	cpu_find_by_mid(id, &dp);
@@ -1180,75 +1178,10 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 	preempt_enable();
 }
 
-#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)		cpu_data(__cpu).counter
-
-void smp_percpu_timer_interrupt(struct pt_regs *regs)
-{
-	unsigned long compare, tick, pstate;
-	int cpu = smp_processor_id();
-	int user = user_mode(regs);
-	struct pt_regs *old_regs;
-
-	/*
-	 * Check for level 14 softint.
-	 */
-	{
-		unsigned long tick_mask = tick_ops->softint_mask;
-
-		if (!(get_softint() & tick_mask)) {
-			extern void handler_irq(int, struct pt_regs *);
-
-			handler_irq(14, regs);
-			return;
-		}
-		clear_softint(tick_mask);
-	}
-
-	old_regs = set_irq_regs(regs);
-	do {
-		profile_tick(CPU_PROFILING);
-		if (!--prof_counter(cpu)) {
-			irq_enter();
-
-			if (cpu == boot_cpu_id) {
-				kstat_this_cpu.irqs[0]++;
-				timer_tick_interrupt(regs);
-			}
-
-			update_process_times(user);
-
-			irq_exit();
-
-			prof_counter(cpu) = prof_multiplier(cpu);
-		}
-
-		/* Guarantee that the following sequences execute
-		 * uninterrupted.
-		 */
-		__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
-				     "wrpr	%0, %1, %%pstate"
-				     : "=r" (pstate)
-				     : "i" (PSTATE_IE));
-
-		compare = tick_ops->add_compare(current_tick_offset);
-		tick = tick_ops->get_tick();
-
-		/* Restore PSTATE_IE. */
-		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
-				     : /* no outputs */
-				     : "r" (pstate));
-	} while (time_after_eq(tick, compare));
-	set_irq_regs(old_regs);
-}
-
 static void __init smp_setup_percpu_timer(void)
 {
-	int cpu = smp_processor_id();
 	unsigned long pstate;
 
-	prof_counter(cpu) = prof_multiplier(cpu) = 1;
-
 	/* Guarantee that the following sequences execute
 	 * uninterrupted.
 	 */
@@ -1269,28 +1202,12 @@ void __init smp_tick_init(void)
 {
 	boot_cpu_id = hard_smp_processor_id();
 	current_tick_offset = timer_tick_offset;
-
-	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
 
 /* /proc/profile writes can call this, don't __init it please. */
-static DEFINE_SPINLOCK(prof_setup_lock);
-
 int setup_profiling_timer(unsigned int multiplier)
 {
-	unsigned long flags;
-	int i;
-
-	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
-		return -EINVAL;
-
-	spin_lock_irqsave(&prof_setup_lock, flags);
-	for_each_possible_cpu(i)
-		prof_multiplier(i) = multiplier;
-	current_tick_offset = (timer_tick_offset / multiplier);
-	spin_unlock_irqrestore(&prof_setup_lock, flags);
-
-	return 0;
+	return -EINVAL;
 }
 
 static void __init smp_tune_scheduling(void)