author    David S. Miller <davem@sunset.davemloft.net>  2007-02-22 09:24:10 -0500
committer David S. Miller <davem@sunset.davemloft.net>  2007-04-26 04:54:11 -0400
commit    777a447529ad138f5fceb9c9ad28bab19848f277 (patch)
tree      d6fa75027b26d0b2d436cb0f8dc97c72f411b970
parent    a58c9f3c1e929c3c323c26dbdafef46373a719d4 (diff)
[SPARC64]: Unify timer interrupt handler.

Things were scattered all over the place, split between SMP and
non-SMP.

Unify it all so that dyntick support is easier to add.

Signed-off-by: David S. Miller <davem@davemloft.net>
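
Both the SMP and non-SMP tick paths now funnel into a single timer_interrupt(),
entered straight from the trap table: it clears the level-14 softint, accounts
the interrupt, runs the per-CPU tick work, and lets only the boot CPU take
xtime_lock and advance jiffies. A condensed sketch of how the unified handler
reads after this patch (the PSTATE_IE save/restore assembly around the
tick-compare update is elided here; the time.c hunk below has the exact code):

void timer_interrupt(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long ticks, compare;
	unsigned long tick_mask = tick_ops->softint_mask;

	clear_softint(tick_mask);	/* ack the %tick softint */
	irq_enter();
	kstat_this_cpu.irqs[0]++;

	do {
		profile_tick(CPU_PROFILING);
		update_process_times(user_mode(get_irq_regs()));

		/* Only the boot CPU advances jiffies. */
		if (smp_processor_id() == boot_cpu_id) {
			write_seqlock(&xtime_lock);
			do_timer(1);
			write_sequnlock(&xtime_lock);
		}

		/* Re-arm the compare register; the real code disables
		 * interrupts around these two tick_ops calls.
		 */
		compare = tick_ops->add_compare(current_tick_offset);
		ticks = tick_ops->get_tick();
	} while (unlikely(time_after_eq(ticks, compare)));

	irq_exit();
	set_irq_regs(old_regs);
}
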
-rw-r--r--  arch/sparc64/kernel/irq.c      |  26
-rw-r--r--  arch/sparc64/kernel/smp.c      |  87
-rw-r--r--  arch/sparc64/kernel/time.c     |  43
-rw-r--r--  arch/sparc64/kernel/ttable.S   |   6
-rw-r--r--  include/asm-sparc64/cpudata.h  |   4
-rw-r--r--  include/asm-sparc64/smp.h      |   4
-rw-r--r--  include/asm-sparc64/ttable.h   |  27
7 files changed, 25 insertions, 172 deletions
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index c443db184371..d1bb3b3f2639 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -589,32 +589,6 @@ void ack_bad_irq(unsigned int virt_irq)
 	       ino, virt_irq);
 }
 
-#ifndef CONFIG_SMP
-extern irqreturn_t timer_interrupt(int, void *);
-
-void timer_irq(int irq, struct pt_regs *regs)
-{
-	unsigned long clr_mask = 1 << irq;
-	unsigned long tick_mask = tick_ops->softint_mask;
-	struct pt_regs *old_regs;
-
-	if (get_softint() & tick_mask) {
-		irq = 0;
-		clr_mask = tick_mask;
-	}
-	clear_softint(clr_mask);
-
-	old_regs = set_irq_regs(regs);
-	irq_enter();
-
-	kstat_this_cpu.irqs[0]++;
-	timer_interrupt(irq, NULL);
-
-	irq_exit();
-	set_irq_regs(old_regs);
-}
-#endif
-
 void handler_irq(int irq, struct pt_regs *regs)
 {
 	struct ino_bucket *bucket;
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index fc99f7b8012f..39deb0346eb5 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -45,7 +45,7 @@
 extern void calibrate_delay(void);
 
 /* Please don't make this stuff initdata!!!  --DaveM */
-static unsigned char boot_cpu_id;
+unsigned char boot_cpu_id;
 
 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
@@ -81,8 +81,6 @@ void __init smp_store_cpu_info(int id)
 	struct device_node *dp;
 	int def;
 
-	/* multiplier and counter set by
-	   smp_setup_percpu_timer()  */
 	cpu_data(id).udelay_val = loops_per_jiffy;
 
 	cpu_find_by_mid(id, &dp);
@@ -1180,75 +1178,10 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 	preempt_enable();
 }
 
-#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)		cpu_data(__cpu).counter
-
-void smp_percpu_timer_interrupt(struct pt_regs *regs)
-{
-	unsigned long compare, tick, pstate;
-	int cpu = smp_processor_id();
-	int user = user_mode(regs);
-	struct pt_regs *old_regs;
-
-	/*
-	 * Check for level 14 softint.
-	 */
-	{
-		unsigned long tick_mask = tick_ops->softint_mask;
-
-		if (!(get_softint() & tick_mask)) {
-			extern void handler_irq(int, struct pt_regs *);
-
-			handler_irq(14, regs);
-			return;
-		}
-		clear_softint(tick_mask);
-	}
-
-	old_regs = set_irq_regs(regs);
-	do {
-		profile_tick(CPU_PROFILING);
-		if (!--prof_counter(cpu)) {
-			irq_enter();
-
-			if (cpu == boot_cpu_id) {
-				kstat_this_cpu.irqs[0]++;
-				timer_tick_interrupt(regs);
-			}
-
-			update_process_times(user);
-
-			irq_exit();
-
-			prof_counter(cpu) = prof_multiplier(cpu);
-		}
-
-		/* Guarantee that the following sequences execute
-		 * uninterrupted.
-		 */
-		__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
-				     "wrpr	%0, %1, %%pstate"
-				     : "=r" (pstate)
-				     : "i" (PSTATE_IE));
-
-		compare = tick_ops->add_compare(current_tick_offset);
-		tick = tick_ops->get_tick();
-
-		/* Restore PSTATE_IE. */
-		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
-				     : /* no outputs */
-				     : "r" (pstate));
-	} while (time_after_eq(tick, compare));
-	set_irq_regs(old_regs);
-}
-
 static void __init smp_setup_percpu_timer(void)
 {
-	int cpu = smp_processor_id();
 	unsigned long pstate;
 
-	prof_counter(cpu) = prof_multiplier(cpu) = 1;
-
 	/* Guarantee that the following sequences execute
 	 * uninterrupted.
 	 */
@@ -1269,28 +1202,12 @@ void __init smp_tick_init(void)
 {
 	boot_cpu_id = hard_smp_processor_id();
 	current_tick_offset = timer_tick_offset;
-
-	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
 
 /* /proc/profile writes can call this, don't __init it please. */
-static DEFINE_SPINLOCK(prof_setup_lock);
-
 int setup_profiling_timer(unsigned int multiplier)
 {
-	unsigned long flags;
-	int i;
-
-	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
-		return -EINVAL;
-
-	spin_lock_irqsave(&prof_setup_lock, flags);
-	for_each_possible_cpu(i)
-		prof_multiplier(i) = multiplier;
-	current_tick_offset = (timer_tick_offset / multiplier);
-	spin_unlock_irqrestore(&prof_setup_lock, flags);
-
-	return 0;
+	return -EINVAL;
 }
 
 static void __init smp_tune_scheduling(void)
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index d457079118dc..48e1217c1e42 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -31,6 +31,7 @@
 #include <linux/profile.h>
 #include <linux/miscdevice.h>
 #include <linux/rtc.h>
+#include <linux/kernel_stat.h>
 
 #include <asm/oplib.h>
 #include <asm/mostek.h>
@@ -423,12 +424,6 @@ static struct sparc64_tick_ops hbtick_operations __read_mostly = {
 	.softint_mask	=	1UL << 0,
 };
 
-/* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
- *
- * NOTE: On SUN5 systems the ticker interrupt comes in using 2
- *       interrupts, one at level14 and one with softint bit 0.
- */
 unsigned long timer_tick_offset __read_mostly;
 
 static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
@@ -487,18 +482,27 @@ void notify_arch_cmos_timer(void)
 	mod_timer(&sync_cmos_timer, jiffies + 1);
 }
 
-irqreturn_t timer_interrupt(int irq, void *dev_id)
+void timer_interrupt(int irq, struct pt_regs *regs)
 {
+	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned long ticks, compare, pstate;
+	unsigned long tick_mask = tick_ops->softint_mask;
+
+	clear_softint(tick_mask);
+
+	irq_enter();
 
-	write_seqlock(&xtime_lock);
+	kstat_this_cpu.irqs[0]++;
 
 	do {
-#ifndef CONFIG_SMP
 		profile_tick(CPU_PROFILING);
 		update_process_times(user_mode(get_irq_regs()));
-#endif
-		do_timer(1);
+
+		if (smp_processor_id() == boot_cpu_id) {
+			write_seqlock(&xtime_lock);
+			do_timer(1);
+			write_sequnlock(&xtime_lock);
+		}
 
 		/* Guarantee that the following sequences execute
 		 * uninterrupted.
@@ -515,24 +519,13 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
 		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
 				     : /* no outputs */
 				     : "r" (pstate));
-	} while (time_after_eq(ticks, compare));
+	} while (unlikely(time_after_eq(ticks, compare)));
 
-	write_sequnlock(&xtime_lock);
+	irq_exit();
 
-	return IRQ_HANDLED;
+	set_irq_regs(old_regs);
 }
 
-#ifdef CONFIG_SMP
-void timer_tick_interrupt(struct pt_regs *regs)
-{
-	write_seqlock(&xtime_lock);
-
-	do_timer(1);
-
-	write_sequnlock(&xtime_lock);
-}
-#endif
-
 /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
 static void __init kick_start_clock(void)
 {
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index d7d2a8bdc66e..7575aa371da8 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -60,11 +60,7 @@ tl0_irq4: BTRAP(0x44)
 tl0_irq5:	TRAP_IRQ(handler_irq, 5)
 tl0_irq6:	BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
 tl0_irq10:	BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
-#ifndef CONFIG_SMP
-tl0_irq14:	TRAP_IRQ(timer_irq, 14)
-#else
-tl0_irq14:	TICK_SMP_IRQ
-#endif
+tl0_irq14:	TRAP_IRQ(timer_interrupt, 14)
 tl0_irq15:	TRAP_IRQ(handler_irq, 15)
 tl0_resv050:	BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55)
 tl0_resv056:	BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b)
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index f2cc9411b4c7..e89922d6718c 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -17,8 +17,8 @@
 typedef struct {
 	/* Dcache line 1 */
 	unsigned int	__softirq_pending; /* must be 1st, see rtrap.S */
-	unsigned int	multiplier;
-	unsigned int	counter;
+	unsigned int	__pad0_1;
+	unsigned int	__pad0_2;
 	unsigned int	__pad1;
 	unsigned long	clock_tick;	/* %tick's per second */
 	unsigned long	udelay_val;
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 388249b751c3..cca54804b722 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -42,15 +42,15 @@ extern int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern void smp_setup_cpu_possible_map(void);
+extern unsigned char boot_cpu_id;
 
 #endif /* !(__ASSEMBLY__) */
 
 #else
 
 #define smp_setup_cpu_possible_map() do { } while (0)
+#define boot_cpu_id (0)
 
 #endif /* !(CONFIG_SMP) */
 
-#define NO_PROC_ID		0xFF
-
 #endif /* !(_SPARC64_SMP_H) */
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index c2a16e188499..bbb9c8f13d61 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -157,23 +157,6 @@
 	ba,a,pt	%xcc, rtrap_irq;			\
 	.previous;
 
-#define TICK_SMP_IRQ					\
-	rdpr	%pil, %g2;				\
-	wrpr	%g0, 15, %pil;				\
-	sethi	%hi(1f-4), %g7;				\
-	ba,pt	%xcc, etrap_irq;			\
-	 or	%g7, %lo(1f-4), %g7;			\
-	nop;						\
-	nop;						\
-	nop;						\
-	.subsection	2;				\
-1:	call	trace_hardirqs_off;			\
-	 nop;						\
-	call	smp_percpu_timer_interrupt;		\
-	 add	%sp, PTREGS_OFF, %o0;			\
-	ba,a,pt	%xcc, rtrap_irq;			\
-	.previous;
-
 #else
 
 #define TRAP_IRQ(routine, level)			\
@@ -186,16 +169,6 @@
 	 add	%sp, PTREGS_OFF, %o1;			\
 	ba,a,pt	%xcc, rtrap_irq;
 
-#define TICK_SMP_IRQ					\
-	rdpr	%pil, %g2;				\
-	wrpr	%g0, 15, %pil;				\
-	sethi	%hi(109f), %g7;				\
-	ba,pt	%xcc, etrap_irq;			\
-109:	 or	%g7, %lo(109b), %g7;			\
-	call	smp_percpu_timer_interrupt;		\
-	 add	%sp, PTREGS_OFF, %o0;			\
-	ba,a,pt	%xcc, rtrap_irq;
-
 #endif
 
 #define TRAP_IVEC TRAP_NOSAVE(do_ivec)