-rw-r--r--  arch/mips/au1000/common/irq.c           4
-rw-r--r--  arch/mips/au1000/common/time.c         40
-rw-r--r--  arch/mips/kernel/smtc.c                 2
-rw-r--r--  arch/mips/kernel/time.c                93
-rw-r--r--  arch/mips/mips-boards/generic/time.c  112
-rw-r--r--  arch/mips/mipssim/sim_time.c           72
-rw-r--r--  arch/mips/sgi-ip22/ip22-int.c           5
-rw-r--r--  arch/mips/sgi-ip22/ip22-time.c         10
-rw-r--r--  arch/mips/sibyte/bcm1480/time.c        13
-rw-r--r--  arch/mips/sibyte/sb1250/time.c         13
-rw-r--r--  include/asm-mips/time.h                16
11 files changed, 88 insertions, 292 deletions
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index db2ba0dbfd5a..47949d6f2c10 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -65,8 +65,6 @@
 #define EXT_INTC1_REQ1 5 /* IP 5 */
 #define MIPS_TIMER_IP 7 /* IP 7 */
 
-extern void mips_timer_interrupt(void);
-
 void (*board_init_irq)(void);
 
 static DEFINE_SPINLOCK(irq_lock);
@@ -635,7 +633,7 @@ asmlinkage void plat_irq_dispatch(void)
         unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
 
         if (pending & CAUSEF_IP7)
-                mips_timer_interrupt();
+                ll_timer_interrupt(63);
         else if (pending & CAUSEF_IP2)
                 intc0_req0_irqdispatch();
         else if (pending & CAUSEF_IP3)
diff --git a/arch/mips/au1000/common/time.c b/arch/mips/au1000/common/time.c
index fb1fd50f19a6..726c340460b6 100644
--- a/arch/mips/au1000/common/time.c
+++ b/arch/mips/au1000/common/time.c
@@ -64,48 +64,8 @@ static unsigned long last_pc0, last_match20;
 
 static DEFINE_SPINLOCK(time_lock);
 
-static inline void ack_r4ktimer(unsigned long newval)
-{
-        write_c0_compare(newval);
-}
-
-/*
- * There are a lot of conceptually broken versions of the MIPS timer interrupt
- * handler floating around. This one is rather different, but the algorithm
- * is provably more robust.
- */
 unsigned long wtimer;
 
-void mips_timer_interrupt(void)
-{
-        int irq = 63;
-
-        irq_enter();
-        kstat_this_cpu.irqs[irq]++;
-
-        if (r4k_offset == 0)
-                goto null;
-
-        do {
-                kstat_this_cpu.irqs[irq]++;
-                do_timer(1);
-#ifndef CONFIG_SMP
-                update_process_times(user_mode(get_irq_regs()));
-#endif
-                r4k_cur += r4k_offset;
-                ack_r4ktimer(r4k_cur);
-
-        } while (((unsigned long)read_c0_count()
-                - r4k_cur) < 0x7fffffff);
-
-        irq_exit();
-        return;
-
-null:
-        ack_r4ktimer(0);
-        irq_exit();
-}
-
 #ifdef CONFIG_PM
 irqreturn_t counter0_irq(int irq, void *dev_id)
 {
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 137183bba54f..a7afbf2c9710 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -867,7 +867,7 @@ void ipi_decode(struct smtc_ipi *pipi)
 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
                 clock_hang_reported[dest_copy] = 0;
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
-                local_timer_interrupt(0, NULL);
+                local_timer_interrupt(0);
                 irq_exit();
                 break;
         case LINUX_SMP_IPI:
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index c48ebd4b495e..d23e6825e988 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -144,7 +144,7 @@ void local_timer_interrupt(int irq, void *dev_id)
  * High-level timer interrupt service routines. This function
  * is set as irqaction->handler and is invoked through do_IRQ.
  */
-irqreturn_t timer_interrupt(int irq, void *dev_id)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
         write_seqlock(&xtime_lock);
 
@@ -174,9 +174,10 @@ int null_perf_irq(void)
         return 0;
 }
 
+EXPORT_SYMBOL(null_perf_irq);
+
 int (*perf_irq)(void) = null_perf_irq;
 
-EXPORT_SYMBOL(null_perf_irq);
 EXPORT_SYMBOL(perf_irq);
 
 /*
@@ -208,35 +209,79 @@ static inline int handle_perf_irq (int r2)
                 !r2;
 }
 
-asmlinkage void ll_timer_interrupt(int irq)
+void ll_timer_interrupt(int irq, void *dev_id)
 {
-        int r2 = cpu_has_mips_r2;
+        int cpu = smp_processor_id();
 
-        irq_enter();
-        kstat_this_cpu.irqs[irq]++;
+#ifdef CONFIG_MIPS_MT_SMTC
+        /*
+         * In an SMTC system, one Count/Compare set exists per VPE.
+         * Which TC within a VPE gets the interrupt is essentially
+         * random - we only know that it shouldn't be one with
+         * IXMT set. Whichever TC gets the interrupt needs to
+         * send special interprocessor interrupts to the other
+         * TCs to make sure that they schedule, etc.
+         *
+         * That code is specific to the SMTC kernel, not to
+         * the a particular platform, so it's invoked from
+         * the general MIPS timer_interrupt routine.
+         */
+
+        /*
+         * We could be here due to timer interrupt,
+         * perf counter overflow, or both.
+         */
+        (void) handle_perf_irq(1);
+
+        if (read_c0_cause() & (1 << 30)) {
+                /*
+                 * There are things we only want to do once per tick
+                 * in an "MP" system. One TC of each VPE will take
+                 * the actual timer interrupt. The others will get
+                 * timer broadcast IPIs. We use whoever it is that takes
+                 * the tick on VPE 0 to run the full timer_interrupt().
+                 */
+                if (cpu_data[cpu].vpe_id == 0) {
+                        timer_interrupt(irq, NULL);
+                } else {
+                        write_c0_compare(read_c0_count() +
+                                         (mips_hpt_frequency/HZ));
+                        local_timer_interrupt(irq, dev_id);
+                }
+                smtc_timer_broadcast(cpu_data[cpu].vpe_id);
+        }
+#else /* CONFIG_MIPS_MT_SMTC */
+        int r2 = cpu_has_mips_r2;
 
         if (handle_perf_irq(r2))
-                goto out;
+                return;
 
         if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
-                goto out;
-
-        timer_interrupt(irq, NULL);
-
-out:
-        irq_exit();
-}
-
-asmlinkage void ll_local_timer_interrupt(int irq)
-{
-        irq_enter();
-        if (smp_processor_id() != 0)
-                kstat_this_cpu.irqs[irq]++;
-
-        /* we keep interrupt disabled all the time */
-        local_timer_interrupt(irq, NULL);
+                return;
 
-        irq_exit();
+        if (cpu == 0) {
+                /*
+                 * CPU 0 handles the global timer interrupt job and process
+                 * accounting resets count/compare registers to trigger next
+                 * timer int.
+                 */
+                timer_interrupt(irq, NULL);
+        } else {
+                /* Everyone else needs to reset the timer int here as
+                   ll_local_timer_interrupt doesn't */
+                /*
+                 * FIXME: need to cope with counter underflow.
+                 * More support needs to be added to kernel/time for
+                 * counter/timer interrupts on multiple CPU's
+                 */
+                write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
+
+                /*
+                 * Other CPUs should do profiling and process accounting
+                 */
+                local_timer_interrupt(irq, dev_id);
+        }
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index 075f9d46f40e..345de881013c 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -68,108 +68,6 @@ static void mips_perf_dispatch(void)
 }
 
 /*
- * Redeclare until I get around mopping the timer code insanity on MIPS.
- */
-extern int null_perf_irq(void);
-
-extern int (*perf_irq)(void);
-
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq (int r2)
-{
-        /*
-         * The performance counter overflow interrupt may be shared with the
-         * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-         * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-         * and we can't reliably determine if a counter interrupt has also
-         * happened (!r2) then don't check for a timer interrupt.
-         */
-        return (cp0_perfcount_irq < 0) &&
-                perf_irq() == IRQ_HANDLED &&
-                !r2;
-}
-
-irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
-{
-        int cpu = smp_processor_id();
-
-#ifdef CONFIG_MIPS_MT_SMTC
-        /*
-         * In an SMTC system, one Count/Compare set exists per VPE.
-         * Which TC within a VPE gets the interrupt is essentially
-         * random - we only know that it shouldn't be one with
-         * IXMT set. Whichever TC gets the interrupt needs to
-         * send special interprocessor interrupts to the other
-         * TCs to make sure that they schedule, etc.
-         *
-         * That code is specific to the SMTC kernel, not to
-         * the a particular platform, so it's invoked from
-         * the general MIPS timer_interrupt routine.
-         */
-
-        /*
-         * We could be here due to timer interrupt,
-         * perf counter overflow, or both.
-         */
-        (void) handle_perf_irq(1);
-
-        if (read_c0_cause() & (1 << 30)) {
-                /*
-                 * There are things we only want to do once per tick
-                 * in an "MP" system. One TC of each VPE will take
-                 * the actual timer interrupt. The others will get
-                 * timer broadcast IPIs. We use whoever it is that takes
-                 * the tick on VPE 0 to run the full timer_interrupt().
-                 */
-                if (cpu_data[cpu].vpe_id == 0) {
-                        timer_interrupt(irq, NULL);
-                } else {
-                        write_c0_compare(read_c0_count() +
-                                         (mips_hpt_frequency/HZ));
-                        local_timer_interrupt(irq, dev_id);
-                }
-                smtc_timer_broadcast();
-        }
-#else /* CONFIG_MIPS_MT_SMTC */
-        int r2 = cpu_has_mips_r2;
-
-        if (handle_perf_irq(r2))
-                goto out;
-
-        if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
-                goto out;
-
-        if (cpu == 0) {
-                /*
-                 * CPU 0 handles the global timer interrupt job and process
-                 * accounting resets count/compare registers to trigger next
-                 * timer int.
-                 */
-                timer_interrupt(irq, NULL);
-        } else {
-                /* Everyone else needs to reset the timer int here as
-                   ll_local_timer_interrupt doesn't */
-                /*
-                 * FIXME: need to cope with counter underflow.
-                 * More support needs to be added to kernel/time for
-                 * counter/timer interrupts on multiple CPU's
-                 */
-                write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
-
-                /*
-                 * Other CPUs should do profiling and process accounting
-                 */
-                local_timer_interrupt(irq, dev_id);
-        }
-out:
-#endif /* CONFIG_MIPS_MT_SMTC */
-        return IRQ_HANDLED;
-}
-
-/*
  * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
  */
 static unsigned int __init estimate_cpu_frequency(void)
@@ -246,7 +144,7 @@ void __init plat_time_init(void)
         mips_scroll_message();
 }
 
-irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
+static irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
 {
         return perf_irq();
 }
@@ -257,8 +155,10 @@ static struct irqaction perf_irqaction = {
         .name = "performance",
 };
 
-void __init plat_perf_setup(struct irqaction *irq)
+void __init plat_perf_setup(void)
 {
+        struct irqaction *irq = &perf_irqaction;
+
         cp0_perfcount_irq = -1;
 
 #ifdef MSC01E_INT_BASE
@@ -297,8 +197,6 @@ void __init plat_timer_setup(struct irqaction *irq)
                 mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
         }
 
-        /* we are using the cpu counter for timer interrupts */
-        irq->handler = mips_timer_interrupt; /* we use our own handler */
 #ifdef CONFIG_MIPS_MT_SMTC
         setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << cp0_compare_irq);
 #else
@@ -308,5 +206,5 @@ void __init plat_timer_setup(struct irqaction *irq)
         set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
 #endif
 
-        plat_perf_setup(&perf_irqaction);
+        plat_perf_setup();
 }
diff --git a/arch/mips/mipssim/sim_time.c b/arch/mips/mipssim/sim_time.c
index 9a355e77952f..3625f7d49035 100644
--- a/arch/mips/mipssim/sim_time.c
+++ b/arch/mips/mipssim/sim_time.c
@@ -23,77 +23,6 @@
 
 unsigned long cpu_khz;
 
-irqreturn_t sim_timer_interrupt(int irq, void *dev_id)
-{
-#ifdef CONFIG_SMP
-        int cpu = smp_processor_id();
-
-        /*
-         * CPU 0 handles the global timer interrupt job
-         * resets count/compare registers to trigger next timer int.
-         */
-#ifndef CONFIG_MIPS_MT_SMTC
-        if (cpu == 0) {
-                timer_interrupt(irq, dev_id);
-        } else {
-                /* Everyone else needs to reset the timer int here as
-                   ll_local_timer_interrupt doesn't */
-                /*
-                 * FIXME: need to cope with counter underflow.
-                 * More support needs to be added to kernel/time for
-                 * counter/timer interrupts on multiple CPU's
-                 */
-                write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
-        }
-#else /* SMTC */
-        /*
-         * In SMTC system, one Count/Compare set exists per VPE.
-         * Which TC within a VPE gets the interrupt is essentially
-         * random - we only know that it shouldn't be one with
-         * IXMT set. Whichever TC gets the interrupt needs to
-         * send special interprocessor interrupts to the other
-         * TCs to make sure that they schedule, etc.
-         *
-         * That code is specific to the SMTC kernel, not to
-         * the simulation platform, so it's invoked from
-         * the general MIPS timer_interrupt routine.
-         *
-         * We have a problem in that the interrupt vector code
-         * had to turn off the timer IM bit to avoid redundant
-         * entries, but we may never get to mips_cpu_irq_end
-         * to turn it back on again if the scheduler gets
-         * involved. So we clear the pending timer here,
-         * and re-enable the mask...
-         */
-
-        int vpflags = dvpe();
-        write_c0_compare (read_c0_count() - 1);
-        clear_c0_cause(0x100 << cp0_compare_irq);
-        set_c0_status(0x100 << cp0_compare_irq);
-        irq_enable_hazard();
-        evpe(vpflags);
-
-        if (cpu_data[cpu].vpe_id == 0)
-                timer_interrupt(irq, dev_id);
-        else
-                write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
-        smtc_timer_broadcast(cpu_data[cpu].vpe_id);
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-        /*
-         * every CPU should do profiling and process accounting
-         */
-        local_timer_interrupt (irq, dev_id);
-
-        return IRQ_HANDLED;
-#else
-        return timer_interrupt (irq, dev_id);
-#endif
-}
-
-
-
 /*
  * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
  */
@@ -185,7 +114,6 @@ void __init plat_timer_setup(struct irqaction *irq)
         }
 
         /* we are using the cpu counter for timer interrupts */
-        irq->handler = sim_timer_interrupt;
         setup_irq(mips_cpu_timer_irq, irq);
 
 #ifdef CONFIG_SMP
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index 18348321795d..338c0d706988 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -20,10 +20,10 @@
 #include <asm/mipsregs.h>
 #include <asm/addrspace.h>
 #include <asm/irq_cpu.h>
-
 #include <asm/sgi/ioc.h>
 #include <asm/sgi/hpc3.h>
 #include <asm/sgi/ip22.h>
+#include <asm/time.h>
 
 /* #define DEBUG_SGINT */
 
@@ -204,7 +204,6 @@ static struct irqaction map1_cascade = {
 #define SGI_INTERRUPTS SGINT_LOCAL3
 #endif
 
-extern void indy_r4k_timer_interrupt(void);
 extern void indy_8254timer_irq(void);
 
 /*
@@ -243,7 +242,7 @@ asmlinkage void plat_irq_dispatch(void)
          * First we check for r4k counter/timer IRQ.
          */
         if (pending & CAUSEF_IP7)
-                indy_r4k_timer_interrupt();
+                ll_timer_interrupt(SGI_TIMER_IRQ, NULL);
         else if (pending & CAUSEF_IP2)
                 indy_local0_irqdispatch();
         else if (pending & CAUSEF_IP3)
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c
index c49418067f93..a1df1f9e26fa 100644
--- a/arch/mips/sgi-ip22/ip22-time.c
+++ b/arch/mips/sgi-ip22/ip22-time.c
@@ -189,16 +189,6 @@ void indy_8254timer_irq(void)
         irq_exit();
 }
 
-void indy_r4k_timer_interrupt(void)
-{
-        int irq = SGI_TIMER_IRQ;
-
-        irq_enter();
-        kstat_this_cpu.irqs[irq]++;
-        timer_interrupt(irq, NULL);
-        irq_exit();
-}
-
 void __init plat_timer_setup(struct irqaction *irq)
 {
         /* over-write the handler, we use our own way */
diff --git a/arch/mips/sibyte/bcm1480/time.c b/arch/mips/sibyte/bcm1480/time.c
index 6f3f71bf4244..8519091d848b 100644
--- a/arch/mips/sibyte/bcm1480/time.c
+++ b/arch/mips/sibyte/bcm1480/time.c
@@ -103,18 +103,7 @@ void bcm1480_timer_interrupt(void)
         __raw_writeq(M_SCD_TIMER_ENABLE|M_SCD_TIMER_MODE_CONTINUOUS,
                      IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
 
-        if (cpu == 0) {
-                /*
-                 * CPU 0 handles the global timer interrupt job
-                 */
-                ll_timer_interrupt(irq);
-        }
-        else {
-                /*
-                 * other CPUs should just do profiling and process accounting
-                 */
-                ll_local_timer_interrupt(irq);
-        }
+        ll_timer_interrupt(irq);
 }
 
 static cycle_t bcm1480_hpt_read(void)
diff --git a/arch/mips/sibyte/sb1250/time.c b/arch/mips/sibyte/sb1250/time.c
index 2efffe15ff23..5bb83cd4c113 100644
--- a/arch/mips/sibyte/sb1250/time.c
+++ b/arch/mips/sibyte/sb1250/time.c
@@ -125,18 +125,7 @@ void sb1250_timer_interrupt(void)
         ____raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
                        IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
 
-        if (cpu == 0) {
-                /*
-                 * CPU 0 handles the global timer interrupt job
-                 */
-                ll_timer_interrupt(irq);
-        }
-        else {
-                /*
-                 * other CPUs should just do profiling and process accounting
-                 */
-                ll_local_timer_interrupt(irq);
-        }
+        ll_timer_interrupt(irq);
 }
 
 /*
diff --git a/include/asm-mips/time.h b/include/asm-mips/time.h
index 963507d33f69..3516b32c9efb 100644
--- a/include/asm-mips/time.h
+++ b/include/asm-mips/time.h
@@ -49,20 +49,14 @@ extern void (*mips_timer_ack)(void);
 extern struct clocksource clocksource_mips;
 
 /*
- * high-level timer interrupt routines.
+ * The low-level timer interrupt routine.
  */
-extern irqreturn_t timer_interrupt(int irq, void *dev_id);
-
-/*
- * the corresponding low-level timer interrupt routine.
- */
-extern asmlinkage void ll_timer_interrupt(int irq);
+extern void ll_timer_interrupt(int irq, void *dev_id);
 
 /*
  * profiling and process accouting is done separately in local_timer_interrupt
  */
 extern void local_timer_interrupt(int irq, void *dev_id);
-extern asmlinkage void ll_local_timer_interrupt(int irq);
 
 /*
  * board specific routines required by time_init().
@@ -78,4 +72,10 @@ extern void plat_timer_setup(struct irqaction *irq);
  */
 extern unsigned int mips_hpt_frequency;
 
+/*
+ * The performance counter IRQ on MIPS is a close relative to the timer IRQ
+ * so it lives here.
+ */
+extern int (*perf_irq)(void);
+
 #endif /* _ASM_TIME_H */
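
A minimal sketch, not part of the patch itself, of how a board's dispatch code is expected to hand the CP0 compare interrupt to the consolidated handler after this change, modeled on the au1000 and ip22 hunks above. MY_TIMER_IRQ is a hypothetical placeholder for the platform's timer IRQ number; the two-argument call matches the ll_timer_interrupt() prototype added to <asm/time.h>.

#include <linux/linkage.h>
#include <asm/mipsregs.h>       /* read_c0_cause(), read_c0_status(), ST0_IM, CAUSEF_IP7 */
#include <asm/time.h>           /* ll_timer_interrupt() */

#define MY_TIMER_IRQ 63         /* placeholder; platform specific */

asmlinkage void plat_irq_dispatch(void)
{
        unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;

        if (pending & CAUSEF_IP7)
                /* CP0 timer: forward to the shared low-level handler */
                ll_timer_interrupt(MY_TIMER_IRQ, NULL);
        /* ... platform-specific dispatch for the remaining IP bits ... */
}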