author		Kevin D. Kissell <kevink@paralogos.com>		2008-09-09 15:48:52 -0400
committer	Ralf Baechle <ralf@linux-mips.org>		2008-10-03 12:58:58 -0400
commit		8531a35e5e275b17c57c39b7911bc2b37025f28c (patch)
tree		c593e23c875d0639a8f422c0ceb8b2a7738d143e /arch/mips/kernel/cevt-r4k.c
parent		d2bb01b042a38219fbddaafc214c5beb96248d2f (diff)
[MIPS] SMTC: Fix SMTC dyntick support.
Rework of SMTC support to make it work with the new clock event system,
allowing "tickless" operation, and to make it compatible with the use of
the "wait_irqoff" idle loop. The new clocking scheme means that the
previously optional IPI instant replay mechanism is now required, and has
been made more robust.
Signed-off-by: Kevin D. Kissell <kevink@paralogos.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
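
The non-SMTC path kept by this patch is the standard one-shot R4K Count/Compare clockevent: set_next_event() arms CP0 Compare relative to CP0 Count, and the compare interrupt acknowledges by rewriting Compare before handing the tick to the clockevents core. The sketch below restates that programming pattern in condensed form as an illustration of what the diff preserves; the function name r4k_sketch_next_event and its comments are illustrative only and assume the usual <linux/clockchips.h>, <linux/errno.h> and <asm/mipsregs.h> context.

/*
 * Illustrative sketch of the one-shot Count/Compare programming kept for
 * non-SMTC kernels (compare with mips_next_event() in the diff below).
 */
static int r4k_sketch_next_event(unsigned long delta,
				 struct clock_event_device *evt)
{
	unsigned int cnt;

	cnt = read_c0_count();		/* free-running CP0 Count */
	cnt += delta;
	write_c0_compare(cnt);		/* interrupt fires when Count == Compare */

	/*
	 * If Count has already run past the programmed Compare value the
	 * interrupt may never fire; return -ETIME so the clockevents core
	 * retries with a larger delta.
	 */
	return ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
}

Under SMTC the same Compare update used to be bracketed by the dvpe()/evpe() calls removed here; per the new comment block in the diff, SMTC-specific variants of these routines take that over.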
Diffstat (limited to 'arch/mips/kernel/cevt-r4k.c')
-rw-r--r--	arch/mips/kernel/cevt-r4k.c	173
1 files changed, 48 insertions, 125 deletions
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0d..4a4c59f2737a 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,6 +12,14 @@
 
 #include <asm/smtc_ipi.h>
 #include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+
+#ifndef CONFIG_MIPS_MT_SMTC
 
 static int mips_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
 	unsigned int cnt;
 	int res;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	{
-	unsigned long flags, vpflags;
-	local_irq_save(flags);
-	vpflags = dvpe();
-#endif
 	cnt = read_c0_count();
 	cnt += delta;
 	write_c0_compare(cnt);
 	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
-	evpe(vpflags);
-	local_irq_restore(flags);
-	}
-#endif
 	return res;
 }
 
-static void mips_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+			 struct clock_event_device *evt)
 {
 	/* Nothing to do ...  */
 }
 
-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
 
-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
-	write_c0_compare(read_c0_compare());
-}
+#ifndef CONFIG_MIPS_MT_SMTC
 
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
-	/*
-	 * The performance counter overflow interrupt may be shared with the
-	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-	 * and we can't reliably determine if a counter interrupt has also
-	 * happened (!r2) then don't check for a timer interrupt.
-	 */
-	return (cp0_perfcount_irq < 0) &&
-		perf_irq() == IRQ_HANDLED &&
-		!r2;
-}
-
-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
 	const int r2 = cpu_has_mips_r2;
 	struct clock_event_device *cd;
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 	 * interrupt.  Being the paranoiacs we are we check anyway.
 	 */
 	if (!r2 || (read_c0_cause() & (1 << 30))) {
-		c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
-		if (cpu_data[cpu].vpe_id)
-			goto out;
-		cpu = 0;
-#endif
+		/* Clear Count/Compare Interrupt */
+		write_c0_compare(read_c0_compare());
 		cd = &per_cpu(mips_clockevent_device, cpu);
 		cd->event_handler(cd);
 	}
@@ -107,65 +78,16 @@ out:
 	return IRQ_HANDLED;
 }
 
-static struct irqaction c0_compare_irqaction = {
+#endif /* Not CONFIG_MIPS_MT_SMTC */
+
+struct irqaction c0_compare_irqaction = {
 	.handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
-	.flags = IRQF_DISABLED,
-#else
 	.flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
 	.name = "timer",
 };
 
-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
-{
-}
-
-static void mips_broadcast(cpumask_t mask)
-{
-	unsigned int cpu;
-
-	for_each_cpu_mask(cpu, mask)
-		smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
-	//uint64_t mips_freq = mips_hpt_^frequency;
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *cd;
 
-	cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
-	cd->name = "SMTC";
-	cd->features = CLOCK_EVT_FEAT_DUMMY;
-
-	/* Calculate the min / max delta */
-	cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
-	cd->shift = 0; //32;
-	cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
-	cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
-
-	cd->rating = 200;
-	cd->irq = 17; //-1;
-//	if (cpu)
-//		cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-//	else
-	cd->cpumask = cpumask_of_cpu(cpu);
-
-	cd->set_mode = smtc_set_mode;
-
-	cd->broadcast = mips_broadcast;
-
-	clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
+void mips_event_handler(struct clock_event_device *dev)
 {
 }
 
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
 	return (read_c0_cause() >> cp0_compare_irq) & 0x100;
 }
 
-static int c0_compare_int_usable(void)
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register.  4 time the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
+ */
+
+#define compare_change_hazard() \
+	do { \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+	} while (0)
+
+int c0_compare_int_usable(void)
 {
 	unsigned int delta;
 	unsigned int cnt;
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
 	 */
 	if (c0_compare_int_pending()) {
 		write_c0_compare(read_c0_count());
-		irq_disable_hazard();
+		compare_change_hazard();
 		if (c0_compare_int_pending())
 			return 0;
 	}
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
 		cnt = read_c0_count();
 		cnt += delta;
 		write_c0_compare(cnt);
-		irq_disable_hazard();
+		compare_change_hazard();
 		if ((int)(read_c0_count() - cnt) < 0)
 			break;
 		/* increase delta if the timer was already expired */
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
 	while ((int)(read_c0_count() - cnt) <= 0)
 		;	/* Wait for expiry  */
 
+	compare_change_hazard();
 	if (!c0_compare_int_pending())
 		return 0;
 
 	write_c0_compare(read_c0_count());
-	irq_disable_hazard();
+	compare_change_hazard();
 	if (c0_compare_int_pending())
 		return 0;
 
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
 	return 1;
 }
 
+#ifndef CONFIG_MIPS_MT_SMTC
+
 int __cpuinit mips_clockevent_init(void)
 {
 	uint64_t mips_freq = mips_hpt_frequency;
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
 	if (!cpu_has_counter || !mips_hpt_frequency)
 		return -ENXIO;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	setup_smtc_dummy_clockevent_device();
-
-	/*
-	 * On SMTC we only register VPE0's compare interrupt as clockevent
-	 * device.
-	 */
-	if (cpu)
-		return 0;
-#endif
-
 	if (!c0_compare_int_usable())
 		return -ENXIO;
 
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
 
 	cd->rating = 300;
 	cd->irq = irq;
-#ifdef CONFIG_MIPS_MT_SMTC
-	cd->cpumask = CPU_MASK_ALL;
-#else
 	cd->cpumask = cpumask_of_cpu(cpu);
-#endif
 	cd->set_next_event = mips_next_event;
-	cd->set_mode = mips_set_mode;
+	cd->set_mode = mips_set_clock_mode;
 	cd->event_handler = mips_event_handler;
 
 	clockevents_register_device(cd);
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)
 
 	cp0_timer_irq_installed = 1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
-	setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
 	setup_irq(irq, &c0_compare_irqaction);
-#endif
 
 	return 0;
 }
+
+#endif /* Not CONFIG_MIPS_MT_SMTC */
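
Several of these routines lose their static qualifier so an SMTC build can reuse or replace them, and the newly included <asm/cevt-r4k.h> presumably carries the matching declarations. The header itself is not part of this diff; the block below is a hypothetical reconstruction based only on the symbols given external linkage above, not the actual file added by the commit.

/*
 * Hypothetical sketch of <asm/cevt-r4k.h>; the real header is outside this
 * diff.  It would need to declare the symbols made non-static above so that
 * the SMTC-specific variants can share them.
 */
#ifndef __ASM_CEVT_R4K_H
#define __ASM_CEVT_R4K_H

#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);

extern int cp0_timer_irq_installed;
extern struct irqaction c0_compare_irqaction;

extern void mips_set_clock_mode(enum clock_event_mode mode,
				struct clock_event_device *evt);
extern void mips_event_handler(struct clock_event_device *dev);
extern irqreturn_t c0_compare_interrupt(int irq, void *dev_id);
extern int c0_compare_int_usable(void);

#endif /* __ASM_CEVT_R4K_H */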