author		Kevin D. Kissell <kevink@paralogos.com>		2008-09-09 15:48:52 -0400
committer	Ralf Baechle <ralf@linux-mips.org>		2008-10-03 12:58:58 -0400
commit		8531a35e5e275b17c57c39b7911bc2b37025f28c (patch)
tree		c593e23c875d0639a8f422c0ceb8b2a7738d143e /arch/mips
parent		d2bb01b042a38219fbddaafc214c5beb96248d2f (diff)
[MIPS] SMTC: Fix SMTC dyntick support.
Rework of SMTC support to make it work with the new clock event system,
allowing "tickless" operation, and to make it compatible with the use of
the "wait_irqoff" idle loop. The new clocking scheme means that the
previously optional IPI instant replay mechanism is now required, and has
been made more robust.
Signed-off-by: Kevin D. Kissell <kevink@paralogos.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
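The heart of the reworked clocking scheme is that each TC ("CPU") keeps its own virtual next-event timestamp while the single per-VPE Count/Compare pair is always programmed for the soonest of them. The sketch below is an illustrative condensation of the selection logic added in arch/mips/kernel/cevt-smtc.c further down; the macros and arrays are the patch's own, but the wrapper function here is simplified and hypothetical, not the kernel's actual mips_next_event().

	#define MAKEVALID(x)	(((x) == 0L) ? 1L : (x))
	#define ISVALID(x)	((x) != 0L)
	/* truncated modular "earlier than" test, relative to the current Count value */
	#define IS_SOONER(a, b, ref) \
		(((a) - (unsigned long)(ref)) < ((b) - (unsigned long)(ref)))

	/* [vpe][cpu]: absolute Count values to fire at; 0 means "nothing pending" */
	static unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
	static int smtc_nextinvpe[NR_CPUS];	/* which CPU owns the soonest event */

	/* Simplified: note this CPU's next event, reprogram Compare if it is now soonest */
	static void smtc_note_next_event(int vpe, int cpu, unsigned long delta)
	{
		unsigned long ref = (unsigned long)read_c0_count();
		unsigned long timestamp = MAKEVALID(ref + delta);

		smtc_nexttime[vpe][cpu] = timestamp;
		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
		    IS_SOONER(timestamp, smtc_nexttime[vpe][smtc_nextinvpe[vpe]], ref)) {
			smtc_nextinvpe[vpe] = cpu;
			write_c0_compare(timestamp);	/* the VPE's one shared Compare */
		}
	}

When that Compare interrupt fires, smtc_distribute_timer() in the new file walks the same array, runs the clockevent handler for every CPU on the VPE whose timestamp has passed (sending an SMTC IPI when that CPU is a different TC), and reprograms Compare for the next soonest entry.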
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/Kconfig	26
-rw-r--r--	arch/mips/kernel/Makefile	1
-rw-r--r--	arch/mips/kernel/cevt-r4k.c	173
-rw-r--r--	arch/mips/kernel/cevt-smtc.c	321
-rw-r--r--	arch/mips/kernel/cpu-probe.c	10
-rw-r--r--	arch/mips/kernel/genex.S	4
-rw-r--r--	arch/mips/kernel/smtc.c	252
-rw-r--r--	arch/mips/mti-malta/malta-smtc.c	9
8 files changed, 523 insertions, 273 deletions
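Because a tick destined for a TC that has interrupts inhibited (IXMT) can now arrive only as a queued SMTC IPI, replaying deferred IPIs when interrupts are re-enabled is no longer optional, which is why MIPS_MT_SMTC_INSTANT_REPLAY disappears from Kconfig below. A condensed, illustrative sketch of the replay loop that smtc.c gains in this patch (the queue type and the __smtc_ipi_dq()/self_ipi() helpers are the existing SMTC primitives; this is not a drop-in copy of the final code):

	void smtc_ipi_replay(void)
	{
		unsigned int cpu = smp_processor_id();

		/*
		 * Drain IPIs (clock ticks included) that were queued for this
		 * TC while it had interrupts masked, re-posting each one to
		 * ourselves so it is handled as a normal pseudo-interrupt.
		 */
		while (IPIQ[cpu].head != NULL) {
			struct smtc_ipi_q *q = &IPIQ[cpu];
			struct smtc_ipi *pipi;
			unsigned long flags;

			local_irq_save(flags);
			spin_lock(&q->lock);
			pipi = __smtc_ipi_dq(q);
			spin_unlock(&q->lock);
			/* raw restore: avoid recursing through the irq-restore hook */
			__raw_local_irq_restore(flags);

			if (pipi) {
				self_ipi(pipi);
				smtc_cpu_stats[cpu].selfipis++;
			}
		}
	}

raw_local_irq_restore() invokes this on every interrupt re-enable; deferred_smtc_ipi(), reworked along the same lines below, does the equivalent drain from the context-restore path.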
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 49896a2a1d72..c930b8ceb418 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1403,7 +1403,6 @@ config MIPS_MT_SMTC | |||
1403 | depends on CPU_MIPS32_R2 | 1403 | depends on CPU_MIPS32_R2 |
1404 | #depends on CPU_MIPS64_R2 # once there is hardware ... | 1404 | #depends on CPU_MIPS64_R2 # once there is hardware ... |
1405 | depends on SYS_SUPPORTS_MULTITHREADING | 1405 | depends on SYS_SUPPORTS_MULTITHREADING |
1406 | select GENERIC_CLOCKEVENTS_BROADCAST | ||
1407 | select CPU_MIPSR2_IRQ_VI | 1406 | select CPU_MIPSR2_IRQ_VI |
1408 | select CPU_MIPSR2_IRQ_EI | 1407 | select CPU_MIPSR2_IRQ_EI |
1409 | select MIPS_MT | 1408 | select MIPS_MT |
@@ -1451,32 +1450,17 @@ config MIPS_VPE_LOADER | |||
1451 | Includes a loader for loading an elf relocatable object | 1450 | Includes a loader for loading an elf relocatable object |
1452 | onto another VPE and running it. | 1451 | onto another VPE and running it. |
1453 | 1452 | ||
1454 | config MIPS_MT_SMTC_INSTANT_REPLAY | ||
1455 | bool "Low-latency Dispatch of Deferred SMTC IPIs" | ||
1456 | depends on MIPS_MT_SMTC && !PREEMPT | ||
1457 | default y | ||
1458 | help | ||
1459 | SMTC pseudo-interrupts between TCs are deferred and queued | ||
1460 | if the target TC is interrupt-inhibited (IXMT). In the first | ||
1461 | SMTC prototypes, these queued IPIs were serviced on return | ||
1462 | to user mode, or on entry into the kernel idle loop. The | ||
1463 | INSTANT_REPLAY option dispatches them as part of local_irq_restore() | ||
1464 | processing, which adds runtime overhead (hence the option to turn | ||
1465 | it off), but ensures that IPIs are handled promptly even under | ||
1466 | heavy I/O interrupt load. | ||
1467 | |||
1468 | config MIPS_MT_SMTC_IM_BACKSTOP | 1453 | config MIPS_MT_SMTC_IM_BACKSTOP |
1469 | bool "Use per-TC register bits as backstop for inhibited IM bits" | 1454 | bool "Use per-TC register bits as backstop for inhibited IM bits" |
1470 | depends on MIPS_MT_SMTC | 1455 | depends on MIPS_MT_SMTC |
1471 | default y | 1456 | default n |
1472 | help | 1457 | help |
1473 | To support multiple TC microthreads acting as "CPUs" within | 1458 | To support multiple TC microthreads acting as "CPUs" within |
1474 | a VPE, VPE-wide interrupt mask bits must be specially manipulated | 1459 | a VPE, VPE-wide interrupt mask bits must be specially manipulated |
1475 | during interrupt handling. To support legacy drivers and interrupt | 1460 | during interrupt handling. To support legacy drivers and interrupt |
1476 | controller management code, SMTC has a "backstop" to track and | 1461 | controller management code, SMTC has a "backstop" to track and |
1477 | if necessary restore the interrupt mask. This has some performance | 1462 | if necessary restore the interrupt mask. This has some performance |
1478 | impact on interrupt service overhead. Disable it only if you know | 1463 | impact on interrupt service overhead. |
1479 | what you are doing. | ||
1480 | 1464 | ||
1481 | config MIPS_MT_SMTC_IRQAFF | 1465 | config MIPS_MT_SMTC_IRQAFF |
1482 | bool "Support IRQ affinity API" | 1466 | bool "Support IRQ affinity API" |
@@ -1486,10 +1470,8 @@ config MIPS_MT_SMTC_IRQAFF | |||
1486 | Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.) | 1470 | Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.) |
1487 | for SMTC Linux kernel. Requires platform support, of which | 1471 | for SMTC Linux kernel. Requires platform support, of which |
1488 | an example can be found in the MIPS kernel i8259 and Malta | 1472 | an example can be found in the MIPS kernel i8259 and Malta |
1489 | platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY | 1473 | platform code. Adds some overhead to interrupt dispatch, and |
1490 | be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to | 1474 | should be used only if you know what you are doing. |
1491 | interrupt dispatch, and should be used only if you know what | ||
1492 | you are doing. | ||
1493 | 1475 | ||
1494 | config MIPS_VPE_LOADER_TOM | 1476 | config MIPS_VPE_LOADER_TOM |
1495 | bool "Load VPE program into memory hidden from linux" | 1477 | bool "Load VPE program into memory hidden from linux" |
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 706f93974797..25775cb54000 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ | |||
10 | 10 | ||
11 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o | 11 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o |
12 | obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o | 12 | obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o |
13 | obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o | ||
13 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o | 14 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o |
14 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o | 15 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o |
15 | obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o | 16 | obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o |
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0d..4a4c59f2737a 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,6 +12,14 @@ | |||
12 | 12 | ||
13 | #include <asm/smtc_ipi.h> | 13 | #include <asm/smtc_ipi.h> |
14 | #include <asm/time.h> | 14 | #include <asm/time.h> |
15 | #include <asm/cevt-r4k.h> | ||
16 | |||
17 | /* | ||
18 | * The SMTC Kernel for the 34K, 1004K, et al. replaces several | ||
19 | * of these routines with SMTC-specific variants. | ||
20 | */ | ||
21 | |||
22 | #ifndef CONFIG_MIPS_MT_SMTC | ||
15 | 23 | ||
16 | static int mips_next_event(unsigned long delta, | 24 | static int mips_next_event(unsigned long delta, |
17 | struct clock_event_device *evt) | 25 | struct clock_event_device *evt) |
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta, | |||
19 | unsigned int cnt; | 27 | unsigned int cnt; |
20 | int res; | 28 | int res; |
21 | 29 | ||
22 | #ifdef CONFIG_MIPS_MT_SMTC | ||
23 | { | ||
24 | unsigned long flags, vpflags; | ||
25 | local_irq_save(flags); | ||
26 | vpflags = dvpe(); | ||
27 | #endif | ||
28 | cnt = read_c0_count(); | 30 | cnt = read_c0_count(); |
29 | cnt += delta; | 31 | cnt += delta; |
30 | write_c0_compare(cnt); | 32 | write_c0_compare(cnt); |
31 | res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; | 33 | res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; |
32 | #ifdef CONFIG_MIPS_MT_SMTC | ||
33 | evpe(vpflags); | ||
34 | local_irq_restore(flags); | ||
35 | } | ||
36 | #endif | ||
37 | return res; | 34 | return res; |
38 | } | 35 | } |
39 | 36 | ||
40 | static void mips_set_mode(enum clock_event_mode mode, | 37 | #endif /* CONFIG_MIPS_MT_SMTC */ |
41 | struct clock_event_device *evt) | 38 | |
39 | void mips_set_clock_mode(enum clock_event_mode mode, | ||
40 | struct clock_event_device *evt) | ||
42 | { | 41 | { |
43 | /* Nothing to do ... */ | 42 | /* Nothing to do ... */ |
44 | } | 43 | } |
45 | 44 | ||
46 | static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); | 45 | DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
47 | static int cp0_timer_irq_installed; | 46 | int cp0_timer_irq_installed; |
48 | 47 | ||
49 | /* | 48 | #ifndef CONFIG_MIPS_MT_SMTC |
50 | * Timer ack for an R4k-compatible timer of a known frequency. | ||
51 | */ | ||
52 | static void c0_timer_ack(void) | ||
53 | { | ||
54 | write_c0_compare(read_c0_compare()); | ||
55 | } | ||
56 | 49 | ||
57 | /* | 50 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) |
58 | * Possibly handle a performance counter interrupt. | ||
59 | * Return true if the timer interrupt should not be checked | ||
60 | */ | ||
61 | static inline int handle_perf_irq(int r2) | ||
62 | { | ||
63 | /* | ||
64 | * The performance counter overflow interrupt may be shared with the | ||
65 | * timer interrupt (cp0_perfcount_irq < 0). If it is and a | ||
66 | * performance counter has overflowed (perf_irq() == IRQ_HANDLED) | ||
67 | * and we can't reliably determine if a counter interrupt has also | ||
68 | * happened (!r2) then don't check for a timer interrupt. | ||
69 | */ | ||
70 | return (cp0_perfcount_irq < 0) && | ||
71 | perf_irq() == IRQ_HANDLED && | ||
72 | !r2; | ||
73 | } | ||
74 | |||
75 | static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | ||
76 | { | 51 | { |
77 | const int r2 = cpu_has_mips_r2; | 52 | const int r2 = cpu_has_mips_r2; |
78 | struct clock_event_device *cd; | 53 | struct clock_event_device *cd; |
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | |||
93 | * interrupt. Being the paranoiacs we are we check anyway. | 68 | * interrupt. Being the paranoiacs we are we check anyway. |
94 | */ | 69 | */ |
95 | if (!r2 || (read_c0_cause() & (1 << 30))) { | 70 | if (!r2 || (read_c0_cause() & (1 << 30))) { |
96 | c0_timer_ack(); | 71 | /* Clear Count/Compare Interrupt */ |
97 | #ifdef CONFIG_MIPS_MT_SMTC | 72 | write_c0_compare(read_c0_compare()); |
98 | if (cpu_data[cpu].vpe_id) | ||
99 | goto out; | ||
100 | cpu = 0; | ||
101 | #endif | ||
102 | cd = &per_cpu(mips_clockevent_device, cpu); | 73 | cd = &per_cpu(mips_clockevent_device, cpu); |
103 | cd->event_handler(cd); | 74 | cd->event_handler(cd); |
104 | } | 75 | } |
@@ -107,65 +78,16 @@ out: | |||
107 | return IRQ_HANDLED; | 78 | return IRQ_HANDLED; |
108 | } | 79 | } |
109 | 80 | ||
110 | static struct irqaction c0_compare_irqaction = { | 81 | #endif /* Not CONFIG_MIPS_MT_SMTC */ |
82 | |||
83 | struct irqaction c0_compare_irqaction = { | ||
111 | .handler = c0_compare_interrupt, | 84 | .handler = c0_compare_interrupt, |
112 | #ifdef CONFIG_MIPS_MT_SMTC | ||
113 | .flags = IRQF_DISABLED, | ||
114 | #else | ||
115 | .flags = IRQF_DISABLED | IRQF_PERCPU, | 85 | .flags = IRQF_DISABLED | IRQF_PERCPU, |
116 | #endif | ||
117 | .name = "timer", | 86 | .name = "timer", |
118 | }; | 87 | }; |
119 | 88 | ||
120 | #ifdef CONFIG_MIPS_MT_SMTC | ||
121 | DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); | ||
122 | |||
123 | static void smtc_set_mode(enum clock_event_mode mode, | ||
124 | struct clock_event_device *evt) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | static void mips_broadcast(cpumask_t mask) | ||
129 | { | ||
130 | unsigned int cpu; | ||
131 | |||
132 | for_each_cpu_mask(cpu, mask) | ||
133 | smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); | ||
134 | } | ||
135 | |||
136 | static void setup_smtc_dummy_clockevent_device(void) | ||
137 | { | ||
138 | //uint64_t mips_freq = mips_hpt_frequency; | ||
139 | unsigned int cpu = smp_processor_id(); | ||
140 | struct clock_event_device *cd; | ||
141 | 89 | ||
142 | cd = &per_cpu(smtc_dummy_clockevent_device, cpu); | 90 | void mips_event_handler(struct clock_event_device *dev) |
143 | |||
144 | cd->name = "SMTC"; | ||
145 | cd->features = CLOCK_EVT_FEAT_DUMMY; | ||
146 | |||
147 | /* Calculate the min / max delta */ | ||
148 | cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); | ||
149 | cd->shift = 0; //32; | ||
150 | cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd); | ||
151 | cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd); | ||
152 | |||
153 | cd->rating = 200; | ||
154 | cd->irq = 17; //-1; | ||
155 | // if (cpu) | ||
156 | // cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu); | ||
157 | // else | ||
158 | cd->cpumask = cpumask_of_cpu(cpu); | ||
159 | |||
160 | cd->set_mode = smtc_set_mode; | ||
161 | |||
162 | cd->broadcast = mips_broadcast; | ||
163 | |||
164 | clockevents_register_device(cd); | ||
165 | } | ||
166 | #endif | ||
167 | |||
168 | static void mips_event_handler(struct clock_event_device *dev) | ||
169 | { | 91 | { |
170 | } | 92 | } |
171 | 93 | ||
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void) | |||
177 | return (read_c0_cause() >> cp0_compare_irq) & 0x100; | 99 | return (read_c0_cause() >> cp0_compare_irq) & 0x100; |
178 | } | 100 | } |
179 | 101 | ||
180 | static int c0_compare_int_usable(void) | 102 | /* |
103 | * Compare interrupt can be routed and latched outside the core, | ||
104 | * so a single execution hazard barrier may not be enough to give | ||
105 | * it time to clear as seen in the Cause register. 4 times the | ||
106 | * pipeline depth seems reasonably conservative, and empirically | ||
107 | * works better in configurations with high CPU/bus clock ratios. | ||
108 | */ | ||
109 | |||
110 | #define compare_change_hazard() \ | ||
111 | do { \ | ||
112 | irq_disable_hazard(); \ | ||
113 | irq_disable_hazard(); \ | ||
114 | irq_disable_hazard(); \ | ||
115 | irq_disable_hazard(); \ | ||
116 | } while (0) | ||
117 | |||
118 | int c0_compare_int_usable(void) | ||
181 | { | 119 | { |
182 | unsigned int delta; | 120 | unsigned int delta; |
183 | unsigned int cnt; | 121 | unsigned int cnt; |
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void) | |||
187 | */ | 125 | */ |
188 | if (c0_compare_int_pending()) { | 126 | if (c0_compare_int_pending()) { |
189 | write_c0_compare(read_c0_count()); | 127 | write_c0_compare(read_c0_count()); |
190 | irq_disable_hazard(); | 128 | compare_change_hazard(); |
191 | if (c0_compare_int_pending()) | 129 | if (c0_compare_int_pending()) |
192 | return 0; | 130 | return 0; |
193 | } | 131 | } |
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void) | |||
196 | cnt = read_c0_count(); | 134 | cnt = read_c0_count(); |
197 | cnt += delta; | 135 | cnt += delta; |
198 | write_c0_compare(cnt); | 136 | write_c0_compare(cnt); |
199 | irq_disable_hazard(); | 137 | compare_change_hazard(); |
200 | if ((int)(read_c0_count() - cnt) < 0) | 138 | if ((int)(read_c0_count() - cnt) < 0) |
201 | break; | 139 | break; |
202 | /* increase delta if the timer was already expired */ | 140 | /* increase delta if the timer was already expired */ |
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void) | |||
205 | while ((int)(read_c0_count() - cnt) <= 0) | 143 | while ((int)(read_c0_count() - cnt) <= 0) |
206 | ; /* Wait for expiry */ | 144 | ; /* Wait for expiry */ |
207 | 145 | ||
146 | compare_change_hazard(); | ||
208 | if (!c0_compare_int_pending()) | 147 | if (!c0_compare_int_pending()) |
209 | return 0; | 148 | return 0; |
210 | 149 | ||
211 | write_c0_compare(read_c0_count()); | 150 | write_c0_compare(read_c0_count()); |
212 | irq_disable_hazard(); | 151 | compare_change_hazard(); |
213 | if (c0_compare_int_pending()) | 152 | if (c0_compare_int_pending()) |
214 | return 0; | 153 | return 0; |
215 | 154 | ||
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void) | |||
219 | return 1; | 158 | return 1; |
220 | } | 159 | } |
221 | 160 | ||
161 | #ifndef CONFIG_MIPS_MT_SMTC | ||
162 | |||
222 | int __cpuinit mips_clockevent_init(void) | 163 | int __cpuinit mips_clockevent_init(void) |
223 | { | 164 | { |
224 | uint64_t mips_freq = mips_hpt_frequency; | 165 | uint64_t mips_freq = mips_hpt_frequency; |
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void) | |||
229 | if (!cpu_has_counter || !mips_hpt_frequency) | 170 | if (!cpu_has_counter || !mips_hpt_frequency) |
230 | return -ENXIO; | 171 | return -ENXIO; |
231 | 172 | ||
232 | #ifdef CONFIG_MIPS_MT_SMTC | ||
233 | setup_smtc_dummy_clockevent_device(); | ||
234 | |||
235 | /* | ||
236 | * On SMTC we only register VPE0's compare interrupt as clockevent | ||
237 | * device. | ||
238 | */ | ||
239 | if (cpu) | ||
240 | return 0; | ||
241 | #endif | ||
242 | |||
243 | if (!c0_compare_int_usable()) | 173 | if (!c0_compare_int_usable()) |
244 | return -ENXIO; | 174 | return -ENXIO; |
245 | 175 | ||
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void) | |||
265 | 195 | ||
266 | cd->rating = 300; | 196 | cd->rating = 300; |
267 | cd->irq = irq; | 197 | cd->irq = irq; |
268 | #ifdef CONFIG_MIPS_MT_SMTC | ||
269 | cd->cpumask = CPU_MASK_ALL; | ||
270 | #else | ||
271 | cd->cpumask = cpumask_of_cpu(cpu); | 198 | cd->cpumask = cpumask_of_cpu(cpu); |
272 | #endif | ||
273 | cd->set_next_event = mips_next_event; | 199 | cd->set_next_event = mips_next_event; |
274 | cd->set_mode = mips_set_mode; | 200 | cd->set_mode = mips_set_clock_mode; |
275 | cd->event_handler = mips_event_handler; | 201 | cd->event_handler = mips_event_handler; |
276 | 202 | ||
277 | clockevents_register_device(cd); | 203 | clockevents_register_device(cd); |
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void) | |||
281 | 207 | ||
282 | cp0_timer_irq_installed = 1; | 208 | cp0_timer_irq_installed = 1; |
283 | 209 | ||
284 | #ifdef CONFIG_MIPS_MT_SMTC | ||
285 | #define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq) | ||
286 | setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT); | ||
287 | #else | ||
288 | setup_irq(irq, &c0_compare_irqaction); | 210 | setup_irq(irq, &c0_compare_irqaction); |
289 | #endif | ||
290 | 211 | ||
291 | return 0; | 212 | return 0; |
292 | } | 213 | } |
214 | |||
215 | #endif /* Not CONFIG_MIPS_MT_SMTC */ | ||
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
new file mode 100644
index 000000000000..5162fe4b5952
--- /dev/null
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -0,0 +1,321 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2007 MIPS Technologies, Inc. | ||
7 | * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> | ||
8 | * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl | ||
9 | */ | ||
10 | #include <linux/clockchips.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/percpu.h> | ||
13 | |||
14 | #include <asm/smtc_ipi.h> | ||
15 | #include <asm/time.h> | ||
16 | #include <asm/cevt-r4k.h> | ||
17 | |||
18 | /* | ||
19 | * Variant clock event timer support for SMTC on MIPS 34K, 1004K | ||
20 | * or other MIPS MT cores. | ||
21 | * | ||
22 | * Notes on SMTC Support: | ||
23 | * | ||
24 | * SMTC has multiple microthread TCs pretending to be Linux CPUs. | ||
25 | * But there's only one Count/Compare pair per VPE, and Compare | ||
26 | * interrupts are taken opportunistically by available TCs | ||
27 | * bound to the VPE with the Count register. The new timer | ||
28 | * framework provides for global broadcasts, but we really | ||
29 | * want VPE-level multicasts for best behavior. So instead | ||
30 | * of invoking the high-level clock-event broadcast code, | ||
31 | * this version of SMTC support uses the historical SMTC | ||
32 | * multicast mechanisms "under the hood", appearing to the | ||
33 | * generic clock layer as if the interrupts are per-CPU. | ||
34 | * | ||
35 | * The approach taken here is to maintain a set of NR_CPUS | ||
36 | * virtual timers, and track which "CPU" needs to be alerted | ||
37 | * at each event. | ||
38 | * | ||
39 | * It's unlikely that we'll see a MIPS MT core with more than | ||
40 | * 2 VPEs, but we *know* that we won't need to handle more | ||
41 | * VPEs than we have "CPUs". So NR_CPUS arrays of NR_CPUS elements | ||
42 | * are always going to be overkill, but always going to be enough. | ||
43 | */ | ||
44 | |||
45 | unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; | ||
46 | static int smtc_nextinvpe[NR_CPUS]; | ||
47 | |||
48 | /* | ||
49 | * Timestamps stored are absolute values to be programmed | ||
50 | * into Count register. Valid timestamps will never be zero. | ||
51 | * If a Zero Count value is actually calculated, it is converted | ||
52 | * to be a 1, which will introduce one or two CPU cycles of error | ||
53 | * roughly once every four billion events, which at 1000 HZ means | ||
54 | * about once every 50 days. If that's actually a problem, one | ||
55 | * could alternate squashing 0 to 1 and to -1. | ||
56 | */ | ||
57 | |||
58 | #define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) | ||
59 | #define ISVALID(x) ((x) != 0L) | ||
60 | |||
61 | /* | ||
62 | * Time comparison is subtle, as it's really truncated | ||
63 | * modular arithmetic. | ||
64 | */ | ||
65 | |||
66 | #define IS_SOONER(a, b, reference) \ | ||
67 | (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) | ||
68 | |||
69 | /* | ||
70 | * CATCHUP_INCREMENT, used when the function falls behind the counter. | ||
71 | * Could be an increasing function instead of a constant; | ||
72 | */ | ||
73 | |||
74 | #define CATCHUP_INCREMENT 64 | ||
75 | |||
76 | static int mips_next_event(unsigned long delta, | ||
77 | struct clock_event_device *evt) | ||
78 | { | ||
79 | unsigned long flags; | ||
80 | unsigned int mtflags; | ||
81 | unsigned long timestamp, reference, previous; | ||
82 | unsigned long nextcomp = 0L; | ||
83 | int vpe = current_cpu_data.vpe_id; | ||
84 | int cpu = smp_processor_id(); | ||
85 | local_irq_save(flags); | ||
86 | mtflags = dmt(); | ||
87 | |||
88 | /* | ||
89 | * Maintain the per-TC virtual timer | ||
90 | * and program the per-VPE shared Count register | ||
91 | * as appropriate here... | ||
92 | */ | ||
93 | reference = (unsigned long)read_c0_count(); | ||
94 | timestamp = MAKEVALID(reference + delta); | ||
95 | /* | ||
96 | * To really model the clock, we have to catch the case | ||
97 | * where the current next-in-VPE timestamp is the old | ||
98 | * timestamp for the calling CPU, but the new value is | ||
99 | * in fact later. In that case, we have to do a full | ||
100 | * scan and discover the new next-in-VPE CPU id and | ||
101 | * timestamp. | ||
102 | */ | ||
103 | previous = smtc_nexttime[vpe][cpu]; | ||
104 | if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) | ||
105 | && IS_SOONER(previous, timestamp, reference)) { | ||
106 | int i; | ||
107 | int soonest = cpu; | ||
108 | |||
109 | /* | ||
110 | * Update timestamp array here, so that new | ||
111 | * value gets considered along with those of | ||
112 | * other virtual CPUs on the VPE. | ||
113 | */ | ||
114 | smtc_nexttime[vpe][cpu] = timestamp; | ||
115 | for_each_online_cpu(i) { | ||
116 | if (ISVALID(smtc_nexttime[vpe][i]) | ||
117 | && IS_SOONER(smtc_nexttime[vpe][i], | ||
118 | smtc_nexttime[vpe][soonest], reference)) { | ||
119 | soonest = i; | ||
120 | } | ||
121 | } | ||
122 | smtc_nextinvpe[vpe] = soonest; | ||
123 | nextcomp = smtc_nexttime[vpe][soonest]; | ||
124 | /* | ||
125 | * Otherwise, we don't have to process the whole array rank, | ||
126 | * we just have to see if the event horizon has gotten closer. | ||
127 | */ | ||
128 | } else { | ||
129 | if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || | ||
130 | IS_SOONER(timestamp, | ||
131 | smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { | ||
132 | smtc_nextinvpe[vpe] = cpu; | ||
133 | nextcomp = timestamp; | ||
134 | } | ||
135 | /* | ||
136 | * Since next-in-VPE may be the same as the executing | ||
137 | * virtual CPU, we update the array *after* checking | ||
138 | * its value. | ||
139 | */ | ||
140 | smtc_nexttime[vpe][cpu] = timestamp; | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * It may be that, in fact, we don't need to update Compare, | ||
145 | * but if we do, we want to make sure we didn't fall into | ||
146 | * a crack just behind Count. | ||
147 | */ | ||
148 | if (ISVALID(nextcomp)) { | ||
149 | write_c0_compare(nextcomp); | ||
150 | ehb(); | ||
151 | /* | ||
152 | * We never return an error, we just make sure | ||
153 | * that we trigger the handlers as quickly as | ||
154 | * we can if we fell behind. | ||
155 | */ | ||
156 | while ((nextcomp - (unsigned long)read_c0_count()) | ||
157 | > (unsigned long)LONG_MAX) { | ||
158 | nextcomp += CATCHUP_INCREMENT; | ||
159 | write_c0_compare(nextcomp); | ||
160 | ehb(); | ||
161 | } | ||
162 | } | ||
163 | emt(mtflags); | ||
164 | local_irq_restore(flags); | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | |||
169 | void smtc_distribute_timer(int vpe) | ||
170 | { | ||
171 | unsigned long flags; | ||
172 | unsigned int mtflags; | ||
173 | int cpu; | ||
174 | struct clock_event_device *cd; | ||
175 | unsigned long nextstamp = 0L; | ||
176 | unsigned long reference; | ||
177 | |||
178 | |||
179 | repeat: | ||
180 | for_each_online_cpu(cpu) { | ||
181 | /* | ||
182 | * Find virtual CPUs within the current VPE who have | ||
183 | * unserviced timer requests whose time is now past. | ||
184 | */ | ||
185 | local_irq_save(flags); | ||
186 | mtflags = dmt(); | ||
187 | if (cpu_data[cpu].vpe_id == vpe && | ||
188 | ISVALID(smtc_nexttime[vpe][cpu])) { | ||
189 | reference = (unsigned long)read_c0_count(); | ||
190 | if ((smtc_nexttime[vpe][cpu] - reference) | ||
191 | > (unsigned long)LONG_MAX) { | ||
192 | smtc_nexttime[vpe][cpu] = 0L; | ||
193 | emt(mtflags); | ||
194 | local_irq_restore(flags); | ||
195 | /* | ||
196 | * We don't send IPIs to ourself. | ||
197 | */ | ||
198 | if (cpu != smp_processor_id()) { | ||
199 | smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); | ||
200 | } else { | ||
201 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
202 | cd->event_handler(cd); | ||
203 | } | ||
204 | } else { | ||
205 | /* Local to VPE but Valid Time not yet reached. */ | ||
206 | if (!ISVALID(nextstamp) || | ||
207 | IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, | ||
208 | reference)) { | ||
209 | smtc_nextinvpe[vpe] = cpu; | ||
210 | nextstamp = smtc_nexttime[vpe][cpu]; | ||
211 | } | ||
212 | emt(mtflags); | ||
213 | local_irq_restore(flags); | ||
214 | } | ||
215 | } else { | ||
216 | emt(mtflags); | ||
217 | local_irq_restore(flags); | ||
218 | |||
219 | } | ||
220 | } | ||
221 | /* Reprogram for interrupt at next soonest timestamp for VPE */ | ||
222 | if (ISVALID(nextstamp)) { | ||
223 | write_c0_compare(nextstamp); | ||
224 | ehb(); | ||
225 | if ((nextstamp - (unsigned long)read_c0_count()) | ||
226 | > (unsigned long)LONG_MAX) | ||
227 | goto repeat; | ||
228 | } | ||
229 | } | ||
230 | |||
231 | |||
232 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | ||
233 | { | ||
234 | int cpu = smp_processor_id(); | ||
235 | |||
236 | /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ | ||
237 | handle_perf_irq(1); | ||
238 | |||
239 | if (read_c0_cause() & (1 << 30)) { | ||
240 | /* Clear Count/Compare Interrupt */ | ||
241 | write_c0_compare(read_c0_compare()); | ||
242 | smtc_distribute_timer(cpu_data[cpu].vpe_id); | ||
243 | } | ||
244 | return IRQ_HANDLED; | ||
245 | } | ||
246 | |||
247 | |||
248 | int __cpuinit mips_clockevent_init(void) | ||
249 | { | ||
250 | uint64_t mips_freq = mips_hpt_frequency; | ||
251 | unsigned int cpu = smp_processor_id(); | ||
252 | struct clock_event_device *cd; | ||
253 | unsigned int irq; | ||
254 | int i; | ||
255 | int j; | ||
256 | |||
257 | if (!cpu_has_counter || !mips_hpt_frequency) | ||
258 | return -ENXIO; | ||
259 | if (cpu == 0) { | ||
260 | for (i = 0; i < num_possible_cpus(); i++) { | ||
261 | smtc_nextinvpe[i] = 0; | ||
262 | for (j = 0; j < num_possible_cpus(); j++) | ||
263 | smtc_nexttime[i][j] = 0L; | ||
264 | } | ||
265 | /* | ||
266 | * SMTC also can't have the usability test | ||
267 | * run by secondary TCs once Compare is in use. | ||
268 | */ | ||
269 | if (!c0_compare_int_usable()) | ||
270 | return -ENXIO; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * With vectored interrupts things are getting platform specific. | ||
275 | * get_c0_compare_int is a hook to allow a platform to return the | ||
276 | * interrupt number of its liking. | ||
277 | */ | ||
278 | irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; | ||
279 | if (get_c0_compare_int) | ||
280 | irq = get_c0_compare_int(); | ||
281 | |||
282 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
283 | |||
284 | cd->name = "MIPS"; | ||
285 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | ||
286 | |||
287 | /* Calculate the min / max delta */ | ||
288 | cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); | ||
289 | cd->shift = 32; | ||
290 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); | ||
291 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); | ||
292 | |||
293 | cd->rating = 300; | ||
294 | cd->irq = irq; | ||
295 | cd->cpumask = cpumask_of_cpu(cpu); | ||
296 | cd->set_next_event = mips_next_event; | ||
297 | cd->set_mode = mips_set_clock_mode; | ||
298 | cd->event_handler = mips_event_handler; | ||
299 | |||
300 | clockevents_register_device(cd); | ||
301 | |||
302 | /* | ||
303 | * On SMTC we only want to do the data structure | ||
304 | * initialization and IRQ setup once. | ||
305 | */ | ||
306 | if (cpu) | ||
307 | return 0; | ||
308 | /* | ||
309 | * And we need the hwmask associated with the c0_compare | ||
310 | * vector to be initialized. | ||
311 | */ | ||
312 | irq_hwmask[irq] = (0x100 << cp0_compare_irq); | ||
313 | if (cp0_timer_irq_installed) | ||
314 | return 0; | ||
315 | |||
316 | cp0_timer_irq_installed = 1; | ||
317 | |||
318 | setup_irq(irq, &c0_compare_irqaction); | ||
319 | |||
320 | return 0; | ||
321 | } | ||
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 11c92dc53791..e621fda8ab37 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -54,14 +54,18 @@ extern void r4k_wait(void); | |||
54 | * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes | 54 | * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes |
55 | * using this version a gamble. | 55 | * using this version a gamble. |
56 | */ | 56 | */ |
57 | static void r4k_wait_irqoff(void) | 57 | void r4k_wait_irqoff(void) |
58 | { | 58 | { |
59 | local_irq_disable(); | 59 | local_irq_disable(); |
60 | if (!need_resched()) | 60 | if (!need_resched()) |
61 | __asm__(" .set mips3 \n" | 61 | __asm__(" .set push \n" |
62 | " .set mips3 \n" | ||
62 | " wait \n" | 63 | " wait \n" |
63 | " .set mips0 \n"); | 64 | " .set pop \n"); |
64 | local_irq_enable(); | 65 | local_irq_enable(); |
66 | __asm__(" .globl __pastwait \n" | ||
67 | "__pastwait: \n"); | ||
68 | return; | ||
65 | } | 69 | } |
66 | 70 | ||
67 | /* | 71 | /* |
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index f886dd7f708e..01dcbe38fa01 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -282,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp) | |||
282 | and t0, a0, t1 | 282 | and t0, a0, t1 |
283 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | 283 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP |
284 | mfc0 t2, CP0_TCCONTEXT | 284 | mfc0 t2, CP0_TCCONTEXT |
285 | or t0, t0, t2 | 285 | or t2, t0, t2 |
286 | mtc0 t0, CP0_TCCONTEXT | 286 | mtc0 t2, CP0_TCCONTEXT |
287 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ | 287 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ |
288 | xor t1, t1, t0 | 288 | xor t1, t1, t0 |
289 | mtc0 t1, CP0_STATUS | 289 | mtc0 t1, CP0_STATUS |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 39b491b9ad87..897fb2b4751c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,4 +1,21 @@ | |||
1 | /* Copyright (C) 2004 Mips Technologies, Inc */ | 1 | /* |
2 | * This program is free software; you can redistribute it and/or | ||
3 | * modify it under the terms of the GNU General Public License | ||
4 | * as published by the Free Software Foundation; either version 2 | ||
5 | * of the License, or (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
15 | * | ||
16 | * Copyright (C) 2004 Mips Technologies, Inc | ||
17 | * Copyright (C) 2008 Kevin D. Kissell | ||
18 | */ | ||
2 | 19 | ||
3 | #include <linux/clockchips.h> | 20 | #include <linux/clockchips.h> |
4 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
@@ -21,7 +38,6 @@ | |||
21 | #include <asm/time.h> | 38 | #include <asm/time.h> |
22 | #include <asm/addrspace.h> | 39 | #include <asm/addrspace.h> |
23 | #include <asm/smtc.h> | 40 | #include <asm/smtc.h> |
24 | #include <asm/smtc_ipi.h> | ||
25 | #include <asm/smtc_proc.h> | 41 | #include <asm/smtc_proc.h> |
26 | 42 | ||
27 | /* | 43 | /* |
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS]; | |||
58 | 74 | ||
59 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | 75 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; |
60 | 76 | ||
61 | /* | ||
62 | * Clock interrupt "latch" buffers, per "CPU" | ||
63 | */ | ||
64 | |||
65 | static atomic_t ipi_timer_latch[NR_CPUS]; | ||
66 | 77 | ||
67 | /* | 78 | /* |
68 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate | 79 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate |
@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void) | |||
282 | * phys_cpu_present_map and the logical/physical mappings. | 293 | * phys_cpu_present_map and the logical/physical mappings. |
283 | */ | 294 | */ |
284 | 295 | ||
285 | int __init mipsmt_build_cpu_map(int start_cpu_slot) | 296 | int __init smtc_build_cpu_map(int start_cpu_slot) |
286 | { | 297 | { |
287 | int i, ntcs; | 298 | int i, ntcs; |
288 | 299 | ||
@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) | |||
325 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() | 336 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() |
326 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) | 337 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) |
327 | | TCSTATUS_A); | 338 | | TCSTATUS_A); |
328 | write_tc_c0_tccontext(0); | 339 | /* |
340 | * TCContext gets an offset from the base of the IPIQ array | ||
341 | * to be used in low-level code to detect the presence of | ||
342 | * an active IPI queue | ||
343 | */ | ||
344 | write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); | ||
329 | /* Bind tc to vpe */ | 345 | /* Bind tc to vpe */ |
330 | write_tc_c0_tcbind(vpe); | 346 | write_tc_c0_tcbind(vpe); |
331 | /* In general, all TCs should have the same cpu_data indications */ | 347 | /* In general, all TCs should have the same cpu_data indications */ |
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) | |||
336 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; | 352 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; |
337 | cpu_data[cpu].vpe_id = vpe; | 353 | cpu_data[cpu].vpe_id = vpe; |
338 | cpu_data[cpu].tc_id = tc; | 354 | cpu_data[cpu].tc_id = tc; |
355 | /* Multi-core SMTC hasn't been tested, but be prepared */ | ||
356 | cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; | ||
339 | } | 357 | } |
340 | 358 | ||
359 | /* | ||
360 | * Tweak to get Count registers in as close a sync as possible. | ||
361 | * Value seems good for 34K-class cores. | ||
362 | */ | ||
363 | |||
364 | #define CP0_SKEW 8 | ||
341 | 365 | ||
342 | void mipsmt_prepare_cpus(void) | 366 | void smtc_prepare_cpus(int cpus) |
343 | { | 367 | { |
344 | int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; | 368 | int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; |
345 | unsigned long flags; | 369 | unsigned long flags; |
@@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void) | |||
363 | IPIQ[i].head = IPIQ[i].tail = NULL; | 387 | IPIQ[i].head = IPIQ[i].tail = NULL; |
364 | spin_lock_init(&IPIQ[i].lock); | 388 | spin_lock_init(&IPIQ[i].lock); |
365 | IPIQ[i].depth = 0; | 389 | IPIQ[i].depth = 0; |
366 | atomic_set(&ipi_timer_latch[i], 0); | ||
367 | } | 390 | } |
368 | 391 | ||
369 | /* cpu_data index starts at zero */ | 392 | /* cpu_data index starts at zero */ |
370 | cpu = 0; | 393 | cpu = 0; |
371 | cpu_data[cpu].vpe_id = 0; | 394 | cpu_data[cpu].vpe_id = 0; |
372 | cpu_data[cpu].tc_id = 0; | 395 | cpu_data[cpu].tc_id = 0; |
396 | cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; | ||
373 | cpu++; | 397 | cpu++; |
374 | 398 | ||
375 | /* Report on boot-time options */ | 399 | /* Report on boot-time options */ |
@@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void) | |||
484 | write_vpe_c0_compare(0); | 508 | write_vpe_c0_compare(0); |
485 | /* Propagate Config7 */ | 509 | /* Propagate Config7 */ |
486 | write_vpe_c0_config7(read_c0_config7()); | 510 | write_vpe_c0_config7(read_c0_config7()); |
487 | write_vpe_c0_count(read_c0_count()); | 511 | write_vpe_c0_count(read_c0_count() + CP0_SKEW); |
512 | ehb(); | ||
488 | } | 513 | } |
489 | /* enable multi-threading within VPE */ | 514 | /* enable multi-threading within VPE */ |
490 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); | 515 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); |
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) | |||
585 | 610 | ||
586 | void smtc_init_secondary(void) | 611 | void smtc_init_secondary(void) |
587 | { | 612 | { |
588 | /* | ||
589 | * Start timer on secondary VPEs if necessary. | ||
590 | * plat_timer_setup has already have been invoked by init/main | ||
591 | * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that | ||
592 | * SMTC init code assigns TCs consdecutively and in ascending order | ||
593 | * to across available VPEs. | ||
594 | */ | ||
595 | if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && | ||
596 | ((read_c0_tcbind() & TCBIND_CURVPE) | ||
597 | != cpu_data[smp_processor_id() - 1].vpe_id)){ | ||
598 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); | ||
599 | } | ||
600 | |||
601 | local_irq_enable(); | 613 | local_irq_enable(); |
602 | } | 614 | } |
603 | 615 | ||
604 | void smtc_smp_finish(void) | 616 | void smtc_smp_finish(void) |
605 | { | 617 | { |
618 | int cpu = smp_processor_id(); | ||
619 | |||
620 | /* | ||
621 | * Lowest-numbered CPU per VPE starts a clock tick. | ||
622 | * Like per_cpu_trap_init() hack, this assumes that | ||
623 | * SMTC init code assigns TCs consecutively and | ||
624 | * in ascending order across available VPEs. | ||
625 | */ | ||
626 | if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) | ||
627 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); | ||
628 | |||
606 | printk("TC %d going on-line as CPU %d\n", | 629 | printk("TC %d going on-line as CPU %d\n", |
607 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); | 630 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); |
608 | } | 631 | } |
@@ -755,6 +778,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
755 | struct smtc_ipi *pipi; | 778 | struct smtc_ipi *pipi; |
756 | unsigned long flags; | 779 | unsigned long flags; |
757 | int mtflags; | 780 | int mtflags; |
781 | unsigned long tcrestart; | ||
782 | extern void r4k_wait_irqoff(void), __pastwait(void); | ||
758 | 783 | ||
759 | if (cpu == smp_processor_id()) { | 784 | if (cpu == smp_processor_id()) { |
760 | printk("Cannot Send IPI to self!\n"); | 785 | printk("Cannot Send IPI to self!\n"); |
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
771 | pipi->arg = (void *)action; | 796 | pipi->arg = (void *)action; |
772 | pipi->dest = cpu; | 797 | pipi->dest = cpu; |
773 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | 798 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { |
774 | if (type == SMTC_CLOCK_TICK) | ||
775 | atomic_inc(&ipi_timer_latch[cpu]); | ||
776 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ | 799 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ |
777 | smtc_ipi_nq(&IPIQ[cpu], pipi); | 800 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
778 | LOCK_CORE_PRA(); | 801 | LOCK_CORE_PRA(); |
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
800 | 823 | ||
801 | if ((tcstatus & TCSTATUS_IXMT) != 0) { | 824 | if ((tcstatus & TCSTATUS_IXMT) != 0) { |
802 | /* | 825 | /* |
803 | * Spin-waiting here can deadlock, | 826 | * If we're in the irq-off version of the wait |
804 | * so we queue the message for the target TC. | 827 | * loop, we need to force exit from the wait and |
828 | * do a direct post of the IPI. | ||
829 | */ | ||
830 | if (cpu_wait == r4k_wait_irqoff) { | ||
831 | tcrestart = read_tc_c0_tcrestart(); | ||
832 | if (tcrestart >= (unsigned long)r4k_wait_irqoff | ||
833 | && tcrestart < (unsigned long)__pastwait) { | ||
834 | write_tc_c0_tcrestart(__pastwait); | ||
835 | tcstatus &= ~TCSTATUS_IXMT; | ||
836 | write_tc_c0_tcstatus(tcstatus); | ||
837 | goto postdirect; | ||
838 | } | ||
839 | } | ||
840 | /* | ||
841 | * Otherwise we queue the message for the target TC | ||
842 | * to pick up when he does a local_irq_restore() | ||
805 | */ | 843 | */ |
806 | write_tc_c0_tchalt(0); | 844 | write_tc_c0_tchalt(0); |
807 | UNLOCK_CORE_PRA(); | 845 | UNLOCK_CORE_PRA(); |
808 | /* Try to reduce redundant timer interrupt messages */ | ||
809 | if (type == SMTC_CLOCK_TICK) { | ||
810 | if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){ | ||
811 | smtc_ipi_nq(&freeIPIq, pipi); | ||
812 | return; | ||
813 | } | ||
814 | } | ||
815 | smtc_ipi_nq(&IPIQ[cpu], pipi); | 846 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
816 | } else { | 847 | } else { |
817 | if (type == SMTC_CLOCK_TICK) | 848 | postdirect: |
818 | atomic_inc(&ipi_timer_latch[cpu]); | ||
819 | post_direct_ipi(cpu, pipi); | 849 | post_direct_ipi(cpu, pipi); |
820 | write_tc_c0_tchalt(0); | 850 | write_tc_c0_tchalt(0); |
821 | UNLOCK_CORE_PRA(); | 851 | UNLOCK_CORE_PRA(); |
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void) | |||
883 | smp_call_function_interrupt(); | 913 | smp_call_function_interrupt(); |
884 | } | 914 | } |
885 | 915 | ||
886 | DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); | 916 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
887 | 917 | ||
888 | void ipi_decode(struct smtc_ipi *pipi) | 918 | void ipi_decode(struct smtc_ipi *pipi) |
889 | { | 919 | { |
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi) | |||
891 | struct clock_event_device *cd; | 921 | struct clock_event_device *cd; |
892 | void *arg_copy = pipi->arg; | 922 | void *arg_copy = pipi->arg; |
893 | int type_copy = pipi->type; | 923 | int type_copy = pipi->type; |
894 | int ticks; | ||
895 | |||
896 | smtc_ipi_nq(&freeIPIq, pipi); | 924 | smtc_ipi_nq(&freeIPIq, pipi); |
897 | switch (type_copy) { | 925 | switch (type_copy) { |
898 | case SMTC_CLOCK_TICK: | 926 | case SMTC_CLOCK_TICK: |
899 | irq_enter(); | 927 | irq_enter(); |
900 | kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; | 928 | kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; |
901 | cd = &per_cpu(smtc_dummy_clockevent_device, cpu); | 929 | cd = &per_cpu(mips_clockevent_device, cpu); |
902 | ticks = atomic_read(&ipi_timer_latch[cpu]); | 930 | cd->event_handler(cd); |
903 | atomic_sub(ticks, &ipi_timer_latch[cpu]); | ||
904 | while (ticks) { | ||
905 | cd->event_handler(cd); | ||
906 | ticks--; | ||
907 | } | ||
908 | irq_exit(); | 931 | irq_exit(); |
909 | break; | 932 | break; |
910 | 933 | ||
@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi) | |||
937 | } | 960 | } |
938 | } | 961 | } |
939 | 962 | ||
963 | /* | ||
964 | * Similar to smtc_ipi_replay(), but invoked from context restore, | ||
965 | * so it reuses the current exception frame rather than setting up a | ||
966 | * new one with self_ipi. | ||
967 | */ | ||
968 | |||
940 | void deferred_smtc_ipi(void) | 969 | void deferred_smtc_ipi(void) |
941 | { | 970 | { |
942 | struct smtc_ipi *pipi; | 971 | int cpu = smp_processor_id(); |
943 | unsigned long flags; | ||
944 | /* DEBUG */ | ||
945 | int q = smp_processor_id(); | ||
946 | 972 | ||
947 | /* | 973 | /* |
948 | * Test is not atomic, but much faster than a dequeue, | 974 | * Test is not atomic, but much faster than a dequeue, |
949 | * and the vast majority of invocations will have a null queue. | 975 | * and the vast majority of invocations will have a null queue. |
976 | * If irqs were disabled when this was called, then any IPIs queued | ||
977 | * after our last test will be taken on the next irq_enable/restore. | ||
978 | * If interrupts were enabled, then any IPIs added after the | ||
979 | * last test will be taken directly. | ||
950 | */ | 980 | */ |
951 | if (IPIQ[q].head != NULL) { | 981 | |
952 | while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) { | 982 | while (IPIQ[cpu].head != NULL) { |
953 | /* ipi_decode() should be called with interrupts off */ | 983 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
954 | local_irq_save(flags); | 984 | struct smtc_ipi *pipi; |
985 | unsigned long flags; | ||
986 | |||
987 | /* | ||
988 | * It may be possible we'll come in with interrupts | ||
989 | * already enabled. | ||
990 | */ | ||
991 | local_irq_save(flags); | ||
992 | |||
993 | spin_lock(&q->lock); | ||
994 | pipi = __smtc_ipi_dq(q); | ||
995 | spin_unlock(&q->lock); | ||
996 | if (pipi != NULL) | ||
955 | ipi_decode(pipi); | 997 | ipi_decode(pipi); |
956 | local_irq_restore(flags); | 998 | /* |
957 | } | 999 | * The use of the __raw_local restore isn't |
1000 | * as obviously necessary here as in smtc_ipi_replay(), | ||
1001 | * but it's more efficient, given that we're already | ||
1002 | * running down the IPI queue. | ||
1003 | */ | ||
1004 | __raw_local_irq_restore(flags); | ||
958 | } | 1005 | } |
959 | } | 1006 | } |
960 | 1007 | ||
@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe) | |||
1066 | 1113 | ||
1067 | /* | 1114 | /* |
1068 | * SMTC-specific hacks invoked from elsewhere in the kernel. | 1115 | * SMTC-specific hacks invoked from elsewhere in the kernel. |
1069 | * | ||
1070 | * smtc_ipi_replay is called from raw_local_irq_restore which is only ever | ||
1071 | * called with interrupts disabled. We do rely on interrupts being disabled | ||
1072 | * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would | ||
1073 | * result in a recursive call to raw_local_irq_restore(). | ||
1074 | */ | 1116 | */ |
1075 | 1117 | ||
1076 | static void __smtc_ipi_replay(void) | 1118 | /* |
1119 | * smtc_ipi_replay is called from raw_local_irq_restore | ||
1120 | */ | ||
1121 | |||
1122 | void smtc_ipi_replay(void) | ||
1077 | { | 1123 | { |
1078 | unsigned int cpu = smp_processor_id(); | 1124 | unsigned int cpu = smp_processor_id(); |
1079 | 1125 | ||
1080 | /* | 1126 | /* |
1081 | * To the extent that we've ever turned interrupts off, | 1127 | * To the extent that we've ever turned interrupts off, |
1082 | * we may have accumulated deferred IPIs. This is subtle. | 1128 | * we may have accumulated deferred IPIs. This is subtle. |
1083 | * If we use the smtc_ipi_qdepth() macro, we'll get an | ||
1084 | * exact number - but we'll also disable interrupts | ||
1085 | * and create a window of failure where a new IPI gets | ||
1086 | * queued after we test the depth but before we re-enable | ||
1087 | * interrupts. So long as IXMT never gets set, however, | ||
1088 | * we should be OK: If we pick up something and dispatch | 1129 | * we should be OK: If we pick up something and dispatch |
1089 | * it here, that's great. If we see nothing, but concurrent | 1130 | * it here, that's great. If we see nothing, but concurrent |
1090 | * with this operation, another TC sends us an IPI, IXMT | 1131 | * with this operation, another TC sends us an IPI, IXMT |
1091 | * is clear, and we'll handle it as a real pseudo-interrupt | 1132 | * is clear, and we'll handle it as a real pseudo-interrupt |
1092 | * and not a pseudo-pseudo interrupt. | 1133 | * and not a pseudo-pseudo interrupt. The important thing |
1134 | * is to do the last check for a queued message *after* the | ||
1135 | * re-enabling of interrupts. | ||
1093 | */ | 1136 | */ |
1094 | if (IPIQ[cpu].depth > 0) { | 1137 | while (IPIQ[cpu].head != NULL) { |
1095 | while (1) { | 1138 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
1096 | struct smtc_ipi_q *q = &IPIQ[cpu]; | 1139 | struct smtc_ipi *pipi; |
1097 | struct smtc_ipi *pipi; | 1140 | unsigned long flags; |
1098 | extern void self_ipi(struct smtc_ipi *); | ||
1099 | |||
1100 | spin_lock(&q->lock); | ||
1101 | pipi = __smtc_ipi_dq(q); | ||
1102 | spin_unlock(&q->lock); | ||
1103 | if (!pipi) | ||
1104 | break; | ||
1105 | 1141 | ||
1142 | /* | ||
1143 | * It's just possible we'll come in with interrupts | ||
1144 | * already enabled. | ||
1145 | */ | ||
1146 | local_irq_save(flags); | ||
1147 | |||
1148 | spin_lock(&q->lock); | ||
1149 | pipi = __smtc_ipi_dq(q); | ||
1150 | spin_unlock(&q->lock); | ||
1151 | /* | ||
1152 | ** But use a raw restore here to avoid recursion. | ||
1153 | */ | ||
1154 | __raw_local_irq_restore(flags); | ||
1155 | |||
1156 | if (pipi) { | ||
1106 | self_ipi(pipi); | 1157 | self_ipi(pipi); |
1107 | smtc_cpu_stats[cpu].selfipis++; | 1158 | smtc_cpu_stats[cpu].selfipis++; |
1108 | } | 1159 | } |
1109 | } | 1160 | } |
1110 | } | 1161 | } |
1111 | 1162 | ||
1112 | void smtc_ipi_replay(void) | ||
1113 | { | ||
1114 | raw_local_irq_disable(); | ||
1115 | __smtc_ipi_replay(); | ||
1116 | } | ||
1117 | |||
1118 | EXPORT_SYMBOL(smtc_ipi_replay); | 1163 | EXPORT_SYMBOL(smtc_ipi_replay); |
1119 | 1164 | ||
1120 | void smtc_idle_loop_hook(void) | 1165 | void smtc_idle_loop_hook(void) |
@@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void) | |||
1193 | } | 1238 | } |
1194 | } | 1239 | } |
1195 | 1240 | ||
1196 | /* | ||
1197 | * Now that we limit outstanding timer IPIs, check for hung TC | ||
1198 | */ | ||
1199 | for (tc = 0; tc < NR_CPUS; tc++) { | ||
1200 | /* Don't check ourself - we'll dequeue IPIs just below */ | ||
1201 | if ((tc != smp_processor_id()) && | ||
1202 | atomic_read(&ipi_timer_latch[tc]) > timerq_limit) { | ||
1203 | if (clock_hang_reported[tc] == 0) { | ||
1204 | pdb_msg += sprintf(pdb_msg, | ||
1205 | "TC %d looks hung with timer latch at %d\n", | ||
1206 | tc, atomic_read(&ipi_timer_latch[tc])); | ||
1207 | clock_hang_reported[tc]++; | ||
1208 | } | ||
1209 | } | ||
1210 | } | ||
1211 | emt(mtflags); | 1241 | emt(mtflags); |
1212 | local_irq_restore(flags); | 1242 | local_irq_restore(flags); |
1213 | if (pdb_msg != &id_ho_db_msg[0]) | 1243 | if (pdb_msg != &id_ho_db_msg[0]) |
1214 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); | 1244 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); |
1215 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ | 1245 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ |
1216 | 1246 | ||
1217 | /* | 1247 | smtc_ipi_replay(); |
1218 | * Replay any accumulated deferred IPIs. If "Instant Replay" | ||
1219 | * is in use, there should never be any. | ||
1220 | */ | ||
1221 | #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY | ||
1222 | { | ||
1223 | unsigned long flags; | ||
1224 | |||
1225 | local_irq_save(flags); | ||
1226 | __smtc_ipi_replay(); | ||
1227 | local_irq_restore(flags); | ||
1228 | } | ||
1229 | #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */ | ||
1230 | } | 1248 | } |
1231 | 1249 | ||
1232 | void smtc_soft_dump(void) | 1250 | void smtc_soft_dump(void) |
@@ -1242,10 +1260,6 @@ void smtc_soft_dump(void) | |||
1242 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); | 1260 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); |
1243 | } | 1261 | } |
1244 | smtc_ipi_qdump(); | 1262 | smtc_ipi_qdump(); |
1245 | printk("Timer IPI Backlogs:\n"); | ||
1246 | for (i=0; i < NR_CPUS; i++) { | ||
1247 | printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i])); | ||
1248 | } | ||
1249 | printk("%d Recoveries of \"stolen\" FPU\n", | 1263 | printk("%d Recoveries of \"stolen\" FPU\n", |
1250 | atomic_read(&smtc_fpu_recoveries)); | 1264 | atomic_read(&smtc_fpu_recoveries)); |
1251 | } | 1265 | } |
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 5ea705e49454..f84a46a8ae6e 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -84,12 +84,17 @@ static void msmtc_cpus_done(void) | |||
84 | 84 | ||
85 | static void __init msmtc_smp_setup(void) | 85 | static void __init msmtc_smp_setup(void) |
86 | { | 86 | { |
87 | mipsmt_build_cpu_map(0); | 87 | /* |
88 | * we won't get the definitive value until | ||
89 | * we've run smtc_prepare_cpus later, but | ||
90 | * we would appear to need an upper bound now. | ||
91 | */ | ||
92 | smp_num_siblings = smtc_build_cpu_map(0); | ||
88 | } | 93 | } |
89 | 94 | ||
90 | static void __init msmtc_prepare_cpus(unsigned int max_cpus) | 95 | static void __init msmtc_prepare_cpus(unsigned int max_cpus) |
91 | { | 96 | { |
92 | mipsmt_prepare_cpus(); | 97 | smtc_prepare_cpus(max_cpus); |
93 | } | 98 | } |
94 | 99 | ||
95 | struct plat_smp_ops msmtc_smp_ops = { | 100 | struct plat_smp_ops msmtc_smp_ops = { |