path: root/arch/mips/kernel
author		Ralf Baechle <ralf@linux-mips.org>	2014-05-23 10:29:44 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2014-05-23 18:07:01 -0400
commit		b633648c5ad3cfbda0b3daea50d2135d44899259 (patch)
tree		6100185cae10f36a55e71c3b220fc79cfa14b7c0 /arch/mips/kernel
parent		8b2e62cc34feaaf1cac9440a93fb18ac0b1e81bc (diff)
MIPS: MT: Remove SMTC support
Nobody is maintaining SMTC anymore and there also seems to be no
userbase. Which is a pity - the SMTC technology, primarily developed by
Kevin D. Kissell <kevink@paralogos.com>, is an ingenious demonstration
of the MT ASE's power and elegance.

Based on Markos Chandras' <Markos.Chandras@imgtec.com> patch
https://patchwork.linux-mips.org/patch/6719/ which, while very similar,
no longer applied cleanly when I tried to merge it, plus some additional
post-SMTC cleanup. SMTC was a feature as tricky to remove as it was to
merge once upon a time.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile         |    2
-rw-r--r--  arch/mips/kernel/asm-offsets.c    |    3
-rw-r--r--  arch/mips/kernel/cevt-r4k.c       |   14
-rw-r--r--  arch/mips/kernel/cevt-smtc.c      |  324
-rw-r--r--  arch/mips/kernel/cpu-probe.c      |    2
-rw-r--r--  arch/mips/kernel/entry.S          |   38
-rw-r--r--  arch/mips/kernel/genex.S          |   54
-rw-r--r--  arch/mips/kernel/head.S           |   56
-rw-r--r--  arch/mips/kernel/i8259.c          |    4
-rw-r--r--  arch/mips/kernel/idle.c           |   10
-rw-r--r--  arch/mips/kernel/irq-msc01.c      |    5
-rw-r--r--  arch/mips/kernel/irq.c            |   17
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c  |    2
-rw-r--r--  arch/mips/kernel/mips-mt.c        |   18
-rw-r--r--  arch/mips/kernel/process.c        |    7
-rw-r--r--  arch/mips/kernel/r4k_switch.S     |   33
-rw-r--r--  arch/mips/kernel/rtlx-mt.c        |    1
-rw-r--r--  arch/mips/kernel/smp-cmp.c        |    9
-rw-r--r--  arch/mips/kernel/smp.c            |   13
-rw-r--r--  arch/mips/kernel/smtc-asm.S       |  133
-rw-r--r--  arch/mips/kernel/smtc-proc.c      |  102
-rw-r--r--  arch/mips/kernel/smtc.c           | 1528
-rw-r--r--  arch/mips/kernel/sync-r4k.c       |   18
-rw-r--r--  arch/mips/kernel/time.c           |    1
-rw-r--r--  arch/mips/kernel/traps.c          |   63
-rw-r--r--  arch/mips/kernel/vpe-mt.c         |   16
26 files changed, 11 insertions(+), 2462 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 277dab301cea..8f8b531bc848 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -17,7 +17,6 @@ endif
 
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
-obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
 obj-$(CONFIG_CEVT_GIC)		+= cevt-gic.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
@@ -50,7 +49,6 @@ obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o
 
 obj-$(CONFIG_MIPS_MT)		+= mips-mt.o
 obj-$(CONFIG_MIPS_MT_FPAFF)	+= mips-mt-fpaff.o
-obj-$(CONFIG_MIPS_MT_SMTC)	+= smtc.o smtc-asm.o smtc-proc.o
 obj-$(CONFIG_MIPS_MT_SMP)	+= smp-mt.o
 obj-$(CONFIG_MIPS_CMP)		+= smp-cmp.o
 obj-$(CONFIG_MIPS_CPS)		+= smp-cps.o cps-vec.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0ea75c244b48..08f897ee9a77 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -64,9 +64,6 @@ void output_ptreg_defines(void)
 	OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
 	OFFSET(PT_STATUS, pt_regs, cp0_status);
 	OFFSET(PT_CAUSE, pt_regs, cp0_cause);
-#ifdef CONFIG_MIPS_MT_SMTC
-	OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus);
-#endif /* CONFIG_MIPS_MT_SMTC */
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 	OFFSET(PT_MPL, pt_regs, mpl);
 	OFFSET(PT_MTP, pt_regs, mtp);
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 50d3f5a8d6bb..bff124ae69fa 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,17 +12,10 @@
 #include <linux/smp.h>
 #include <linux/irq.h>
 
-#include <asm/smtc_ipi.h>
 #include <asm/time.h>
 #include <asm/cevt-r4k.h>
 #include <asm/gic.h>
 
-/*
- * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
- * of these routines with SMTC-specific variants.
- */
-
-#ifndef CONFIG_MIPS_MT_SMTC
 static int mips_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
 {
@@ -36,8 +29,6 @@ static int mips_next_event(unsigned long delta,
 	return res;
 }
 
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 void mips_set_clock_mode(enum clock_event_mode mode,
 			 struct clock_event_device *evt)
 {
@@ -47,7 +38,6 @@ void mips_set_clock_mode(enum clock_event_mode mode,
 DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 int cp0_timer_irq_installed;
 
-#ifndef CONFIG_MIPS_MT_SMTC
 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
 	const int r2 = cpu_has_mips_r2;
@@ -82,8 +72,6 @@ out:
 	return IRQ_HANDLED;
 }
 
-#endif /* Not CONFIG_MIPS_MT_SMTC */
-
 struct irqaction c0_compare_irqaction = {
 	.handler = c0_compare_interrupt,
 	.flags = IRQF_PERCPU | IRQF_TIMER,
@@ -170,7 +158,6 @@ int c0_compare_int_usable(void)
 	return 1;
 }
 
-#ifndef CONFIG_MIPS_MT_SMTC
 int r4k_clockevent_init(void)
 {
 	unsigned int cpu = smp_processor_id();
@@ -225,4 +212,3 @@ int r4k_clockevent_init(void)
 	return 0;
 }
 
-#endif /* Not CONFIG_MIPS_MT_SMTC */
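The hunks above leave the generic R4K path as the only clock-event implementation.
The body of mips_next_event() is elided by the diff context; as a rough sketch of
how such a Count/Compare one-shot timer is typically programmed (an editor's
illustration assuming the usual read_c0_count()/write_c0_compare() accessors, not
a verbatim quote of the remaining code):

	cnt = read_c0_count();		/* current Count value */
	cnt += delta;			/* absolute expiry timestamp */
	write_c0_compare(cnt);		/* arm the Compare interrupt */
	/* if Count already raced past cnt, report -ETIME so the core retries */
	res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;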
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
deleted file mode 100644
index b6cf0a60d896..000000000000
--- a/arch/mips/kernel/cevt-smtc.c
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2007 MIPS Technologies, Inc.
- * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
- */
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/percpu.h>
-#include <linux/smp.h>
-#include <linux/irq.h>
-
-#include <asm/smtc_ipi.h>
-#include <asm/time.h>
-#include <asm/cevt-r4k.h>
-
-/*
- * Variant clock event timer support for SMTC on MIPS 34K, 1004K
- * or other MIPS MT cores.
- *
- * Notes on SMTC Support:
- *
- * SMTC has multiple microthread TCs pretending to be Linux CPUs.
- * But there's only one Count/Compare pair per VPE, and Compare
- * interrupts are taken opportunisitically by available TCs
- * bound to the VPE with the Count register.  The new timer
- * framework provides for global broadcasts, but we really
- * want VPE-level multicasts for best behavior. So instead
- * of invoking the high-level clock-event broadcast code,
- * this version of SMTC support uses the historical SMTC
- * multicast mechanisms "under the hood", appearing to the
- * generic clock layer as if the interrupts are per-CPU.
- *
- * The approach taken here is to maintain a set of NR_CPUS
- * virtual timers, and track which "CPU" needs to be alerted
- * at each event.
- *
- * It's unlikely that we'll see a MIPS MT core with more than
- * 2 VPEs, but we *know* that we won't need to handle more
- * VPEs than we have "CPUs".  So NCPUs arrays of NCPUs elements
- * is always going to be overkill, but always going to be enough.
- */
-
-unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
-static int smtc_nextinvpe[NR_CPUS];
-
-/*
- * Timestamps stored are absolute values to be programmed
- * into Count register.  Valid timestamps will never be zero.
- * If a Zero Count value is actually calculated, it is converted
- * to be a 1, which will introduce 1 or two CPU cycles of error
- * roughly once every four billion events, which at 1000 HZ means
- * about once every 50 days.  If that's actually a problem, one
- * could alternate squashing 0 to 1 and to -1.
- */
-
-#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
-#define ISVALID(x) ((x) != 0L)
-
-/*
- * Time comparison is subtle, as it's really truncated
- * modular arithmetic.
- */
-
-#define IS_SOONER(a, b, reference) \
-    (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
-
-/*
- * CATCHUP_INCREMENT, used when the function falls behind the counter.
- * Could be an increasing function instead of a constant;
- */
-
-#define CATCHUP_INCREMENT 64
-
-static int mips_next_event(unsigned long delta,
-			   struct clock_event_device *evt)
-{
-	unsigned long flags;
-	unsigned int mtflags;
-	unsigned long timestamp, reference, previous;
-	unsigned long nextcomp = 0L;
-	int vpe = current_cpu_data.vpe_id;
-	int cpu = smp_processor_id();
-	local_irq_save(flags);
-	mtflags = dmt();
-
-	/*
-	 * Maintain the per-TC virtual timer
-	 * and program the per-VPE shared Count register
-	 * as appropriate here...
-	 */
-	reference = (unsigned long)read_c0_count();
-	timestamp = MAKEVALID(reference + delta);
-	/*
-	 * To really model the clock, we have to catch the case
-	 * where the current next-in-VPE timestamp is the old
-	 * timestamp for the calling CPE, but the new value is
-	 * in fact later.  In that case, we have to do a full
-	 * scan and discover the new next-in-VPE CPU id and
-	 * timestamp.
-	 */
-	previous = smtc_nexttime[vpe][cpu];
-	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
-	    && IS_SOONER(previous, timestamp, reference)) {
-		int i;
-		int soonest = cpu;
-
-		/*
-		 * Update timestamp array here, so that new
-		 * value gets considered along with those of
-		 * other virtual CPUs on the VPE.
-		 */
-		smtc_nexttime[vpe][cpu] = timestamp;
-		for_each_online_cpu(i) {
-			if (ISVALID(smtc_nexttime[vpe][i])
-			    && IS_SOONER(smtc_nexttime[vpe][i],
-				smtc_nexttime[vpe][soonest], reference)) {
-				soonest = i;
-			}
-		}
-		smtc_nextinvpe[vpe] = soonest;
-		nextcomp = smtc_nexttime[vpe][soonest];
-	/*
-	 * Otherwise, we don't have to process the whole array rank,
-	 * we just have to see if the event horizon has gotten closer.
-	 */
-	} else {
-		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
-		    IS_SOONER(timestamp,
-			smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
-			smtc_nextinvpe[vpe] = cpu;
-			nextcomp = timestamp;
-		}
-		/*
-		 * Since next-in-VPE may me the same as the executing
-		 * virtual CPU, we update the array *after* checking
-		 * its value.
-		 */
-		smtc_nexttime[vpe][cpu] = timestamp;
-	}
-
-	/*
-	 * It may be that, in fact, we don't need to update Compare,
-	 * but if we do, we want to make sure we didn't fall into
-	 * a crack just behind Count.
-	 */
-	if (ISVALID(nextcomp)) {
-		write_c0_compare(nextcomp);
-		ehb();
-		/*
-		 * We never return an error, we just make sure
-		 * that we trigger the handlers as quickly as
-		 * we can if we fell behind.
-		 */
-		while ((nextcomp - (unsigned long)read_c0_count())
-			> (unsigned long)LONG_MAX) {
-			nextcomp += CATCHUP_INCREMENT;
-			write_c0_compare(nextcomp);
-			ehb();
-		}
-	}
-	emt(mtflags);
-	local_irq_restore(flags);
-	return 0;
-}
-
-
-void smtc_distribute_timer(int vpe)
-{
-	unsigned long flags;
-	unsigned int mtflags;
-	int cpu;
-	struct clock_event_device *cd;
-	unsigned long nextstamp;
-	unsigned long reference;
-
-
-repeat:
-	nextstamp = 0L;
-	for_each_online_cpu(cpu) {
-		/*
-		 * Find virtual CPUs within the current VPE who have
-		 * unserviced timer requests whose time is now past.
-		 */
-		local_irq_save(flags);
-		mtflags = dmt();
-		if (cpu_data[cpu].vpe_id == vpe &&
-		    ISVALID(smtc_nexttime[vpe][cpu])) {
-			reference = (unsigned long)read_c0_count();
-			if ((smtc_nexttime[vpe][cpu] - reference)
-			    > (unsigned long)LONG_MAX) {
-				smtc_nexttime[vpe][cpu] = 0L;
-				emt(mtflags);
-				local_irq_restore(flags);
-				/*
-				 * We don't send IPIs to ourself.
-				 */
-				if (cpu != smp_processor_id()) {
-					smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-				} else {
-					cd = &per_cpu(mips_clockevent_device, cpu);
-					cd->event_handler(cd);
-				}
-			} else {
-				/* Local to VPE but Valid Time not yet reached. */
-				if (!ISVALID(nextstamp) ||
-				    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
-				    reference)) {
-					smtc_nextinvpe[vpe] = cpu;
-					nextstamp = smtc_nexttime[vpe][cpu];
-				}
-				emt(mtflags);
-				local_irq_restore(flags);
-			}
-		} else {
-			emt(mtflags);
-			local_irq_restore(flags);
-
-		}
-	}
-	/* Reprogram for interrupt at next soonest timestamp for VPE */
-	if (ISVALID(nextstamp)) {
-		write_c0_compare(nextstamp);
-		ehb();
-		if ((nextstamp - (unsigned long)read_c0_count())
-		    > (unsigned long)LONG_MAX)
-			goto repeat;
-	}
-}
-
-
-irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
-{
-	int cpu = smp_processor_id();
-
-	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
-	handle_perf_irq(1);
-
-	if (read_c0_cause() & (1 << 30)) {
-		/* Clear Count/Compare Interrupt */
-		write_c0_compare(read_c0_compare());
-		smtc_distribute_timer(cpu_data[cpu].vpe_id);
-	}
-	return IRQ_HANDLED;
-}
-
-
-int smtc_clockevent_init(void)
-{
-	uint64_t mips_freq = mips_hpt_frequency;
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *cd;
-	unsigned int irq;
-	int i;
-	int j;
-
-	if (!cpu_has_counter || !mips_hpt_frequency)
-		return -ENXIO;
-	if (cpu == 0) {
-		for (i = 0; i < num_possible_cpus(); i++) {
-			smtc_nextinvpe[i] = 0;
-			for (j = 0; j < num_possible_cpus(); j++)
-				smtc_nexttime[i][j] = 0L;
-		}
-		/*
-		 * SMTC also can't have the usablility test
-		 * run by secondary TCs once Compare is in use.
-		 */
-		if (!c0_compare_int_usable())
-			return -ENXIO;
-	}
-
-	/*
-	 * With vectored interrupts things are getting platform specific.
-	 * get_c0_compare_int is a hook to allow a platform to return the
-	 * interrupt number of it's liking.
-	 */
-	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
-	if (get_c0_compare_int)
-		irq = get_c0_compare_int();
-
-	cd = &per_cpu(mips_clockevent_device, cpu);
-
-	cd->name		= "MIPS";
-	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
-
-	/* Calculate the min / max delta */
-	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
-	cd->shift		= 32;
-	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
-	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);
-
-	cd->rating		= 300;
-	cd->irq			= irq;
-	cd->cpumask		= cpumask_of(cpu);
-	cd->set_next_event	= mips_next_event;
-	cd->set_mode		= mips_set_clock_mode;
-	cd->event_handler	= mips_event_handler;
-
-	clockevents_register_device(cd);
-
-	/*
-	 * On SMTC we only want to do the data structure
-	 * initialization and IRQ setup once.
-	 */
-	if (cpu)
-		return 0;
-	/*
-	 * And we need the hwmask associated with the c0_compare
-	 * vector to be initialized.
-	 */
-	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
-	if (cp0_timer_irq_installed)
-		return 0;
-
-	cp0_timer_irq_installed = 1;
-
-	setup_irq(irq, &c0_compare_irqaction);
-
-	return 0;
-}
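The MAKEVALID()/IS_SOONER() machinery in the file deleted above leans on unsigned
modular arithmetic: subtracting a shared reference from two 32-bit Count timestamps
makes "a expires before b" well defined even across counter wraparound, and a
difference greater than LONG_MAX marks an event as already overdue. A small
self-contained illustration of that arithmetic (an editor's sketch in portable C,
with uint32_t standing in for the 32-bit Count register; is_sooner() is a
hypothetical stand-in for the deleted IS_SOONER() macro):

	#include <stdint.h>
	#include <assert.h>

	/* "a fires sooner than b", measured forward from ref, modulo 2^32 */
	static int is_sooner(uint32_t a, uint32_t b, uint32_t ref)
	{
		return (uint32_t)(a - ref) < (uint32_t)(b - ref);
	}

	int main(void)
	{
		uint32_t ref = 0xfffffff0u;	/* Count is about to wrap */

		/* 0x10 lies only 0x20 ticks after ref, despite being numerically smaller */
		assert(is_sooner(0xfffffff8u, 0x10u, ref));
		/* a timestamp 4 ticks *behind* ref reads as maximally distant, which */
		/* is the "fell behind, catch up" test done against LONG_MAX above */
		assert((uint32_t)((ref - 4) - ref) > (uint32_t)INT32_MAX);
		return 0;
	}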
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index f83dc70d2bc2..e8638c5b7d11 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -62,7 +62,7 @@ static inline void check_errata(void)
 	case CPU_34K:
 		/*
 		 * Erratum "RPS May Cause Incorrect Instruction Execution"
-		 * This code only handles VPE0, any SMP/SMTC/RTOS code
+		 * This code only handles VPE0, any SMP/RTOS code
 		 * making use of VPE1 will be responsable for that VPE.
 		 */
 		if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e5786858cdb6..4353d323f017 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -16,9 +16,6 @@
 #include <asm/isadep.h>
 #include <asm/thread_info.h>
 #include <asm/war.h>
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif
 
 #ifndef CONFIG_PREEMPT
 #define resume_kernel	restore_all
@@ -89,41 +86,6 @@ FEXPORT(syscall_exit)
 	bnez	t0, syscall_exit_work
 
 restore_all:				# restore full frame
-#ifdef CONFIG_MIPS_MT_SMTC
-#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
-/* Re-arm any temporarily masked interrupts not explicitly "acked" */
-	mfc0	v0, CP0_TCSTATUS
-	ori	v1, v0, TCSTATUS_IXMT
-	mtc0	v1, CP0_TCSTATUS
-	andi	v0, TCSTATUS_IXMT
-	_ehb
-	mfc0	t0, CP0_TCCONTEXT
-	DMT	9				# dmt t1
-	jal	mips_ihb
-	mfc0	t2, CP0_STATUS
-	andi	t3, t0, 0xff00
-	or	t2, t2, t3
-	mtc0	t2, CP0_STATUS
-	_ehb
-	andi	t1, t1, VPECONTROL_TE
-	beqz	t1, 1f
-	EMT
-1:
-	mfc0	v1, CP0_TCSTATUS
-	/* We set IXMT above, XOR should clear it here */
-	xori	v1, v1, TCSTATUS_IXMT
-	or	v1, v0, v1
-	mtc0	v1, CP0_TCSTATUS
-	_ehb
-	xor	t0, t0, t3
-	mtc0	t0, CP0_TCCONTEXT
-#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
-/* Detect and execute deferred IPI "interrupts" */
-	LONG_L	s0, TI_REGS($28)
-	LONG_S	sp, TI_REGS($28)
-	jal	deferred_smtc_ipi
-	LONG_S	s0, TI_REGS($28)
-#endif /* CONFIG_MIPS_MT_SMTC */
 	.set	noat
 	RESTORE_TEMP
 	RESTORE_AT
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index a9ce3408be25..ac35e12cb1f3 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -21,20 +21,6 @@
 #include <asm/war.h>
 #include <asm/thread_info.h>
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define PANIC_PIC(msg)					\
-		.set	push;				\
-		.set	nomicromips;			\
-		.set	reorder;			\
-		PTR_LA	a0,8f;				\
-		.set	noat;				\
-		PTR_LA	AT, panic;			\
-		jr	AT;				\
-9:		b	9b;				\
-		.set	pop;				\
-		TEXT(msg)
-#endif
-
 	__INIT
 
 /*
@@ -251,15 +237,6 @@ NESTED(except_vec_vi, 0, sp)
 	SAVE_AT
 	.set	push
 	.set	noreorder
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * To keep from blindly blocking *all* interrupts
-	 * during service by SMTC kernel, we also want to
-	 * pass the IM value to be cleared.
-	 */
-FEXPORT(except_vec_vi_mori)
-	ori	a0, $0, 0
-#endif /* CONFIG_MIPS_MT_SMTC */
 	PTR_LA	v1, except_vec_vi_handler
 FEXPORT(except_vec_vi_lui)
 	lui	v0, 0		/* Patched */
@@ -277,37 +254,10 @@ EXPORT(except_vec_vi_end)
 NESTED(except_vec_vi_handler, 0, sp)
 	SAVE_TEMP
 	SAVE_STATIC
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC has an interesting problem that interrupts are level-triggered,
-	 * and the CLI macro will clear EXL, potentially causing a duplicate
-	 * interrupt service invocation. So we need to clear the associated
-	 * IM bit of Status prior to doing CLI, and restore it after the
-	 * service routine has been invoked - we must assume that the
-	 * service routine will have cleared the state, and any active
-	 * level represents a new or otherwised unserviced event...
-	 */
-	mfc0	t1, CP0_STATUS
-	and	t0, a0, t1
-#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
-	mfc0	t2, CP0_TCCONTEXT
-	or	t2, t0, t2
-	mtc0	t2, CP0_TCCONTEXT
-#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
-	xor	t1, t1, t0
-	mtc0	t1, CP0_STATUS
-	_ehb
-#endif /* CONFIG_MIPS_MT_SMTC */
 	CLI
 #ifdef CONFIG_TRACE_IRQFLAGS
 	move	s0, v0
-#ifdef CONFIG_MIPS_MT_SMTC
-	move	s1, a0
-#endif
 	TRACE_IRQS_OFF
-#ifdef CONFIG_MIPS_MT_SMTC
-	move	a0, s1
-#endif
 	move	v0, s0
 #endif
 
@@ -496,9 +446,6 @@ NESTED(nmi_handler, PT_SIZE, sp)
 
 	.align	5
 	LEAF(handle_ri_rdhwr_vivt)
-#ifdef CONFIG_MIPS_MT_SMTC
-	PANIC_PIC("handle_ri_rdhwr_vivt called")
-#else
 	.set	push
 	.set	noat
 	.set	noreorder
@@ -517,7 +464,6 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	.set	pop
 	bltz	k1, handle_ri	/* slow path */
 	/* fall thru */
-#endif
 	END(handle_ri_rdhwr_vivt)
 
 	LEAF(handle_ri_rdhwr)
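The except_vec_vi_handler block removed above documents the one subtle ordering
constraint SMTC had to respect: with level-triggered interrupts, clearing EXL
re-enables delivery, so the Status.IM bit of the line being serviced must be
masked before CLI and re-armed afterwards. The same dance, written out as a
hedged C sketch (illustrative only - the real logic is the deleted assembly,
and pending_im_bit is a hypothetical stand-in for the mask passed in a0 via
except_vec_vi_mori):

	unsigned int status = read_c0_status();
	unsigned int im = pending_im_bit;	/* IM bit of the interrupt in service */

	write_c0_status(status & ~im);		/* mask it, so dropping EXL cannot re-take it */
	/* ... enable interrupts and invoke the service routine ... */
	write_c0_status(read_c0_status() | im);	/* re-arm once the level source is quiesced */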
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index e712dcf18b2d..95afd663cd45 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -35,33 +35,12 @@
  */
 	.macro	setup_c0_status set clr
 	.set	push
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * For SMTC, we need to set privilege and disable interrupts only for
-	 * the current TC, using the TCStatus register.
-	 */
-	mfc0	t0, CP0_TCSTATUS
-	/* Fortunately CU 0 is in the same place in both registers */
-	/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
-	li	t1, ST0_CU0 | 0x08001c00
-	or	t0, t1
-	/* Clear TKSU, leave IXMT */
-	xori	t0, 0x00001800
-	mtc0	t0, CP0_TCSTATUS
-	_ehb
-	/* We need to leave the global IE bit set, but clear EXL...*/
-	mfc0	t0, CP0_STATUS
-	or	t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
-	xor	t0, ST0_EXL | ST0_ERL | \clr
-	mtc0	t0, CP0_STATUS
-#else
 	mfc0	t0, CP0_STATUS
 	or	t0, ST0_CU0|\set|0x1f|\clr
 	xor	t0, 0x1f|\clr
 	mtc0	t0, CP0_STATUS
 	.set	noreorder
 	sll	zero,3			# ehb
-#endif
 	.set	pop
 	.endm
 
@@ -115,24 +94,6 @@ NESTED(kernel_entry, 16, sp)			# kernel entry point
 	jr	t0
 0:
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * In SMTC kernel, "CLI" is thread-specific, in TCStatus.
-	 * We still need to enable interrupts globally in Status,
-	 * and clear EXL/ERL.
-	 *
-	 * TCContext is used to track interrupt levels under
-	 * service in SMTC kernel. Clear for boot TC before
-	 * allowing any interrupts.
-	 */
-	mtc0	zero, CP0_TCCONTEXT
-
-	mfc0	t0, CP0_STATUS
-	ori	t0, t0, 0xff1f
-	xori	t0, t0, 0x001e
-	mtc0	t0, CP0_STATUS
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 	PTR_LA	t0, __bss_start		# clear .bss
 	LONG_S	zero, (t0)
 	PTR_LA	t1, __bss_stop - LONGSIZE
@@ -164,25 +125,8 @@ NESTED(kernel_entry, 16, sp)			# kernel entry point
 	 * function after setting up the stack and gp registers.
 	 */
 NESTED(smp_bootstrap, 16, sp)
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * Read-modify-writes of Status must be atomic, and this
-	 * is one case where CLI is invoked without EXL being
-	 * necessarily set. The CLI and setup_c0_status will
-	 * in fact be redundant for all but the first TC of
-	 * each VPE being booted.
-	 */
-	DMT	10	# dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
-	jal	mips_ihb
-#endif /* CONFIG_MIPS_MT_SMTC */
 	smp_slave_setup
 	setup_c0_status_sec
-#ifdef CONFIG_MIPS_MT_SMTC
-	andi	t2, t2, VPECONTROL_TE
-	beqz	t2, 2f
-	EMT	# emt
-2:
-#endif /* CONFIG_MIPS_MT_SMTC */
 	j	start_secondary
 	END(smp_bootstrap)
 #endif /* CONFIG_SMP */
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 2b91fe80c436..50b364897dda 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -42,9 +42,6 @@ static struct irq_chip i8259A_chip = {
 	.irq_disable		= disable_8259A_irq,
 	.irq_unmask		= enable_8259A_irq,
 	.irq_mask_ack		= mask_and_ack_8259A,
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-	.irq_set_affinity	= plat_set_irq_affinity,
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 };
 
 /*
@@ -180,7 +177,6 @@ handle_real_irq:
 		outb(cached_master_mask, PIC_MASTER_IMR);
 		outb(0x60+irq, PIC_MASTER_CMD);	/* 'Specific EOI to master */
 	}
-	smtc_im_ack_irq(irq);
 	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 	return;
 
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 5e3b653f5d9e..c4ceccfa3828 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -229,18 +229,8 @@ void __init check_wait(void)
 	}
 }
 
-static void smtc_idle_hook(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	void smtc_idle_loop_hook(void);
-
-	smtc_idle_loop_hook();
-#endif
-}
-
 void arch_cpu_idle(void)
 {
-	smtc_idle_hook();
 	if (cpu_wait)
 		cpu_wait();
 	else
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index fab40f7d2e03..4858642d543d 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -53,13 +53,9 @@ static inline void unmask_msc_irq(struct irq_data *d)
  */
 static void level_mask_and_ack_msc_irq(struct irq_data *d)
 {
-	unsigned int irq = d->irq;
-
 	mask_msc_irq(d);
 	if (!cpu_has_veic)
 		MSCIC_WRITE(MSC01_IC_EOI, 0);
-	/* This actually needs to be a call into platform code */
-	smtc_im_ack_irq(irq);
 }
 
 /*
@@ -78,7 +74,6 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d)
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
 	}
-	smtc_im_ack_irq(irq);
 }
 
 /*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index d1fea7a054be..5024fa39b861 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -73,7 +73,6 @@ void free_irqno(unsigned int irq)
  */
 void ack_bad_irq(unsigned int irq)
 {
-	smtc_im_ack_irq(irq);
 	printk("unexpected IRQ # %d\n", irq);
 }
 
@@ -142,23 +141,7 @@ void __irq_entry do_IRQ(unsigned int irq)
 {
 	irq_enter();
 	check_stack_overflow();
-	if (!smtc_handle_on_other_cpu(irq))
-		generic_handle_irq(irq);
-	irq_exit();
-}
-
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-/*
- * To avoid inefficient and in some cases pathological re-checking of
- * IRQ affinity, we have this variant that skips the affinity check.
- */
-
-void __irq_entry do_IRQ_no_affinity(unsigned int irq)
-{
-	irq_enter();
-	smtc_im_backstop(irq);
 	generic_handle_irq(irq);
 	irq_exit();
 }
 
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index cb098628aee8..362bb3707e62 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -1,5 +1,5 @@
 /*
- * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * General MIPS MT support routines, usable in AP/SP and SMVP.
  * Copyright (C) 2005 Mips Technologies, Inc
  */
 #include <linux/cpu.h>
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 6ded9bd1489c..88b1ef5f868a 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -1,5 +1,5 @@
 /*
- * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * General MIPS MT support routines, usable in AP/SP and SMVP.
  * Copyright (C) 2005 Mips Technologies, Inc
  */
 
@@ -57,9 +57,6 @@ void mips_mt_regdump(unsigned long mvpctl)
 	int tc;
 	unsigned long haltval;
 	unsigned long tcstatval;
-#ifdef CONFIG_MIPS_MT_SMTC
-	void smtc_soft_dump(void);
-#endif /* CONFIG_MIPT_MT_SMTC */
 
 	local_irq_save(flags);
 	vpflags = dvpe();
@@ -116,9 +113,6 @@ void mips_mt_regdump(unsigned long mvpctl)
 		if (!haltval)
 			write_tc_c0_tchalt(0);
 	}
-#ifdef CONFIG_MIPS_MT_SMTC
-	smtc_soft_dump();
-#endif /* CONFIG_MIPT_MT_SMTC */
 	printk("===========================\n");
 	evpe(vpflags);
 	local_irq_restore(flags);
@@ -295,21 +289,11 @@ void mips_mt_set_cpuoptions(void)
 
 void mt_cflush_lockdown(void)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
-	void smtc_cflush_lockdown(void);
-
-	smtc_cflush_lockdown();
-#endif /* CONFIG_MIPS_MT_SMTC */
 	/* FILL IN VSMP and AP/SP VERSIONS HERE */
 }
 
 void mt_cflush_release(void)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
-	void smtc_cflush_release(void);
-
-	smtc_cflush_release();
-#endif /* CONFIG_MIPS_MT_SMTC */
 	/* FILL IN VSMP and AP/SP VERSIONS HERE */
 }
 
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 60e39dc7f1eb..0a1ec0f3beff 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -140,13 +140,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	 */
 	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC restores TCStatus after Status, and the CU bits
-	 * are aliased there.
-	 */
-	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
-#endif
 	clear_tsk_thread_flag(p, TIF_USEDFPU);
 
 #ifdef CONFIG_MIPS_MT_FPAFF
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index abacac7c33ef..547c522964de 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -87,18 +87,6 @@
 
 	PTR_ADDU	t0, $28, _THREAD_SIZE - 32
 	set_saved_sp	t0, t1, t2
-#ifdef CONFIG_MIPS_MT_SMTC
-	/* Read-modify-writes of Status must be atomic on a VPE */
-	mfc0	t2, CP0_TCSTATUS
-	ori	t1, t2, TCSTATUS_IXMT
-	mtc0	t1, CP0_TCSTATUS
-	andi	t2, t2, TCSTATUS_IXMT
-	_ehb
-	DMT	8				# dmt	t0
-	move	t1,ra
-	jal	mips_ihb
-	move	ra,t1
-#endif /* CONFIG_MIPS_MT_SMTC */
 	mfc0	t1, CP0_STATUS		/* Do we really need this? */
 	li	a3, 0xff01
 	and	t1, a3
@@ -107,18 +95,6 @@
 	and	a2, a3
 	or	a2, t1
 	mtc0	a2, CP0_STATUS
-#ifdef CONFIG_MIPS_MT_SMTC
-	_ehb
-	andi	t0, t0, VPECONTROL_TE
-	beqz	t0, 1f
-	emt
-1:
-	mfc0	t1, CP0_TCSTATUS
-	xori	t1, t1, TCSTATUS_IXMT
-	or	t1, t1, t2
-	mtc0	t1, CP0_TCSTATUS
-	_ehb
-#endif /* CONFIG_MIPS_MT_SMTC */
 	move	v0, a0
 	jr	ra
 	END(resume)
@@ -176,19 +152,10 @@ LEAF(_restore_msa)
 #define FPU_DEFAULT  0x00000000
 
 LEAF(_init_fpu)
-#ifdef CONFIG_MIPS_MT_SMTC
-	/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
-	mfc0	t0, CP0_TCSTATUS
-	/* Bit position is the same for Status, TCStatus */
-	li	t1, ST0_CU1
-	or	t0, t1
-	mtc0	t0, CP0_TCSTATUS
-#else /* Normal MIPS CU1 enable */
 	mfc0	t0, CP0_STATUS
 	li	t1, ST0_CU1
 	or	t0, t1
 	mtc0	t0, CP0_STATUS
-#endif /* CONFIG_MIPS_MT_SMTC */
 	enable_fpu_hazard
 
 	li	t1, FPU_DEFAULT
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
index 9c1aca00fd54..5a66b975989e 100644
--- a/arch/mips/kernel/rtlx-mt.c
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -36,7 +36,6 @@ static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
 	unsigned long flags;
 	int i;
 
-	/* Ought not to be strictly necessary for SMTC builds */
 	local_irq_save(flags);
 	vpeflags = dvpe();
 	set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 3ef55fb7ac03..64d06f6a9adf 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -49,14 +49,11 @@ static void cmp_init_secondary(void)
 
 	/* Enable per-cpu interrupts: platform specific */
 
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_MIPS_MT_SMP
 	if (cpu_has_mipsmt)
 		c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
 			TCBIND_CURVPE;
 #endif
-#ifdef CONFIG_MIPS_MT_SMTC
-	c->tc_id  = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
-#endif
 }
 
 static void cmp_smp_finish(void)
@@ -135,10 +132,6 @@ void __init cmp_smp_setup(void)
 		unsigned int mvpconf0 = read_c0_mvpconf0();
 
 		nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-#elif defined(CONFIG_MIPS_MT_SMTC)
-		unsigned int mvpconf0 = read_c0_mvpconf0();
-
-		nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 #endif
 		smp_num_siblings = nvpe;
 	}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 0a022ee33b2a..35bb05a13f05 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -43,10 +43,6 @@
 #include <asm/time.h>
 #include <asm/setup.h>
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
 
 int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
@@ -102,12 +98,6 @@ asmlinkage void start_secondary(void)
 {
 	unsigned int cpu;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/* Only do cpu_probe for first TC of CPU */
-	if ((read_c0_tcbind() & TCBIND_CURTC) != 0)
-		__cpu_name[smp_processor_id()] = __cpu_name[0];
-	else
-#endif /* CONFIG_MIPS_MT_SMTC */
 	cpu_probe();
 	cpu_report();
 	per_cpu_trap_init(false);
@@ -238,13 +228,10 @@ static void flush_tlb_mm_ipi(void *mm)
  *  o collapses to normal function call on UP kernels
  *  o collapses to normal function call on systems with a single shared
  *    primary cache.
- *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
  */
 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
 {
-#ifndef CONFIG_MIPS_MT_SMTC
 	smp_call_function(func, info, 1);
-#endif
 }
 
 static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
deleted file mode 100644
index 2866863a39df..000000000000
--- a/arch/mips/kernel/smtc-asm.S
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Assembly Language Functions for MIPS MT SMTC support
- */
-
-/*
- * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */
-
-#include <asm/regdef.h>
-#include <asm/asmmacro.h>
-#include <asm/stackframe.h>
-#include <asm/irqflags.h>
-
-/*
- * "Software Interrupt" linkage.
- *
- * This is invoked when an "Interrupt" is sent from one TC to another,
- * where the TC to be interrupted is halted, has it's Restart address
- * and Status values saved by the "remote control" thread, then modified
- * to cause execution to begin here, in kenel mode. This code then
- * disguises the TC state as that of an exception and transfers
- * control to the general exception or vectored interrupt handler.
- */
-	.set	noreorder
-
-/*
-The __smtc_ipi_vector would use k0 and k1 as temporaries and
-1) Set EXL (this is per-VPE, so this can't be done by proxy!)
-2) Restore the K/CU and IXMT bits to the pre "exception" state
-   (EXL means no interrupts and access to the kernel map).
-3) Set EPC to be the saved value of TCRestart.
-4) Jump to the exception handler entry point passed by the sender.
-
-CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
-*/
-
-/*
- * Reviled and slandered vision: Set EXL and restore K/CU/IXMT
- * state of pre-halt thread, then save everything and call
- * thought some function pointer to imaginary_exception, which
- * will parse a register value or memory message queue to
- * deliver things like interprocessor interrupts. On return
- * from that function, jump to the global ret_from_irq code
- * to invoke the scheduler and return as appropriate.
- */
-
-#define PT_PADSLOT4 (PT_R0-8)
-#define PT_PADSLOT5 (PT_R0-4)
-
-	.text
-	.align 5
-FEXPORT(__smtc_ipi_vector)
-#ifdef CONFIG_CPU_MICROMIPS
-	nop
-#endif
-	.set	noat
-	/* Disable thread scheduling to make Status update atomic */
-	DMT	27					# dmt	k1
-	_ehb
-	/* Set EXL */
-	mfc0	k0,CP0_STATUS
-	ori	k0,k0,ST0_EXL
-	mtc0	k0,CP0_STATUS
-	_ehb
-	/* Thread scheduling now inhibited by EXL. Restore TE state. */
-	andi	k1,k1,VPECONTROL_TE
-	beqz	k1,1f
-	emt
-1:
-	/*
-	 * The IPI sender has put some information on the anticipated
-	 * kernel stack frame.  If we were in user mode, this will be
-	 * built above the saved kernel SP.  If we were already in the
-	 * kernel, it will be built above the current CPU SP.
-	 *
-	 * Were we in kernel mode, as indicated by CU0?
-	 */
-	sll	k1,k0,3
-	.set	noreorder
-	bltz	k1,2f
-	move	k1,sp
-	.set	reorder
-	/*
-	 * If previously in user mode, set CU0 and use kernel stack.
-	 */
-	li	k1,ST0_CU0
-	or	k1,k1,k0
-	mtc0	k1,CP0_STATUS
-	_ehb
-	get_saved_sp
-	/* Interrupting TC will have pre-set values in slots in the new frame */
-2:	subu	k1,k1,PT_SIZE
-	/* Load TCStatus Value */
-	lw	k0,PT_TCSTATUS(k1)
-	/* Write it to TCStatus to restore CU/KSU/IXMT state */
-	mtc0	k0,$2,1
-	_ehb
-	lw	k0,PT_EPC(k1)
-	mtc0	k0,CP0_EPC
-	/* Save all will redundantly recompute the SP, but use it for now */
-	SAVE_ALL
-	CLI
-	TRACE_IRQS_OFF
-	/* Function to be invoked passed stack pad slot 5 */
-	lw	t0,PT_PADSLOT5(sp)
-	/* Argument from sender passed in stack pad slot 4 */
-	lw	a0,PT_PADSLOT4(sp)
-	LONG_L	s0, TI_REGS($28)
-	LONG_S	sp, TI_REGS($28)
-	PTR_LA	ra, ret_from_irq
-	jr	t0
-
-/*
- * Called from idle loop to provoke processing of queued IPIs
- * First IPI message in queue passed as argument.
- */
-
-LEAF(self_ipi)
-	/* Before anything else, block interrupts */
-	mfc0	t0,CP0_TCSTATUS
-	ori	t1,t0,TCSTATUS_IXMT
-	mtc0	t1,CP0_TCSTATUS
-	_ehb
-	/* We know we're in kernel mode, so prepare stack frame */
-	subu	t1,sp,PT_SIZE
-	sw	ra,PT_EPC(t1)
-	sw	a0,PT_PADSLOT4(t1)
-	la	t2,ipi_decode
-	sw	t2,PT_PADSLOT5(t1)
-	/* Save pre-disable value of TCStatus */
-	sw	t0,PT_TCSTATUS(t1)
-	j	__smtc_ipi_vector
-	nop
-END(self_ipi)
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
deleted file mode 100644
index 38635a996cbf..000000000000
--- a/arch/mips/kernel/smtc-proc.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * /proc hooks for SMTC kernel
- * Copyright (C) 2005 Mips Technologies, Inc
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cpumask.h>
-#include <linux/interrupt.h>
-
-#include <asm/cpu.h>
-#include <asm/processor.h>
-#include <linux/atomic.h>
-#include <asm/hardirq.h>
-#include <asm/mmu_context.h>
-#include <asm/mipsregs.h>
-#include <asm/cacheflush.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
-#include <asm/smtc_proc.h>
-
-/*
- * /proc diagnostic and statistics hooks
- */
-
-/*
- * Statistics gathered
- */
-unsigned long selfipis[NR_CPUS];
-
-struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
-
-atomic_t smtc_fpu_recoveries;
-
-static int smtc_proc_show(struct seq_file *m, void *v)
-{
-	int i;
-	extern unsigned long ebase;
-
-	seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status);
-	seq_printf(m, "Config7: 0x%08x\n", read_c0_config7());
-	seq_printf(m, "EBASE: 0x%08lx\n", ebase);
-	seq_printf(m, "Counter Interrupts taken per CPU (TC)\n");
-	for (i=0; i < NR_CPUS; i++)
-		seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
-	seq_printf(m, "Self-IPIs by CPU:\n");
-	for(i = 0; i < NR_CPUS; i++)
-		seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
-	seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
-		   atomic_read(&smtc_fpu_recoveries));
-	return 0;
-}
-
-static int smtc_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, smtc_proc_show, NULL);
-}
-
-static const struct file_operations smtc_proc_fops = {
-	.open		= smtc_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-void init_smtc_stats(void)
-{
-	int i;
-
-	for (i=0; i<NR_CPUS; i++) {
-		smtc_cpu_stats[i].timerints = 0;
-		smtc_cpu_stats[i].selfipis = 0;
-	}
-
-	atomic_set(&smtc_fpu_recoveries, 0);
-
-	proc_create("smtc", 0444, NULL, &smtc_proc_fops);
-}
-
-static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
-	unsigned long action_unused, void *data)
-{
-	struct proc_cpuinfo_notifier_args *pcn = data;
-	struct seq_file *m = pcn->m;
-	unsigned long n = pcn->n;
-
-	if (!cpu_has_mipsmt)
-		return NOTIFY_OK;
-
-	seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
-	seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
-
-	return NOTIFY_OK;
-}
-
-static int __init proc_cpuinfo_notifier_init(void)
-{
-	return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
-}
-
-subsys_initcall(proc_cpuinfo_notifier_init);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
deleted file mode 100644
index c1681d65dd5c..000000000000
--- a/arch/mips/kernel/smtc.c
+++ /dev/null
@@ -1,1528 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15 *
16 * Copyright (C) 2004 Mips Technologies, Inc
17 * Copyright (C) 2008 Kevin D. Kissell
18 */
19
20#include <linux/clockchips.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/smp.h>
24#include <linux/cpumask.h>
25#include <linux/interrupt.h>
26#include <linux/kernel_stat.h>
27#include <linux/module.h>
28#include <linux/ftrace.h>
29#include <linux/slab.h>
30
31#include <asm/cpu.h>
32#include <asm/processor.h>
33#include <linux/atomic.h>
34#include <asm/hardirq.h>
35#include <asm/hazards.h>
36#include <asm/irq.h>
37#include <asm/idle.h>
38#include <asm/mmu_context.h>
39#include <asm/mipsregs.h>
40#include <asm/cacheflush.h>
41#include <asm/time.h>
42#include <asm/addrspace.h>
43#include <asm/smtc.h>
44#include <asm/smtc_proc.h>
45#include <asm/setup.h>
46
47/*
48 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
49 * in do_IRQ. These are passed in setup_irq_smtc() and stored
50 * in this table.
51 */
52unsigned long irq_hwmask[NR_IRQS];
53
54#define LOCK_MT_PRA() \
55 local_irq_save(flags); \
56 mtflags = dmt()
57
58#define UNLOCK_MT_PRA() \
59 emt(mtflags); \
60 local_irq_restore(flags)
61
62#define LOCK_CORE_PRA() \
63 local_irq_save(flags); \
64 mtflags = dvpe()
65
66#define UNLOCK_CORE_PRA() \
67 evpe(mtflags); \
68 local_irq_restore(flags)
69
70/*
71 * Data structures purely associated with SMTC parallelism
72 */
73
74
75/*
76 * Table for tracking ASIDs whose lifetime is prolonged.
77 */
78
79asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
80
81/*
82 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
83 */
84
85#define IPIBUF_PER_CPU 4
86
87struct smtc_ipi_q IPIQ[NR_CPUS];
88static struct smtc_ipi_q freeIPIq;
89
90
91/*
92 * Number of FPU contexts for each VPE
93 */
94
95static int smtc_nconf1[MAX_SMTC_VPES];
96
97
98/* Forward declarations */
99
100void ipi_decode(struct smtc_ipi *);
101static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
102static void setup_cross_vpe_interrupts(unsigned int nvpe);
103void init_smtc_stats(void);
104
105/* Global SMTC Status */
106
107unsigned int smtc_status;
108
109/* Boot command line configuration overrides */
110
111static int vpe0limit;
112static int ipibuffers;
113static int nostlb;
114static int asidmask;
115unsigned long smtc_asid_mask = 0xff;
116
117static int __init vpe0tcs(char *str)
118{
119 get_option(&str, &vpe0limit);
120
121 return 1;
122}
123
124static int __init ipibufs(char *str)
125{
126 get_option(&str, &ipibuffers);
127 return 1;
128}
129
130static int __init stlb_disable(char *s)
131{
132 nostlb = 1;
133 return 1;
134}
135
136static int __init asidmask_set(char *str)
137{
138 get_option(&str, &asidmask);
139 switch (asidmask) {
140 case 0x1:
141 case 0x3:
142 case 0x7:
143 case 0xf:
144 case 0x1f:
145 case 0x3f:
146 case 0x7f:
147 case 0xff:
148 smtc_asid_mask = (unsigned long)asidmask;
149 break;
150 default:
151 printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
152 }
153 return 1;
154}
155
156__setup("vpe0tcs=", vpe0tcs);
157__setup("ipibufs=", ipibufs);
158__setup("nostlb", stlb_disable);
159__setup("asidmask=", asidmask_set);
160
161#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
162
163static int hang_trig;
164
165static int __init hangtrig_enable(char *s)
166{
167 hang_trig = 1;
168 return 1;
169}
170
171
172__setup("hangtrig", hangtrig_enable);
173
174#define DEFAULT_BLOCKED_IPI_LIMIT 32
175
176static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
177
178static int __init tintq(char *str)
179{
180 get_option(&str, &timerq_limit);
181 return 1;
182}
183
184__setup("tintq=", tintq);
185
186static int imstuckcount[MAX_SMTC_VPES][8];
187/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
188static int vpemask[MAX_SMTC_VPES][8] = {
189 {0, 0, 1, 0, 0, 0, 0, 1},
190 {0, 0, 0, 0, 0, 0, 0, 1}
191};
192int tcnoprog[NR_CPUS];
193static atomic_t idle_hook_initialized = ATOMIC_INIT(0);
194static int clock_hang_reported[NR_CPUS];
195
196#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
197
198/*
199 * Configure shared TLB - VPC configuration bit must be set by caller
200 */
201
202static void smtc_configure_tlb(void)
203{
204 int i, tlbsiz, vpes;
205 unsigned long mvpconf0;
206 unsigned long config1val;
207
208 /* Set up ASID preservation table */
209 for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
210 for(i = 0; i < MAX_SMTC_ASIDS; i++) {
211 smtc_live_asid[vpes][i] = 0;
212 }
213 }
214 mvpconf0 = read_c0_mvpconf0();
215
216 if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
217 >> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
218 /* If we have multiple VPEs, try to share the TLB */
219 if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
220 /*
221 * If TLB sizing is programmable, shared TLB
222 * size is the total available complement.
223 * Otherwise, we have to take the sum of all
224 * static VPE TLB entries.
225 */
226 if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
227 >> MVPCONF0_PTLBE_SHIFT)) == 0) {
228 /*
229 * If there's more than one VPE, there had better
230 * be more than one TC, because we need one to bind
231 * to each VPE in turn to be able to read
232 * its configuration state!
233 */
234 settc(1);
235 /* Stop the TC from doing anything foolish */
236 write_tc_c0_tchalt(TCHALT_H);
237 mips_ihb();
238 /* No need to un-Halt - that happens later anyway */
239 for (i=0; i < vpes; i++) {
240 write_tc_c0_tcbind(i);
241 /*
242 * To be 100% sure we're really getting the right
243 * information, we exit the configuration state
244 * and do an IHB after each rebinding.
245 */
246 write_c0_mvpcontrol(
247 read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
248 mips_ihb();
249 /*
250 * Only count if the MMU Type indicated is TLB
251 */
252 if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
253 config1val = read_vpe_c0_config1();
254 tlbsiz += ((config1val >> 25) & 0x3f) + 1;
255 }
256
257 /* Put core back in configuration state */
258 write_c0_mvpcontrol(
259 read_c0_mvpcontrol() | MVPCONTROL_VPC );
260 mips_ihb();
261 }
262 }
263 write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
264 ehb();
265
266 /*
267 * Setup kernel data structures to use software total,
268 * rather than read the per-VPE Config1 value. The values
269 * for "CPU 0" gets copied to all the other CPUs as part
270 * of their initialization in smtc_cpu_setup().
271 */
272
273 /* MIPS32 limits TLB indices to 64 */
274 if (tlbsiz > 64)
275 tlbsiz = 64;
276 cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
277 smtc_status |= SMTC_TLB_SHARED;
278 local_flush_tlb_all();
279
280 printk("TLB of %d entry pairs shared by %d VPEs\n",
281 tlbsiz, vpes);
282 } else {
283 printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
284 }
285 }
286}
287
288
289/*
290 * Incrementally build the CPU map out of constituent MIPS MT cores,
291 * using the specified available VPEs and TCs. Plaform code needs
292 * to ensure that each MIPS MT core invokes this routine on reset,
293 * one at a time(!).
294 *
295 * This version of the build_cpu_map and prepare_cpus routines assumes
296 * that *all* TCs of a MIPS MT core will be used for Linux, and that
297 * they will be spread across *all* available VPEs (to minimise the
298 * loss of efficiency due to exception service serialization).
299 * An improved version would pick up configuration information and
300 * possibly leave some TCs/VPEs as "slave" processors.
301 *
302 * Use c0_MVPConf0 to find out how many TCs are available, setting up
303 * cpu_possible_mask and the logical/physical mappings.
304 */
305
306int __init smtc_build_cpu_map(int start_cpu_slot)
307{
308 int i, ntcs;
309
310 /*
311 * The CPU map isn't actually used for anything at this point,
312 * so it's not clear what else we should do apart from set
313 * everything up so that "logical" = "physical".
314 */
315 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
316 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
317 set_cpu_possible(i, true);
318 __cpu_number_map[i] = i;
319 __cpu_logical_map[i] = i;
320 }
321#ifdef CONFIG_MIPS_MT_FPAFF
322 /* Initialize map of CPUs with FPUs */
323 cpus_clear(mt_fpu_cpumask);
324#endif
325
326 /* One of those TC's is the one booting, and not a secondary... */
327 printk("%i available secondary CPU TC(s)\n", i - 1);
328
329 return i;
330}
331
332/*
333 * Common setup before any secondaries are started
334 * Make sure all CPUs are in a sensible state before we boot any of the
335 * secondaries.
336 *
337 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
338 * as possible across the available VPEs.
339 */
340
341static void smtc_tc_setup(int vpe, int tc, int cpu)
342{
343 static int cp1contexts[MAX_SMTC_VPES];
344
345 /*
346 * Make a local copy of the available FPU contexts in order
347 * to keep track of TCs that can have one.
348 */
349 if (tc == 1)
350 {
351 /*
352 * FIXME: Multi-core SMTC hasn't been tested and the
353 * maximum number of VPEs may change.
354 */
355 cp1contexts[0] = smtc_nconf1[0] - 1;
356 cp1contexts[1] = smtc_nconf1[1];
357 }
358
359 settc(tc);
360 write_tc_c0_tchalt(TCHALT_H);
361 mips_ihb();
362 write_tc_c0_tcstatus((read_tc_c0_tcstatus()
363 & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
364 | TCSTATUS_A);
365 /*
366 * TCContext gets an offset from the base of the IPIQ array
367 * to be used in low-level code to detect the presence of
368 * an active IPI queue.
369 */
370 write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
371
372 /* Bind TC to VPE. */
373 write_tc_c0_tcbind(vpe);
374
375 /* In general, all TCs should have the same cpu_data indications. */
376 memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
377
378 /* Check to see if there is a FPU context available for this TC. */
379 if (!cp1contexts[vpe])
380 cpu_data[cpu].options &= ~MIPS_CPU_FPU;
381 else
382 cp1contexts[vpe]--;
383
384 /* Store the TC and VPE into the cpu_data structure. */
385 cpu_data[cpu].vpe_id = vpe;
386 cpu_data[cpu].tc_id = tc;
387
388 /* FIXME: Multi-core SMTC hasn't been tested, but be prepared. */
389 cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
390}
391
392/*
393 * Tweak to get Count registers synced as closely as possible. The
394 * value seems good for 34K-class cores.
395 */
396
397#define CP0_SKEW 8
398
399void smtc_prepare_cpus(int cpus)
400{
401 int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
402 unsigned long flags;
403 unsigned long val;
404 int nipi;
405 struct smtc_ipi *pipi;
406
407 /* disable interrupts so we can disable MT */
408 local_irq_save(flags);
409 /* disable MT so we can configure */
410 dvpe();
411 dmt();
412
413 spin_lock_init(&freeIPIq.lock);
414
415 /*
416 * We probably don't have as many VPEs as we do SMP "CPUs",
417 * but it's possible - and in any case we'll never use more!
418 */
419 for (i=0; i<NR_CPUS; i++) {
420 IPIQ[i].head = IPIQ[i].tail = NULL;
421 spin_lock_init(&IPIQ[i].lock);
422 IPIQ[i].depth = 0;
423 IPIQ[i].resched_flag = 0; /* No reschedules queued initially */
424 }
425
426 /* cpu_data index starts at zero */
427 cpu = 0;
428 cpu_data[cpu].vpe_id = 0;
429 cpu_data[cpu].tc_id = 0;
430 cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
431 cpu++;
432
433 /* Report on boot-time options */
434 mips_mt_set_cpuoptions();
435 if (vpelimit > 0)
436 printk("Limit of %d VPEs set\n", vpelimit);
437 if (tclimit > 0)
438 printk("Limit of %d TCs set\n", tclimit);
439 if (nostlb) {
440 printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
441 }
442 if (asidmask)
443 printk("ASID mask value override to 0x%x\n", asidmask);
444
445 /* Temporary */
446#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
447 if (hang_trig)
448 printk("Logic Analyser Trigger on suspected TC hang\n");
449#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
450
451 /* Put MVPE's into 'configuration state' */
452	write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC);
453
454 val = read_c0_mvpconf0();
455 nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
456 if (vpelimit > 0 && nvpe > vpelimit)
457 nvpe = vpelimit;
458 ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
459 if (ntc > NR_CPUS)
460 ntc = NR_CPUS;
461 if (tclimit > 0 && ntc > tclimit)
462 ntc = tclimit;
463 slop = ntc % nvpe;
464 for (i = 0; i < nvpe; i++) {
465 tcpervpe[i] = ntc / nvpe;
466 if (slop) {
467			if ((slop - i) > 0) tcpervpe[i]++;
468 }
469 }
470 /* Handle command line override for VPE0 */
471 if (vpe0limit > ntc) vpe0limit = ntc;
472 if (vpe0limit > 0) {
473 int slopslop;
474 if (vpe0limit < tcpervpe[0]) {
475 /* Reducing TC count - distribute to others */
476 slop = tcpervpe[0] - vpe0limit;
477 slopslop = slop % (nvpe - 1);
478 tcpervpe[0] = vpe0limit;
479 for (i = 1; i < nvpe; i++) {
480 tcpervpe[i] += slop / (nvpe - 1);
481				if (slopslop && (slopslop - (i - 1) > 0))
482 tcpervpe[i]++;
483 }
484 } else if (vpe0limit > tcpervpe[0]) {
485 /* Increasing TC count - steal from others */
486 slop = vpe0limit - tcpervpe[0];
487 slopslop = slop % (nvpe - 1);
488 tcpervpe[0] = vpe0limit;
489 for (i = 1; i < nvpe; i++) {
490 tcpervpe[i] -= slop / (nvpe - 1);
491				if (slopslop && (slopslop - (i - 1) > 0))
492 tcpervpe[i]--;
493 }
494 }
495 }
496
497 /* Set up shared TLB */
498 smtc_configure_tlb();
499
500	for (tc = 0, vpe = 0; (vpe < nvpe) && (tc < ntc); vpe++) {
501 /* Get number of CP1 contexts for each VPE. */
502 if (tc == 0)
503 {
504 /*
505 * Do not call settc() for TC0 or the FPU context
506 * value will be incorrect. Besides, we know that
507 * we are TC0 anyway.
508 */
509 smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() &
510 VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
511 if (nvpe == 2)
512 {
513 settc(1);
514 smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() &
515 VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
516 settc(0);
517 }
518 }
519 if (tcpervpe[vpe] == 0)
520 continue;
521 if (vpe != 0)
522 printk(", ");
523 printk("VPE %d: TC", vpe);
524 for (i = 0; i < tcpervpe[vpe]; i++) {
525 /*
526 * TC 0 is bound to VPE 0 at reset,
527 * and is presumably executing this
528 * code. Leave it alone!
529 */
530 if (tc != 0) {
531 smtc_tc_setup(vpe, tc, cpu);
532 if (vpe != 0) {
533 /*
534 * Set MVP bit (possibly again). Do it
535 * here to catch CPUs that have no TCs
536 * bound to the VPE at reset. In that
537 * case, a TC must be bound to the VPE
538 * before we can set VPEControl[MVP]
539 */
540 write_vpe_c0_vpeconf0(
541 read_vpe_c0_vpeconf0() |
542 VPECONF0_MVP);
543 }
544 cpu++;
545 }
546 printk(" %d", tc);
547 tc++;
548 }
549 if (vpe != 0) {
550 /*
551 * Allow this VPE to control others.
552 */
553 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() |
554 VPECONF0_MVP);
555
556 /*
557 * Clear any stale software interrupts from VPE's Cause
558 */
559 write_vpe_c0_cause(0);
560
561 /*
562 * Clear ERL/EXL of VPEs other than 0
563 * and set restricted interrupt enable/mask.
564 */
565 write_vpe_c0_status((read_vpe_c0_status()
566 & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
567 | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
568 | ST0_IE));
569 /*
570 * set config to be the same as vpe0,
571 * particularly kseg0 coherency alg
572 */
573 write_vpe_c0_config(read_c0_config());
574 /* Clear any pending timer interrupt */
575 write_vpe_c0_compare(0);
576 /* Propagate Config7 */
577 write_vpe_c0_config7(read_c0_config7());
578 write_vpe_c0_count(read_c0_count() + CP0_SKEW);
579 ehb();
580 }
581 /* enable multi-threading within VPE */
582 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
583 /* enable the VPE */
584 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
585 }
586
587 /*
588 * Pull any physically present but unused TCs out of circulation.
589 */
590 while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
591 set_cpu_possible(tc, false);
592 set_cpu_present(tc, false);
593 tc++;
594 }
595
596 /* release config state */
597	write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC);
598
599 printk("\n");
600
601 /* Set up coprocessor affinity CPU mask(s) */
602
603#ifdef CONFIG_MIPS_MT_FPAFF
604 for (tc = 0; tc < ntc; tc++) {
605 if (cpu_data[tc].options & MIPS_CPU_FPU)
606 cpu_set(tc, mt_fpu_cpumask);
607 }
608#endif
609
610 /* set up ipi interrupts... */
611
612 /* If we have multiple VPEs running, set up the cross-VPE interrupt */
613
614 setup_cross_vpe_interrupts(nvpe);
615
616 /* Set up queue of free IPI "messages". */
617 nipi = NR_CPUS * IPIBUF_PER_CPU;
618 if (ipibuffers > 0)
619 nipi = ipibuffers;
620
621	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
622 if (pipi == NULL)
623 panic("kmalloc of IPI message buffers failed");
624 else
625 printk("IPI buffer pool of %d buffers\n", nipi);
626 for (i = 0; i < nipi; i++) {
627 smtc_ipi_nq(&freeIPIq, pipi);
628 pipi++;
629 }
630
631 /* Arm multithreading and enable other VPEs - but all TCs are Halted */
632 emt(EMT_ENABLE);
633 evpe(EVPE_ENABLE);
634 local_irq_restore(flags);
635 /* Initialize SMTC /proc statistics/diagnostics */
636 init_smtc_stats();
637}
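
The densest arithmetic in smtc_prepare_cpus() is the tcpervpe[] split: ntc TCs are divided evenly over nvpe VPEs and the remainder (the "slop") goes to the lowest-numbered VPEs, one extra TC each; the vpe0limit branch afterwards rebalances between VPE0 and the rest using the same remainder trick. A minimal sketch of the base split, with example counts:

#include <stdio.h>

int main(void)
{
	int nvpe = 2, ntc = 5;			/* example values */
	int tcpervpe[8], slop = ntc % nvpe;

	for (int i = 0; i < nvpe; i++) {
		tcpervpe[i] = ntc / nvpe;
		if (slop - i > 0)		/* first 'slop' VPEs get one extra TC */
			tcpervpe[i]++;
	}
	for (int i = 0; i < nvpe; i++)
		printf("VPE %d: %d TC(s)\n", i, tcpervpe[i]);
	return 0;				/* prints 3 and 2 */
}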
638
639
640/*
641 * Setup the PC, SP, and GP of a secondary processor and start it
642 * running!
643 * smp_bootstrap is the place to resume from
644 * __KSTK_TOS(idle) supplies the stack pointer
645 * (unsigned long)idle->thread_info supplies the gp
646 *
647 */
648void smtc_boot_secondary(int cpu, struct task_struct *idle)
649{
650 extern u32 kernelsp[NR_CPUS];
651 unsigned long flags;
652 int mtflags;
653
654 LOCK_MT_PRA();
655 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
656 dvpe();
657 }
658 settc(cpu_data[cpu].tc_id);
659
660 /* pc */
661 write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
662
663 /* stack pointer */
664 kernelsp[cpu] = __KSTK_TOS(idle);
665 write_tc_gpr_sp(__KSTK_TOS(idle));
666
667 /* global pointer */
668 write_tc_gpr_gp((unsigned long)task_thread_info(idle));
669
670 smtc_status |= SMTC_MTC_ACTIVE;
671 write_tc_c0_tchalt(0);
672 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
673 evpe(EVPE_ENABLE);
674 }
675 UNLOCK_MT_PRA();
676}
677
678void smtc_init_secondary(void)
679{
680}
681
682void smtc_smp_finish(void)
683{
684 int cpu = smp_processor_id();
685
686 /*
687 * Lowest-numbered CPU per VPE starts a clock tick.
688	 * Like the per_cpu_trap_init() hack, this assumes that
689	 * SMTC init code assigns TCs consecutively and
690 * in ascending order across available VPEs.
691 */
692 if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
693 write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
694
695 local_irq_enable();
696
697 printk("TC %d going on-line as CPU %d\n",
698 cpu_data[smp_processor_id()].tc_id, smp_processor_id());
699}
700
701void smtc_cpus_done(void)
702{
703}
704
705/*
706 * Support for SMTC-optimized driver IRQ registration
707 */
708
709/*
710 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
711 * in do_IRQ. These are passed in setup_irq_smtc() and stored
712 * in this table.
713 */
714
715int setup_irq_smtc(unsigned int irq, struct irqaction *new,
716 unsigned long hwmask)
717{
718#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
719 unsigned int vpe = current_cpu_data.vpe_id;
720
721 vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
722#endif
723 irq_hwmask[irq] = hwmask;
724
725 return setup_irq(irq, new);
726}
727
728#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
729/*
730 * Support for IRQ affinity to TCs
731 */
732
733void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
734{
735 /*
736 * If a "fast path" cache of quickly decodable affinity state
737 * is maintained, this is where it gets done, on a call up
738 * from the platform affinity code.
739 */
740}
741
742void smtc_forward_irq(struct irq_data *d)
743{
744 unsigned int irq = d->irq;
745 int target;
746
747 /*
748 * OK wise guy, now figure out how to get the IRQ
749 * to be serviced on an authorized "CPU".
750 *
751 * Ideally, to handle the situation where an IRQ has multiple
752 * eligible CPUS, we would maintain state per IRQ that would
753 * allow a fair distribution of service requests. Since the
754 * expected use model is any-or-only-one, for simplicity
755 * and efficiency, we just pick the easiest one to find.
756 */
757
758 target = cpumask_first(d->affinity);
759
760 /*
761 * We depend on the platform code to have correctly processed
762 * IRQ affinity change requests to ensure that the IRQ affinity
763 * mask has been purged of bits corresponding to nonexistent and
764 * offline "CPUs", and to TCs bound to VPEs other than the VPE
765 * connected to the physical interrupt input for the interrupt
766 * in question. Otherwise we have a nasty problem with interrupt
767 * mask management. This is best handled in non-performance-critical
768 * platform IRQ affinity setting code, to minimize interrupt-time
769 * checks.
770 */
771
772 /* If no one is eligible, service locally */
773 if (target >= NR_CPUS)
774 do_IRQ_no_affinity(irq);
775 else
776 smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
777}
778
779#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
780
781/*
782 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
783 * Within a VPE one TC can interrupt another by different approaches.
784 * The easiest to get right would probably be to make all TCs except
785 * the target IXMT and set a software interrupt, but an IXMT-based
786 * scheme requires that a handler must run before a new IPI could
787 * be sent, which would break the "broadcast" loops in MIPS MT.
788 * A more gonzo approach within a VPE is to halt the TC, extract
789 * its Restart, Status, and a couple of GPRs, and program the Restart
790 * address to emulate an interrupt.
791 *
792 * Within a VPE, one can be confident that the target TC isn't in
793 * a critical EXL state when halted, since the write to the Halt
794 * register could not have issued on the writing thread if the
795 * halting thread had EXL set. So k0 and k1 of the target TC
796 * can be used by the injection code. Across VPEs, one can't
797 * be certain that the target TC isn't in a critical exception
798 * state. So we try a two-step process of sending a software
799 * interrupt to the target VPE, which either handles the event
800 * itself (if it was the target) or injects the event within
801 * the VPE.
802 */
803
804static void smtc_ipi_qdump(void)
805{
806 int i;
807 struct smtc_ipi *temp;
808
809	for (i = 0; i < NR_CPUS; i++) {
810 pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
811 i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
812 IPIQ[i].depth);
813 temp = IPIQ[i].head;
814
815 while (temp != IPIQ[i].tail) {
816 pr_debug("%d %d %d: ", temp->type, temp->dest,
817 (int)temp->arg);
818#ifdef SMTC_IPI_DEBUG
819 pr_debug("%u %lu\n", temp->sender, temp->stamp);
820#else
821 pr_debug("\n");
822#endif
823 temp = temp->flink;
824 }
825 }
826}
827
828/*
829 * The standard atomic.h primitives don't quite do what we want
830 * here: We need an atomic add-and-return-previous-value (which
831 * could be done with atomic_add_return and a decrement) and an
832 * atomic set/zero-and-return-previous-value (which can't really
833 * be done with the atomic.h primitives). And since this is
834 * MIPS MT, we can assume that we have LL/SC.
835 */
836static inline int atomic_postincrement(atomic_t *v)
837{
838 unsigned long result;
839
840 unsigned long temp;
841
842 __asm__ __volatile__(
843 "1: ll %0, %2 \n"
844 " addu %1, %0, 1 \n"
845 " sc %1, %2 \n"
846 " beqz %1, 1b \n"
847 __WEAK_LLSC_MB
848 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
849 : "m" (v->counter)
850 : "memory");
851
852 return result;
853}
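
The LL/SC loop above is a fetch-and-add that returns the pre-increment value. In portable C11 terms it is exactly atomic_fetch_add, which makes a reasonable user-space stand-in for the kernel primitive:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int v = 41;
	int prev = atomic_fetch_add(&v, 1);	/* what the LL/SC loop does */

	printf("was %d, now %d\n", prev, atomic_load(&v));	/* 41, 42 */
	return 0;
}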
854
855void smtc_send_ipi(int cpu, int type, unsigned int action)
856{
857 int tcstatus;
858 struct smtc_ipi *pipi;
859 unsigned long flags;
860 int mtflags;
861 unsigned long tcrestart;
862 int set_resched_flag = (type == LINUX_SMP_IPI &&
863 action == SMP_RESCHEDULE_YOURSELF);
864
865 if (cpu == smp_processor_id()) {
866 printk("Cannot Send IPI to self!\n");
867 return;
868 }
869 if (set_resched_flag && IPIQ[cpu].resched_flag != 0)
870 return; /* There is a reschedule queued already */
871
872 /* Set up a descriptor, to be delivered either promptly or queued */
873 pipi = smtc_ipi_dq(&freeIPIq);
874 if (pipi == NULL) {
875 bust_spinlocks(1);
876 mips_mt_regdump(dvpe());
877 panic("IPI Msg. Buffers Depleted");
878 }
879 pipi->type = type;
880 pipi->arg = (void *)action;
881 pipi->dest = cpu;
882 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
883 /* If not on same VPE, enqueue and send cross-VPE interrupt */
884 IPIQ[cpu].resched_flag |= set_resched_flag;
885 smtc_ipi_nq(&IPIQ[cpu], pipi);
886 LOCK_CORE_PRA();
887 settc(cpu_data[cpu].tc_id);
888 write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
889 UNLOCK_CORE_PRA();
890 } else {
891 /*
892 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
893 * since ASID shootdown on the other VPE may
894 * collide with this operation.
895 */
896 LOCK_CORE_PRA();
897 settc(cpu_data[cpu].tc_id);
898 /* Halt the targeted TC */
899 write_tc_c0_tchalt(TCHALT_H);
900 mips_ihb();
901
902 /*
903 * Inspect TCStatus - if IXMT is set, we have to queue
904 * a message. Otherwise, we set up the "interrupt"
905 * of the other TC
906 */
907 tcstatus = read_tc_c0_tcstatus();
908
909 if ((tcstatus & TCSTATUS_IXMT) != 0) {
910 /*
911			 * If we're in the irq-off version of the wait
912 * loop, we need to force exit from the wait and
913 * do a direct post of the IPI.
914 */
915 if (cpu_wait == r4k_wait_irqoff) {
916 tcrestart = read_tc_c0_tcrestart();
917 if (address_is_in_r4k_wait_irqoff(tcrestart)) {
918 write_tc_c0_tcrestart(__pastwait);
919 tcstatus &= ~TCSTATUS_IXMT;
920 write_tc_c0_tcstatus(tcstatus);
921 goto postdirect;
922 }
923 }
924 /*
925 * Otherwise we queue the message for the target TC
926			 * to pick up when it does a local_irq_restore()
927 */
928 write_tc_c0_tchalt(0);
929 UNLOCK_CORE_PRA();
930 IPIQ[cpu].resched_flag |= set_resched_flag;
931 smtc_ipi_nq(&IPIQ[cpu], pipi);
932 } else {
933postdirect:
934 post_direct_ipi(cpu, pipi);
935 write_tc_c0_tchalt(0);
936 UNLOCK_CORE_PRA();
937 }
938 }
939}
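
smtc_send_ipi() thus has three outcomes: cross-VPE messages are queued and signalled with a software interrupt, same-VPE messages to an interrupt-exempt (IXMT) TC are queued for pickup at local_irq_restore() unless the target is parked in the irq-off wait loop, and everything else is injected directly into the halted TC. A sketch of just that decision tree; the predicates stand in for the TCBind/TCStatus/TCRestart reads above:

#include <stdio.h>

enum delivery { QUEUE_CROSS_VPE, QUEUE_FOR_RESTORE, POST_DIRECT };

static enum delivery choose(int same_vpe, int target_ixmt, int in_irqoff_wait)
{
	if (!same_vpe)
		return QUEUE_CROSS_VPE;	  /* enqueue, then fire C_SW1 at the VPE */
	if (target_ixmt && !in_irqoff_wait)
		return QUEUE_FOR_RESTORE; /* drained at local_irq_restore() */
	return POST_DIRECT;		  /* halt the TC, forge an "interrupt" */
}

int main(void)
{
	printf("%d %d %d\n",
	       choose(0, 0, 0),		/* cross-VPE: 0 */
	       choose(1, 1, 0),		/* same VPE, IXMT set: 1 */
	       choose(1, 0, 0));	/* same VPE, interruptible: 2 */
	return 0;
}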
940
941/*
942 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
943 */
944static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
945{
946 struct pt_regs *kstack;
947 unsigned long tcstatus;
948 unsigned long tcrestart;
949 extern u32 kernelsp[NR_CPUS];
950 extern void __smtc_ipi_vector(void);
951//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
952
953 /* Extract Status, EPC from halted TC */
954 tcstatus = read_tc_c0_tcstatus();
955 tcrestart = read_tc_c0_tcrestart();
956 /* If TCRestart indicates a WAIT instruction, advance the PC */
957 if ((tcrestart & 0x80000000)
958 && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
959 tcrestart += 4;
960 }
961 /*
962 * Save on TC's future kernel stack
963 *
964	 * The CU bit of Status indicates that the TC was
965	 * already running on a kernel stack...
966 */
967 if (tcstatus & ST0_CU0) {
968 /* Note that this "- 1" is pointer arithmetic */
969 kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
970 } else {
971 kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
972 }
973
974 kstack->cp0_epc = (long)tcrestart;
975 /* Save TCStatus */
976 kstack->cp0_tcstatus = tcstatus;
977	/* Pass token of operation to be performed in the kernel stack pad area */
978 kstack->pad0[4] = (unsigned long)pipi;
979 /* Pass address of function to be called likewise */
980 kstack->pad0[5] = (unsigned long)&ipi_decode;
981 /* Set interrupt exempt and kernel mode */
982 tcstatus |= TCSTATUS_IXMT;
983 tcstatus &= ~TCSTATUS_TKSU;
984 write_tc_c0_tcstatus(tcstatus);
985 ehb();
986 /* Set TC Restart address to be SMTC IPI vector */
987 write_tc_c0_tcrestart(__smtc_ipi_vector);
988}
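
The hex test near the top of post_direct_ipi() recognizes a WAIT instruction at the restart PC: the mask 0xfe00003f keeps the major opcode, the CO bit and the function field, and 0x42000020 is COP0 (010000) with CO=1 and funct 100000, the canonical wait encoding, so a matching TC has its restart address advanced past the instruction. A quick check of that decoding:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t wait_insn = 0x42000020;	/* canonical "wait" encoding */

	assert((wait_insn >> 26) == 0x10);	/* COP0 major opcode */
	assert((wait_insn >> 25) & 1);		/* CO bit set */
	assert((wait_insn & 0x3f) == 0x20);	/* funct = WAIT */
	assert((wait_insn & 0xfe00003f) == 0x42000020);
	return 0;
}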
989
990static void ipi_resched_interrupt(void)
991{
992 scheduler_ipi();
993}
994
995static void ipi_call_interrupt(void)
996{
997 /* Invoke generic function invocation code in smp.c */
998 smp_call_function_interrupt();
999}
1000
1001DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
1002
1003static void __irq_entry smtc_clock_tick_interrupt(void)
1004{
1005 unsigned int cpu = smp_processor_id();
1006 struct clock_event_device *cd;
1007 int irq = MIPS_CPU_IRQ_BASE + 1;
1008
1009 irq_enter();
1010 kstat_incr_irq_this_cpu(irq);
1011 cd = &per_cpu(mips_clockevent_device, cpu);
1012 cd->event_handler(cd);
1013 irq_exit();
1014}
1015
1016void ipi_decode(struct smtc_ipi *pipi)
1017{
1018 void *arg_copy = pipi->arg;
1019 int type_copy = pipi->type;
1020
1021 smtc_ipi_nq(&freeIPIq, pipi);
1022
1023 switch (type_copy) {
1024 case SMTC_CLOCK_TICK:
1025 smtc_clock_tick_interrupt();
1026 break;
1027
1028 case LINUX_SMP_IPI:
1029 switch ((int)arg_copy) {
1030 case SMP_RESCHEDULE_YOURSELF:
1031 ipi_resched_interrupt();
1032 break;
1033 case SMP_CALL_FUNCTION:
1034 ipi_call_interrupt();
1035 break;
1036 default:
1037 printk("Impossible SMTC IPI Argument %p\n", arg_copy);
1038 break;
1039 }
1040 break;
1041#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
1042 case IRQ_AFFINITY_IPI:
1043 /*
1044 * Accept a "forwarded" interrupt that was initially
1045		 * taken by a TC that doesn't have affinity for the IRQ.
1046 */
1047 do_IRQ_no_affinity((int)arg_copy);
1048 break;
1049#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
1050 default:
1051 printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
1052 break;
1053 }
1054}
1055
1056/*
1057 * Similar to smtc_ipi_replay(), but invoked from context restore,
1058 * so it reuses the current exception frame rather than set up a
1059 * new one with self_ipi.
1060 */
1061
1062void deferred_smtc_ipi(void)
1063{
1064 int cpu = smp_processor_id();
1065
1066 /*
1067 * Test is not atomic, but much faster than a dequeue,
1068 * and the vast majority of invocations will have a null queue.
1069 * If irq_disabled when this was called, then any IPIs queued
1070 * after we test last will be taken on the next irq_enable/restore.
1071 * If interrupts were enabled, then any IPIs added after the
1072 * last test will be taken directly.
1073 */
1074
1075 while (IPIQ[cpu].head != NULL) {
1076 struct smtc_ipi_q *q = &IPIQ[cpu];
1077 struct smtc_ipi *pipi;
1078 unsigned long flags;
1079
1080 /*
1081 * It may be possible we'll come in with interrupts
1082 * already enabled.
1083 */
1084 local_irq_save(flags);
1085 spin_lock(&q->lock);
1086 pipi = __smtc_ipi_dq(q);
1087 spin_unlock(&q->lock);
1088 if (pipi != NULL) {
1089 if (pipi->type == LINUX_SMP_IPI &&
1090 (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
1091 IPIQ[cpu].resched_flag = 0;
1092 ipi_decode(pipi);
1093 }
1094 /*
1095 * The use of the __raw_local restore isn't
1096 * as obviously necessary here as in smtc_ipi_replay(),
1097 * but it's more efficient, given that we're already
1098 * running down the IPI queue.
1099 */
1100 __arch_local_irq_restore(flags);
1101 }
1102}
1103
1104/*
1105 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
1106 * set via cross-VPE MTTR manipulation of the Cause register. It would be
1107 * in some regards preferable to have external logic for "doorbell" hardware
1108 * interrupts.
1109 */
1110
1111static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
1112
1113static irqreturn_t ipi_interrupt(int irq, void *dev_id)
1114{
1115 int my_vpe = cpu_data[smp_processor_id()].vpe_id;
1116 int my_tc = cpu_data[smp_processor_id()].tc_id;
1117 int cpu;
1118 struct smtc_ipi *pipi;
1119 unsigned long tcstatus;
1120 int sent;
1121 unsigned long flags;
1122 unsigned int mtflags;
1123 unsigned int vpflags;
1124
1125 /*
1126 * So long as cross-VPE interrupts are done via
1127 * MFTR/MTTR read-modify-writes of Cause, we need
1128 * to stop other VPEs whenever the local VPE does
1129 * anything similar.
1130 */
1131 local_irq_save(flags);
1132 vpflags = dvpe();
1133 clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
1134 set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
1135 irq_enable_hazard();
1136 evpe(vpflags);
1137 local_irq_restore(flags);
1138
1139 /*
1140 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
1141 * queued for TCs on this VPE other than the current one.
1142 * Return-from-interrupt should cause us to drain the queue
1143 * for the current TC, so we ought not to have to do it explicitly here.
1144 */
1145
1146 for_each_online_cpu(cpu) {
1147 if (cpu_data[cpu].vpe_id != my_vpe)
1148 continue;
1149
1150 pipi = smtc_ipi_dq(&IPIQ[cpu]);
1151 if (pipi != NULL) {
1152 if (cpu_data[cpu].tc_id != my_tc) {
1153 sent = 0;
1154 LOCK_MT_PRA();
1155 settc(cpu_data[cpu].tc_id);
1156 write_tc_c0_tchalt(TCHALT_H);
1157 mips_ihb();
1158 tcstatus = read_tc_c0_tcstatus();
1159 if ((tcstatus & TCSTATUS_IXMT) == 0) {
1160 post_direct_ipi(cpu, pipi);
1161 sent = 1;
1162 }
1163 write_tc_c0_tchalt(0);
1164 UNLOCK_MT_PRA();
1165 if (!sent) {
1166 smtc_ipi_req(&IPIQ[cpu], pipi);
1167 }
1168 } else {
1169 /*
1170 * ipi_decode() should be called
1171 * with interrupts off
1172 */
1173 local_irq_save(flags);
1174 if (pipi->type == LINUX_SMP_IPI &&
1175 (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
1176 IPIQ[cpu].resched_flag = 0;
1177 ipi_decode(pipi);
1178 local_irq_restore(flags);
1179 }
1180 }
1181 }
1182
1183 return IRQ_HANDLED;
1184}
1185
1186static void ipi_irq_dispatch(void)
1187{
1188 do_IRQ(cpu_ipi_irq);
1189}
1190
1191static struct irqaction irq_ipi = {
1192 .handler = ipi_interrupt,
1193 .flags = IRQF_PERCPU,
1194 .name = "SMTC_IPI"
1195};
1196
1197static void setup_cross_vpe_interrupts(unsigned int nvpe)
1198{
1199 if (nvpe < 1)
1200 return;
1201
1202 if (!cpu_has_vint)
1203 panic("SMTC Kernel requires Vectored Interrupt support");
1204
1205 set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
1206
1207 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
1208
1209 irq_set_handler(cpu_ipi_irq, handle_percpu_irq);
1210}
1211
1212/*
1213 * SMTC-specific hacks invoked from elsewhere in the kernel.
1214 */
1215
1216 /*
1217 * smtc_ipi_replay is called from raw_local_irq_restore
1218 */
1219
1220void smtc_ipi_replay(void)
1221{
1222 unsigned int cpu = smp_processor_id();
1223
1224 /*
1225 * To the extent that we've ever turned interrupts off,
1226 * we may have accumulated deferred IPIs. This is subtle.
1227	 * We should be OK: if we pick up something and dispatch
1228 * it here, that's great. If we see nothing, but concurrent
1229 * with this operation, another TC sends us an IPI, IXMT
1230 * is clear, and we'll handle it as a real pseudo-interrupt
1231 * and not a pseudo-pseudo interrupt. The important thing
1232 * is to do the last check for queued message *after* the
1233 * re-enabling of interrupts.
1234 */
1235 while (IPIQ[cpu].head != NULL) {
1236 struct smtc_ipi_q *q = &IPIQ[cpu];
1237 struct smtc_ipi *pipi;
1238 unsigned long flags;
1239
1240 /*
1241 * It's just possible we'll come in with interrupts
1242 * already enabled.
1243 */
1244 local_irq_save(flags);
1245
1246 spin_lock(&q->lock);
1247 pipi = __smtc_ipi_dq(q);
1248 spin_unlock(&q->lock);
1249 /*
1250		 * But use a raw restore here to avoid recursion.
1251 */
1252 __arch_local_irq_restore(flags);
1253
1254 if (pipi) {
1255 self_ipi(pipi);
1256 smtc_cpu_stats[cpu].selfipis++;
1257 }
1258 }
1259}
1260
1261EXPORT_SYMBOL(smtc_ipi_replay);
1262
1263void smtc_idle_loop_hook(void)
1264{
1265#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
1266 int im;
1267 int flags;
1268 int mtflags;
1269 int bit;
1270 int vpe;
1271 int tc;
1272 int hook_ntcs;
1273 /*
1274 * printk within DMT-protected regions can deadlock,
1275 * so buffer diagnostic messages for later output.
1276 */
1277 char *pdb_msg;
1278 char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
1279
1280 if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
1281 if (atomic_add_return(1, &idle_hook_initialized) == 1) {
1282 int mvpconf0;
1283 /* Tedious stuff to just do once */
1284 mvpconf0 = read_c0_mvpconf0();
1285 hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
1286 if (hook_ntcs > NR_CPUS)
1287 hook_ntcs = NR_CPUS;
1288 for (tc = 0; tc < hook_ntcs; tc++) {
1289 tcnoprog[tc] = 0;
1290 clock_hang_reported[tc] = 0;
1291 }
1292 for (vpe = 0; vpe < 2; vpe++)
1293 for (im = 0; im < 8; im++)
1294 imstuckcount[vpe][im] = 0;
1295 printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
1296 atomic_set(&idle_hook_initialized, 1000);
1297 } else {
1298 /* Someone else is initializing in parallel - let 'em finish */
1299 while (atomic_read(&idle_hook_initialized) < 1000)
1300 ;
1301 }
1302 }
1303
1304 /* Have we stupidly left IXMT set somewhere? */
1305 if (read_c0_tcstatus() & 0x400) {
1306 write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
1307 ehb();
1308 printk("Dangling IXMT in cpu_idle()\n");
1309 }
1310
1311 /* Have we stupidly left an IM bit turned off? */
1312#define IM_LIMIT 2000
1313 local_irq_save(flags);
1314 mtflags = dmt();
1315 pdb_msg = &id_ho_db_msg[0];
1316 im = read_c0_status();
1317 vpe = current_cpu_data.vpe_id;
1318 for (bit = 0; bit < 8; bit++) {
1319 /*
1320 * In current prototype, I/O interrupts
1321 * are masked for VPE > 0
1322 */
1323 if (vpemask[vpe][bit]) {
1324 if (!(im & (0x100 << bit)))
1325 imstuckcount[vpe][bit]++;
1326 else
1327 imstuckcount[vpe][bit] = 0;
1328 if (imstuckcount[vpe][bit] > IM_LIMIT) {
1329 set_c0_status(0x100 << bit);
1330 ehb();
1331 imstuckcount[vpe][bit] = 0;
1332 pdb_msg += sprintf(pdb_msg,
1333 "Dangling IM %d fixed for VPE %d\n", bit,
1334 vpe);
1335 }
1336 }
1337 }
1338
1339 emt(mtflags);
1340 local_irq_restore(flags);
1341 if (pdb_msg != &id_ho_db_msg[0])
1342 printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
1343#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
1344
1345 smtc_ipi_replay();
1346}
1347
1348void smtc_soft_dump(void)
1349{
1350 int i;
1351
1352 printk("Counter Interrupts taken per CPU (TC)\n");
1353	for (i = 0; i < NR_CPUS; i++) {
1354 printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
1355 }
1356 printk("Self-IPI invocations:\n");
1357	for (i = 0; i < NR_CPUS; i++) {
1358 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1359 }
1360 smtc_ipi_qdump();
1361 printk("%d Recoveries of \"stolen\" FPU\n",
1362 atomic_read(&smtc_fpu_recoveries));
1363}
1364
1365
1366/*
1367 * TLB management routines special to SMTC
1368 */
1369
1370void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1371{
1372 unsigned long flags, mtflags, tcstat, prevhalt, asid;
1373 int tlb, i;
1374
1375 /*
1376 * It would be nice to be able to use a spinlock here,
1377 * but this is invoked from within TLB flush routines
1378 * that protect themselves with DVPE, so if a lock is
1379 * held by another TC, it'll never be freed.
1380 *
1381 * DVPE/DMT must not be done with interrupts enabled,
1382	 * so even though most callers will already have disabled
1383	 * them, let's be really careful...
1384 */
1385
1386 local_irq_save(flags);
1387 if (smtc_status & SMTC_TLB_SHARED) {
1388 mtflags = dvpe();
1389 tlb = 0;
1390 } else {
1391 mtflags = dmt();
1392 tlb = cpu_data[cpu].vpe_id;
1393 }
1394 asid = asid_cache(cpu);
1395
1396 do {
1397		if (!((asid += ASID_INC) & ASID_MASK)) {
1398 if (cpu_has_vtag_icache)
1399 flush_icache_all();
1400 /* Traverse all online CPUs (hack requires contiguous range) */
1401 for_each_online_cpu(i) {
1402 /*
1403 * We don't need to worry about our own CPU, nor those of
1404				 * CPUs that don't share our TLB.
1405 */
1406 if ((i != smp_processor_id()) &&
1407 ((smtc_status & SMTC_TLB_SHARED) ||
1408 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
1409 settc(cpu_data[i].tc_id);
1410 prevhalt = read_tc_c0_tchalt() & TCHALT_H;
1411 if (!prevhalt) {
1412 write_tc_c0_tchalt(TCHALT_H);
1413 mips_ihb();
1414 }
1415 tcstat = read_tc_c0_tcstatus();
1416 smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
1417 if (!prevhalt)
1418 write_tc_c0_tchalt(0);
1419 }
1420 }
1421 if (!asid) /* fix version if needed */
1422 asid = ASID_FIRST_VERSION;
1423 local_flush_tlb_all(); /* start new asid cycle */
1424 }
1425 } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
1426
1427 /*
1428 * SMTC shares the TLB within VPEs and possibly across all VPEs.
1429 */
1430 for_each_online_cpu(i) {
1431 if ((smtc_status & SMTC_TLB_SHARED) ||
1432 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
1433 cpu_context(i, mm) = asid_cache(i) = asid;
1434 }
1435
1436 if (smtc_status & SMTC_TLB_SHARED)
1437 evpe(mtflags);
1438 else
1439 emt(mtflags);
1440 local_irq_restore(flags);
1441}
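
The generation arithmetic in the loop above is the usual MIPS ASID scheme: the low bits are the ASID proper, the high bits a "version", and a wrap of the low bits forces a flush and, on 32-bit overflow, a reset to ASID_FIRST_VERSION. A sketch of just the wrap detection, assuming the classic 8-bit ASID field:

#include <stdio.h>

#define ASID_INC	   0x1
#define ASID_MASK	   0xff
#define ASID_FIRST_VERSION 0x100

int main(void)
{
	unsigned long asid = 0x1ff;	/* version 1, last ASID of the cycle */

	if (!((asid += ASID_INC) & ASID_MASK)) {	/* low bits wrapped */
		/* here the kernel flushes caches/TLB and rechecks liveness */
		if (!asid)		/* whole counter overflowed */
			asid = ASID_FIRST_VERSION;
	}
	printf("new asid cache = 0x%lx\n", asid);	/* 0x200 */
	return 0;
}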
1442
1443/*
1444 * Invoked from macros defined in mmu_context.h
1445 * which must already have disabled interrupts
1446 * and done a DVPE or DMT as appropriate.
1447 */
1448
1449void smtc_flush_tlb_asid(unsigned long asid)
1450{
1451 int entry;
1452 unsigned long ehi;
1453
1454 entry = read_c0_wired();
1455
1456 /* Traverse all non-wired entries */
1457 while (entry < current_cpu_data.tlbsize) {
1458 write_c0_index(entry);
1459 ehb();
1460 tlb_read();
1461 ehb();
1462 ehi = read_c0_entryhi();
1463 if ((ehi & ASID_MASK) == asid) {
1464 /*
1465 * Invalidate only entries with specified ASID,
1466			 * making sure all entries differ.
1467 */
1468 write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
1469 write_c0_entrylo0(0);
1470 write_c0_entrylo1(0);
1471 mtc0_tlbw_hazard();
1472 tlb_write_indexed();
1473 }
1474 entry++;
1475 }
1476 write_c0_index(PARKED_INDEX);
1477 tlbw_use_hazard();
1478}
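
Invalidated slots are parked at CKSEG0 + (entry << (PAGE_SHIFT + 1)): a distinct unmapped kernel address per entry, so no two TLB entries can ever match the same virtual address. The extra shift bit exists because EntryHi's VPN2 names an even/odd page pair. A sketch of the parking addresses, assuming 4 KiB pages:

#include <stdio.h>

#define CKSEG0	   0x80000000UL
#define PAGE_SHIFT 12

int main(void)
{
	for (unsigned long entry = 0; entry < 4; entry++)
		printf("entry %lu -> EntryHi 0x%08lx\n",
		       entry, CKSEG0 + (entry << (PAGE_SHIFT + 1)));
	return 0;	/* 0x80000000, 0x80002000, 0x80004000, ... */
}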
1479
1480/*
1481 * Support for single-threading cache flush operations.
1482 */
1483
1484static int halt_state_save[NR_CPUS];
1485
1486/*
1487 * To really, really be sure that nothing is being done
1488 * by other TCs, halt them all. This code assumes that
1489 * a DVPE has already been done, so while their Halted
1490 * state is theoretically architecturally unstable, in
1491 * practice, it's not going to change while we're looking
1492 * at it.
1493 */
1494
1495void smtc_cflush_lockdown(void)
1496{
1497 int cpu;
1498
1499 for_each_online_cpu(cpu) {
1500 if (cpu != smp_processor_id()) {
1501 settc(cpu_data[cpu].tc_id);
1502 halt_state_save[cpu] = read_tc_c0_tchalt();
1503 write_tc_c0_tchalt(TCHALT_H);
1504 }
1505 }
1506 mips_ihb();
1507}
1508
1509/* It would be cheating to change the cpu_online states during a flush! */
1510
1511void smtc_cflush_release(void)
1512{
1513 int cpu;
1514
1515 /*
1516 * Start with a hazard barrier to ensure
1517 * that all CACHE ops have played through.
1518 */
1519 mips_ihb();
1520
1521 for_each_online_cpu(cpu) {
1522 if (cpu != smp_processor_id()) {
1523 settc(cpu_data[cpu].tc_id);
1524 write_tc_c0_tchalt(halt_state_save[cpu]);
1525 }
1526 }
1527 mips_ihb();
1528}
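
smtc_cflush_lockdown() and smtc_cflush_release() form a save/halt ... restore bracket: a TC that was already halted before the flush must be left halted afterwards, which is why the prior TCHalt value is saved rather than assumed to be zero. The shape of that bracket in miniature, with invented halt states:

#include <stdio.h>

#define NCPUS 4

static int tchalt[NCPUS] = { 0, 1, 0, 0 };	/* 1 = already halted */
static int saved[NCPUS];

int main(void)
{
	int self = 0;

	for (int cpu = 0; cpu < NCPUS; cpu++)	/* lockdown */
		if (cpu != self) {
			saved[cpu] = tchalt[cpu];
			tchalt[cpu] = 1;
		}
	/* ... single-threaded cache flush runs here ... */
	for (int cpu = 0; cpu < NCPUS; cpu++)	/* release */
		if (cpu != self)
			tchalt[cpu] = saved[cpu];
	printf("TC1 halt state restored to %d\n", tchalt[1]);	/* 1 */
	return 0;
}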
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index c24ad5f4b324..2242bdd4370e 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -6,8 +6,6 @@
6 * not have done anything significant (but they may have had interrupts 6 * not have done anything significant (but they may have had interrupts
7 * enabled briefly - prom_smp_finish() should not be responsible for enabling 7 * enabled briefly - prom_smp_finish() should not be responsible for enabling
8 * interrupts...) 8 * interrupts...)
9 *
10 * FIXME: broken for SMTC
11 */ 9 */
12 10
13#include <linux/kernel.h> 11#include <linux/kernel.h>
@@ -33,14 +31,6 @@ void synchronise_count_master(int cpu)
33 unsigned long flags; 31 unsigned long flags;
34 unsigned int initcount; 32 unsigned int initcount;
35 33
36#ifdef CONFIG_MIPS_MT_SMTC
37 /*
38 * SMTC needs to synchronise per VPE, not per CPU
39 * ignore for now
40 */
41 return;
42#endif
43
44 printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); 34 printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
45 35
46 local_irq_save(flags); 36 local_irq_save(flags);
@@ -110,14 +100,6 @@ void synchronise_count_slave(int cpu)
110 int i; 100 int i;
111 unsigned int initcount; 101 unsigned int initcount;
112 102
113#ifdef CONFIG_MIPS_MT_SMTC
114 /*
115 * SMTC needs to synchronise per VPE, not per CPU
116 * ignore for now
117 */
118 return;
119#endif
120
121 /* 103 /*
122 * Not every cpu is online at the time this gets called, 104 * Not every cpu is online at the time this gets called,
123 * so we first wait for the master to say everyone is ready 105 * so we first wait for the master to say everyone is ready
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index dcb8e5d3bb8a..8d0170969e22 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -26,7 +26,6 @@
26#include <asm/cpu-features.h> 26#include <asm/cpu-features.h>
27#include <asm/cpu-type.h> 27#include <asm/cpu-type.h>
28#include <asm/div64.h> 28#include <asm/div64.h>
29#include <asm/smtc_ipi.h>
30#include <asm/time.h> 29#include <asm/time.h>
31 30
32/* 31/*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 074e857ced28..3a2672907f80 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -370,9 +370,6 @@ void __noreturn die(const char *str, struct pt_regs *regs)
370{ 370{
371 static int die_counter; 371 static int die_counter;
372 int sig = SIGSEGV; 372 int sig = SIGSEGV;
373#ifdef CONFIG_MIPS_MT_SMTC
374 unsigned long dvpret;
375#endif /* CONFIG_MIPS_MT_SMTC */
376 373
377 oops_enter(); 374 oops_enter();
378 375
@@ -382,13 +379,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
382 379
383 console_verbose(); 380 console_verbose();
384 raw_spin_lock_irq(&die_lock); 381 raw_spin_lock_irq(&die_lock);
385#ifdef CONFIG_MIPS_MT_SMTC
386 dvpret = dvpe();
387#endif /* CONFIG_MIPS_MT_SMTC */
388 bust_spinlocks(1); 382 bust_spinlocks(1);
389#ifdef CONFIG_MIPS_MT_SMTC
390 mips_mt_regdump(dvpret);
391#endif /* CONFIG_MIPS_MT_SMTC */
392 383
393 printk("%s[#%d]:\n", str, ++die_counter); 384 printk("%s[#%d]:\n", str, ++die_counter);
394 show_registers(regs); 385 show_registers(regs);
@@ -1759,19 +1750,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1759 extern char rollback_except_vec_vi; 1750 extern char rollback_except_vec_vi;
1760 char *vec_start = using_rollback_handler() ? 1751 char *vec_start = using_rollback_handler() ?
1761 &rollback_except_vec_vi : &except_vec_vi; 1752 &rollback_except_vec_vi : &except_vec_vi;
1762#ifdef CONFIG_MIPS_MT_SMTC
1763 /*
1764 * We need to provide the SMTC vectored interrupt handler
1765 * not only with the address of the handler, but with the
1766 * Status.IM bit to be masked before going there.
1767 */
1768 extern char except_vec_vi_mori;
1769#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1770 const int mori_offset = &except_vec_vi_mori - vec_start + 2;
1771#else
1772 const int mori_offset = &except_vec_vi_mori - vec_start;
1773#endif
1774#endif /* CONFIG_MIPS_MT_SMTC */
1775#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) 1753#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1776 const int lui_offset = &except_vec_vi_lui - vec_start + 2; 1754 const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1777 const int ori_offset = &except_vec_vi_ori - vec_start + 2; 1755 const int ori_offset = &except_vec_vi_ori - vec_start + 2;
@@ -1795,12 +1773,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1795#else 1773#else
1796 handler_len); 1774 handler_len);
1797#endif 1775#endif
1798#ifdef CONFIG_MIPS_MT_SMTC
1799 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
1800
1801 h = (u16 *)(b + mori_offset);
1802 *h = (0x100 << n);
1803#endif /* CONFIG_MIPS_MT_SMTC */
1804 h = (u16 *)(b + lui_offset); 1776 h = (u16 *)(b + lui_offset);
1805 *h = (handler >> 16) & 0xffff; 1777 *h = (handler >> 16) & 0xffff;
1806 h = (u16 *)(b + ori_offset); 1778 h = (u16 *)(b + ori_offset);
@@ -1870,20 +1842,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
1870 unsigned int cpu = smp_processor_id(); 1842 unsigned int cpu = smp_processor_id();
1871 unsigned int status_set = ST0_CU0; 1843 unsigned int status_set = ST0_CU0;
1872 unsigned int hwrena = cpu_hwrena_impl_bits; 1844 unsigned int hwrena = cpu_hwrena_impl_bits;
1873#ifdef CONFIG_MIPS_MT_SMTC
1874 int secondaryTC = 0;
1875 int bootTC = (cpu == 0);
1876
1877 /*
1878 * Only do per_cpu_trap_init() for first TC of Each VPE.
1879 * Note that this hack assumes that the SMTC init code
1880 * assigns TCs consecutively and in ascending order.
1881 */
1882
1883 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
1884 ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
1885 secondaryTC = 1;
1886#endif /* CONFIG_MIPS_MT_SMTC */
1887 1845
1888 /* 1846 /*
1889 * Disable coprocessors and select 32-bit or 64-bit addressing 1847 * Disable coprocessors and select 32-bit or 64-bit addressing
@@ -1911,10 +1869,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
1911 if (hwrena) 1869 if (hwrena)
1912 write_c0_hwrena(hwrena); 1870 write_c0_hwrena(hwrena);
1913 1871
1914#ifdef CONFIG_MIPS_MT_SMTC
1915 if (!secondaryTC) {
1916#endif /* CONFIG_MIPS_MT_SMTC */
1917
1918 if (cpu_has_veic || cpu_has_vint) { 1872 if (cpu_has_veic || cpu_has_vint) {
1919 unsigned long sr = set_c0_status(ST0_BEV); 1873 unsigned long sr = set_c0_status(ST0_BEV);
1920 write_c0_ebase(ebase); 1874 write_c0_ebase(ebase);
@@ -1949,10 +1903,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
1949 cp0_perfcount_irq = -1; 1903 cp0_perfcount_irq = -1;
1950 } 1904 }
1951 1905
1952#ifdef CONFIG_MIPS_MT_SMTC
1953 }
1954#endif /* CONFIG_MIPS_MT_SMTC */
1955
1956 if (!cpu_data[cpu].asid_cache) 1906 if (!cpu_data[cpu].asid_cache)
1957 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 1907 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1958 1908
@@ -1961,23 +1911,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
1961 BUG_ON(current->mm); 1911 BUG_ON(current->mm);
1962 enter_lazy_tlb(&init_mm, current); 1912 enter_lazy_tlb(&init_mm, current);
1963 1913
1964#ifdef CONFIG_MIPS_MT_SMTC
1965 if (bootTC) {
1966#endif /* CONFIG_MIPS_MT_SMTC */
1967 /* Boot CPU's cache setup in setup_arch(). */ 1914 /* Boot CPU's cache setup in setup_arch(). */
1968 if (!is_boot_cpu) 1915 if (!is_boot_cpu)
1969 cpu_cache_init(); 1916 cpu_cache_init();
1970 tlb_init(); 1917 tlb_init();
1971#ifdef CONFIG_MIPS_MT_SMTC
1972 } else if (!secondaryTC) {
1973 /*
1974 * First TC in non-boot VPE must do subset of tlb_init()
1975	 * for MMU control registers.
1976 */
1977 write_c0_pagemask(PM_DEFAULT_MASK);
1978 write_c0_wired(0);
1979 }
1980#endif /* CONFIG_MIPS_MT_SMTC */
1981 TLBMISS_HANDLER_SETUP(); 1918 TLBMISS_HANDLER_SETUP();
1982} 1919}
1983 1920
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
index 949ae0e17018..2e003b11a098 100644
--- a/arch/mips/kernel/vpe-mt.c
+++ b/arch/mips/kernel/vpe-mt.c
@@ -127,9 +127,8 @@ int vpe_run(struct vpe *v)
127 clear_c0_mvpcontrol(MVPCONTROL_VPC); 127 clear_c0_mvpcontrol(MVPCONTROL_VPC);
128 128
129 /* 129 /*
130 * SMTC/SMVP kernels manage VPE enable independently, 130 * SMVP kernels manage VPE enable independently, but uniprocessor
131 * but uniprocessor kernels need to turn it on, even 131 * kernels need to turn it on, even if that wasn't the pre-dvpe() state.
132 * if that wasn't the pre-dvpe() state.
133 */ 132 */
134#ifdef CONFIG_SMP 133#ifdef CONFIG_SMP
135 evpe(vpeflags); 134 evpe(vpeflags);
@@ -454,12 +453,11 @@ int __init vpe_module_init(void)
454 453
455 settc(tc); 454 settc(tc);
456 455
457 /* Any TC that is bound to VPE0 gets left as is - in 456 /*
458 * case we are running SMTC on VPE0. A TC that is bound 457 * A TC that is bound to any other VPE gets bound to
459 * to any other VPE gets bound to VPE0, ideally I'd like 458 * VPE0, ideally I'd like to make it homeless but it
460 * to make it homeless but it doesn't appear to let me 459 * doesn't appear to let me bind a TC to a non-existent
461 * bind a TC to a non-existent VPE. Which is perfectly 460 * VPE. Which is perfectly reasonable.
462 * reasonable.
463 * 461 *
464 * The (un)bound state is visible to an EJTAG probe so 462 * The (un)bound state is visible to an EJTAG probe so
465 * may notify GDB... 463 * may notify GDB...