author		Ralf Baechle <ralf@linux-mips.org>	2014-05-23 10:29:44 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2014-05-23 18:07:01 -0400
commit		b633648c5ad3cfbda0b3daea50d2135d44899259
tree		6100185cae10f36a55e71c3b220fc79cfa14b7c0 /arch/mips/include
parent		8b2e62cc34feaaf1cac9440a93fb18ac0b1e81bc
MIPS: MT: Remove SMTC support
Nobody is maintaining SMTC anymore and there also seems to be no userbase,
which is a pity - the SMTC technology, primarily developed by Kevin D.
Kissell <kevink@paralogos.com>, is an ingenious demonstration of the MT ASE's
power and elegance.

Based on Markos Chandras' <Markos.Chandras@imgtec.com> patch
https://patchwork.linux-mips.org/patch/6719/ which, while very similar, no
longer applied cleanly when I tried to merge it, plus some additional
post-SMTC cleanup - SMTC was a feature as tricky to remove as it was to
merge once upon a time.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/include')
-rw-r--r--	arch/mips/include/asm/asmmacro.h			22
-rw-r--r--	arch/mips/include/asm/cpu-info.h			13
-rw-r--r--	arch/mips/include/asm/fixmap.h				4
-rw-r--r--	arch/mips/include/asm/irq.h				96
-rw-r--r--	arch/mips/include/asm/irqflags.h			32
-rw-r--r--	arch/mips/include/asm/mach-malta/kernel-entry-init.h	30
-rw-r--r--	arch/mips/include/asm/mach-sead3/kernel-entry-init.h	31
-rw-r--r--	arch/mips/include/asm/mips_mt.h				5
-rw-r--r--	arch/mips/include/asm/mipsregs.h			133
-rw-r--r--	arch/mips/include/asm/mmu_context.h			107
-rw-r--r--	arch/mips/include/asm/module.h				8
-rw-r--r--	arch/mips/include/asm/ptrace.h				3
-rw-r--r--	arch/mips/include/asm/r4kcache.h			5
-rw-r--r--	arch/mips/include/asm/smtc.h				78
-rw-r--r--	arch/mips/include/asm/smtc_ipi.h			129
-rw-r--r--	arch/mips/include/asm/smtc_proc.h			23
-rw-r--r--	arch/mips/include/asm/stackframe.h			196
-rw-r--r--	arch/mips/include/asm/thread_info.h			11
-rw-r--r--	arch/mips/include/asm/time.h				5
19 files changed, 18 insertions, 913 deletions
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index b464b8b1147a..f7db79a846bb 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -17,26 +17,8 @@
 #ifdef CONFIG_64BIT
 #include <asm/asmmacro-64.h>
 #endif
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif
-
-#ifdef CONFIG_MIPS_MT_SMTC
-	.macro	local_irq_enable reg=t0
-	mfc0	\reg, CP0_TCSTATUS
-	ori	\reg, \reg, TCSTATUS_IXMT
-	xori	\reg, \reg, TCSTATUS_IXMT
-	mtc0	\reg, CP0_TCSTATUS
-	_ehb
-	.endm
 
-	.macro	local_irq_disable reg=t0
-	mfc0	\reg, CP0_TCSTATUS
-	ori	\reg, \reg, TCSTATUS_IXMT
-	mtc0	\reg, CP0_TCSTATUS
-	_ehb
-	.endm
-#elif defined(CONFIG_CPU_MIPSR2)
+#ifdef CONFIG_CPU_MIPSR2
 	.macro	local_irq_enable reg=t0
 	ei
 	irq_enable_hazard
@@ -71,7 +53,7 @@
 	sw	\reg, TI_PRE_COUNT($28)
 #endif
 	.endm
-#endif /* CONFIG_MIPS_MT_SMTC */
+#endif /* CONFIG_CPU_MIPSR2 */
 
 	.macro	fpu_save_16even thread tmp=t0
 	cfc1	\tmp, fcr31
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index dc2135be2a3a..7ba0e07a9091 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -65,18 +65,13 @@ struct cpuinfo_mips {
 #ifdef CONFIG_64BIT
 	int			vmbits;	/* Virtual memory size in bits */
 #endif
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_MIPS_MT_SMP
 	/*
-	 * In the MIPS MT "SMTC" model, each TC is considered
-	 * to be a "CPU" for the purposes of scheduling, but
-	 * exception resources, ASID spaces, etc, are common
-	 * to all TCs within the same VPE.
+	 * There is not necessarily a 1:1 mapping of VPE num to CPU number
+	 * in particular on multi-core systems.
 	 */
 	int			vpe_id;	/* Virtual Processor number */
 #endif
-#ifdef CONFIG_MIPS_MT_SMTC
-	int			tc_id;	/* Thread Context number */
-#endif
 	void			*data;	/* Additional data */
 	unsigned int		watch_reg_count;   /* Number that exist */
 	unsigned int		watch_reg_use_cnt; /* Usable by ptrace */
@@ -117,7 +112,7 @@ struct proc_cpuinfo_notifier_args {
 	unsigned long n;
 };
 
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_MIPS_MT_SMP
 # define cpu_vpe_id(cpuinfo)	((cpuinfo)->vpe_id)
 #else
 # define cpu_vpe_id(cpuinfo)	0
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index 8c012af2f451..6842ffafd1e7 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -48,11 +48,7 @@
 enum fixed_addresses {
 #define FIX_N_COLOURS 8
 	FIX_CMAP_BEGIN,
-#ifdef CONFIG_MIPS_MT_SMTC
-	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS * 2),
-#else
 	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2),
-#endif
 #ifdef CONFIG_HIGHMEM
 	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 7bc2cdb35057..ae1f7b24dd1a 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -26,104 +26,8 @@ static inline int irq_canonicalize(int irq)
 #define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
 #endif
 
-#ifdef CONFIG_MIPS_MT_SMTC
-
-struct irqaction;
-
-extern unsigned long irq_hwmask[];
-extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
-			  unsigned long hwmask);
-
-static inline void smtc_im_ack_irq(unsigned int irq)
-{
-	if (irq_hwmask[irq] & ST0_IM)
-		set_c0_status(irq_hwmask[irq] & ST0_IM);
-}
-
-#else
-
-static inline void smtc_im_ack_irq(unsigned int irq)
-{
-}
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-#include <linux/cpumask.h>
-
-extern int plat_set_irq_affinity(struct irq_data *d,
-				 const struct cpumask *affinity, bool force);
-extern void smtc_forward_irq(struct irq_data *d);
-
-/*
- * IRQ affinity hook invoked at the beginning of interrupt dispatch
- * if option is enabled.
- *
- * Up through Linux 2.6.22 (at least) cpumask operations are very
- * inefficient on MIPS.  Initial prototypes of SMTC IRQ affinity
- * used a "fast path" per-IRQ-descriptor cache of affinity information
- * to reduce latency.  As there is a project afoot to optimize the
- * cpumask implementations, this version is optimistically assuming
- * that cpumask.h macro overhead is reasonable during interrupt dispatch.
- */
-static inline int handle_on_other_cpu(unsigned int irq)
-{
-	struct irq_data *d = irq_get_irq_data(irq);
-
-	if (cpumask_test_cpu(smp_processor_id(), d->affinity))
-		return 0;
-	smtc_forward_irq(d);
-	return 1;
-}
-
-#else /* Not doing SMTC affinity */
-
-static inline int handle_on_other_cpu(unsigned int irq) { return 0; }
-
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
-
-#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
-
-static inline void smtc_im_backstop(unsigned int irq)
-{
-	if (irq_hwmask[irq] & 0x0000ff00)
-		write_c0_tccontext(read_c0_tccontext() &
-				   ~(irq_hwmask[irq] & 0x0000ff00));
-}
-
-/*
- * Clear interrupt mask handling "backstop" if irq_hwmask
- * entry so indicates. This implies that the ack() or end()
- * functions will take over re-enabling the low-level mask.
- * Otherwise it will be done on return from exception.
- */
-static inline int smtc_handle_on_other_cpu(unsigned int irq)
-{
-	int ret = handle_on_other_cpu(irq);
-
-	if (!ret)
-		smtc_im_backstop(irq);
-	return ret;
-}
-
-#else
-
-static inline void smtc_im_backstop(unsigned int irq) { }
-static inline int smtc_handle_on_other_cpu(unsigned int irq)
-{
-	return handle_on_other_cpu(irq);
-}
-
-#endif
-
 extern void do_IRQ(unsigned int irq);
 
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-
-extern void do_IRQ_no_affinity(unsigned int irq);
-
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
-
 extern void arch_init_irq(void);
 extern void spurious_interrupt(void);
 
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 45c00951888b..0fa5fdcd1f01 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -17,7 +17,7 @@
 #include <linux/stringify.h>
 #include <asm/hazards.h>
 
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_CPU_MIPSR2
 
 static inline void arch_local_irq_disable(void)
 {
@@ -118,30 +118,15 @@ void arch_local_irq_disable(void);
 unsigned long arch_local_irq_save(void);
 void arch_local_irq_restore(unsigned long flags);
 void __arch_local_irq_restore(unsigned long flags);
-#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
-
-
-extern void smtc_ipi_replay(void);
+#endif /* CONFIG_CPU_MIPSR2 */
 
 static inline void arch_local_irq_enable(void)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
 	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
-	"	ori	$1, 0x400					\n"
-	"	xori	$1, 0x400					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#if defined(CONFIG_CPU_MIPSR2)
 	"	ei							\n"
 #else
 	"	mfc0	$1,$12						\n"
@@ -163,11 +148,7 @@ static inline unsigned long arch_local_save_flags(void)
 	asm __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	%[flags], $2, 1					\n"
-#else
 	"	mfc0	%[flags], $12					\n"
-#endif
 	"	.set	pop						\n"
 	: [flags] "=r" (flags));
 
@@ -177,14 +158,7 @@ static inline unsigned long arch_local_save_flags(void)
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
-	 */
-	return flags & 0x400;
-#else
 	return !(flags & 1);
-#endif
 }
 
 #endif /* #ifndef __ASSEMBLY__ */
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 7c5e17a17849..77eeda77e73c 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -80,36 +80,6 @@
 	.endm
 
 	.macro	kernel_entry_setup
-#ifdef CONFIG_MIPS_MT_SMTC
-	mfc0	t0, CP0_CONFIG
-	bgez	t0, 9f
-	mfc0	t0, CP0_CONFIG, 1
-	bgez	t0, 9f
-	mfc0	t0, CP0_CONFIG, 2
-	bgez	t0, 9f
-	mfc0	t0, CP0_CONFIG, 3
-	and	t0, 1<<2
-	bnez	t0, 0f
-9:
-	/* Assume we came from YAMON... */
-	PTR_LA	v0, 0x9fc00534	/* YAMON print */
-	lw	v0, (v0)
-	move	a0, zero
-	PTR_LA	a1, nonmt_processor
-	jal	v0
-
-	PTR_LA	v0, 0x9fc00520	/* YAMON exit */
-	lw	v0, (v0)
-	li	a0, 1
-	jal	v0
-
-1:	b	1b
-
-	__INITDATA
-nonmt_processor:
-	.asciz	"SMTC kernel requires the MT ASE to run\n"
-	__FINIT
-#endif
 
 #ifdef CONFIG_EVA
 	sync
diff --git a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h b/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
index 3dfbd8e7947f..6cccd4d558d7 100644
--- a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
@@ -10,37 +10,6 @@
 #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
 
 	.macro	kernel_entry_setup
-#ifdef CONFIG_MIPS_MT_SMTC
-	mfc0	t0, CP0_CONFIG
-	bgez	t0, 9f
-	mfc0	t0, CP0_CONFIG, 1
-	bgez	t0, 9f
-	mfc0	t0, CP0_CONFIG, 2
-	bgez	t0, 9f
-	mfc0	t0, CP0_CONFIG, 3
-	and	t0, 1<<2
-	bnez	t0, 0f
-9 :
-	/* Assume we came from YAMON... */
-	PTR_LA	v0, 0x9fc00534	/* YAMON print */
-	lw	v0, (v0)
-	move	a0, zero
-	PTR_LA	a1, nonmt_processor
-	jal	v0
-
-	PTR_LA	v0, 0x9fc00520	/* YAMON exit */
-	lw	v0, (v0)
-	li	a0, 1
-	jal	v0
-
-1 :	b	1b
-
-	__INITDATA
-nonmt_processor :
-	.asciz	"SMTC kernel requires the MT ASE to run\n"
-	__FINIT
-0 :
-#endif
 	.endm
 
 /*
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
index a3df0c3faa0e..f6ba004a7711 100644
--- a/arch/mips/include/asm/mips_mt.h
+++ b/arch/mips/include/asm/mips_mt.h
@@ -1,7 +1,6 @@
 /*
- * Definitions and decalrations for MIPS MT support
- * that are common between SMTC, VSMP, and/or AP/SP
- * kernel models.
+ * Definitions and decalrations for MIPS MT support that are common between
+ * the VSMP, and AP/SP kernel models.
  */
 #ifndef __ASM_MIPS_MT_H
 #define __ASM_MIPS_MT_H
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 88e30d5022b3..fb2d17487ec2 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1014,19 +1014,8 @@ do { \
 #define write_c0_compare3(val)	__write_32bit_c0_register($11, 7, val)
 
 #define read_c0_status()	__read_32bit_c0_register($12, 0)
-#ifdef CONFIG_MIPS_MT_SMTC
-#define write_c0_status(val)					\
-do {								\
-	__write_32bit_c0_register($12, 0, val);			\
-	__ehb();						\
-} while (0)
-#else
-/*
- * Legacy non-SMTC code, which may be hazardous
- * but which might not support EHB
- */
+
 #define write_c0_status(val)	__write_32bit_c0_register($12, 0, val)
-#endif /* CONFIG_MIPS_MT_SMTC */
 
 #define read_c0_cause()		__read_32bit_c0_register($13, 0)
 #define write_c0_cause(val)	__write_32bit_c0_register($13, 0, val)
@@ -1750,11 +1739,6 @@ static inline void tlb_write_random(void)
 /*
  * Manipulate bits in a c0 register.
  */
-#ifndef CONFIG_MIPS_MT_SMTC
-/*
- * SMTC Linux requires shutting-down microthread scheduling
- * during CP0 register read-modify-write sequences.
- */
 #define __BUILD_SET_C0(name)					\
 static inline unsigned int					\
 set_c0_##name(unsigned int set)					\
@@ -1793,121 +1777,6 @@ change_c0_##name(unsigned int change, unsigned int val) \
 	return res;						\
 }
 
-#else /* SMTC versions that manage MT scheduling */
-
-#include <linux/irqflags.h>
-
-/*
- * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with
- * header file recursion.
- */
-static inline unsigned int __dmt(void)
-{
-	int res;
-
-	__asm__ __volatile__(
-	"	.set	push						\n"
-	"	.set	mips32r2					\n"
-	"	.set	noat						\n"
-	"	.word	0x41610BC1			# dmt $1	\n"
-	"	ehb							\n"
-	"	move	%0, $1						\n"
-	"	.set	pop						\n"
-	: "=r" (res));
-
-	instruction_hazard();
-
-	return res;
-}
-
-#define __VPECONTROL_TE_SHIFT	15
-#define __VPECONTROL_TE		(1UL << __VPECONTROL_TE_SHIFT)
-
-#define __EMT_ENABLE		__VPECONTROL_TE
-
-static inline void __emt(unsigned int previous)
-{
-	if ((previous & __EMT_ENABLE))
-		__asm__ __volatile__(
-		"	.set	mips32r2				\n"
-		"	.word	0x41600be1		# emt		\n"
-		"	ehb						\n"
-		"	.set	mips0					\n");
-}
-
-static inline void __ehb(void)
-{
-	__asm__ __volatile__(
-	"	.set	mips32r2					\n"
-	"	ehb							\n"	"	.set	mips0						\n");
-}
-
-/*
- * Note that local_irq_save/restore affect TC-specific IXMT state,
- * not Status.IE as in non-SMTC kernel.
- */
-
-#define __BUILD_SET_C0(name)					\
-static inline unsigned int					\
-set_c0_##name(unsigned int set)					\
-{								\
-	unsigned int res;					\
-	unsigned int new;					\
-	unsigned int omt;					\
-	unsigned long flags;					\
-								\
-	local_irq_save(flags);					\
-	omt = __dmt();						\
-	res = read_c0_##name();					\
-	new = res | set;					\
-	write_c0_##name(new);					\
-	__emt(omt);						\
-	local_irq_restore(flags);				\
-								\
-	return res;						\
-}								\
-								\
-static inline unsigned int					\
-clear_c0_##name(unsigned int clear)				\
-{								\
-	unsigned int res;					\
-	unsigned int new;					\
-	unsigned int omt;					\
-	unsigned long flags;					\
-								\
-	local_irq_save(flags);					\
-	omt = __dmt();						\
-	res = read_c0_##name();					\
-	new = res & ~clear;					\
-	write_c0_##name(new);					\
-	__emt(omt);						\
-	local_irq_restore(flags);				\
-								\
-	return res;						\
-}								\
-								\
-static inline unsigned int					\
-change_c0_##name(unsigned int change, unsigned int newbits)	\
-{								\
-	unsigned int res;					\
-	unsigned int new;					\
-	unsigned int omt;					\
-	unsigned long flags;					\
-								\
-	local_irq_save(flags);					\
-								\
-	omt = __dmt();						\
-	res = read_c0_##name();					\
-	new = res & ~change;					\
-	new |= (newbits & change);				\
-	write_c0_##name(new);					\
-	__emt(omt);						\
-	local_irq_restore(flags);				\
-								\
-	return res;						\
-}
-#endif
-
 __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index e277bbad2871..0f75aaca201b 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -18,10 +18,6 @@
 #include <asm/cacheflush.h>
 #include <asm/hazards.h>
 #include <asm/tlbflush.h>
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#include <asm/smtc.h>
-#endif /* SMTC */
 #include <asm-generic/mm_hooks.h>
 
 #define TLBMISS_HANDLER_SETUP_PGD(pgd)					\
@@ -63,13 +59,6 @@ extern unsigned long pgd_current[];
 #define ASID_INC	0x10
 #define ASID_MASK	0xff0
 
-#elif defined(CONFIG_MIPS_MT_SMTC)
-
-#define ASID_INC	0x1
-extern unsigned long smtc_asid_mask;
-#define ASID_MASK	(smtc_asid_mask)
-#define HW_ASID_MASK	0xff
-/* End SMTC/34K debug hack */
 #else /* FIXME: not correct for R6000 */
 
 #define ASID_INC	0x1
@@ -92,7 +81,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
 #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
 
-#ifndef CONFIG_MIPS_MT_SMTC
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
@@ -115,12 +103,6 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
-#else /* CONFIG_MIPS_MT_SMTC */
-
-#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu))
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 /*
  * Initialize the context related info for a new mm_struct
  * instance.
@@ -141,46 +123,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned long flags;
-#ifdef CONFIG_MIPS_MT_SMTC
-	unsigned long oldasid;
-	unsigned long mtflags;
-	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
-	local_irq_save(flags);
-	mtflags = dvpe();
-#else /* Not SMTC */
 	local_irq_save(flags);
-#endif /* CONFIG_MIPS_MT_SMTC */
 
 	/* Check if our ASID is of an older version and thus invalid */
 	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
 		get_new_mmu_context(next, cpu);
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * If the EntryHi ASID being replaced happens to be
-	 * the value flagged at ASID recycling time as having
-	 * an extended life, clear the bit showing it being
-	 * in use by this "CPU", and if that's the last bit,
-	 * free up the ASID value for use and flush any old
-	 * instances of it from the TLB.
-	 */
-	oldasid = (read_c0_entryhi() & ASID_MASK);
-	if(smtc_live_asid[mytlb][oldasid]) {
-		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
-		if(smtc_live_asid[mytlb][oldasid] == 0)
-			smtc_flush_tlb_asid(oldasid);
-	}
-	/*
-	 * Tread softly on EntryHi, and so long as we support
-	 * having ASID_MASK smaller than the hardware maximum,
-	 * make sure no "soft" bits become "hard"...
-	 */
-	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
-			 cpu_asid(cpu, next));
-	ehb(); /* Make sure it propagates to TCStatus */
-	evpe(mtflags);
-#else
 	write_c0_entryhi(cpu_asid(cpu, next));
-#endif /* CONFIG_MIPS_MT_SMTC */
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 
 	/*
@@ -213,34 +161,12 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	unsigned long oldasid;
-	unsigned long mtflags;
-	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 	local_irq_save(flags);
 
 	/* Unconditionally get a new ASID.  */
 	get_new_mmu_context(next, cpu);
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/* See comments for similar code above */
-	mtflags = dvpe();
-	oldasid = read_c0_entryhi() & ASID_MASK;
-	if(smtc_live_asid[mytlb][oldasid]) {
-		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
-		if(smtc_live_asid[mytlb][oldasid] == 0)
-			smtc_flush_tlb_asid(oldasid);
-	}
-	/* See comments for similar code above */
-	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
-			 cpu_asid(cpu, next));
-	ehb(); /* Make sure it propagates to TCStatus */
-	evpe(mtflags);
-#else
 	write_c0_entryhi(cpu_asid(cpu, next));
-#endif /* CONFIG_MIPS_MT_SMTC */
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 
 	/* mark mmu ownership change */
@@ -258,48 +184,15 @@ static inline void
 drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 {
 	unsigned long flags;
-#ifdef CONFIG_MIPS_MT_SMTC
-	unsigned long oldasid;
-	/* Can't use spinlock because called from TLB flush within DVPE */
-	unsigned int prevvpe;
-	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
-#endif /* CONFIG_MIPS_MT_SMTC */
 
 	local_irq_save(flags);
 
 	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))  {
 		get_new_mmu_context(mm, cpu);
-#ifdef CONFIG_MIPS_MT_SMTC
-		/* See comments for similar code above */
-		prevvpe = dvpe();
-		oldasid = (read_c0_entryhi() & ASID_MASK);
-		if (smtc_live_asid[mytlb][oldasid]) {
-			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
-			if(smtc_live_asid[mytlb][oldasid] == 0)
-				smtc_flush_tlb_asid(oldasid);
-		}
-		/* See comments for similar code above */
-		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
-				| cpu_asid(cpu, mm));
-		ehb(); /* Make sure it propagates to TCStatus */
-		evpe(prevvpe);
-#else /* not CONFIG_MIPS_MT_SMTC */
 		write_c0_entryhi(cpu_asid(cpu, mm));
-#endif /* CONFIG_MIPS_MT_SMTC */
 	} else {
 		/* will get a new context next time */
-#ifndef CONFIG_MIPS_MT_SMTC
 		cpu_context(cpu, mm) = 0;
-#else /* SMTC */
-		int i;
-
-		/* SMTC shares the TLB (and ASIDs) across VPEs */
-		for_each_online_cpu(i) {
-			if((smtc_status & SMTC_TLB_SHARED)
-			|| (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
-				cpu_context(i, mm) = 0;
-		}
-#endif /* CONFIG_MIPS_MT_SMTC */
 	}
 	local_irq_restore(flags);
 }
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index c2edae382d5d..800fe578dc99 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -144,13 +144,7 @@ search_module_dbetables(unsigned long addr)
 #define MODULE_KERNEL_TYPE "64BIT "
 #endif
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define MODULE_KERNEL_SMTC "MT_SMTC "
-#else
-#define MODULE_KERNEL_SMTC ""
-#endif
-
 #define MODULE_ARCH_VERMAGIC \
-	MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC
+	MODULE_PROC_FAMILY MODULE_KERNEL_TYPE
 
 #endif /* _ASM_MODULE_H */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index bf1ac8d35783..7e6e682aece3 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -39,9 +39,6 @@ struct pt_regs {
 	unsigned long cp0_badvaddr;
 	unsigned long cp0_cause;
 	unsigned long cp0_epc;
-#ifdef CONFIG_MIPS_MT_SMTC
-	unsigned long cp0_tcstatus;
-#endif /* CONFIG_MIPS_MT_SMTC */
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 	unsigned long long mpl[3];	  /* MTM{0,1,2} */
 	unsigned long long mtp[3];	  /* MTP{0,1,2} */
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index ca64cbe44493..fe8d1b622477 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -43,11 +43,10 @@
43 : "i" (op), "R" (*(unsigned char *)(addr))) 43 : "i" (op), "R" (*(unsigned char *)(addr)))
44 44
45#ifdef CONFIG_MIPS_MT 45#ifdef CONFIG_MIPS_MT
46
46/* 47/*
47 * Temporary hacks for SMTC debug. Optionally force single-threaded 48 * Optionally force single-threaded execution during I-cache flushes.
48 * execution during I-cache flushes.
49 */ 49 */
50
51#define PROTECT_CACHE_FLUSHES 1 50#define PROTECT_CACHE_FLUSHES 1
52 51
53#ifdef PROTECT_CACHE_FLUSHES 52#ifdef PROTECT_CACHE_FLUSHES
diff --git a/arch/mips/include/asm/smtc.h b/arch/mips/include/asm/smtc.h
deleted file mode 100644
index e56b439b7871..000000000000
--- a/arch/mips/include/asm/smtc.h
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef _ASM_SMTC_MT_H
-#define _ASM_SMTC_MT_H
-
-/*
- * Definitions for SMTC multitasking on MIPS MT cores
- */
-
-#include <asm/mips_mt.h>
-#include <asm/smtc_ipi.h>
-
-/*
- * System-wide SMTC status information
- */
-
-extern unsigned int smtc_status;
-
-#define SMTC_TLB_SHARED	0x00000001
-#define SMTC_MTC_ACTIVE	0x00000002
-
-/*
- * TLB/ASID Management information
- */
-
-#define MAX_SMTC_TLBS 2
-#define MAX_SMTC_ASIDS 256
-#if NR_CPUS <= 8
-typedef char asiduse;
-#else
-#if NR_CPUS <= 16
-typedef short asiduse;
-#else
-typedef long asiduse;
-#endif
-#endif
-
-/*
- * VPE Management information
- */
-
-#define MAX_SMTC_VPES	MAX_SMTC_TLBS	/* FIXME: May not always be true. */
-
-extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
-
-struct mm_struct;
-struct task_struct;
-
-void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
-void self_ipi(struct smtc_ipi *);
-void smtc_flush_tlb_asid(unsigned long asid);
-extern int smtc_build_cpu_map(int startslot);
-extern void smtc_prepare_cpus(int cpus);
-extern void smtc_smp_finish(void);
-extern void smtc_boot_secondary(int cpu, struct task_struct *t);
-extern void smtc_cpus_done(void);
-extern void smtc_init_secondary(void);
-
-
-/*
- * Sharing the TLB between multiple VPEs means that the
- * "random" index selection function is not allowed to
- * select the current value of the Index register. To
- * avoid additional TLB pressure, the Index registers
- * are "parked" with an non-Valid value.
- */
-
-#define PARKED_INDEX	((unsigned int)0x80000000)
-
-/*
- * Define low-level interrupt mask for IPIs, if necessary.
- * By default, use SW interrupt 1, which requires no external
- * hardware support, but which works only for single-core
- * MIPS MT systems.
- */
-#ifndef MIPS_CPU_IPI_IRQ
-#define MIPS_CPU_IPI_IRQ 1
-#endif
-
-#endif /* _ASM_SMTC_MT_H */
diff --git a/arch/mips/include/asm/smtc_ipi.h b/arch/mips/include/asm/smtc_ipi.h
deleted file mode 100644
index 15278dbd7e79..000000000000
--- a/arch/mips/include/asm/smtc_ipi.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code.
- */
-#ifndef __ASM_SMTC_IPI_H
-#define __ASM_SMTC_IPI_H
-
-#include <linux/spinlock.h>
-
-//#define SMTC_IPI_DEBUG
-
-#ifdef SMTC_IPI_DEBUG
-#include <asm/mipsregs.h>
-#include <asm/mipsmtregs.h>
-#endif /* SMTC_IPI_DEBUG */
-
-/*
- * An IPI "message"
- */
-
-struct smtc_ipi {
-	struct smtc_ipi *flink;
-	int type;
-	void *arg;
-	int dest;
-#ifdef SMTC_IPI_DEBUG
-	int sender;
-	long stamp;
-#endif /* SMTC_IPI_DEBUG */
-};
-
-/*
- * Defined IPI Types
- */
-
-#define LINUX_SMP_IPI 1
-#define SMTC_CLOCK_TICK 2
-#define IRQ_AFFINITY_IPI 3
-
-/*
- * A queue of IPI messages
- */
-
-struct smtc_ipi_q {
-	struct smtc_ipi *head;
-	spinlock_t lock;
-	struct smtc_ipi *tail;
-	int depth;
-	int resched_flag;	/* reschedule already queued */
-};
-
-static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&q->lock, flags);
-	if (q->head == NULL)
-		q->head = q->tail = p;
-	else
-		q->tail->flink = p;
-	p->flink = NULL;
-	q->tail = p;
-	q->depth++;
-#ifdef SMTC_IPI_DEBUG
-	p->sender = read_c0_tcbind();
-	p->stamp = read_c0_count();
-#endif /* SMTC_IPI_DEBUG */
-	spin_unlock_irqrestore(&q->lock, flags);
-}
-
-static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q)
-{
-	struct smtc_ipi *p;
-
-	if (q->head == NULL)
-		p = NULL;
-	else {
-		p = q->head;
-		q->head = q->head->flink;
-		q->depth--;
-		/* Arguably unnecessary, but leaves queue cleaner */
-		if (q->head == NULL)
-			q->tail = NULL;
-	}
-
-	return p;
-}
-
-static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
-{
-	unsigned long flags;
-	struct smtc_ipi *p;
-
-	spin_lock_irqsave(&q->lock, flags);
-	p = __smtc_ipi_dq(q);
-	spin_unlock_irqrestore(&q->lock, flags);
-
-	return p;
-}
-
-static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&q->lock, flags);
-	if (q->head == NULL) {
-		q->head = q->tail = p;
-		p->flink = NULL;
-	} else {
-		p->flink = q->head;
-		q->head = p;
-	}
-	q->depth++;
-	spin_unlock_irqrestore(&q->lock, flags);
-}
-
-static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q)
-{
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&q->lock, flags);
-	retval = q->depth;
-	spin_unlock_irqrestore(&q->lock, flags);
-	return retval;
-}
-
-extern void smtc_send_ipi(int cpu, int type, unsigned int action);
-
-#endif /* __ASM_SMTC_IPI_H */
diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h
deleted file mode 100644
index 25da651f1f5f..000000000000
--- a/arch/mips/include/asm/smtc_proc.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Definitions for SMTC /proc entries
- * Copyright(C) 2005 MIPS Technologies Inc.
- */
-#ifndef __ASM_SMTC_PROC_H
-#define __ASM_SMTC_PROC_H
-
-/*
- * per-"CPU" statistics
- */
-
-struct smtc_cpu_proc {
-	unsigned long timerints;
-	unsigned long selfipis;
-};
-
-extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
-
-/* Count of number of recoveries of "stolen" FPU access rights on 34K */
-
-extern atomic_t smtc_fpu_recoveries;
-
-#endif /* __ASM_SMTC_PROC_H */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index d301e108d5b8..b188c797565c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -19,22 +19,12 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 
-/*
- * For SMTC kernel, global IE should be left set, and interrupts
- * controlled exclusively via IXMT.
- */
-#ifdef CONFIG_MIPS_MT_SMTC
-#define STATMASK 0x1e
-#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 #define STATMASK 0x3f
 #else
 #define STATMASK 0x1f
 #endif
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 	.macro	SAVE_AT
 	.set	push
 	.set	noat
@@ -186,16 +176,6 @@
 		mfc0	v1, CP0_STATUS
 		LONG_S	$2, PT_R2(sp)
 		LONG_S	v1, PT_STATUS(sp)
-#ifdef CONFIG_MIPS_MT_SMTC
-		/*
-		 * Ideally, these instructions would be shuffled in
-		 * to cover the pipeline delay.
-		 */
-		.set	mips32
-		mfc0	k0, CP0_TCSTATUS
-		.set	mips0
-		LONG_S	k0, PT_TCSTATUS(sp)
-#endif /* CONFIG_MIPS_MT_SMTC */
 		LONG_S	$4, PT_R4(sp)
 		mfc0	v1, CP0_CAUSE
 		LONG_S	$5, PT_R5(sp)
@@ -321,36 +301,6 @@
 	.set	push
 	.set	reorder
 	.set	noat
-#ifdef CONFIG_MIPS_MT_SMTC
-	.set	mips32r2
-	/*
-	 * We need to make sure the read-modify-write
-	 * of Status below isn't perturbed by an interrupt
-	 * or cross-TC access, so we need to do at least a DMT,
-	 * protected by an interrupt-inhibit. But setting IXMT
-	 * also creates a few-cycle window where an IPI could
-	 * be queued and not be detected before potentially
-	 * returning to a WAIT or user-mode loop. It must be
-	 * replayed.
-	 *
-	 * We're in the middle of a context switch, and
-	 * we can't dispatch it directly without trashing
-	 * some registers, so we'll try to detect this unlikely
-	 * case and program a software interrupt in the VPE,
-	 * as would be done for a cross-VPE IPI.  To accommodate
-	 * the handling of that case, we're doing a DVPE instead
-	 * of just a DMT here to protect against other threads.
-	 * This is a lot of cruft to cover a tiny window.
-	 * If you can find a better design, implement it!
-	 *
-	 */
-	mfc0	v0, CP0_TCSTATUS
-	ori	v0, TCSTATUS_IXMT
-	mtc0	v0, CP0_TCSTATUS
-	_ehb
-	DVPE	5			# dvpe a1
-	jal	mips_ihb
-#endif /* CONFIG_MIPS_MT_SMTC */
 	mfc0	a0, CP0_STATUS
 	ori	a0, STATMASK
 	xori	a0, STATMASK
@@ -362,59 +312,6 @@
 	and	v0, v1
 	or	v0, a0
 	mtc0	v0, CP0_STATUS
-#ifdef CONFIG_MIPS_MT_SMTC
-/*
- * Only after EXL/ERL have been restored to status can we
- * restore TCStatus.IXMT.
- */
-	LONG_L	v1, PT_TCSTATUS(sp)
-	_ehb
-	mfc0	a0, CP0_TCSTATUS
-	andi	v1, TCSTATUS_IXMT
-	bnez	v1, 0f
-
-/*
- * We'd like to detect any IPIs queued in the tiny window
- * above and request an software interrupt to service them
- * when we ERET.
- *
- * Computing the offset into the IPIQ array of the executing
- * TC's IPI queue in-line would be tedious.  We use part of
- * the TCContext register to hold 16 bits of offset that we
- * can add in-line to find the queue head.
- */
-	mfc0	v0, CP0_TCCONTEXT
-	la	a2, IPIQ
-	srl	v0, v0, 16
-	addu	a2, a2, v0
-	LONG_L	v0, 0(a2)
-	beqz	v0, 0f
-/*
- * If we have a queue, provoke dispatch within the VPE by setting C_SW1
- */
-	mfc0	v0, CP0_CAUSE
-	ori	v0, v0, C_SW1
-	mtc0	v0, CP0_CAUSE
-0:
-	/*
-	 * This test should really never branch but
-	 * let's be prudent here.  Having atomized
-	 * the shared register modifications, we can
-	 * now EVPE, and must do so before interrupts
-	 * are potentially re-enabled.
-	 */
-	andi	a1, a1, MVPCONTROL_EVP
-	beqz	a1, 1f
-	evpe
-1:
-	/* We know that TCStatua.IXMT should be set from above */
-	xori	a0, a0, TCSTATUS_IXMT
-	or	a0, a0, v1
-	mtc0	a0, CP0_TCSTATUS
-	_ehb
-
-	.set	mips0
-#endif /* CONFIG_MIPS_MT_SMTC */
 	LONG_L	v1, PT_EPC(sp)
 	MTC0	v1, CP0_EPC
 	LONG_L	$31, PT_R31(sp)
@@ -467,33 +364,11 @@
  * Set cp0 enable bit as sign that we're running on the kernel stack
  */
 	.macro	CLI
-#if !defined(CONFIG_MIPS_MT_SMTC)
 	mfc0	t0, CP0_STATUS
 	li	t1, ST0_CU0 | STATMASK
 	or	t0, t1
 	xori	t0, STATMASK
 	mtc0	t0, CP0_STATUS
-#else /* CONFIG_MIPS_MT_SMTC */
-	/*
-	 * For SMTC, we need to set privilege
-	 * and disable interrupts only for the
-	 * current TC, using the TCStatus register.
-	 */
-	mfc0	t0, CP0_TCSTATUS
-	/* Fortunately CU 0 is in the same place in both registers */
-	/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
-	li	t1, ST0_CU0 | 0x08001c00
-	or	t0, t1
-	/* Clear TKSU, leave IXMT */
-	xori	t0, 0x00001800
-	mtc0	t0, CP0_TCSTATUS
-	_ehb
-	/* We need to leave the global IE bit set, but clear EXL...*/
-	mfc0	t0, CP0_STATUS
-	ori	t0, ST0_EXL | ST0_ERL
-	xori	t0, ST0_EXL | ST0_ERL
-	mtc0	t0, CP0_STATUS
-#endif /* CONFIG_MIPS_MT_SMTC */
 	irq_disable_hazard
 	.endm
 
@@ -502,35 +377,11 @@
  * Set cp0 enable bit as sign that we're running on the kernel stack
  */
 	.macro	STI
-#if !defined(CONFIG_MIPS_MT_SMTC)
 	mfc0	t0, CP0_STATUS
 	li	t1, ST0_CU0 | STATMASK
 	or	t0, t1
 	xori	t0, STATMASK & ~1
 	mtc0	t0, CP0_STATUS
-#else /* CONFIG_MIPS_MT_SMTC */
-	/*
-	 * For SMTC, we need to set privilege
-	 * and enable interrupts only for the
-	 * current TC, using the TCStatus register.
-	 */
-	_ehb
-	mfc0	t0, CP0_TCSTATUS
-	/* Fortunately CU 0 is in the same place in both registers */
-	/* Set TCU0, TKSU (for later inversion) and IXMT */
-	li	t1, ST0_CU0 | 0x08001c00
-	or	t0, t1
-	/* Clear TKSU *and* IXMT */
-	xori	t0, 0x00001c00
-	mtc0	t0, CP0_TCSTATUS
-	_ehb
-	/* We need to leave the global IE bit set, but clear EXL...*/
-	mfc0	t0, CP0_STATUS
-	ori	t0, ST0_EXL
-	xori	t0, ST0_EXL
-	mtc0	t0, CP0_STATUS
-	/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
-#endif /* CONFIG_MIPS_MT_SMTC */
 	irq_enable_hazard
 	.endm
 
@@ -540,32 +391,6 @@
  * Set cp0 enable bit as sign that we're running on the kernel stack
  */
 	.macro	KMODE
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * This gets baroque in SMTC.  We want to
-	 * protect the non-atomic clearing of EXL
-	 * with DMT/EMT, but we don't want to take
-	 * an interrupt while DMT is still in effect.
-	 */
-
-	/* KMODE gets invoked from both reorder and noreorder code */
-	.set	push
-	.set	mips32r2
-	.set	noreorder
-	mfc0	v0, CP0_TCSTATUS
-	andi	v1, v0, TCSTATUS_IXMT
-	ori	v0, TCSTATUS_IXMT
-	mtc0	v0, CP0_TCSTATUS
-	_ehb
-	DMT	2			# dmt	v0
-	/*
-	 * We don't know a priori if ra is "live"
-	 */
-	move	t0, ra
-	jal	mips_ihb
-	nop	/* delay slot */
-	move	ra, t0
-#endif /* CONFIG_MIPS_MT_SMTC */
 	mfc0	t0, CP0_STATUS
 	li	t1, ST0_CU0 | (STATMASK & ~1)
 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
@@ -576,25 +401,6 @@
 	or	t0, t1
 	xori	t0, STATMASK & ~1
 	mtc0	t0, CP0_STATUS
-#ifdef CONFIG_MIPS_MT_SMTC
-	_ehb
-	andi	v0, v0, VPECONTROL_TE
-	beqz	v0, 2f
-	nop	/* delay slot */
-	emt
-2:
-	mfc0	v0, CP0_TCSTATUS
-	/* Clear IXMT, then OR in previous value */
-	ori	v0, TCSTATUS_IXMT
-	xori	v0, TCSTATUS_IXMT
-	or	v0, v1, v0
-	mtc0	v0, CP0_TCSTATUS
-	/*
-	 * irq_disable_hazard below should expand to EHB
-	 * on 24K/34K CPUS
-	 */
-	.set	pop
-#endif /* CONFIG_MIPS_MT_SMTC */
 	irq_disable_hazard
 	.endm
 
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index d2d961d6cb86..7de865805deb 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -159,11 +159,7 @@ static inline struct thread_info *current_thread_info(void)
  * We stash processor id into a COP0 register to retrieve it fast
  * at kernel exception entry.
  */
-#if defined(CONFIG_MIPS_MT_SMTC)
-#define SMP_CPUID_REG		2, 2	/* TCBIND */
-#define ASM_SMP_CPUID_REG	$2, 2
-#define SMP_CPUID_PTRSHIFT	19
-#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
+#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
 #define SMP_CPUID_REG		20, 0	/* XCONTEXT */
 #define ASM_SMP_CPUID_REG	$20
 #define SMP_CPUID_PTRSHIFT	48
@@ -179,13 +175,8 @@ static inline struct thread_info *current_thread_info(void)
 #define SMP_CPUID_REGSHIFT	(SMP_CPUID_PTRSHIFT + 2)
 #endif
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define ASM_CPUID_MFC0		mfc0
-#define UASM_i_CPUID_MFC0	uasm_i_mfc0
-#else
 #define ASM_CPUID_MFC0		MFC0
 #define UASM_i_CPUID_MFC0	UASM_i_MFC0
-#endif
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 24f534a7fbc3..8f3047d611ee 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -52,14 +52,11 @@ extern int (*perf_irq)(void);
  */
 extern unsigned int __weak get_c0_compare_int(void);
 extern int r4k_clockevent_init(void);
-extern int smtc_clockevent_init(void);
 extern int gic_clockevent_init(void);
 
 static inline int mips_clockevent_init(void)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
-	return smtc_clockevent_init();
-#elif defined(CONFIG_CEVT_GIC)
+#if defined(CONFIG_CEVT_GIC)
 	return (gic_clockevent_init() | r4k_clockevent_init());
 #elif defined(CONFIG_CEVT_R4K)
 	return r4k_clockevent_init();