author	Ralf Baechle <ralf@linux-mips.org>	2014-05-23 10:29:44 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2014-05-23 18:07:01 -0400
commit	b633648c5ad3cfbda0b3daea50d2135d44899259 (patch)
tree	6100185cae10f36a55e71c3b220fc79cfa14b7c0 /arch
parent	8b2e62cc34feaaf1cac9440a93fb18ac0b1e81bc (diff)
MIPS: MT: Remove SMTC support
Nobody is maintaining SMTC anymore and there also seems to be no user
base left, which is a pity: the SMTC technology, primarily developed by
Kevin D. Kissell <kevink@paralogos.com>, is an ingenious demonstration
of the MT ASE's power and elegance.
Based on Markos Chandras' <Markos.Chandras@imgtec.com> patch
https://patchwork.linux-mips.org/patch/6719/, which, while very
similar, no longer applied cleanly when I tried to merge it, plus some
additional post-SMTC cleanup. SMTC was a feature as tricky to remove as
it was to merge once upon a time.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
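
A note for readers unfamiliar with SMTC, since it explains most of the
code removed below: SMTC masked interrupts per thread context (TC)
through the TCStatus.IXMT bit, whereas every other MIPS kernel model
masks them per CPU/VPE through Status.IE. What follows is a rough,
illustrative sketch of the two disable paths, reconstructed from the
removed irqflags.h hunks in this diff; the function names are ad hoc
and this is not buildable, drop-in kernel code:

/*
 * Sketch only: contrasts the removed SMTC interrupt-disable path
 * (per-TC TCStatus.IXMT) with the classic pre-R2 path (Status.IE),
 * based on the irqflags.h hunk below. Not a real kernel helper.
 */
static inline void smtc_style_irq_disable(void)
{
	/* SMTC: set TCStatus.IXMT (0x400); only this TC is masked. */
	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	mips32r2				\n"
	"	.set	noat					\n"
	"	mfc0	$1, $2, 1	# read TCStatus		\n"
	"	ori	$1, 0x400	# set IXMT		\n"
	"	mtc0	$1, $2, 1				\n"
	"	ehb						\n"
	"	.set	pop					\n"
	: /* no outputs */ : : "memory");
}

static inline void classic_irq_disable(void)
{
	/* Non-SMTC: clear Status.IE (0x1); the whole VPE/CPU is masked. */
	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	noat					\n"
	"	mfc0	$1, $12		# read Status		\n"
	"	ori	$1, 0x1					\n"
	"	xori	$1, 0x1		# clear IE		\n"
	"	mtc0	$1, $12					\n"
	"	.set	pop					\n"
	: /* no outputs */ : : "memory");
}

This is why the removed arch_irqs_disabled_flags() tested
(flags & 0x400) against TCStatus while the surviving version tests
!(flags & 1) against Status, and why the removed mipsregs.h variants
had to stop multithreaded issue (dmt ... emt) around every CP0
read-modify-write; exactly the special-casing this commit deletes.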
Diffstat (limited to 'arch')
64 files changed, 71 insertions, 4096 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 45e75b6173b5..e3f040cbaff3 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1852,7 +1852,7 @@ config FORCE_MAX_ZONEORDER
 
 config CEVT_GIC
 	bool "Use GIC global counter for clock events"
-	depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC)
+	depends on IRQ_GIC && !MIPS_SEAD3
 	help
 	  Use the GIC global counter for the clock events. The R4K clock
 	  event driver is always present, so if the platform ends up not
@@ -1936,24 +1936,6 @@ config MIPS_MT_SMP
 	  Intel Hyperthreading feature. For further information go to
 	  <http://www.imgtec.com/mips/mips-multithreading.asp>.
 
-config MIPS_MT_SMTC
-	bool "Use all TCs on all VPEs for SMP (DEPRECATED)"
-	depends on CPU_MIPS32_R2
-	depends on SYS_SUPPORTS_MULTITHREADING
-	depends on !MIPS_CPS
-	select CPU_MIPSR2_IRQ_VI
-	select CPU_MIPSR2_IRQ_EI
-	select MIPS_MT
-	select SMP
-	select SMP_UP
-	select SYS_SUPPORTS_SMP
-	select NR_CPUS_DEFAULT_8
-	help
-	  This is a kernel model which is known as SMTC. This is
-	  supported on cores with the MT ASE and presents all TCs
-	  available on all VPEs to support SMP. For further
-	  information see <http://www.linux-mips.org/wiki/34K#SMTC>.
-
 endchoice
 
 config MIPS_MT
@@ -1977,7 +1959,7 @@ config SYS_SUPPORTS_MULTITHREADING
 config MIPS_MT_FPAFF
 	bool "Dynamic FPU affinity for FP-intensive threads"
 	default y
-	depends on MIPS_MT_SMP || MIPS_MT_SMTC
+	depends on MIPS_MT_SMP
 
 config MIPS_VPE_LOADER
 	bool "VPE loader support."
@@ -1999,29 +1981,6 @@ config MIPS_VPE_LOADER_MT
 	default "y"
 	depends on MIPS_VPE_LOADER && !MIPS_CMP
 
-config MIPS_MT_SMTC_IM_BACKSTOP
-	bool "Use per-TC register bits as backstop for inhibited IM bits"
-	depends on MIPS_MT_SMTC
-	default n
-	help
-	  To support multiple TC microthreads acting as "CPUs" within
-	  a VPE, VPE-wide interrupt mask bits must be specially manipulated
-	  during interrupt handling. To support legacy drivers and interrupt
-	  controller management code, SMTC has a "backstop" to track and
-	  if necessary restore the interrupt mask. This has some performance
-	  impact on interrupt service overhead.
-
-config MIPS_MT_SMTC_IRQAFF
-	bool "Support IRQ affinity API"
-	depends on MIPS_MT_SMTC
-	default n
-	help
-	  Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
-	  for SMTC Linux kernel. Requires platform support, of which
-	  an example can be found in the MIPS kernel i8259 and Malta
-	  platform code. Adds some overhead to interrupt dispatch, and
-	  should be used only if you know what you are doing.
-
 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
 	depends on MIPS_VPE_LOADER
@@ -2049,7 +2008,7 @@ config MIPS_VPE_APSP_API_MT
 
 config MIPS_CMP
 	bool "MIPS CMP framework support (DEPRECATED)"
-	depends on SYS_SUPPORTS_MIPS_CMP && !MIPS_MT_SMTC
+	depends on SYS_SUPPORTS_MIPS_CMP
 	select MIPS_GIC_IPI
 	select SYNC_R4K
 	select WEAK_ORDERING
@@ -2256,7 +2215,7 @@ config NODES_SHIFT
 
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
-	depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP)
+	depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP)
 	default y
 	help
 	  Enable hardware performance counter support for perf events. If
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 25de29211d76..3a2b775e8458 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -79,15 +79,6 @@ config CMDLINE_OVERRIDE
 
 	  Normally, you will choose 'N' here.
 
-config SMTC_IDLE_HOOK_DEBUG
-	bool "Enable additional debug checks before going into CPU idle loop"
-	depends on DEBUG_KERNEL && MIPS_MT_SMTC
-	help
-	  This option enables Enable additional debug checks before going into
-	  CPU idle loop. For details on these checks, see
-	  arch/mips/kernel/smtc.c. This debugging option result in significant
-	  overhead so should be disabled in production kernels.
-
 config SB1XXX_CORELIS
 	bool "Corelis Debugger"
 	depends on SIBYTE_SB1xxx_SOC
diff --git a/arch/mips/configs/maltasmtc_defconfig b/arch/mips/configs/maltasmtc_defconfig
deleted file mode 100644
index eb316447588c..000000000000
--- a/arch/mips/configs/maltasmtc_defconfig
+++ /dev/null
@@ -1,196 +0,0 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_PAGE_SIZE_16KB=y
-CONFIG_MIPS_MT_SMTC=y
-# CONFIG_MIPS_MT_FPAFF is not set
-CONFIG_NR_CPUS=9
-CONFIG_HZ_48=y
-CONFIG_LOCALVERSION="smtc"
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCI=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_NET_IPIP=m
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_IPCOMP=m
-# CONFIG_INET_LRO is not set
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_BRIDGE=m
-CONFIG_VLAN_8021Q=m
-CONFIG_ATALK=m
-CONFIG_DEV_APPLETALK=m
-CONFIG_IPDDP=m
-CONFIG_IPDDP_ENCAP=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CBQ=m
-CONFIG_NET_SCH_HTB=m
-CONFIG_NET_SCH_HFSC=m
-CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RED=m
-CONFIG_NET_SCH_SFQ=m
-CONFIG_NET_SCH_TEQL=m
-CONFIG_NET_SCH_TBF=m
-CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
-CONFIG_NET_SCH_NETEM=m
-CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
-CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_FW=m
-CONFIG_NET_CLS_U32=m
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=y
-CONFIG_NET_CLS_IND=y
-# CONFIG_WIRELESS is not set
-CONFIG_DEVTMPFS=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_IDE=y
-# CONFIG_IDE_PROC_FS is not set
-# CONFIG_IDEPCI_PCIBUS_ORDER is not set
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_PIIX=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-# CONFIG_NET_VENDOR_3COM is not set
-# CONFIG_NET_VENDOR_ADAPTEC is not set
-# CONFIG_NET_VENDOR_ALTEON is not set
-CONFIG_PCNET32=y
-# CONFIG_NET_VENDOR_ATHEROS is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_BROCADE is not set
-# CONFIG_NET_VENDOR_CHELSIO is not set
-# CONFIG_NET_VENDOR_CISCO is not set
-# CONFIG_NET_VENDOR_DEC is not set
-# CONFIG_NET_VENDOR_DLINK is not set
-# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
-# CONFIG_NET_VENDOR_HP is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MELLANOX is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MYRI is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_NVIDIA is not set
-# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
-# CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
-# CONFIG_NET_VENDOR_RDC is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SILAN is not set
-# CONFIG_NET_VENDOR_SIS is not set
-# CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SUN is not set
-# CONFIG_NET_VENDOR_TEHUTI is not set
-# CONFIG_NET_VENDOR_TI is not set
-# CONFIG_NET_VENDOR_TOSHIBA is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_WLAN is not set
-# CONFIG_VT is not set
-CONFIG_LEGACY_PTY_COUNT=16
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_HW_RANDOM=y
-# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_MATROX=y
-CONFIG_FB_MATROX_G=y
-CONFIG_USB=y
-CONFIG_USB_EHCI_HCD=y
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-CONFIG_USB_UHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_BACKLIGHT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_XFS_FS=y
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_QUOTA=y
-CONFIG_QFMT_V2=y
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_ISO8859_1=m
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index b464b8b1147a..f7db79a846bb 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -17,26 +17,8 @@
 #ifdef CONFIG_64BIT
 #include <asm/asmmacro-64.h>
 #endif
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif
-
-#ifdef CONFIG_MIPS_MT_SMTC
-	.macro	local_irq_enable reg=t0
-	mfc0	\reg, CP0_TCSTATUS
-	ori	\reg, \reg, TCSTATUS_IXMT
-	xori	\reg, \reg, TCSTATUS_IXMT
-	mtc0	\reg, CP0_TCSTATUS
-	_ehb
-	.endm
 
-	.macro	local_irq_disable reg=t0
-	mfc0	\reg, CP0_TCSTATUS
-	ori	\reg, \reg, TCSTATUS_IXMT
-	mtc0	\reg, CP0_TCSTATUS
-	_ehb
-	.endm
-#elif defined(CONFIG_CPU_MIPSR2)
+#ifdef CONFIG_CPU_MIPSR2
 	.macro	local_irq_enable reg=t0
 	ei
 	irq_enable_hazard
@@ -71,7 +53,7 @@
 	sw	\reg, TI_PRE_COUNT($28)
 #endif
 	.endm
-#endif /* CONFIG_MIPS_MT_SMTC */
+#endif /* CONFIG_CPU_MIPSR2 */
 
 	.macro	fpu_save_16even thread tmp=t0
 	cfc1	\tmp, fcr31
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index dc2135be2a3a..7ba0e07a9091 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -65,18 +65,13 @@ struct cpuinfo_mips {
 #ifdef CONFIG_64BIT
 	int			vmbits;	/* Virtual memory size in bits */
 #endif
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_MIPS_MT_SMP
 	/*
-	 * In the MIPS MT "SMTC" model, each TC is considered
-	 * to be a "CPU" for the purposes of scheduling, but
-	 * exception resources, ASID spaces, etc, are common
-	 * to all TCs within the same VPE.
+	 * There is not necessarily a 1:1 mapping of VPE num to CPU number
+	 * in particular on multi-core systems.
	 */
 	int			vpe_id;	/* Virtual Processor number */
 #endif
-#ifdef CONFIG_MIPS_MT_SMTC
-	int			tc_id;	/* Thread Context number */
-#endif
 	void			*data;	/* Additional data */
 	unsigned int		watch_reg_count;   /* Number that exist */
 	unsigned int		watch_reg_use_cnt; /* Usable by ptrace */
@@ -117,7 +112,7 @@ struct proc_cpuinfo_notifier_args {
 	unsigned long n;
 };
 
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_MIPS_MT_SMP
 # define cpu_vpe_id(cpuinfo)	((cpuinfo)->vpe_id)
 #else
 # define cpu_vpe_id(cpuinfo)	0
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index 8c012af2f451..6842ffafd1e7 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -48,11 +48,7 @@
 enum fixed_addresses {
 #define FIX_N_COLOURS 8
 	FIX_CMAP_BEGIN,
-#ifdef CONFIG_MIPS_MT_SMTC
-	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS * 2),
-#else
 	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2),
-#endif
 #ifdef CONFIG_HIGHMEM
 	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 7bc2cdb35057..ae1f7b24dd1a 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -26,104 +26,8 @@ static inline int irq_canonicalize(int irq)
 #define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
 #endif
 
-#ifdef CONFIG_MIPS_MT_SMTC
-
-struct irqaction;
-
-extern unsigned long irq_hwmask[];
-extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
-                          unsigned long hwmask);
-
-static inline void smtc_im_ack_irq(unsigned int irq)
-{
-	if (irq_hwmask[irq] & ST0_IM)
-		set_c0_status(irq_hwmask[irq] & ST0_IM);
-}
-
-#else
-
-static inline void smtc_im_ack_irq(unsigned int irq)
-{
-}
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-#include <linux/cpumask.h>
-
-extern int plat_set_irq_affinity(struct irq_data *d,
-				 const struct cpumask *affinity, bool force);
-extern void smtc_forward_irq(struct irq_data *d);
-
-/*
- * IRQ affinity hook invoked at the beginning of interrupt dispatch
- * if option is enabled.
- *
- * Up through Linux 2.6.22 (at least) cpumask operations are very
- * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
- * used a "fast path" per-IRQ-descriptor cache of affinity information
- * to reduce latency. As there is a project afoot to optimize the
- * cpumask implementations, this version is optimistically assuming
- * that cpumask.h macro overhead is reasonable during interrupt dispatch.
- */
-static inline int handle_on_other_cpu(unsigned int irq)
-{
-	struct irq_data *d = irq_get_irq_data(irq);
-
-	if (cpumask_test_cpu(smp_processor_id(), d->affinity))
-		return 0;
-	smtc_forward_irq(d);
-	return 1;
-}
-
-#else /* Not doing SMTC affinity */
-
-static inline int handle_on_other_cpu(unsigned int irq) { return 0; }
-
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
-
-#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
-
-static inline void smtc_im_backstop(unsigned int irq)
-{
-	if (irq_hwmask[irq] & 0x0000ff00)
-		write_c0_tccontext(read_c0_tccontext() &
-				   ~(irq_hwmask[irq] & 0x0000ff00));
-}
-
-/*
- * Clear interrupt mask handling "backstop" if irq_hwmask
- * entry so indicates. This implies that the ack() or end()
- * functions will take over re-enabling the low-level mask.
- * Otherwise it will be done on return from exception.
- */
-static inline int smtc_handle_on_other_cpu(unsigned int irq)
-{
-	int ret = handle_on_other_cpu(irq);
-
-	if (!ret)
-		smtc_im_backstop(irq);
-	return ret;
-}
-
-#else
-
-static inline void smtc_im_backstop(unsigned int irq) { }
-static inline int smtc_handle_on_other_cpu(unsigned int irq)
-{
-	return handle_on_other_cpu(irq);
-}
-
-#endif
-
 extern void do_IRQ(unsigned int irq);
 
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-
-extern void do_IRQ_no_affinity(unsigned int irq);
-
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
-
 extern void arch_init_irq(void);
 extern void spurious_interrupt(void);
 
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 45c00951888b..0fa5fdcd1f01 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -17,7 +17,7 @@
 #include <linux/stringify.h>
 #include <asm/hazards.h>
 
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_CPU_MIPSR2
 
 static inline void arch_local_irq_disable(void)
 {
@@ -118,30 +118,15 @@ void arch_local_irq_disable(void);
 unsigned long arch_local_irq_save(void);
 void arch_local_irq_restore(unsigned long flags);
 void __arch_local_irq_restore(unsigned long flags);
-#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
-
-
-extern void smtc_ipi_replay(void);
+#endif /* CONFIG_CPU_MIPSR2 */
 
 static inline void arch_local_irq_enable(void)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
 	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
-	"	ori	$1, 0x400					\n"
-	"	xori	$1, 0x400					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#if   defined(CONFIG_CPU_MIPSR2)
 	"	ei							\n"
 #else
 	"	mfc0	$1,$12						\n"
@@ -163,11 +148,7 @@ static inline unsigned long arch_local_save_flags(void)
 	asm __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	%[flags], $2, 1					\n"
-#else
 	"	mfc0	%[flags], $12					\n"
-#endif
 	"	.set	pop						\n"
 	: [flags] "=r" (flags));
 
@@ -177,14 +158,7 @@ static inline unsigned long arch_local_save_flags(void)
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
-	 */
-	return flags & 0x400;
-#else
 	return !(flags & 1);
-#endif
 }
 
 #endif /* #ifndef __ASSEMBLY__ */
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 7c5e17a17849..77eeda77e73c 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -80,36 +80,6 @@
 	.endm
 
 	.macro	kernel_entry_setup
-#ifdef CONFIG_MIPS_MT_SMTC
-	mfc0	t0, CP0_CONFIG
-	bgez	t0, 9f
-	mfc0	t0, CP0_CONFIG, 1
-	bgez	t0, 9f
-	mfc0	t0, CP0_CONFIG, 2
-	bgez	t0, 9f
-	mfc0	t0, CP0_CONFIG, 3
-	and	t0, 1<<2
-	bnez	t0, 0f
-9:
-	/* Assume we came from YAMON... */
-	PTR_LA	v0, 0x9fc00534	/* YAMON print */
-	lw	v0, (v0)
-	move	a0, zero
-	PTR_LA	a1, nonmt_processor
-	jal	v0
-
-	PTR_LA	v0, 0x9fc00520	/* YAMON exit */
-	lw	v0, (v0)
-	li	a0, 1
-	jal	v0
-
-1:	b	1b
-
-	__INITDATA
-nonmt_processor:
-	.asciz	"SMTC kernel requires the MT ASE to run\n"
-	__FINIT
-#endif
 
 #ifdef CONFIG_EVA
 	sync
diff --git a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h b/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
index 3dfbd8e7947f..6cccd4d558d7 100644
--- a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
@@ -10,37 +10,6 @@
 #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
 
 	.macro	kernel_entry_setup
-#ifdef CONFIG_MIPS_MT_SMTC
-	mfc0	t0, CP0_CONFIG
-	bgez	t0, 9f
-	mfc0	t0, CP0_CONFIG, 1
-	bgez	t0, 9f
-	mfc0	t0, CP0_CONFIG, 2
-	bgez	t0, 9f
-	mfc0	t0, CP0_CONFIG, 3
-	and	t0, 1<<2
-	bnez	t0, 0f
-9:
-	/* Assume we came from YAMON... */
-	PTR_LA	v0, 0x9fc00534	/* YAMON print */
-	lw	v0, (v0)
-	move	a0, zero
-	PTR_LA	a1, nonmt_processor
-	jal	v0
-
-	PTR_LA	v0, 0x9fc00520	/* YAMON exit */
-	lw	v0, (v0)
-	li	a0, 1
-	jal	v0
-
-1:	b	1b
-
-	__INITDATA
-nonmt_processor:
-	.asciz	"SMTC kernel requires the MT ASE to run\n"
-	__FINIT
-0:
-#endif
 	.endm
 
 /*
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
index a3df0c3faa0e..f6ba004a7711 100644
--- a/arch/mips/include/asm/mips_mt.h
+++ b/arch/mips/include/asm/mips_mt.h
@@ -1,7 +1,6 @@
 /*
- * Definitions and decalrations for MIPS MT support
- * that are common between SMTC, VSMP, and/or AP/SP
- * kernel models.
+ * Definitions and decalrations for MIPS MT support that are common between
+ * the VSMP, and AP/SP kernel models.
 */
 #ifndef __ASM_MIPS_MT_H
 #define __ASM_MIPS_MT_H
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 88e30d5022b3..fb2d17487ec2 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1014,19 +1014,8 @@ do {									\
 #define write_c0_compare3(val)	__write_32bit_c0_register($11, 7, val)
 
 #define read_c0_status()	__read_32bit_c0_register($12, 0)
-#ifdef CONFIG_MIPS_MT_SMTC
-#define write_c0_status(val)						\
-do {									\
-	__write_32bit_c0_register($12, 0, val);				\
-	__ehb();							\
-} while (0)
-#else
-/*
- * Legacy non-SMTC code, which may be hazardous
- * but which might not support EHB
- */
+
 #define write_c0_status(val)	__write_32bit_c0_register($12, 0, val)
-#endif /* CONFIG_MIPS_MT_SMTC */
 
 #define read_c0_cause()		__read_32bit_c0_register($13, 0)
 #define write_c0_cause(val)	__write_32bit_c0_register($13, 0, val)
@@ -1750,11 +1739,6 @@ static inline void tlb_write_random(void)
 /*
  * Manipulate bits in a c0 register.
  */
-#ifndef CONFIG_MIPS_MT_SMTC
-/*
- * SMTC Linux requires shutting-down microthread scheduling
- * during CP0 register read-modify-write sequences.
- */
 #define __BUILD_SET_C0(name)					\
 static inline unsigned int					\
 set_c0_##name(unsigned int set)					\
@@ -1793,121 +1777,6 @@ change_c0_##name(unsigned int change, unsigned int val)	\
 	return res;						\
 }
 
-#else /* SMTC versions that manage MT scheduling */
-
-#include <linux/irqflags.h>
-
-/*
- * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with
- * header file recursion.
- */
-static inline unsigned int __dmt(void)
-{
-	int res;
-
-	__asm__ __volatile__(
-	"	.set	push						\n"
-	"	.set	mips32r2					\n"
-	"	.set	noat						\n"
-	"	.word	0x41610BC1			# dmt $1	\n"
-	"	ehb							\n"
-	"	move	%0, $1						\n"
-	"	.set	pop						\n"
-	: "=r" (res));
-
-	instruction_hazard();
-
-	return res;
-}
-
-#define __VPECONTROL_TE_SHIFT	15
-#define __VPECONTROL_TE		(1UL << __VPECONTROL_TE_SHIFT)
-
-#define __EMT_ENABLE		__VPECONTROL_TE
-
-static inline void __emt(unsigned int previous)
-{
-	if ((previous & __EMT_ENABLE))
-		__asm__ __volatile__(
-		"	.set	mips32r2				\n"
-		"	.word	0x41600be1		# emt		\n"
-		"	ehb						\n"
-		"	.set	mips0					\n");
-}
-
-static inline void __ehb(void)
-{
-	__asm__ __volatile__(
-	"	.set	mips32r2					\n"
-	"	ehb							\n"	"	.set	mips0						\n");
-}
-
-/*
- * Note that local_irq_save/restore affect TC-specific IXMT state,
- * not Status.IE as in non-SMTC kernel.
- */
-
-#define __BUILD_SET_C0(name)					\
-static inline unsigned int					\
-set_c0_##name(unsigned int set)					\
-{								\
-	unsigned int res;					\
-	unsigned int new;					\
-	unsigned int omt;					\
-	unsigned long flags;					\
-								\
-	local_irq_save(flags);					\
-	omt = __dmt();						\
-	res = read_c0_##name();					\
-	new = res | set;					\
-	write_c0_##name(new);					\
-	__emt(omt);						\
-	local_irq_restore(flags);				\
-								\
-	return res;						\
-}								\
-								\
-static inline unsigned int					\
-clear_c0_##name(unsigned int clear)				\
-{								\
-	unsigned int res;					\
-	unsigned int new;					\
-	unsigned int omt;					\
-	unsigned long flags;					\
-								\
-	local_irq_save(flags);					\
-	omt = __dmt();						\
-	res = read_c0_##name();					\
-	new = res & ~clear;					\
-	write_c0_##name(new);					\
-	__emt(omt);						\
-	local_irq_restore(flags);				\
-								\
-	return res;						\
-}								\
-								\
-static inline unsigned int					\
-change_c0_##name(unsigned int change, unsigned int newbits)	\
-{								\
-	unsigned int res;					\
-	unsigned int new;					\
-	unsigned int omt;					\
-	unsigned long flags;					\
-								\
-	local_irq_save(flags);					\
-								\
-	omt = __dmt();						\
-	res = read_c0_##name();					\
-	new = res & ~change;					\
-	new |= (newbits & change);				\
-	write_c0_##name(new);					\
-	__emt(omt);						\
-	local_irq_restore(flags);				\
-								\
-	return res;						\
-}
-#endif
-
 __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index e277bbad2871..0f75aaca201b 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -18,10 +18,6 @@
 #include <asm/cacheflush.h>
 #include <asm/hazards.h>
 #include <asm/tlbflush.h>
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#include <asm/smtc.h>
-#endif /* SMTC */
 #include <asm-generic/mm_hooks.h>
 
 #define TLBMISS_HANDLER_SETUP_PGD(pgd)					\
@@ -63,13 +59,6 @@ extern unsigned long pgd_current[];
 #define ASID_INC	0x10
 #define ASID_MASK	0xff0
 
-#elif defined(CONFIG_MIPS_MT_SMTC)
-
-#define ASID_INC	0x1
-extern unsigned long smtc_asid_mask;
-#define ASID_MASK	(smtc_asid_mask)
-#define HW_ASID_MASK	0xff
-/* End SMTC/34K debug hack */
 #else /* FIXME: not correct for R6000 */
 
 #define ASID_INC	0x1
@@ -92,7 +81,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
 #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
 
-#ifndef CONFIG_MIPS_MT_SMTC
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
@@ -115,12 +103,6 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
-#else /* CONFIG_MIPS_MT_SMTC */
-
-#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu))
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 /*
  * Initialize the context related info for a new mm_struct
  * instance.
@@ -141,46 +123,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned long flags;
-#ifdef CONFIG_MIPS_MT_SMTC
-	unsigned long oldasid;
-	unsigned long mtflags;
-	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
-	local_irq_save(flags);
-	mtflags = dvpe();
-#else /* Not SMTC */
 	local_irq_save(flags);
-#endif /* CONFIG_MIPS_MT_SMTC */
 
 	/* Check if our ASID is of an older version and thus invalid */
 	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
 		get_new_mmu_context(next, cpu);
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * If the EntryHi ASID being replaced happens to be
-	 * the value flagged at ASID recycling time as having
-	 * an extended life, clear the bit showing it being
-	 * in use by this "CPU", and if that's the last bit,
-	 * free up the ASID value for use and flush any old
-	 * instances of it from the TLB.
-	 */
-	oldasid = (read_c0_entryhi() & ASID_MASK);
-	if(smtc_live_asid[mytlb][oldasid]) {
-		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
-		if(smtc_live_asid[mytlb][oldasid] == 0)
-			smtc_flush_tlb_asid(oldasid);
-	}
-	/*
-	 * Tread softly on EntryHi, and so long as we support
-	 * having ASID_MASK smaller than the hardware maximum,
-	 * make sure no "soft" bits become "hard"...
-	 */
-	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
-			 cpu_asid(cpu, next));
-	ehb(); /* Make sure it propagates to TCStatus */
-	evpe(mtflags);
-#else
 	write_c0_entryhi(cpu_asid(cpu, next));
-#endif /* CONFIG_MIPS_MT_SMTC */
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 
 	/*
@@ -213,34 +161,12 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	unsigned long oldasid;
-	unsigned long mtflags;
-	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 	local_irq_save(flags);
 
 	/* Unconditionally get a new ASID. */
 	get_new_mmu_context(next, cpu);
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/* See comments for similar code above */
-	mtflags = dvpe();
-	oldasid = read_c0_entryhi() & ASID_MASK;
-	if(smtc_live_asid[mytlb][oldasid]) {
-		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
-		if(smtc_live_asid[mytlb][oldasid] == 0)
-			smtc_flush_tlb_asid(oldasid);
-	}
-	/* See comments for similar code above */
-	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
-			 cpu_asid(cpu, next));
-	ehb(); /* Make sure it propagates to TCStatus */
-	evpe(mtflags);
-#else
 	write_c0_entryhi(cpu_asid(cpu, next));
-#endif /* CONFIG_MIPS_MT_SMTC */
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 
 	/* mark mmu ownership change */
@@ -258,48 +184,15 @@ static inline void
 drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 {
 	unsigned long flags;
-#ifdef CONFIG_MIPS_MT_SMTC
-	unsigned long oldasid;
-	/* Can't use spinlock because called from TLB flush within DVPE */
-	unsigned int prevvpe;
-	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
-#endif /* CONFIG_MIPS_MT_SMTC */
 
 	local_irq_save(flags);
 
 	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))  {
 		get_new_mmu_context(mm, cpu);
-#ifdef CONFIG_MIPS_MT_SMTC
-		/* See comments for similar code above */
-		prevvpe = dvpe();
-		oldasid = (read_c0_entryhi() & ASID_MASK);
-		if (smtc_live_asid[mytlb][oldasid]) {
-			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
-			if(smtc_live_asid[mytlb][oldasid] == 0)
-				smtc_flush_tlb_asid(oldasid);
-		}
-		/* See comments for similar code above */
-		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
-				 | cpu_asid(cpu, mm));
-		ehb(); /* Make sure it propagates to TCStatus */
-		evpe(prevvpe);
-#else /* not CONFIG_MIPS_MT_SMTC */
 		write_c0_entryhi(cpu_asid(cpu, mm));
-#endif /* CONFIG_MIPS_MT_SMTC */
 	} else {
 		/* will get a new context next time */
-#ifndef CONFIG_MIPS_MT_SMTC
 		cpu_context(cpu, mm) = 0;
-#else /* SMTC */
-		int i;
-
-		/* SMTC shares the TLB (and ASIDs) across VPEs */
-		for_each_online_cpu(i) {
-			if((smtc_status & SMTC_TLB_SHARED)
-			|| (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
-				cpu_context(i, mm) = 0;
-		}
-#endif /* CONFIG_MIPS_MT_SMTC */
 	}
 	local_irq_restore(flags);
 }
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index c2edae382d5d..800fe578dc99 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -144,13 +144,7 @@ search_module_dbetables(unsigned long addr)
 #define MODULE_KERNEL_TYPE "64BIT "
 #endif
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define MODULE_KERNEL_SMTC "MT_SMTC "
-#else
-#define MODULE_KERNEL_SMTC ""
-#endif
-
 #define MODULE_ARCH_VERMAGIC \
-	MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC
+	MODULE_PROC_FAMILY MODULE_KERNEL_TYPE
 
 #endif /* _ASM_MODULE_H */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index bf1ac8d35783..7e6e682aece3 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -39,9 +39,6 @@ struct pt_regs {
 	unsigned long cp0_badvaddr;
 	unsigned long cp0_cause;
 	unsigned long cp0_epc;
-#ifdef CONFIG_MIPS_MT_SMTC
-	unsigned long cp0_tcstatus;
-#endif /* CONFIG_MIPS_MT_SMTC */
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 	unsigned long long mpl[3];	/* MTM{0,1,2} */
 	unsigned long long mtp[3];	/* MTP{0,1,2} */
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index ca64cbe44493..fe8d1b622477 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -43,11 +43,10 @@
 	: "i" (op), "R" (*(unsigned char *)(addr)))
 
 #ifdef CONFIG_MIPS_MT
+
 /*
- * Temporary hacks for SMTC debug. Optionally force single-threaded
- * execution during I-cache flushes.
+ * Optionally force single-threaded execution during I-cache flushes.
 */
-
 #define PROTECT_CACHE_FLUSHES 1
 
 #ifdef PROTECT_CACHE_FLUSHES
diff --git a/arch/mips/include/asm/smtc.h b/arch/mips/include/asm/smtc.h
deleted file mode 100644
index e56b439b7871..000000000000
--- a/arch/mips/include/asm/smtc.h
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef _ASM_SMTC_MT_H
-#define _ASM_SMTC_MT_H
-
-/*
- * Definitions for SMTC multitasking on MIPS MT cores
- */
-
-#include <asm/mips_mt.h>
-#include <asm/smtc_ipi.h>
-
-/*
- * System-wide SMTC status information
- */
-
-extern unsigned int smtc_status;
-
-#define SMTC_TLB_SHARED	0x00000001
-#define SMTC_MTC_ACTIVE	0x00000002
-
-/*
- * TLB/ASID Management information
- */
-
-#define MAX_SMTC_TLBS 2
-#define MAX_SMTC_ASIDS 256
-#if NR_CPUS <= 8
-typedef char asiduse;
-#else
-#if NR_CPUS <= 16
-typedef short asiduse;
-#else
-typedef long asiduse;
-#endif
-#endif
-
-/*
- * VPE Management information
- */
-
-#define MAX_SMTC_VPES MAX_SMTC_TLBS	/* FIXME: May not always be true. */
-
-extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
-
-struct mm_struct;
-struct task_struct;
-
-void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
-void self_ipi(struct smtc_ipi *);
-void smtc_flush_tlb_asid(unsigned long asid);
-extern int smtc_build_cpu_map(int startslot);
-extern void smtc_prepare_cpus(int cpus);
-extern void smtc_smp_finish(void);
-extern void smtc_boot_secondary(int cpu, struct task_struct *t);
-extern void smtc_cpus_done(void);
-extern void smtc_init_secondary(void);
-
-
-/*
- * Sharing the TLB between multiple VPEs means that the
- * "random" index selection function is not allowed to
- * select the current value of the Index register. To
- * avoid additional TLB pressure, the Index registers
- * are "parked" with an non-Valid value.
- */
-
-#define PARKED_INDEX	((unsigned int)0x80000000)
-
-/*
- * Define low-level interrupt mask for IPIs, if necessary.
- * By default, use SW interrupt 1, which requires no external
- * hardware support, but which works only for single-core
- * MIPS MT systems.
- */
-#ifndef MIPS_CPU_IPI_IRQ
-#define MIPS_CPU_IPI_IRQ 1
-#endif
-
-#endif /* _ASM_SMTC_MT_H */
diff --git a/arch/mips/include/asm/smtc_ipi.h b/arch/mips/include/asm/smtc_ipi.h
deleted file mode 100644
index 15278dbd7e79..000000000000
--- a/arch/mips/include/asm/smtc_ipi.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code.
- */
-#ifndef __ASM_SMTC_IPI_H
-#define __ASM_SMTC_IPI_H
-
-#include <linux/spinlock.h>
-
-//#define SMTC_IPI_DEBUG
-
-#ifdef SMTC_IPI_DEBUG
-#include <asm/mipsregs.h>
-#include <asm/mipsmtregs.h>
-#endif /* SMTC_IPI_DEBUG */
-
-/*
- * An IPI "message"
- */
-
-struct smtc_ipi {
-	struct smtc_ipi *flink;
-	int type;
-	void *arg;
-	int dest;
-#ifdef	SMTC_IPI_DEBUG
-	int sender;
-	long stamp;
-#endif /* SMTC_IPI_DEBUG */
-};
-
-/*
- * Defined IPI Types
- */
-
-#define LINUX_SMP_IPI 1
-#define SMTC_CLOCK_TICK 2
-#define IRQ_AFFINITY_IPI 3
-
-/*
- * A queue of IPI messages
- */
-
-struct smtc_ipi_q {
-	struct smtc_ipi *head;
-	spinlock_t lock;
-	struct smtc_ipi *tail;
-	int depth;
-	int resched_flag;	/* reschedule already queued */
-};
-
-static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&q->lock, flags);
-	if (q->head == NULL)
-		q->head = q->tail = p;
-	else
-		q->tail->flink = p;
-	p->flink = NULL;
-	q->tail = p;
-	q->depth++;
-#ifdef	SMTC_IPI_DEBUG
-	p->sender = read_c0_tcbind();
-	p->stamp = read_c0_count();
-#endif /* SMTC_IPI_DEBUG */
-	spin_unlock_irqrestore(&q->lock, flags);
-}
-
-static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q)
-{
-	struct smtc_ipi *p;
-
-	if (q->head == NULL)
-		p = NULL;
-	else {
-		p = q->head;
-		q->head = q->head->flink;
-		q->depth--;
-		/* Arguably unnecessary, but leaves queue cleaner */
-		if (q->head == NULL)
-			q->tail = NULL;
-	}
-
-	return p;
-}
-
-static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
-{
-	unsigned long flags;
-	struct smtc_ipi *p;
-
-	spin_lock_irqsave(&q->lock, flags);
-	p = __smtc_ipi_dq(q);
-	spin_unlock_irqrestore(&q->lock, flags);
-
-	return p;
-}
-
-static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&q->lock, flags);
-	if (q->head == NULL) {
-		q->head = q->tail = p;
-		p->flink = NULL;
-	} else {
-		p->flink = q->head;
-		q->head = p;
-	}
-	q->depth++;
-	spin_unlock_irqrestore(&q->lock, flags);
-}
-
-static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q)
-{
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&q->lock, flags);
-	retval = q->depth;
-	spin_unlock_irqrestore(&q->lock, flags);
-	return retval;
-}
-
-extern void smtc_send_ipi(int cpu, int type, unsigned int action);
-
-#endif /* __ASM_SMTC_IPI_H */
diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h
deleted file mode 100644
index 25da651f1f5f..000000000000
--- a/arch/mips/include/asm/smtc_proc.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Definitions for SMTC /proc entries
- * Copyright(C) 2005 MIPS Technologies Inc.
- */
-#ifndef __ASM_SMTC_PROC_H
-#define __ASM_SMTC_PROC_H
-
-/*
- * per-"CPU" statistics
- */
-
-struct smtc_cpu_proc {
-	unsigned long timerints;
-	unsigned long selfipis;
-};
-
-extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
-
-/* Count of number of recoveries of "stolen" FPU access rights on 34K */
-
-extern atomic_t smtc_fpu_recoveries;
-
-#endif /* __ASM_SMTC_PROC_H */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index d301e108d5b8..b188c797565c 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h | |||
@@ -19,22 +19,12 @@ | |||
19 | #include <asm/asm-offsets.h> | 19 | #include <asm/asm-offsets.h> |
20 | #include <asm/thread_info.h> | 20 | #include <asm/thread_info.h> |
21 | 21 | ||
22 | /* | 22 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) |
23 | * For SMTC kernel, global IE should be left set, and interrupts | ||
24 | * controlled exclusively via IXMT. | ||
25 | */ | ||
26 | #ifdef CONFIG_MIPS_MT_SMTC | ||
27 | #define STATMASK 0x1e | ||
28 | #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||
29 | #define STATMASK 0x3f | 23 | #define STATMASK 0x3f |
30 | #else | 24 | #else |
31 | #define STATMASK 0x1f | 25 | #define STATMASK 0x1f |
32 | #endif | 26 | #endif |
33 | 27 | ||
34 | #ifdef CONFIG_MIPS_MT_SMTC | ||
35 | #include <asm/mipsmtregs.h> | ||
36 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
37 | |||
38 | .macro SAVE_AT | 28 | .macro SAVE_AT |
39 | .set push | 29 | .set push |
40 | .set noat | 30 | .set noat |
@@ -186,16 +176,6 @@ | |||
186 | mfc0 v1, CP0_STATUS | 176 | mfc0 v1, CP0_STATUS |
187 | LONG_S $2, PT_R2(sp) | 177 | LONG_S $2, PT_R2(sp) |
188 | LONG_S v1, PT_STATUS(sp) | 178 | LONG_S v1, PT_STATUS(sp) |
189 | #ifdef CONFIG_MIPS_MT_SMTC | ||
190 | /* | ||
191 | * Ideally, these instructions would be shuffled in | ||
192 | * to cover the pipeline delay. | ||
193 | */ | ||
194 | .set mips32 | ||
195 | mfc0 k0, CP0_TCSTATUS | ||
196 | .set mips0 | ||
197 | LONG_S k0, PT_TCSTATUS(sp) | ||
198 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
199 | LONG_S $4, PT_R4(sp) | 179 | LONG_S $4, PT_R4(sp) |
200 | mfc0 v1, CP0_CAUSE | 180 | mfc0 v1, CP0_CAUSE |
201 | LONG_S $5, PT_R5(sp) | 181 | LONG_S $5, PT_R5(sp) |
@@ -321,36 +301,6 @@ | |||
321 | .set push | 301 | .set push |
322 | .set reorder | 302 | .set reorder |
323 | .set noat | 303 | .set noat |
324 | #ifdef CONFIG_MIPS_MT_SMTC | ||
325 | .set mips32r2 | ||
326 | /* | ||
327 | * We need to make sure the read-modify-write | ||
328 | * of Status below isn't perturbed by an interrupt | ||
329 | * or cross-TC access, so we need to do at least a DMT, | ||
330 | * protected by an interrupt-inhibit. But setting IXMT | ||
331 | * also creates a few-cycle window where an IPI could | ||
332 | * be queued and not be detected before potentially | ||
333 | * returning to a WAIT or user-mode loop. It must be | ||
334 | * replayed. | ||
335 | * | ||
336 | * We're in the middle of a context switch, and | ||
337 | * we can't dispatch it directly without trashing | ||
338 | * some registers, so we'll try to detect this unlikely | ||
339 | * case and program a software interrupt in the VPE, | ||
340 | * as would be done for a cross-VPE IPI. To accommodate | ||
341 | * the handling of that case, we're doing a DVPE instead | ||
342 | * of just a DMT here to protect against other threads. | ||
343 | * This is a lot of cruft to cover a tiny window. | ||
344 | * If you can find a better design, implement it! | ||
345 | * | ||
346 | */ | ||
347 | mfc0 v0, CP0_TCSTATUS | ||
348 | ori v0, TCSTATUS_IXMT | ||
349 | mtc0 v0, CP0_TCSTATUS | ||
350 | _ehb | ||
351 | DVPE 5 # dvpe a1 | ||
352 | jal mips_ihb | ||
353 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
354 | mfc0 a0, CP0_STATUS | 304 | mfc0 a0, CP0_STATUS |
355 | ori a0, STATMASK | 305 | ori a0, STATMASK |
356 | xori a0, STATMASK | 306 | xori a0, STATMASK |
@@ -362,59 +312,6 @@ | |||
362 | and v0, v1 | 312 | and v0, v1 |
363 | or v0, a0 | 313 | or v0, a0 |
364 | mtc0 v0, CP0_STATUS | 314 | mtc0 v0, CP0_STATUS |
365 | #ifdef CONFIG_MIPS_MT_SMTC | ||
366 | /* | ||
367 | * Only after EXL/ERL have been restored to status can we | ||
368 | * restore TCStatus.IXMT. | ||
369 | */ | ||
370 | LONG_L v1, PT_TCSTATUS(sp) | ||
371 | _ehb | ||
372 | mfc0 a0, CP0_TCSTATUS | ||
373 | andi v1, TCSTATUS_IXMT | ||
374 | bnez v1, 0f | ||
375 | |||
376 | /* | ||
377 | * We'd like to detect any IPIs queued in the tiny window | ||
378 | * above and request a software interrupt to service them | ||
379 | * when we ERET. | ||
380 | * | ||
381 | * Computing the offset into the IPIQ array of the executing | ||
382 | * TC's IPI queue in-line would be tedious. We use part of | ||
383 | * the TCContext register to hold 16 bits of offset that we | ||
384 | * can add in-line to find the queue head. | ||
385 | */ | ||
386 | mfc0 v0, CP0_TCCONTEXT | ||
387 | la a2, IPIQ | ||
388 | srl v0, v0, 16 | ||
389 | addu a2, a2, v0 | ||
390 | LONG_L v0, 0(a2) | ||
391 | beqz v0, 0f | ||
392 | /* | ||
393 | * If we have a queue, provoke dispatch within the VPE by setting C_SW1 | ||
394 | */ | ||
395 | mfc0 v0, CP0_CAUSE | ||
396 | ori v0, v0, C_SW1 | ||
397 | mtc0 v0, CP0_CAUSE | ||
398 | 0: | ||
399 | /* | ||
400 | * This test should really never branch but | ||
401 | * let's be prudent here. Having atomized | ||
402 | * the shared register modifications, we can | ||
403 | * now EVPE, and must do so before interrupts | ||
404 | * are potentially re-enabled. | ||
405 | */ | ||
406 | andi a1, a1, MVPCONTROL_EVP | ||
407 | beqz a1, 1f | ||
408 | evpe | ||
409 | 1: | ||
410 | /* We know that TCStatus.IXMT should be set from above */ | ||
411 | xori a0, a0, TCSTATUS_IXMT | ||
412 | or a0, a0, v1 | ||
413 | mtc0 a0, CP0_TCSTATUS | ||
414 | _ehb | ||
415 | |||
416 | .set mips0 | ||
417 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
418 | LONG_L v1, PT_EPC(sp) | 315 | LONG_L v1, PT_EPC(sp) |
419 | MTC0 v1, CP0_EPC | 316 | MTC0 v1, CP0_EPC |
420 | LONG_L $31, PT_R31(sp) | 317 | LONG_L $31, PT_R31(sp) |
@@ -467,33 +364,11 @@ | |||
467 | * Set cp0 enable bit as sign that we're running on the kernel stack | 364 | * Set cp0 enable bit as sign that we're running on the kernel stack |
468 | */ | 365 | */ |
469 | .macro CLI | 366 | .macro CLI |
470 | #if !defined(CONFIG_MIPS_MT_SMTC) | ||
471 | mfc0 t0, CP0_STATUS | 367 | mfc0 t0, CP0_STATUS |
472 | li t1, ST0_CU0 | STATMASK | 368 | li t1, ST0_CU0 | STATMASK |
473 | or t0, t1 | 369 | or t0, t1 |
474 | xori t0, STATMASK | 370 | xori t0, STATMASK |
475 | mtc0 t0, CP0_STATUS | 371 | mtc0 t0, CP0_STATUS |
476 | #else /* CONFIG_MIPS_MT_SMTC */ | ||
477 | /* | ||
478 | * For SMTC, we need to set privilege | ||
479 | * and disable interrupts only for the | ||
480 | * current TC, using the TCStatus register. | ||
481 | */ | ||
482 | mfc0 t0, CP0_TCSTATUS | ||
483 | /* Fortunately CU 0 is in the same place in both registers */ | ||
484 | /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ | ||
485 | li t1, ST0_CU0 | 0x08001c00 | ||
486 | or t0, t1 | ||
487 | /* Clear TKSU, leave IXMT */ | ||
488 | xori t0, 0x00001800 | ||
489 | mtc0 t0, CP0_TCSTATUS | ||
490 | _ehb | ||
491 | /* We need to leave the global IE bit set, but clear EXL...*/ | ||
492 | mfc0 t0, CP0_STATUS | ||
493 | ori t0, ST0_EXL | ST0_ERL | ||
494 | xori t0, ST0_EXL | ST0_ERL | ||
495 | mtc0 t0, CP0_STATUS | ||
496 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
497 | irq_disable_hazard | 372 | irq_disable_hazard |
498 | .endm | 373 | .endm |
499 | 374 | ||
@@ -502,35 +377,11 @@ | |||
502 | * Set cp0 enable bit as sign that we're running on the kernel stack | 377 | * Set cp0 enable bit as sign that we're running on the kernel stack |
503 | */ | 378 | */ |
504 | .macro STI | 379 | .macro STI |
505 | #if !defined(CONFIG_MIPS_MT_SMTC) | ||
506 | mfc0 t0, CP0_STATUS | 380 | mfc0 t0, CP0_STATUS |
507 | li t1, ST0_CU0 | STATMASK | 381 | li t1, ST0_CU0 | STATMASK |
508 | or t0, t1 | 382 | or t0, t1 |
509 | xori t0, STATMASK & ~1 | 383 | xori t0, STATMASK & ~1 |
510 | mtc0 t0, CP0_STATUS | 384 | mtc0 t0, CP0_STATUS |
511 | #else /* CONFIG_MIPS_MT_SMTC */ | ||
512 | /* | ||
513 | * For SMTC, we need to set privilege | ||
514 | * and enable interrupts only for the | ||
515 | * current TC, using the TCStatus register. | ||
516 | */ | ||
517 | _ehb | ||
518 | mfc0 t0, CP0_TCSTATUS | ||
519 | /* Fortunately CU 0 is in the same place in both registers */ | ||
520 | /* Set TCU0, TKSU (for later inversion) and IXMT */ | ||
521 | li t1, ST0_CU0 | 0x08001c00 | ||
522 | or t0, t1 | ||
523 | /* Clear TKSU *and* IXMT */ | ||
524 | xori t0, 0x00001c00 | ||
525 | mtc0 t0, CP0_TCSTATUS | ||
526 | _ehb | ||
527 | /* We need to leave the global IE bit set, but clear EXL...*/ | ||
528 | mfc0 t0, CP0_STATUS | ||
529 | ori t0, ST0_EXL | ||
530 | xori t0, ST0_EXL | ||
531 | mtc0 t0, CP0_STATUS | ||
532 | /* irq_enable_hazard below should expand to EHB for 24K/34K cpus */ | ||
533 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
534 | irq_enable_hazard | 385 | irq_enable_hazard |
535 | .endm | 386 | .endm |
536 | 387 | ||
@@ -540,32 +391,6 @@ | |||
540 | * Set cp0 enable bit as sign that we're running on the kernel stack | 391 | * Set cp0 enable bit as sign that we're running on the kernel stack |
541 | */ | 392 | */ |
542 | .macro KMODE | 393 | .macro KMODE |
543 | #ifdef CONFIG_MIPS_MT_SMTC | ||
544 | /* | ||
545 | * This gets baroque in SMTC. We want to | ||
546 | * protect the non-atomic clearing of EXL | ||
547 | * with DMT/EMT, but we don't want to take | ||
548 | * an interrupt while DMT is still in effect. | ||
549 | */ | ||
550 | |||
551 | /* KMODE gets invoked from both reorder and noreorder code */ | ||
552 | .set push | ||
553 | .set mips32r2 | ||
554 | .set noreorder | ||
555 | mfc0 v0, CP0_TCSTATUS | ||
556 | andi v1, v0, TCSTATUS_IXMT | ||
557 | ori v0, TCSTATUS_IXMT | ||
558 | mtc0 v0, CP0_TCSTATUS | ||
559 | _ehb | ||
560 | DMT 2 # dmt v0 | ||
561 | /* | ||
562 | * We don't know a priori if ra is "live" | ||
563 | */ | ||
564 | move t0, ra | ||
565 | jal mips_ihb | ||
566 | nop /* delay slot */ | ||
567 | move ra, t0 | ||
568 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
569 | mfc0 t0, CP0_STATUS | 394 | mfc0 t0, CP0_STATUS |
570 | li t1, ST0_CU0 | (STATMASK & ~1) | 395 | li t1, ST0_CU0 | (STATMASK & ~1) |
571 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | 396 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) |
@@ -576,25 +401,6 @@ | |||
576 | or t0, t1 | 401 | or t0, t1 |
577 | xori t0, STATMASK & ~1 | 402 | xori t0, STATMASK & ~1 |
578 | mtc0 t0, CP0_STATUS | 403 | mtc0 t0, CP0_STATUS |
579 | #ifdef CONFIG_MIPS_MT_SMTC | ||
580 | _ehb | ||
581 | andi v0, v0, VPECONTROL_TE | ||
582 | beqz v0, 2f | ||
583 | nop /* delay slot */ | ||
584 | emt | ||
585 | 2: | ||
586 | mfc0 v0, CP0_TCSTATUS | ||
587 | /* Clear IXMT, then OR in previous value */ | ||
588 | ori v0, TCSTATUS_IXMT | ||
589 | xori v0, TCSTATUS_IXMT | ||
590 | or v0, v1, v0 | ||
591 | mtc0 v0, CP0_TCSTATUS | ||
592 | /* | ||
593 | * irq_disable_hazard below should expand to EHB | ||
594 | * on 24K/34K CPUS | ||
595 | */ | ||
596 | .set pop | ||
597 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
598 | irq_disable_hazard | 404 | irq_disable_hazard |
599 | .endm | 405 | .endm |
600 | 406 | ||
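The surviving right-hand column of the CLI, STI and KMODE macros all lean on the same immediate-only way of clearing bits: OR the mask in, then XOR the same mask back out. MIPS has andi but no "and with complemented immediate", so the pair is the cheapest way to force a group of Status bits to zero. A C restatement of the trick (STATMASK 0x1f covers IE, EXL, ERL and KSU; the wider 0x3f on R3000-class CPUs additionally covers, as far as the old Status layout goes, the stacked KU/IE pairs):

/* or-then-xor with the same mask clears exactly those bits while
 * needing only immediate operands. */
static unsigned int clear_bits(unsigned int status, unsigned int mask)
{
	status |= mask;		/* force every mask bit to 1 */
	status ^= mask;		/* ...then flip them all to 0 */
	return status;
}

/* CLI clears the whole STATMASK, IE included: interrupts off.
 * STI xors with STATMASK & ~1, so bit 0 (IE) stays set: interrupts on. */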
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index d2d961d6cb86..7de865805deb 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h | |||
@@ -159,11 +159,7 @@ static inline struct thread_info *current_thread_info(void) | |||
159 | * We stash processor id into a COP0 register to retrieve it fast | 159 | * We stash processor id into a COP0 register to retrieve it fast |
160 | * at kernel exception entry. | 160 | * at kernel exception entry. |
161 | */ | 161 | */ |
162 | #if defined(CONFIG_MIPS_MT_SMTC) | 162 | #if defined(CONFIG_MIPS_PGD_C0_CONTEXT) |
163 | #define SMP_CPUID_REG 2, 2 /* TCBIND */ | ||
164 | #define ASM_SMP_CPUID_REG $2, 2 | ||
165 | #define SMP_CPUID_PTRSHIFT 19 | ||
166 | #elif defined(CONFIG_MIPS_PGD_C0_CONTEXT) | ||
167 | #define SMP_CPUID_REG 20, 0 /* XCONTEXT */ | 163 | #define SMP_CPUID_REG 20, 0 /* XCONTEXT */ |
168 | #define ASM_SMP_CPUID_REG $20 | 164 | #define ASM_SMP_CPUID_REG $20 |
169 | #define SMP_CPUID_PTRSHIFT 48 | 165 | #define SMP_CPUID_PTRSHIFT 48 |
@@ -179,13 +175,8 @@ static inline struct thread_info *current_thread_info(void) | |||
179 | #define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 2) | 175 | #define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 2) |
180 | #endif | 176 | #endif |
181 | 177 | ||
182 | #ifdef CONFIG_MIPS_MT_SMTC | ||
183 | #define ASM_CPUID_MFC0 mfc0 | ||
184 | #define UASM_i_CPUID_MFC0 uasm_i_mfc0 | ||
185 | #else | ||
186 | #define ASM_CPUID_MFC0 MFC0 | 178 | #define ASM_CPUID_MFC0 MFC0 |
187 | #define UASM_i_CPUID_MFC0 UASM_i_MFC0 | 179 | #define UASM_i_CPUID_MFC0 UASM_i_MFC0 |
188 | #endif | ||
189 | 180 | ||
190 | #endif /* __KERNEL__ */ | 181 | #endif /* __KERNEL__ */ |
191 | #endif /* _ASM_THREAD_INFO_H */ | 182 | #endif /* _ASM_THREAD_INFO_H */ |
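With the SMTC TCBind alternative gone, the CPU number is stashed in spare high bits of a rarely written COP0 register (XCONTEXT or Context). As I read the shift constants, the id is stored pre-scaled by sizeof(long) so that exception entry can turn it into a byte offset into the kernelsp[] pointer table with a single shift; a sketch of that arithmetic for the 64-bit XCONTEXT case (constants illustrative):

#include <stdint.h>

#define SMP_CPUID_PTRSHIFT	48
#define SMP_CPUID_REGSHIFT	(SMP_CPUID_PTRSHIFT + 3)	/* +2 on 32-bit */

/* What per-CPU init would stash in the spare register: the CPU id,
 * pre-scaled by sizeof(long). */
static uint64_t stash(unsigned int cpu)
{
	return (uint64_t)cpu << SMP_CPUID_REGSHIFT;
}

/* Exception entry then needs only one shift to get a byte offset
 * into kernelsp[]: reg >> PTRSHIFT == cpu * sizeof(long). */
static uint64_t kernelsp_offset(uint64_t reg)
{
	return reg >> SMP_CPUID_PTRSHIFT;
}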
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h index 24f534a7fbc3..8f3047d611ee 100644 --- a/arch/mips/include/asm/time.h +++ b/arch/mips/include/asm/time.h | |||
@@ -52,14 +52,11 @@ extern int (*perf_irq)(void); | |||
52 | */ | 52 | */ |
53 | extern unsigned int __weak get_c0_compare_int(void); | 53 | extern unsigned int __weak get_c0_compare_int(void); |
54 | extern int r4k_clockevent_init(void); | 54 | extern int r4k_clockevent_init(void); |
55 | extern int smtc_clockevent_init(void); | ||
56 | extern int gic_clockevent_init(void); | 55 | extern int gic_clockevent_init(void); |
57 | 56 | ||
58 | static inline int mips_clockevent_init(void) | 57 | static inline int mips_clockevent_init(void) |
59 | { | 58 | { |
60 | #ifdef CONFIG_MIPS_MT_SMTC | 59 | #if defined(CONFIG_CEVT_GIC) |
61 | return smtc_clockevent_init(); | ||
62 | #elif defined(CONFIG_CEVT_GIC) | ||
63 | return (gic_clockevent_init() | r4k_clockevent_init()); | 60 | return (gic_clockevent_init() | r4k_clockevent_init()); |
64 | #elif defined(CONFIG_CEVT_R4K) | 61 | #elif defined(CONFIG_CEVT_R4K) |
65 | return r4k_clockevent_init(); | 62 | return r4k_clockevent_init(); |
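Note the bitwise | in the CEVT_GIC branch: unlike ||, it evaluates both initializers unconditionally, so the R4K fallback is still registered even when the GIC init fails, and the combined result is nonzero if either failed. A minimal illustration with stub return values (the stubs are stand-ins, not the real drivers):

static int init_a(void) { return 0; }	/* stand-in: gic_clockevent_init */
static int init_b(void) { return 0; }	/* stand-in: r4k_clockevent_init */

int init_both(void)
{
	/* '|' always runs both; '||' would skip init_b() whenever
	 * init_a() reported failure (nonzero). */
	return init_a() | init_b();
}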
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 277dab301cea..8f8b531bc848 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
@@ -17,7 +17,6 @@ endif | |||
17 | 17 | ||
18 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o | 18 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o |
19 | obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o | 19 | obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o |
20 | obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o | ||
21 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o | 20 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o |
22 | obj-$(CONFIG_CEVT_GIC) += cevt-gic.o | 21 | obj-$(CONFIG_CEVT_GIC) += cevt-gic.o |
23 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o | 22 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o |
@@ -50,7 +49,6 @@ obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o | |||
50 | 49 | ||
51 | obj-$(CONFIG_MIPS_MT) += mips-mt.o | 50 | obj-$(CONFIG_MIPS_MT) += mips-mt.o |
52 | obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o | 51 | obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o |
53 | obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o | ||
54 | obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o | 52 | obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o |
55 | obj-$(CONFIG_MIPS_CMP) += smp-cmp.o | 53 | obj-$(CONFIG_MIPS_CMP) += smp-cmp.o |
56 | obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o | 54 | obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o |
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 0ea75c244b48..08f897ee9a77 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c | |||
@@ -64,9 +64,6 @@ void output_ptreg_defines(void) | |||
64 | OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); | 64 | OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); |
65 | OFFSET(PT_STATUS, pt_regs, cp0_status); | 65 | OFFSET(PT_STATUS, pt_regs, cp0_status); |
66 | OFFSET(PT_CAUSE, pt_regs, cp0_cause); | 66 | OFFSET(PT_CAUSE, pt_regs, cp0_cause); |
67 | #ifdef CONFIG_MIPS_MT_SMTC | ||
68 | OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus); | ||
69 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
70 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | 67 | #ifdef CONFIG_CPU_CAVIUM_OCTEON |
71 | OFFSET(PT_MPL, pt_regs, mpl); | 68 | OFFSET(PT_MPL, pt_regs, mpl); |
72 | OFFSET(PT_MTP, pt_regs, mtp); | 69 | OFFSET(PT_MTP, pt_regs, mtp); |
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 50d3f5a8d6bb..bff124ae69fa 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c | |||
@@ -12,17 +12,10 @@ | |||
12 | #include <linux/smp.h> | 12 | #include <linux/smp.h> |
13 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
14 | 14 | ||
15 | #include <asm/smtc_ipi.h> | ||
16 | #include <asm/time.h> | 15 | #include <asm/time.h> |
17 | #include <asm/cevt-r4k.h> | 16 | #include <asm/cevt-r4k.h> |
18 | #include <asm/gic.h> | 17 | #include <asm/gic.h> |
19 | 18 | ||
20 | /* | ||
21 | * The SMTC Kernel for the 34K, 1004K, et. al. replaces several | ||
22 | * of these routines with SMTC-specific variants. | ||
23 | */ | ||
24 | |||
25 | #ifndef CONFIG_MIPS_MT_SMTC | ||
26 | static int mips_next_event(unsigned long delta, | 19 | static int mips_next_event(unsigned long delta, |
27 | struct clock_event_device *evt) | 20 | struct clock_event_device *evt) |
28 | { | 21 | { |
@@ -36,8 +29,6 @@ static int mips_next_event(unsigned long delta, | |||
36 | return res; | 29 | return res; |
37 | } | 30 | } |
38 | 31 | ||
39 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
40 | |||
41 | void mips_set_clock_mode(enum clock_event_mode mode, | 32 | void mips_set_clock_mode(enum clock_event_mode mode, |
42 | struct clock_event_device *evt) | 33 | struct clock_event_device *evt) |
43 | { | 34 | { |
@@ -47,7 +38,6 @@ void mips_set_clock_mode(enum clock_event_mode mode, | |||
47 | DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); | 38 | DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
48 | int cp0_timer_irq_installed; | 39 | int cp0_timer_irq_installed; |
49 | 40 | ||
50 | #ifndef CONFIG_MIPS_MT_SMTC | ||
51 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | 41 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) |
52 | { | 42 | { |
53 | const int r2 = cpu_has_mips_r2; | 43 | const int r2 = cpu_has_mips_r2; |
@@ -82,8 +72,6 @@ out: | |||
82 | return IRQ_HANDLED; | 72 | return IRQ_HANDLED; |
83 | } | 73 | } |
84 | 74 | ||
85 | #endif /* Not CONFIG_MIPS_MT_SMTC */ | ||
86 | |||
87 | struct irqaction c0_compare_irqaction = { | 75 | struct irqaction c0_compare_irqaction = { |
88 | .handler = c0_compare_interrupt, | 76 | .handler = c0_compare_interrupt, |
89 | .flags = IRQF_PERCPU | IRQF_TIMER, | 77 | .flags = IRQF_PERCPU | IRQF_TIMER, |
@@ -170,7 +158,6 @@ int c0_compare_int_usable(void) | |||
170 | return 1; | 158 | return 1; |
171 | } | 159 | } |
172 | 160 | ||
173 | #ifndef CONFIG_MIPS_MT_SMTC | ||
174 | int r4k_clockevent_init(void) | 161 | int r4k_clockevent_init(void) |
175 | { | 162 | { |
176 | unsigned int cpu = smp_processor_id(); | 163 | unsigned int cpu = smp_processor_id(); |
@@ -225,4 +212,3 @@ int r4k_clockevent_init(void) | |||
225 | return 0; | 212 | return 0; |
226 | } | 213 | } |
227 | 214 | ||
228 | #endif /* Not CONFIG_MIPS_MT_SMTC */ | ||
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c deleted file mode 100644 index b6cf0a60d896..000000000000 --- a/arch/mips/kernel/cevt-smtc.c +++ /dev/null | |||
@@ -1,324 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2007 MIPS Technologies, Inc. | ||
7 | * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> | ||
8 | * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl | ||
9 | */ | ||
10 | #include <linux/clockchips.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/percpu.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/irq.h> | ||
15 | |||
16 | #include <asm/smtc_ipi.h> | ||
17 | #include <asm/time.h> | ||
18 | #include <asm/cevt-r4k.h> | ||
19 | |||
20 | /* | ||
21 | * Variant clock event timer support for SMTC on MIPS 34K, 1004K | ||
22 | * or other MIPS MT cores. | ||
23 | * | ||
24 | * Notes on SMTC Support: | ||
25 | * | ||
26 | * SMTC has multiple microthread TCs pretending to be Linux CPUs. | ||
27 | * But there's only one Count/Compare pair per VPE, and Compare | ||
28 | * interrupts are taken opportunistically by available TCs | ||
29 | * bound to the VPE with the Count register. The new timer | ||
30 | * framework provides for global broadcasts, but we really | ||
31 | * want VPE-level multicasts for best behavior. So instead | ||
32 | * of invoking the high-level clock-event broadcast code, | ||
33 | * this version of SMTC support uses the historical SMTC | ||
34 | * multicast mechanisms "under the hood", appearing to the | ||
35 | * generic clock layer as if the interrupts are per-CPU. | ||
36 | * | ||
37 | * The approach taken here is to maintain a set of NR_CPUS | ||
38 | * virtual timers, and track which "CPU" needs to be alerted | ||
39 | * at each event. | ||
40 | * | ||
41 | * It's unlikely that we'll see a MIPS MT core with more than | ||
42 | * 2 VPEs, but we *know* that we won't need to handle more | ||
43 | * VPEs than we have "CPUs". So NR_CPUS arrays of NR_CPUS elements | ||
44 | * are always going to be overkill, but always going to be enough. | ||
45 | */ | ||
46 | |||
47 | unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; | ||
48 | static int smtc_nextinvpe[NR_CPUS]; | ||
49 | |||
50 | /* | ||
51 | * Timestamps stored are absolute values to be programmed | ||
52 | * into Count register. Valid timestamps will never be zero. | ||
53 | * If a Zero Count value is actually calculated, it is converted | ||
54 | * to be a 1, which will introduce one or two CPU cycles of error | ||
55 | * roughly once every four billion events, which at 1000 HZ means | ||
56 | * about once every 50 days. If that's actually a problem, one | ||
57 | * could alternate squashing 0 to 1 and to -1. | ||
58 | */ | ||
59 | |||
60 | #define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) | ||
61 | #define ISVALID(x) ((x) != 0L) | ||
62 | |||
63 | /* | ||
64 | * Time comparison is subtle, as it's really truncated | ||
65 | * modular arithmetic. | ||
66 | */ | ||
67 | |||
68 | #define IS_SOONER(a, b, reference) \ | ||
69 | (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) | ||
70 | |||
71 | /* | ||
72 | * CATCHUP_INCREMENT, used when the event programming falls behind the counter. | ||
73 | * Could be an increasing function instead of a constant. | ||
74 | */ | ||
75 | |||
76 | #define CATCHUP_INCREMENT 64 | ||
77 | |||
78 | static int mips_next_event(unsigned long delta, | ||
79 | struct clock_event_device *evt) | ||
80 | { | ||
81 | unsigned long flags; | ||
82 | unsigned int mtflags; | ||
83 | unsigned long timestamp, reference, previous; | ||
84 | unsigned long nextcomp = 0L; | ||
85 | int vpe = current_cpu_data.vpe_id; | ||
86 | int cpu = smp_processor_id(); | ||
87 | local_irq_save(flags); | ||
88 | mtflags = dmt(); | ||
89 | |||
90 | /* | ||
91 | * Maintain the per-TC virtual timer | ||
92 | * and program the per-VPE shared Count register | ||
93 | * as appropriate here... | ||
94 | */ | ||
95 | reference = (unsigned long)read_c0_count(); | ||
96 | timestamp = MAKEVALID(reference + delta); | ||
97 | /* | ||
98 | * To really model the clock, we have to catch the case | ||
99 | * where the current next-in-VPE timestamp is the old | ||
100 | * timestamp for the calling CPU, but the new value is | ||
101 | * in fact later. In that case, we have to do a full | ||
102 | * scan and discover the new next-in-VPE CPU id and | ||
103 | * timestamp. | ||
104 | */ | ||
105 | previous = smtc_nexttime[vpe][cpu]; | ||
106 | if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) | ||
107 | && IS_SOONER(previous, timestamp, reference)) { | ||
108 | int i; | ||
109 | int soonest = cpu; | ||
110 | |||
111 | /* | ||
112 | * Update timestamp array here, so that new | ||
113 | * value gets considered along with those of | ||
114 | * other virtual CPUs on the VPE. | ||
115 | */ | ||
116 | smtc_nexttime[vpe][cpu] = timestamp; | ||
117 | for_each_online_cpu(i) { | ||
118 | if (ISVALID(smtc_nexttime[vpe][i]) | ||
119 | && IS_SOONER(smtc_nexttime[vpe][i], | ||
120 | smtc_nexttime[vpe][soonest], reference)) { | ||
121 | soonest = i; | ||
122 | } | ||
123 | } | ||
124 | smtc_nextinvpe[vpe] = soonest; | ||
125 | nextcomp = smtc_nexttime[vpe][soonest]; | ||
126 | /* | ||
127 | * Otherwise, we don't have to process the whole array rank, | ||
128 | * we just have to see if the event horizon has gotten closer. | ||
129 | */ | ||
130 | } else { | ||
131 | if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || | ||
132 | IS_SOONER(timestamp, | ||
133 | smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { | ||
134 | smtc_nextinvpe[vpe] = cpu; | ||
135 | nextcomp = timestamp; | ||
136 | } | ||
137 | /* | ||
138 | * Since next-in-VPE may be the same as the executing | ||
139 | * virtual CPU, we update the array *after* checking | ||
140 | * its value. | ||
141 | */ | ||
142 | smtc_nexttime[vpe][cpu] = timestamp; | ||
143 | } | ||
144 | |||
145 | /* | ||
146 | * It may be that, in fact, we don't need to update Compare, | ||
147 | * but if we do, we want to make sure we didn't fall into | ||
148 | * a crack just behind Count. | ||
149 | */ | ||
150 | if (ISVALID(nextcomp)) { | ||
151 | write_c0_compare(nextcomp); | ||
152 | ehb(); | ||
153 | /* | ||
154 | * We never return an error, we just make sure | ||
155 | * that we trigger the handlers as quickly as | ||
156 | * we can if we fell behind. | ||
157 | */ | ||
158 | while ((nextcomp - (unsigned long)read_c0_count()) | ||
159 | > (unsigned long)LONG_MAX) { | ||
160 | nextcomp += CATCHUP_INCREMENT; | ||
161 | write_c0_compare(nextcomp); | ||
162 | ehb(); | ||
163 | } | ||
164 | } | ||
165 | emt(mtflags); | ||
166 | local_irq_restore(flags); | ||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | |||
171 | void smtc_distribute_timer(int vpe) | ||
172 | { | ||
173 | unsigned long flags; | ||
174 | unsigned int mtflags; | ||
175 | int cpu; | ||
176 | struct clock_event_device *cd; | ||
177 | unsigned long nextstamp; | ||
178 | unsigned long reference; | ||
179 | |||
180 | |||
181 | repeat: | ||
182 | nextstamp = 0L; | ||
183 | for_each_online_cpu(cpu) { | ||
184 | /* | ||
185 | * Find virtual CPUs within the current VPE who have | ||
186 | * unserviced timer requests whose time is now past. | ||
187 | */ | ||
188 | local_irq_save(flags); | ||
189 | mtflags = dmt(); | ||
190 | if (cpu_data[cpu].vpe_id == vpe && | ||
191 | ISVALID(smtc_nexttime[vpe][cpu])) { | ||
192 | reference = (unsigned long)read_c0_count(); | ||
193 | if ((smtc_nexttime[vpe][cpu] - reference) | ||
194 | > (unsigned long)LONG_MAX) { | ||
195 | smtc_nexttime[vpe][cpu] = 0L; | ||
196 | emt(mtflags); | ||
197 | local_irq_restore(flags); | ||
198 | /* | ||
199 | * We don't send IPIs to ourselves. | ||
200 | */ | ||
201 | if (cpu != smp_processor_id()) { | ||
202 | smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); | ||
203 | } else { | ||
204 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
205 | cd->event_handler(cd); | ||
206 | } | ||
207 | } else { | ||
208 | /* Local to VPE but Valid Time not yet reached. */ | ||
209 | if (!ISVALID(nextstamp) || | ||
210 | IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, | ||
211 | reference)) { | ||
212 | smtc_nextinvpe[vpe] = cpu; | ||
213 | nextstamp = smtc_nexttime[vpe][cpu]; | ||
214 | } | ||
215 | emt(mtflags); | ||
216 | local_irq_restore(flags); | ||
217 | } | ||
218 | } else { | ||
219 | emt(mtflags); | ||
220 | local_irq_restore(flags); | ||
221 | |||
222 | } | ||
223 | } | ||
224 | /* Reprogram for interrupt at next soonest timestamp for VPE */ | ||
225 | if (ISVALID(nextstamp)) { | ||
226 | write_c0_compare(nextstamp); | ||
227 | ehb(); | ||
228 | if ((nextstamp - (unsigned long)read_c0_count()) | ||
229 | > (unsigned long)LONG_MAX) | ||
230 | goto repeat; | ||
231 | } | ||
232 | } | ||
233 | |||
234 | |||
235 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | ||
236 | { | ||
237 | int cpu = smp_processor_id(); | ||
238 | |||
239 | /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ | ||
240 | handle_perf_irq(1); | ||
241 | |||
242 | if (read_c0_cause() & (1 << 30)) { | ||
243 | /* Clear Count/Compare Interrupt */ | ||
244 | write_c0_compare(read_c0_compare()); | ||
245 | smtc_distribute_timer(cpu_data[cpu].vpe_id); | ||
246 | } | ||
247 | return IRQ_HANDLED; | ||
248 | } | ||
249 | |||
250 | |||
251 | int smtc_clockevent_init(void) | ||
252 | { | ||
253 | uint64_t mips_freq = mips_hpt_frequency; | ||
254 | unsigned int cpu = smp_processor_id(); | ||
255 | struct clock_event_device *cd; | ||
256 | unsigned int irq; | ||
257 | int i; | ||
258 | int j; | ||
259 | |||
260 | if (!cpu_has_counter || !mips_hpt_frequency) | ||
261 | return -ENXIO; | ||
262 | if (cpu == 0) { | ||
263 | for (i = 0; i < num_possible_cpus(); i++) { | ||
264 | smtc_nextinvpe[i] = 0; | ||
265 | for (j = 0; j < num_possible_cpus(); j++) | ||
266 | smtc_nexttime[i][j] = 0L; | ||
267 | } | ||
268 | /* | ||
269 | * SMTC also can't have the usability test | ||
270 | * run by secondary TCs once Compare is in use. | ||
271 | */ | ||
272 | if (!c0_compare_int_usable()) | ||
273 | return -ENXIO; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | * With vectored interrupts things are getting platform specific. | ||
278 | * get_c0_compare_int is a hook to allow a platform to return the | ||
279 | * interrupt number of its liking. | ||
280 | */ | ||
281 | irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; | ||
282 | if (get_c0_compare_int) | ||
283 | irq = get_c0_compare_int(); | ||
284 | |||
285 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
286 | |||
287 | cd->name = "MIPS"; | ||
288 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | ||
289 | |||
290 | /* Calculate the min / max delta */ | ||
291 | cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); | ||
292 | cd->shift = 32; | ||
293 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); | ||
294 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); | ||
295 | |||
296 | cd->rating = 300; | ||
297 | cd->irq = irq; | ||
298 | cd->cpumask = cpumask_of(cpu); | ||
299 | cd->set_next_event = mips_next_event; | ||
300 | cd->set_mode = mips_set_clock_mode; | ||
301 | cd->event_handler = mips_event_handler; | ||
302 | |||
303 | clockevents_register_device(cd); | ||
304 | |||
305 | /* | ||
306 | * On SMTC we only want to do the data structure | ||
307 | * initialization and IRQ setup once. | ||
308 | */ | ||
309 | if (cpu) | ||
310 | return 0; | ||
311 | /* | ||
312 | * And we need the hwmask associated with the c0_compare | ||
313 | * vector to be initialized. | ||
314 | */ | ||
315 | irq_hwmask[irq] = (0x100 << cp0_compare_irq); | ||
316 | if (cp0_timer_irq_installed) | ||
317 | return 0; | ||
318 | |||
319 | cp0_timer_irq_installed = 1; | ||
320 | |||
321 | setup_irq(irq, &c0_compare_irqaction); | ||
322 | |||
323 | return 0; | ||
324 | } | ||
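The subtlest part of the deleted timer code is its wrap-safe time arithmetic: IS_SOONER measures forward distances from a reference point in unsigned arithmetic, and the catch-up loop treats any forward distance above LONG_MAX as "already in the past". A self-contained restatement of both ideas:

#include <limits.h>

/* Forward distance from 'ref', computed modulo the register width,
 * is immune to the Count register wrapping through zero. */
#define IS_SOONER(a, b, ref) \
	(((a) - (unsigned long)(ref)) < ((b) - (unsigned long)(ref)))

/* Split the modular ring in half: distances up to LONG_MAX are
 * "future"; anything larger means 'next' is already behind 'count',
 * which is exactly the condition the catch-up loop tests for. */
static int already_passed(unsigned long next, unsigned long count)
{
	return (next - count) > (unsigned long)LONG_MAX;
}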
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index f83dc70d2bc2..e8638c5b7d11 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -62,7 +62,7 @@ static inline void check_errata(void) | |||
62 | case CPU_34K: | 62 | case CPU_34K: |
63 | /* | 63 | /* |
64 | * Erratum "RPS May Cause Incorrect Instruction Execution" | 64 | * Erratum "RPS May Cause Incorrect Instruction Execution" |
65 | * This code only handles VPE0, any SMP/SMTC/RTOS code | 65 | * This code only handles VPE0, any SMP/RTOS code |
66 | * making use of VPE1 will be responsible for that VPE. | 66 | * making use of VPE1 will be responsible for that VPE. |
67 | */ | 67 | */ |
68 | if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2) | 68 | if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2) |
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index e5786858cdb6..4353d323f017 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S | |||
@@ -16,9 +16,6 @@ | |||
16 | #include <asm/isadep.h> | 16 | #include <asm/isadep.h> |
17 | #include <asm/thread_info.h> | 17 | #include <asm/thread_info.h> |
18 | #include <asm/war.h> | 18 | #include <asm/war.h> |
19 | #ifdef CONFIG_MIPS_MT_SMTC | ||
20 | #include <asm/mipsmtregs.h> | ||
21 | #endif | ||
22 | 19 | ||
23 | #ifndef CONFIG_PREEMPT | 20 | #ifndef CONFIG_PREEMPT |
24 | #define resume_kernel restore_all | 21 | #define resume_kernel restore_all |
@@ -89,41 +86,6 @@ FEXPORT(syscall_exit) | |||
89 | bnez t0, syscall_exit_work | 86 | bnez t0, syscall_exit_work |
90 | 87 | ||
91 | restore_all: # restore full frame | 88 | restore_all: # restore full frame |
92 | #ifdef CONFIG_MIPS_MT_SMTC | ||
93 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | ||
94 | /* Re-arm any temporarily masked interrupts not explicitly "acked" */ | ||
95 | mfc0 v0, CP0_TCSTATUS | ||
96 | ori v1, v0, TCSTATUS_IXMT | ||
97 | mtc0 v1, CP0_TCSTATUS | ||
98 | andi v0, TCSTATUS_IXMT | ||
99 | _ehb | ||
100 | mfc0 t0, CP0_TCCONTEXT | ||
101 | DMT 9 # dmt t1 | ||
102 | jal mips_ihb | ||
103 | mfc0 t2, CP0_STATUS | ||
104 | andi t3, t0, 0xff00 | ||
105 | or t2, t2, t3 | ||
106 | mtc0 t2, CP0_STATUS | ||
107 | _ehb | ||
108 | andi t1, t1, VPECONTROL_TE | ||
109 | beqz t1, 1f | ||
110 | EMT | ||
111 | 1: | ||
112 | mfc0 v1, CP0_TCSTATUS | ||
113 | /* We set IXMT above, XOR should clear it here */ | ||
114 | xori v1, v1, TCSTATUS_IXMT | ||
115 | or v1, v0, v1 | ||
116 | mtc0 v1, CP0_TCSTATUS | ||
117 | _ehb | ||
118 | xor t0, t0, t3 | ||
119 | mtc0 t0, CP0_TCCONTEXT | ||
120 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ | ||
121 | /* Detect and execute deferred IPI "interrupts" */ | ||
122 | LONG_L s0, TI_REGS($28) | ||
123 | LONG_S sp, TI_REGS($28) | ||
124 | jal deferred_smtc_ipi | ||
125 | LONG_S s0, TI_REGS($28) | ||
126 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
127 | .set noat | 89 | .set noat |
128 | RESTORE_TEMP | 90 | RESTORE_TEMP |
129 | RESTORE_AT | 91 | RESTORE_AT |
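The deleted restore_all hook drained a per-CPU queue of IPIs that had been deferred while the TC had IXMT set, dispatching each one before the final return to the interrupted context. Schematically, in C (invented names; the real queue lives in the deleted smtc.c and is spinlock-protected):

struct ipi_msg {
	struct ipi_msg *next;
	void (*handler)(void *arg);
	void *arg;
};

/* Drain and dispatch everything queued while IPIs were masked; the
 * real code additionally holds the queue lock around each dequeue. */
static void drain_deferred_ipis(struct ipi_msg **head)
{
	struct ipi_msg *m;

	while ((m = *head) != NULL) {
		*head = m->next;
		m->handler(m->arg);
	}
}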
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index a9ce3408be25..ac35e12cb1f3 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S | |||
@@ -21,20 +21,6 @@ | |||
21 | #include <asm/war.h> | 21 | #include <asm/war.h> |
22 | #include <asm/thread_info.h> | 22 | #include <asm/thread_info.h> |
23 | 23 | ||
24 | #ifdef CONFIG_MIPS_MT_SMTC | ||
25 | #define PANIC_PIC(msg) \ | ||
26 | .set push; \ | ||
27 | .set nomicromips; \ | ||
28 | .set reorder; \ | ||
29 | PTR_LA a0,8f; \ | ||
30 | .set noat; \ | ||
31 | PTR_LA AT, panic; \ | ||
32 | jr AT; \ | ||
33 | 9: b 9b; \ | ||
34 | .set pop; \ | ||
35 | TEXT(msg) | ||
36 | #endif | ||
37 | |||
38 | __INIT | 24 | __INIT |
39 | 25 | ||
40 | /* | 26 | /* |
@@ -251,15 +237,6 @@ NESTED(except_vec_vi, 0, sp) | |||
251 | SAVE_AT | 237 | SAVE_AT |
252 | .set push | 238 | .set push |
253 | .set noreorder | 239 | .set noreorder |
254 | #ifdef CONFIG_MIPS_MT_SMTC | ||
255 | /* | ||
256 | * To keep from blindly blocking *all* interrupts | ||
257 | * during service by SMTC kernel, we also want to | ||
258 | * pass the IM value to be cleared. | ||
259 | */ | ||
260 | FEXPORT(except_vec_vi_mori) | ||
261 | ori a0, $0, 0 | ||
262 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
263 | PTR_LA v1, except_vec_vi_handler | 240 | PTR_LA v1, except_vec_vi_handler |
264 | FEXPORT(except_vec_vi_lui) | 241 | FEXPORT(except_vec_vi_lui) |
265 | lui v0, 0 /* Patched */ | 242 | lui v0, 0 /* Patched */ |
@@ -277,37 +254,10 @@ EXPORT(except_vec_vi_end) | |||
277 | NESTED(except_vec_vi_handler, 0, sp) | 254 | NESTED(except_vec_vi_handler, 0, sp) |
278 | SAVE_TEMP | 255 | SAVE_TEMP |
279 | SAVE_STATIC | 256 | SAVE_STATIC |
280 | #ifdef CONFIG_MIPS_MT_SMTC | ||
281 | /* | ||
282 | * SMTC has an interesting problem that interrupts are level-triggered, | ||
283 | * and the CLI macro will clear EXL, potentially causing a duplicate | ||
284 | * interrupt service invocation. So we need to clear the associated | ||
285 | * IM bit of Status prior to doing CLI, and restore it after the | ||
286 | * service routine has been invoked - we must assume that the | ||
287 | * service routine will have cleared the state, and any active | ||
288 | * level represents a new or otherwise unserviced event... | ||
289 | */ | ||
290 | mfc0 t1, CP0_STATUS | ||
291 | and t0, a0, t1 | ||
292 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | ||
293 | mfc0 t2, CP0_TCCONTEXT | ||
294 | or t2, t0, t2 | ||
295 | mtc0 t2, CP0_TCCONTEXT | ||
296 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ | ||
297 | xor t1, t1, t0 | ||
298 | mtc0 t1, CP0_STATUS | ||
299 | _ehb | ||
300 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
301 | CLI | 257 | CLI |
302 | #ifdef CONFIG_TRACE_IRQFLAGS | 258 | #ifdef CONFIG_TRACE_IRQFLAGS |
303 | move s0, v0 | 259 | move s0, v0 |
304 | #ifdef CONFIG_MIPS_MT_SMTC | ||
305 | move s1, a0 | ||
306 | #endif | ||
307 | TRACE_IRQS_OFF | 260 | TRACE_IRQS_OFF |
308 | #ifdef CONFIG_MIPS_MT_SMTC | ||
309 | move a0, s1 | ||
310 | #endif | ||
311 | move v0, s0 | 261 | move v0, s0 |
312 | #endif | 262 | #endif |
313 | 263 | ||
@@ -496,9 +446,6 @@ NESTED(nmi_handler, PT_SIZE, sp) | |||
496 | 446 | ||
497 | .align 5 | 447 | .align 5 |
498 | LEAF(handle_ri_rdhwr_vivt) | 448 | LEAF(handle_ri_rdhwr_vivt) |
499 | #ifdef CONFIG_MIPS_MT_SMTC | ||
500 | PANIC_PIC("handle_ri_rdhwr_vivt called") | ||
501 | #else | ||
502 | .set push | 449 | .set push |
503 | .set noat | 450 | .set noat |
504 | .set noreorder | 451 | .set noreorder |
@@ -517,7 +464,6 @@ NESTED(nmi_handler, PT_SIZE, sp) | |||
517 | .set pop | 464 | .set pop |
518 | bltz k1, handle_ri /* slow path */ | 465 | bltz k1, handle_ri /* slow path */ |
519 | /* fall thru */ | 466 | /* fall thru */ |
520 | #endif | ||
521 | END(handle_ri_rdhwr_vivt) | 467 | END(handle_ri_rdhwr_vivt) |
522 | 468 | ||
523 | LEAF(handle_ri_rdhwr) | 469 | LEAF(handle_ri_rdhwr) |
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index e712dcf18b2d..95afd663cd45 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S | |||
@@ -35,33 +35,12 @@ | |||
35 | */ | 35 | */ |
36 | .macro setup_c0_status set clr | 36 | .macro setup_c0_status set clr |
37 | .set push | 37 | .set push |
38 | #ifdef CONFIG_MIPS_MT_SMTC | ||
39 | /* | ||
40 | * For SMTC, we need to set privilege and disable interrupts only for | ||
41 | * the current TC, using the TCStatus register. | ||
42 | */ | ||
43 | mfc0 t0, CP0_TCSTATUS | ||
44 | /* Fortunately CU 0 is in the same place in both registers */ | ||
45 | /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ | ||
46 | li t1, ST0_CU0 | 0x08001c00 | ||
47 | or t0, t1 | ||
48 | /* Clear TKSU, leave IXMT */ | ||
49 | xori t0, 0x00001800 | ||
50 | mtc0 t0, CP0_TCSTATUS | ||
51 | _ehb | ||
52 | /* We need to leave the global IE bit set, but clear EXL...*/ | ||
53 | mfc0 t0, CP0_STATUS | ||
54 | or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr | ||
55 | xor t0, ST0_EXL | ST0_ERL | \clr | ||
56 | mtc0 t0, CP0_STATUS | ||
57 | #else | ||
58 | mfc0 t0, CP0_STATUS | 38 | mfc0 t0, CP0_STATUS |
59 | or t0, ST0_CU0|\set|0x1f|\clr | 39 | or t0, ST0_CU0|\set|0x1f|\clr |
60 | xor t0, 0x1f|\clr | 40 | xor t0, 0x1f|\clr |
61 | mtc0 t0, CP0_STATUS | 41 | mtc0 t0, CP0_STATUS |
62 | .set noreorder | 42 | .set noreorder |
63 | sll zero,3 # ehb | 43 | sll zero,3 # ehb |
64 | #endif | ||
65 | .set pop | 44 | .set pop |
66 | .endm | 45 | .endm |
67 | 46 | ||
@@ -115,24 +94,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point | |||
115 | jr t0 | 94 | jr t0 |
116 | 0: | 95 | 0: |
117 | 96 | ||
118 | #ifdef CONFIG_MIPS_MT_SMTC | ||
119 | /* | ||
120 | * In SMTC kernel, "CLI" is thread-specific, in TCStatus. | ||
121 | * We still need to enable interrupts globally in Status, | ||
122 | * and clear EXL/ERL. | ||
123 | * | ||
124 | * TCContext is used to track interrupt levels under | ||
125 | * service in SMTC kernel. Clear for boot TC before | ||
126 | * allowing any interrupts. | ||
127 | */ | ||
128 | mtc0 zero, CP0_TCCONTEXT | ||
129 | |||
130 | mfc0 t0, CP0_STATUS | ||
131 | ori t0, t0, 0xff1f | ||
132 | xori t0, t0, 0x001e | ||
133 | mtc0 t0, CP0_STATUS | ||
134 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
135 | |||
136 | PTR_LA t0, __bss_start # clear .bss | 97 | PTR_LA t0, __bss_start # clear .bss |
137 | LONG_S zero, (t0) | 98 | LONG_S zero, (t0) |
138 | PTR_LA t1, __bss_stop - LONGSIZE | 99 | PTR_LA t1, __bss_stop - LONGSIZE |
@@ -164,25 +125,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point | |||
164 | * function after setting up the stack and gp registers. | 125 | * function after setting up the stack and gp registers. |
165 | */ | 126 | */ |
166 | NESTED(smp_bootstrap, 16, sp) | 127 | NESTED(smp_bootstrap, 16, sp) |
167 | #ifdef CONFIG_MIPS_MT_SMTC | ||
168 | /* | ||
169 | * Read-modify-writes of Status must be atomic, and this | ||
170 | * is one case where CLI is invoked without EXL being | ||
171 | * necessarily set. The CLI and setup_c0_status will | ||
172 | * in fact be redundant for all but the first TC of | ||
173 | * each VPE being booted. | ||
174 | */ | ||
175 | DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */ | ||
176 | jal mips_ihb | ||
177 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
178 | smp_slave_setup | 128 | smp_slave_setup |
179 | setup_c0_status_sec | 129 | setup_c0_status_sec |
180 | #ifdef CONFIG_MIPS_MT_SMTC | ||
181 | andi t2, t2, VPECONTROL_TE | ||
182 | beqz t2, 2f | ||
183 | EMT # emt | ||
184 | 2: | ||
185 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
186 | j start_secondary | 130 | j start_secondary |
187 | END(smp_bootstrap) | 131 | END(smp_bootstrap) |
188 | #endif /* CONFIG_SMP */ | 132 | #endif /* CONFIG_SMP */ |
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 2b91fe80c436..50b364897dda 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c | |||
@@ -42,9 +42,6 @@ static struct irq_chip i8259A_chip = { | |||
42 | .irq_disable = disable_8259A_irq, | 42 | .irq_disable = disable_8259A_irq, |
43 | .irq_unmask = enable_8259A_irq, | 43 | .irq_unmask = enable_8259A_irq, |
44 | .irq_mask_ack = mask_and_ack_8259A, | 44 | .irq_mask_ack = mask_and_ack_8259A, |
45 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | ||
46 | .irq_set_affinity = plat_set_irq_affinity, | ||
47 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | ||
48 | }; | 45 | }; |
49 | 46 | ||
50 | /* | 47 | /* |
@@ -180,7 +177,6 @@ handle_real_irq: | |||
180 | outb(cached_master_mask, PIC_MASTER_IMR); | 177 | outb(cached_master_mask, PIC_MASTER_IMR); |
181 | outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ | 178 | outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ |
182 | } | 179 | } |
183 | smtc_im_ack_irq(irq); | ||
184 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 180 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
185 | return; | 181 | return; |
186 | 182 | ||
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 5e3b653f5d9e..c4ceccfa3828 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c | |||
@@ -229,18 +229,8 @@ void __init check_wait(void) | |||
229 | } | 229 | } |
230 | } | 230 | } |
231 | 231 | ||
232 | static void smtc_idle_hook(void) | ||
233 | { | ||
234 | #ifdef CONFIG_MIPS_MT_SMTC | ||
235 | void smtc_idle_loop_hook(void); | ||
236 | |||
237 | smtc_idle_loop_hook(); | ||
238 | #endif | ||
239 | } | ||
240 | |||
241 | void arch_cpu_idle(void) | 232 | void arch_cpu_idle(void) |
242 | { | 233 | { |
243 | smtc_idle_hook(); | ||
244 | if (cpu_wait) | 234 | if (cpu_wait) |
245 | cpu_wait(); | 235 | cpu_wait(); |
246 | else | 236 | else |
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index fab40f7d2e03..4858642d543d 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c | |||
@@ -53,13 +53,9 @@ static inline void unmask_msc_irq(struct irq_data *d) | |||
53 | */ | 53 | */ |
54 | static void level_mask_and_ack_msc_irq(struct irq_data *d) | 54 | static void level_mask_and_ack_msc_irq(struct irq_data *d) |
55 | { | 55 | { |
56 | unsigned int irq = d->irq; | ||
57 | |||
58 | mask_msc_irq(d); | 56 | mask_msc_irq(d); |
59 | if (!cpu_has_veic) | 57 | if (!cpu_has_veic) |
60 | MSCIC_WRITE(MSC01_IC_EOI, 0); | 58 | MSCIC_WRITE(MSC01_IC_EOI, 0); |
61 | /* This actually needs to be a call into platform code */ | ||
62 | smtc_im_ack_irq(irq); | ||
63 | } | 59 | } |
64 | 60 | ||
65 | /* | 61 | /* |
@@ -78,7 +74,6 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d) | |||
78 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); | 74 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); |
79 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); | 75 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); |
80 | } | 76 | } |
81 | smtc_im_ack_irq(irq); | ||
82 | } | 77 | } |
83 | 78 | ||
84 | /* | 79 | /* |
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index d1fea7a054be..5024fa39b861 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c | |||
@@ -73,7 +73,6 @@ void free_irqno(unsigned int irq) | |||
73 | */ | 73 | */ |
74 | void ack_bad_irq(unsigned int irq) | 74 | void ack_bad_irq(unsigned int irq) |
75 | { | 75 | { |
76 | smtc_im_ack_irq(irq); | ||
77 | printk("unexpected IRQ # %d\n", irq); | 76 | printk("unexpected IRQ # %d\n", irq); |
78 | } | 77 | } |
79 | 78 | ||
@@ -142,23 +141,7 @@ void __irq_entry do_IRQ(unsigned int irq) | |||
142 | { | 141 | { |
143 | irq_enter(); | 142 | irq_enter(); |
144 | check_stack_overflow(); | 143 | check_stack_overflow(); |
145 | if (!smtc_handle_on_other_cpu(irq)) | ||
146 | generic_handle_irq(irq); | ||
147 | irq_exit(); | ||
148 | } | ||
149 | |||
150 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | ||
151 | /* | ||
152 | * To avoid inefficient and in some cases pathological re-checking of | ||
153 | * IRQ affinity, we have this variant that skips the affinity check. | ||
154 | */ | ||
155 | |||
156 | void __irq_entry do_IRQ_no_affinity(unsigned int irq) | ||
157 | { | ||
158 | irq_enter(); | ||
159 | smtc_im_backstop(irq); | ||
160 | generic_handle_irq(irq); | 144 | generic_handle_irq(irq); |
161 | irq_exit(); | 145 | irq_exit(); |
162 | } | 146 | } |
163 | 147 | ||
164 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | ||
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index cb098628aee8..362bb3707e62 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels | 2 | * General MIPS MT support routines, usable in AP/SP and SMVP. |
3 | * Copyright (C) 2005 Mips Technologies, Inc | 3 | * Copyright (C) 2005 Mips Technologies, Inc |
4 | */ | 4 | */ |
5 | #include <linux/cpu.h> | 5 | #include <linux/cpu.h> |
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index 6ded9bd1489c..88b1ef5f868a 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels | 2 | * General MIPS MT support routines, usable in AP/SP and SMVP. |
3 | * Copyright (C) 2005 Mips Technologies, Inc | 3 | * Copyright (C) 2005 Mips Technologies, Inc |
4 | */ | 4 | */ |
5 | 5 | ||
@@ -57,9 +57,6 @@ void mips_mt_regdump(unsigned long mvpctl) | |||
57 | int tc; | 57 | int tc; |
58 | unsigned long haltval; | 58 | unsigned long haltval; |
59 | unsigned long tcstatval; | 59 | unsigned long tcstatval; |
60 | #ifdef CONFIG_MIPS_MT_SMTC | ||
61 | void smtc_soft_dump(void); | ||
62 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
63 | 60 | ||
64 | local_irq_save(flags); | 61 | local_irq_save(flags); |
65 | vpflags = dvpe(); | 62 | vpflags = dvpe(); |
@@ -116,9 +113,6 @@ void mips_mt_regdump(unsigned long mvpctl) | |||
116 | if (!haltval) | 113 | if (!haltval) |
117 | write_tc_c0_tchalt(0); | 114 | write_tc_c0_tchalt(0); |
118 | } | 115 | } |
119 | #ifdef CONFIG_MIPS_MT_SMTC | ||
120 | smtc_soft_dump(); | ||
121 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
122 | printk("===========================\n"); | 116 | printk("===========================\n"); |
123 | evpe(vpflags); | 117 | evpe(vpflags); |
124 | local_irq_restore(flags); | 118 | local_irq_restore(flags); |
@@ -295,21 +289,11 @@ void mips_mt_set_cpuoptions(void) | |||
295 | 289 | ||
296 | void mt_cflush_lockdown(void) | 290 | void mt_cflush_lockdown(void) |
297 | { | 291 | { |
298 | #ifdef CONFIG_MIPS_MT_SMTC | ||
299 | void smtc_cflush_lockdown(void); | ||
300 | |||
301 | smtc_cflush_lockdown(); | ||
302 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
303 | /* FILL IN VSMP and AP/SP VERSIONS HERE */ | 292 | /* FILL IN VSMP and AP/SP VERSIONS HERE */ |
304 | } | 293 | } |
305 | 294 | ||
306 | void mt_cflush_release(void) | 295 | void mt_cflush_release(void) |
307 | { | 296 | { |
308 | #ifdef CONFIG_MIPS_MT_SMTC | ||
309 | void smtc_cflush_release(void); | ||
310 | |||
311 | smtc_cflush_release(); | ||
312 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
313 | /* FILL IN VSMP and AP/SP VERSIONS HERE */ | 297 | /* FILL IN VSMP and AP/SP VERSIONS HERE */ |
314 | } | 298 | } |
315 | 299 | ||
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 60e39dc7f1eb..0a1ec0f3beff 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -140,13 +140,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
140 | */ | 140 | */ |
141 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); | 141 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); |
142 | 142 | ||
143 | #ifdef CONFIG_MIPS_MT_SMTC | ||
144 | /* | ||
145 | * SMTC restores TCStatus after Status, and the CU bits | ||
146 | * are aliased there. | ||
147 | */ | ||
148 | childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); | ||
149 | #endif | ||
150 | clear_tsk_thread_flag(p, TIF_USEDFPU); | 143 | clear_tsk_thread_flag(p, TIF_USEDFPU); |
151 | 144 | ||
152 | #ifdef CONFIG_MIPS_MT_FPAFF | 145 | #ifdef CONFIG_MIPS_MT_FPAFF |
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index abacac7c33ef..547c522964de 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S | |||
@@ -87,18 +87,6 @@ | |||
87 | 87 | ||
88 | PTR_ADDU t0, $28, _THREAD_SIZE - 32 | 88 | PTR_ADDU t0, $28, _THREAD_SIZE - 32 |
89 | set_saved_sp t0, t1, t2 | 89 | set_saved_sp t0, t1, t2 |
90 | #ifdef CONFIG_MIPS_MT_SMTC | ||
91 | /* Read-modify-writes of Status must be atomic on a VPE */ | ||
92 | mfc0 t2, CP0_TCSTATUS | ||
93 | ori t1, t2, TCSTATUS_IXMT | ||
94 | mtc0 t1, CP0_TCSTATUS | ||
95 | andi t2, t2, TCSTATUS_IXMT | ||
96 | _ehb | ||
97 | DMT 8 # dmt t0 | ||
98 | move t1,ra | ||
99 | jal mips_ihb | ||
100 | move ra,t1 | ||
101 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
102 | mfc0 t1, CP0_STATUS /* Do we really need this? */ | 90 | mfc0 t1, CP0_STATUS /* Do we really need this? */ |
103 | li a3, 0xff01 | 91 | li a3, 0xff01 |
104 | and t1, a3 | 92 | and t1, a3 |
@@ -107,18 +95,6 @@ | |||
107 | and a2, a3 | 95 | and a2, a3 |
108 | or a2, t1 | 96 | or a2, t1 |
109 | mtc0 a2, CP0_STATUS | 97 | mtc0 a2, CP0_STATUS |
110 | #ifdef CONFIG_MIPS_MT_SMTC | ||
111 | _ehb | ||
112 | andi t0, t0, VPECONTROL_TE | ||
113 | beqz t0, 1f | ||
114 | emt | ||
115 | 1: | ||
116 | mfc0 t1, CP0_TCSTATUS | ||
117 | xori t1, t1, TCSTATUS_IXMT | ||
118 | or t1, t1, t2 | ||
119 | mtc0 t1, CP0_TCSTATUS | ||
120 | _ehb | ||
121 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
122 | move v0, a0 | 98 | move v0, a0 |
123 | jr ra | 99 | jr ra |
124 | END(resume) | 100 | END(resume) |
@@ -176,19 +152,10 @@ LEAF(_restore_msa) | |||
176 | #define FPU_DEFAULT 0x00000000 | 152 | #define FPU_DEFAULT 0x00000000 |
177 | 153 | ||
178 | LEAF(_init_fpu) | 154 | LEAF(_init_fpu) |
179 | #ifdef CONFIG_MIPS_MT_SMTC | ||
180 | /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */ | ||
181 | mfc0 t0, CP0_TCSTATUS | ||
182 | /* Bit position is the same for Status, TCStatus */ | ||
183 | li t1, ST0_CU1 | ||
184 | or t0, t1 | ||
185 | mtc0 t0, CP0_TCSTATUS | ||
186 | #else /* Normal MIPS CU1 enable */ | ||
187 | mfc0 t0, CP0_STATUS | 155 | mfc0 t0, CP0_STATUS |
188 | li t1, ST0_CU1 | 156 | li t1, ST0_CU1 |
189 | or t0, t1 | 157 | or t0, t1 |
190 | mtc0 t0, CP0_STATUS | 158 | mtc0 t0, CP0_STATUS |
191 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
192 | enable_fpu_hazard | 159 | enable_fpu_hazard |
193 | 160 | ||
194 | li t1, FPU_DEFAULT | 161 | li t1, FPU_DEFAULT |
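As I read the resume() hunk above, the li a3, 0xff01 sequence is the classic field-merge idiom: the live interrupt bits (IM7..IM0 in bits 15:8 plus IE in bit 0, hence mask 0xff01) are kept from the outgoing CPU state, while everything else comes from the incoming thread's saved Status. Restated in C:

static unsigned int merge_status(unsigned int saved, unsigned int live,
				 unsigned int mask)
{
	/* bits in 'mask' come from 'live', the rest from 'saved' */
	return (saved & ~mask) | (live & mask);
}

/* resume() effectively computes:
 *	new_status = merge_status(thread->status, read_c0_status(), 0xff01);
 * before writing CP0_STATUS for the incoming thread. */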
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c index 9c1aca00fd54..5a66b975989e 100644 --- a/arch/mips/kernel/rtlx-mt.c +++ b/arch/mips/kernel/rtlx-mt.c | |||
@@ -36,7 +36,6 @@ static irqreturn_t rtlx_interrupt(int irq, void *dev_id) | |||
36 | unsigned long flags; | 36 | unsigned long flags; |
37 | int i; | 37 | int i; |
38 | 38 | ||
39 | /* Ought not to be strictly necessary for SMTC builds */ | ||
40 | local_irq_save(flags); | 39 | local_irq_save(flags); |
41 | vpeflags = dvpe(); | 40 | vpeflags = dvpe(); |
42 | set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); | 41 | set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); |
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index 3ef55fb7ac03..64d06f6a9adf 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c | |||
@@ -49,14 +49,11 @@ static void cmp_init_secondary(void) | |||
49 | 49 | ||
50 | /* Enable per-cpu interrupts: platform specific */ | 50 | /* Enable per-cpu interrupts: platform specific */ |
51 | 51 | ||
52 | #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) | 52 | #ifdef CONFIG_MIPS_MT_SMP |
53 | if (cpu_has_mipsmt) | 53 | if (cpu_has_mipsmt) |
54 | c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & | 54 | c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & |
55 | TCBIND_CURVPE; | 55 | TCBIND_CURVPE; |
56 | #endif | 56 | #endif |
57 | #ifdef CONFIG_MIPS_MT_SMTC | ||
58 | c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; | ||
59 | #endif | ||
60 | } | 57 | } |
61 | 58 | ||
62 | static void cmp_smp_finish(void) | 59 | static void cmp_smp_finish(void) |
@@ -135,10 +132,6 @@ void __init cmp_smp_setup(void) | |||
135 | unsigned int mvpconf0 = read_c0_mvpconf0(); | 132 | unsigned int mvpconf0 = read_c0_mvpconf0(); |
136 | 133 | ||
137 | nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | 134 | nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; |
138 | #elif defined(CONFIG_MIPS_MT_SMTC) | ||
139 | unsigned int mvpconf0 = read_c0_mvpconf0(); | ||
140 | |||
141 | nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||
142 | #endif | 135 | #endif |
143 | smp_num_siblings = nvpe; | 136 | smp_num_siblings = nvpe; |
144 | } | 137 | } |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 0a022ee33b2a..35bb05a13f05 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -43,10 +43,6 @@ | |||
43 | #include <asm/time.h> | 43 | #include <asm/time.h> |
44 | #include <asm/setup.h> | 44 | #include <asm/setup.h> |
45 | 45 | ||
46 | #ifdef CONFIG_MIPS_MT_SMTC | ||
47 | #include <asm/mipsmtregs.h> | ||
48 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
49 | |||
50 | volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ | 46 | volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ |
51 | 47 | ||
52 | int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ | 48 | int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ |
@@ -102,12 +98,6 @@ asmlinkage void start_secondary(void) | |||
102 | { | 98 | { |
103 | unsigned int cpu; | 99 | unsigned int cpu; |
104 | 100 | ||
105 | #ifdef CONFIG_MIPS_MT_SMTC | ||
106 | /* Only do cpu_probe for first TC of CPU */ | ||
107 | if ((read_c0_tcbind() & TCBIND_CURTC) != 0) | ||
108 | __cpu_name[smp_processor_id()] = __cpu_name[0]; | ||
109 | else | ||
110 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
111 | cpu_probe(); | 101 | cpu_probe(); |
112 | cpu_report(); | 102 | cpu_report(); |
113 | per_cpu_trap_init(false); | 103 | per_cpu_trap_init(false); |
@@ -238,13 +228,10 @@ static void flush_tlb_mm_ipi(void *mm) | |||
238 | * o collapses to normal function call on UP kernels | 228 | * o collapses to normal function call on UP kernels |
239 | * o collapses to normal function call on systems with a single shared | 229 | * o collapses to normal function call on systems with a single shared |
240 | * primary cache. | 230 | * primary cache. |
241 | * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core. | ||
242 | */ | 231 | */ |
243 | static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) | 232 | static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) |
244 | { | 233 | { |
245 | #ifndef CONFIG_MIPS_MT_SMTC | ||
246 | smp_call_function(func, info, 1); | 234 | smp_call_function(func, info, 1); |
247 | #endif | ||
248 | } | 235 | } |
249 | 236 | ||
250 | static inline void smp_on_each_tlb(void (*func) (void *info), void *info) | 237 | static inline void smp_on_each_tlb(void (*func) (void *info), void *info) |
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S deleted file mode 100644 index 2866863a39df..000000000000 --- a/arch/mips/kernel/smtc-asm.S +++ /dev/null | |||
@@ -1,133 +0,0 @@ | |||
1 | /* | ||
2 | * Assembly Language Functions for MIPS MT SMTC support | ||
3 | */ | ||
4 | |||
5 | /* | ||
6 | * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */ | ||
7 | |||
8 | #include <asm/regdef.h> | ||
9 | #include <asm/asmmacro.h> | ||
10 | #include <asm/stackframe.h> | ||
11 | #include <asm/irqflags.h> | ||
12 | |||
13 | /* | ||
14 | * "Software Interrupt" linkage. | ||
15 | * | ||
16 | * This is invoked when an "Interrupt" is sent from one TC to another, | ||
17 | * where the TC to be interrupted is halted, has its Restart address | ||
18 | * and Status values saved by the "remote control" thread, then modified | ||
19 | * to cause execution to begin here, in kernel mode. This code then | ||
20 | * disguises the TC state as that of an exception and transfers | ||
21 | * control to the general exception or vectored interrupt handler. | ||
22 | */ | ||
23 | .set noreorder | ||
24 | |||
25 | /* | ||
26 | The __smtc_ipi_vector would use k0 and k1 as temporaries and | ||
27 | 1) Set EXL (this is per-VPE, so this can't be done by proxy!) | ||
28 | 2) Restore the K/CU and IXMT bits to the pre "exception" state | ||
29 | (EXL means no interrupts and access to the kernel map). | ||
30 | 3) Set EPC to be the saved value of TCRestart. | ||
31 | 4) Jump to the exception handler entry point passed by the sender. | ||
32 | |||
33 | CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED?? | ||
34 | */ | ||
35 | |||
36 | /* | ||
37 | * Reviled and slandered vision: Set EXL and restore K/CU/IXMT | ||
38 | * state of pre-halt thread, then save everything and call | ||
39 | * through some function pointer to imaginary_exception, which | ||
40 | * will parse a register value or memory message queue to | ||
41 | * deliver things like interprocessor interrupts. On return | ||
42 | * from that function, jump to the global ret_from_irq code | ||
43 | * to invoke the scheduler and return as appropriate. | ||
44 | */ | ||
45 | |||
46 | #define PT_PADSLOT4 (PT_R0-8) | ||
47 | #define PT_PADSLOT5 (PT_R0-4) | ||
48 | |||
49 | .text | ||
50 | .align 5 | ||
51 | FEXPORT(__smtc_ipi_vector) | ||
52 | #ifdef CONFIG_CPU_MICROMIPS | ||
53 | nop | ||
54 | #endif | ||
55 | .set noat | ||
56 | /* Disable thread scheduling to make Status update atomic */ | ||
57 | DMT 27 # dmt k1 | ||
58 | _ehb | ||
59 | /* Set EXL */ | ||
60 | mfc0 k0,CP0_STATUS | ||
61 | ori k0,k0,ST0_EXL | ||
62 | mtc0 k0,CP0_STATUS | ||
63 | _ehb | ||
64 | /* Thread scheduling now inhibited by EXL. Restore TE state. */ | ||
65 | andi k1,k1,VPECONTROL_TE | ||
66 | beqz k1,1f | ||
67 | emt | ||
68 | 1: | ||
69 | /* | ||
70 | * The IPI sender has put some information on the anticipated | ||
71 | * kernel stack frame. If we were in user mode, this will be | ||
72 | * built above the saved kernel SP. If we were already in the | ||
73 | * kernel, it will be built above the current CPU SP. | ||
74 | * | ||
75 | * Were we in kernel mode, as indicated by CU0? | ||
76 | */ | ||
77 | sll k1,k0,3 | ||
78 | .set noreorder | ||
79 | bltz k1,2f | ||
80 | move k1,sp | ||
81 | .set reorder | ||
82 | /* | ||
83 | * If previously in user mode, set CU0 and use kernel stack. | ||
84 | */ | ||
85 | li k1,ST0_CU0 | ||
86 | or k1,k1,k0 | ||
87 | mtc0 k1,CP0_STATUS | ||
88 | _ehb | ||
89 | get_saved_sp | ||
90 | /* Interrupting TC will have pre-set values in slots in the new frame */ | ||
91 | 2: subu k1,k1,PT_SIZE | ||
92 | /* Load TCStatus Value */ | ||
93 | lw k0,PT_TCSTATUS(k1) | ||
94 | /* Write it to TCStatus to restore CU/KSU/IXMT state */ | ||
95 | mtc0 k0,$2,1 | ||
96 | _ehb | ||
97 | lw k0,PT_EPC(k1) | ||
98 | mtc0 k0,CP0_EPC | ||
99 | /* Save all will redundantly recompute the SP, but use it for now */ | ||
100 | SAVE_ALL | ||
101 | CLI | ||
102 | TRACE_IRQS_OFF | ||
103 | /* Function to be invoked passed stack pad slot 5 */ | ||
104 | lw t0,PT_PADSLOT5(sp) | ||
105 | /* Argument from sender passed in stack pad slot 4 */ | ||
106 | lw a0,PT_PADSLOT4(sp) | ||
107 | LONG_L s0, TI_REGS($28) | ||
108 | LONG_S sp, TI_REGS($28) | ||
109 | PTR_LA ra, ret_from_irq | ||
110 | jr t0 | ||
111 | |||
112 | /* | ||
113 | * Called from idle loop to provoke processing of queued IPIs | ||
114 | * First IPI message in queue passed as argument. | ||
115 | */ | ||
116 | |||
117 | LEAF(self_ipi) | ||
118 | /* Before anything else, block interrupts */ | ||
119 | mfc0 t0,CP0_TCSTATUS | ||
120 | ori t1,t0,TCSTATUS_IXMT | ||
121 | mtc0 t1,CP0_TCSTATUS | ||
122 | _ehb | ||
123 | /* We know we're in kernel mode, so prepare stack frame */ | ||
124 | subu t1,sp,PT_SIZE | ||
125 | sw ra,PT_EPC(t1) | ||
126 | sw a0,PT_PADSLOT4(t1) | ||
127 | la t2,ipi_decode | ||
128 | sw t2,PT_PADSLOT5(t1) | ||
129 | /* Save pre-disable value of TCStatus */ | ||
130 | sw t0,PT_TCSTATUS(t1) | ||
131 | j __smtc_ipi_vector | ||
132 | nop | ||
133 | END(self_ipi) | ||
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c deleted file mode 100644 index 38635a996cbf..000000000000 --- a/arch/mips/kernel/smtc-proc.c +++ /dev/null | |||
@@ -1,102 +0,0 @@ | |||
1 | /* | ||
2 | * /proc hooks for SMTC kernel | ||
3 | * Copyright (C) 2005 Mips Technologies, Inc | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/cpumask.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | |||
11 | #include <asm/cpu.h> | ||
12 | #include <asm/processor.h> | ||
13 | #include <linux/atomic.h> | ||
14 | #include <asm/hardirq.h> | ||
15 | #include <asm/mmu_context.h> | ||
16 | #include <asm/mipsregs.h> | ||
17 | #include <asm/cacheflush.h> | ||
18 | #include <linux/proc_fs.h> | ||
19 | #include <linux/seq_file.h> | ||
20 | |||
21 | #include <asm/smtc_proc.h> | ||
22 | |||
23 | /* | ||
24 | * /proc diagnostic and statistics hooks | ||
25 | */ | ||
26 | |||
27 | /* | ||
28 | * Statistics gathered | ||
29 | */ | ||
30 | unsigned long selfipis[NR_CPUS]; | ||
31 | |||
32 | struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; | ||
33 | |||
34 | atomic_t smtc_fpu_recoveries; | ||
35 | |||
36 | static int smtc_proc_show(struct seq_file *m, void *v) | ||
37 | { | ||
38 | int i; | ||
39 | extern unsigned long ebase; | ||
40 | |||
41 | seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status); | ||
42 | seq_printf(m, "Config7: 0x%08x\n", read_c0_config7()); | ||
43 | seq_printf(m, "EBASE: 0x%08lx\n", ebase); | ||
44 | seq_printf(m, "Counter Interrupts taken per CPU (TC)\n"); | ||
45 | for (i=0; i < NR_CPUS; i++) | ||
46 | seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints); | ||
47 | seq_printf(m, "Self-IPIs by CPU:\n"); | ||
48 | for(i = 0; i < NR_CPUS; i++) | ||
49 | seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis); | ||
50 | seq_printf(m, "%d Recoveries of \"stolen\" FPU\n", | ||
51 | atomic_read(&smtc_fpu_recoveries)); | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static int smtc_proc_open(struct inode *inode, struct file *file) | ||
56 | { | ||
57 | return single_open(file, smtc_proc_show, NULL); | ||
58 | } | ||
59 | |||
60 | static const struct file_operations smtc_proc_fops = { | ||
61 | .open = smtc_proc_open, | ||
62 | .read = seq_read, | ||
63 | .llseek = seq_lseek, | ||
64 | .release = single_release, | ||
65 | }; | ||
66 | |||
67 | void init_smtc_stats(void) | ||
68 | { | ||
69 | int i; | ||
70 | |||
71 | for (i=0; i<NR_CPUS; i++) { | ||
72 | smtc_cpu_stats[i].timerints = 0; | ||
73 | smtc_cpu_stats[i].selfipis = 0; | ||
74 | } | ||
75 | |||
76 | atomic_set(&smtc_fpu_recoveries, 0); | ||
77 | |||
78 | proc_create("smtc", 0444, NULL, &smtc_proc_fops); | ||
79 | } | ||
80 | |||
81 | static int proc_cpuinfo_chain_call(struct notifier_block *nfb, | ||
82 | unsigned long action_unused, void *data) | ||
83 | { | ||
84 | struct proc_cpuinfo_notifier_args *pcn = data; | ||
85 | struct seq_file *m = pcn->m; | ||
86 | unsigned long n = pcn->n; | ||
87 | |||
88 | if (!cpu_has_mipsmt) | ||
89 | return NOTIFY_OK; | ||
90 | |||
91 | seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id); | ||
92 | seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id); | ||
93 | |||
94 | return NOTIFY_OK; | ||
95 | } | ||
96 | |||
97 | static int __init proc_cpuinfo_notifier_init(void) | ||
98 | { | ||
99 | return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0); | ||
100 | } | ||
101 | |||
102 | subsys_initcall(proc_cpuinfo_notifier_init); | ||
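The notifier above makes each /proc/cpuinfo entry on an MT core carry two extra lines, "VPE" and "TC", reporting that CPU's vpe_id and tc_id from cpu_data, per the seq_printf format strings in proc_cpuinfo_chain_call().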
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c deleted file mode 100644 index c1681d65dd5c..000000000000 --- a/arch/mips/kernel/smtc.c +++ /dev/null | |||
@@ -1,1528 +0,0 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or | ||
3 | * modify it under the terms of the GNU General Public License | ||
4 | * as published by the Free Software Foundation; either version 2 | ||
5 | * of the License, or (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
15 | * | ||
16 | * Copyright (C) 2004 Mips Technologies, Inc | ||
17 | * Copyright (C) 2008 Kevin D. Kissell | ||
18 | */ | ||
19 | |||
20 | #include <linux/clockchips.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <linux/cpumask.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/kernel_stat.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/ftrace.h> | ||
29 | #include <linux/slab.h> | ||
30 | |||
31 | #include <asm/cpu.h> | ||
32 | #include <asm/processor.h> | ||
33 | #include <linux/atomic.h> | ||
34 | #include <asm/hardirq.h> | ||
35 | #include <asm/hazards.h> | ||
36 | #include <asm/irq.h> | ||
37 | #include <asm/idle.h> | ||
38 | #include <asm/mmu_context.h> | ||
39 | #include <asm/mipsregs.h> | ||
40 | #include <asm/cacheflush.h> | ||
41 | #include <asm/time.h> | ||
42 | #include <asm/addrspace.h> | ||
43 | #include <asm/smtc.h> | ||
44 | #include <asm/smtc_proc.h> | ||
45 | #include <asm/setup.h> | ||
46 | |||
47 | /* | ||
48 | * SMTC Kernel needs to manipulate low-level CPU interrupt mask | ||
49 | * in do_IRQ. These are passed in setup_irq_smtc() and stored | ||
50 | * in this table. | ||
51 | */ | ||
52 | unsigned long irq_hwmask[NR_IRQS]; | ||
53 | |||
54 | #define LOCK_MT_PRA() \ | ||
55 | local_irq_save(flags); \ | ||
56 | mtflags = dmt() | ||
57 | |||
58 | #define UNLOCK_MT_PRA() \ | ||
59 | emt(mtflags); \ | ||
60 | local_irq_restore(flags) | ||
61 | |||
62 | #define LOCK_CORE_PRA() \ | ||
63 | local_irq_save(flags); \ | ||
64 | mtflags = dvpe() | ||
65 | |||
66 | #define UNLOCK_CORE_PRA() \ | ||
67 | evpe(mtflags); \ | ||
68 | local_irq_restore(flags) | ||
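The four macros above assume local variables named flags and mtflags in the calling function. A minimal usage sketch of the pairing, mirroring the call sites later in this file (the caller name is hypothetical):

	static void example_cross_tc_poke(void)	/* hypothetical caller */
	{
		unsigned long flags;
		int mtflags;

		LOCK_CORE_PRA();	/* local irqs off, then all VPEs stopped via dvpe() */
		/* ... MFTR/MTTR accesses to other TCs' registers are safe here ... */
		UNLOCK_CORE_PRA();	/* VPEs restarted via evpe(), then irqs restored */
	}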
69 | |||
70 | /* | ||
71 | * Data structures purely associated with SMTC parallelism | ||
72 | */ | ||
73 | |||
74 | |||
75 | /* | ||
76 | * Table for tracking ASIDs whose lifetime is prolonged. | ||
77 | */ | ||
78 | |||
79 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | ||
80 | |||
81 | /* | ||
82 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate | ||
83 | */ | ||
84 | |||
85 | #define IPIBUF_PER_CPU 4 | ||
86 | |||
87 | struct smtc_ipi_q IPIQ[NR_CPUS]; | ||
88 | static struct smtc_ipi_q freeIPIq; | ||
89 | |||
90 | |||
91 | /* | ||
92 | * Number of FPU contexts for each VPE | ||
93 | */ | ||
94 | |||
95 | static int smtc_nconf1[MAX_SMTC_VPES]; | ||
96 | |||
97 | |||
98 | /* Forward declarations */ | ||
99 | |||
100 | void ipi_decode(struct smtc_ipi *); | ||
101 | static void post_direct_ipi(int cpu, struct smtc_ipi *pipi); | ||
102 | static void setup_cross_vpe_interrupts(unsigned int nvpe); | ||
103 | void init_smtc_stats(void); | ||
104 | |||
105 | /* Global SMTC Status */ | ||
106 | |||
107 | unsigned int smtc_status; | ||
108 | |||
109 | /* Boot command line configuration overrides */ | ||
110 | |||
111 | static int vpe0limit; | ||
112 | static int ipibuffers; | ||
113 | static int nostlb; | ||
114 | static int asidmask; | ||
115 | unsigned long smtc_asid_mask = 0xff; | ||
116 | |||
117 | static int __init vpe0tcs(char *str) | ||
118 | { | ||
119 | get_option(&str, &vpe0limit); | ||
120 | |||
121 | return 1; | ||
122 | } | ||
123 | |||
124 | static int __init ipibufs(char *str) | ||
125 | { | ||
126 | get_option(&str, &ipibuffers); | ||
127 | return 1; | ||
128 | } | ||
129 | |||
130 | static int __init stlb_disable(char *s) | ||
131 | { | ||
132 | nostlb = 1; | ||
133 | return 1; | ||
134 | } | ||
135 | |||
136 | static int __init asidmask_set(char *str) | ||
137 | { | ||
138 | get_option(&str, &asidmask); | ||
139 | switch (asidmask) { | ||
140 | case 0x1: | ||
141 | case 0x3: | ||
142 | case 0x7: | ||
143 | case 0xf: | ||
144 | case 0x1f: | ||
145 | case 0x3f: | ||
146 | case 0x7f: | ||
147 | case 0xff: | ||
148 | smtc_asid_mask = (unsigned long)asidmask; | ||
149 | break; | ||
150 | default: | ||
151 | printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask); | ||
152 | } | ||
153 | return 1; | ||
154 | } | ||
155 | |||
156 | __setup("vpe0tcs=", vpe0tcs); | ||
157 | __setup("ipibufs=", ipibufs); | ||
158 | __setup("nostlb", stlb_disable); | ||
159 | __setup("asidmask=", asidmask_set); | ||
160 | |||
161 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG | ||
162 | |||
163 | static int hang_trig; | ||
164 | |||
165 | static int __init hangtrig_enable(char *s) | ||
166 | { | ||
167 | hang_trig = 1; | ||
168 | return 1; | ||
169 | } | ||
170 | |||
171 | |||
172 | __setup("hangtrig", hangtrig_enable); | ||
173 | |||
174 | #define DEFAULT_BLOCKED_IPI_LIMIT 32 | ||
175 | |||
176 | static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT; | ||
177 | |||
178 | static int __init tintq(char *str) | ||
179 | { | ||
180 | get_option(&str, &timerq_limit); | ||
181 | return 1; | ||
182 | } | ||
183 | |||
184 | __setup("tintq=", tintq); | ||
185 | |||
186 | static int imstuckcount[MAX_SMTC_VPES][8]; | ||
187 | /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ | ||
188 | static int vpemask[MAX_SMTC_VPES][8] = { | ||
189 | {0, 0, 1, 0, 0, 0, 0, 1}, | ||
190 | {0, 0, 0, 0, 0, 0, 0, 1} | ||
191 | }; | ||
192 | int tcnoprog[NR_CPUS]; | ||
193 | static atomic_t idle_hook_initialized = ATOMIC_INIT(0); | ||
194 | static int clock_hang_reported[NR_CPUS]; | ||
195 | |||
196 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ | ||
197 | |||
198 | /* | ||
199 | * Configure shared TLB - VPC configuration bit must be set by caller | ||
200 | */ | ||
201 | |||
202 | static void smtc_configure_tlb(void) | ||
203 | { | ||
204 | int i, tlbsiz, vpes; | ||
205 | unsigned long mvpconf0; | ||
206 | unsigned long config1val; | ||
207 | |||
208 | /* Set up ASID preservation table */ | ||
209 | for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) { | ||
210 | for(i = 0; i < MAX_SMTC_ASIDS; i++) { | ||
211 | smtc_live_asid[vpes][i] = 0; | ||
212 | } | ||
213 | } | ||
214 | mvpconf0 = read_c0_mvpconf0(); | ||
215 | |||
216 | if ((vpes = ((mvpconf0 & MVPCONF0_PVPE) | ||
217 | >> MVPCONF0_PVPE_SHIFT) + 1) > 1) { | ||
218 | /* If we have multiple VPEs, try to share the TLB */ | ||
219 | if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) { | ||
220 | /* | ||
221 | * If TLB sizing is programmable, shared TLB | ||
222 | * size is the total available complement. | ||
223 | * Otherwise, we have to take the sum of all | ||
224 | * static VPE TLB entries. | ||
225 | */ | ||
226 | if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE) | ||
227 | >> MVPCONF0_PTLBE_SHIFT)) == 0) { | ||
228 | /* | ||
229 | * If there's more than one VPE, there had better | ||
230 | * be more than one TC, because we need one to bind | ||
231 | * to each VPE in turn to be able to read | ||
232 | * its configuration state! | ||
233 | */ | ||
234 | settc(1); | ||
235 | /* Stop the TC from doing anything foolish */ | ||
236 | write_tc_c0_tchalt(TCHALT_H); | ||
237 | mips_ihb(); | ||
238 | /* No need to un-Halt - that happens later anyway */ | ||
239 | for (i=0; i < vpes; i++) { | ||
240 | write_tc_c0_tcbind(i); | ||
241 | /* | ||
242 | * To be 100% sure we're really getting the right | ||
243 | * information, we exit the configuration state | ||
244 | * and do an IHB after each rebinding. | ||
245 | */ | ||
246 | write_c0_mvpcontrol( | ||
247 | read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); | ||
248 | mips_ihb(); | ||
249 | /* | ||
250 | * Only count if the MMU Type indicated is TLB | ||
251 | */ | ||
252 | if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) { | ||
253 | config1val = read_vpe_c0_config1(); | ||
254 | tlbsiz += ((config1val >> 25) & 0x3f) + 1; | ||
255 | } | ||
256 | |||
257 | /* Put core back in configuration state */ | ||
258 | write_c0_mvpcontrol( | ||
259 | read_c0_mvpcontrol() | MVPCONTROL_VPC ); | ||
260 | mips_ihb(); | ||
261 | } | ||
262 | } | ||
263 | write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB); | ||
264 | ehb(); | ||
265 | |||
266 | /* | ||
267 | * Setup kernel data structures to use software total, | ||
268 | * rather than read the per-VPE Config1 value. The values | ||
269 | * for "CPU 0" gets copied to all the other CPUs as part | ||
270 | * of their initialization in smtc_cpu_setup(). | ||
271 | */ | ||
272 | |||
273 | /* MIPS32 limits TLB indices to 64 */ | ||
274 | if (tlbsiz > 64) | ||
275 | tlbsiz = 64; | ||
276 | cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz; | ||
277 | smtc_status |= SMTC_TLB_SHARED; | ||
278 | local_flush_tlb_all(); | ||
279 | |||
280 | printk("TLB of %d entry pairs shared by %d VPEs\n", | ||
281 | tlbsiz, vpes); | ||
282 | } else { | ||
283 | printk("WARNING: TLB Not Sharable on SMTC Boot!\n"); | ||
284 | } | ||
285 | } | ||
286 | } | ||
287 | |||
288 | |||
289 | /* | ||
290 | * Incrementally build the CPU map out of constituent MIPS MT cores, | ||
291 | * using the specified available VPEs and TCs. Platform code needs | ||
292 | * to ensure that each MIPS MT core invokes this routine on reset, | ||
293 | * one at a time(!). | ||
294 | * | ||
295 | * This version of the build_cpu_map and prepare_cpus routines assumes | ||
296 | * that *all* TCs of a MIPS MT core will be used for Linux, and that | ||
297 | * they will be spread across *all* available VPEs (to minimise the | ||
298 | * loss of efficiency due to exception service serialization). | ||
299 | * An improved version would pick up configuration information and | ||
300 | * possibly leave some TCs/VPEs as "slave" processors. | ||
301 | * | ||
302 | * Use c0_MVPConf0 to find out how many TCs are available, setting up | ||
303 | * cpu_possible_mask and the logical/physical mappings. | ||
304 | */ | ||
305 | |||
306 | int __init smtc_build_cpu_map(int start_cpu_slot) | ||
307 | { | ||
308 | int i, ntcs; | ||
309 | |||
310 | /* | ||
311 | * The CPU map isn't actually used for anything at this point, | ||
312 | * so it's not clear what else we should do apart from set | ||
313 | * everything up so that "logical" = "physical". | ||
314 | */ | ||
315 | ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||
316 | for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { | ||
317 | set_cpu_possible(i, true); | ||
318 | __cpu_number_map[i] = i; | ||
319 | __cpu_logical_map[i] = i; | ||
320 | } | ||
321 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
322 | /* Initialize map of CPUs with FPUs */ | ||
323 | cpus_clear(mt_fpu_cpumask); | ||
324 | #endif | ||
325 | |||
326 | /* One of those TCs is the one booting, and not a secondary... */ | ||
327 | printk("%i available secondary CPU TC(s)\n", i - 1); | ||
328 | |||
329 | return i; | ||
330 | } | ||
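The MVPConf0 decode above (repeated in smtc_prepare_cpus() below) is plain mask-and-shift arithmetic on fields that hold "count minus one". A standalone sketch, assuming the MT ASE layout with PTC in bits 7:0 and PVPE in bits 13:10, and a hypothetical register value:

	#include <stdio.h>

	#define MVPCONF0_PTC		0xffUL		/* bits 7:0 */
	#define MVPCONF0_PTC_SHIFT	0
	#define MVPCONF0_PVPE		(0xfUL << 10)	/* bits 13:10 */
	#define MVPCONF0_PVPE_SHIFT	10

	int main(void)
	{
		unsigned long mvpconf0 = 0x38000408;	/* hypothetical value */
		int ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
		int nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;

		printf("%d TCs, %d VPEs\n", ntcs, nvpe);	/* 9 TCs, 2 VPEs */
		return 0;
	}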
331 | |||
332 | /* | ||
333 | * Common setup before any secondaries are started | ||
334 | * Make sure all CPUs are in a sensible state before we boot any of the | ||
335 | * secondaries. | ||
336 | * | ||
337 | * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly | ||
338 | * as possible across the available VPEs. | ||
339 | */ | ||
340 | |||
341 | static void smtc_tc_setup(int vpe, int tc, int cpu) | ||
342 | { | ||
343 | static int cp1contexts[MAX_SMTC_VPES]; | ||
344 | |||
345 | /* | ||
346 | * Make a local copy of the available FPU contexts in order | ||
347 | * to keep track of TCs that can have one. | ||
348 | */ | ||
349 | if (tc == 1) | ||
350 | { | ||
351 | /* | ||
352 | * FIXME: Multi-core SMTC hasn't been tested and the | ||
353 | * maximum number of VPEs may change. | ||
354 | */ | ||
355 | cp1contexts[0] = smtc_nconf1[0] - 1; | ||
356 | cp1contexts[1] = smtc_nconf1[1]; | ||
357 | } | ||
358 | |||
359 | settc(tc); | ||
360 | write_tc_c0_tchalt(TCHALT_H); | ||
361 | mips_ihb(); | ||
362 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() | ||
363 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) | ||
364 | | TCSTATUS_A); | ||
365 | /* | ||
366 | * TCContext gets an offset from the base of the IPIQ array | ||
367 | * to be used in low-level code to detect the presence of | ||
368 | * an active IPI queue. | ||
369 | */ | ||
370 | write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); | ||
371 | |||
372 | /* Bind TC to VPE. */ | ||
373 | write_tc_c0_tcbind(vpe); | ||
374 | |||
375 | /* In general, all TCs should have the same cpu_data indications. */ | ||
376 | memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips)); | ||
377 | |||
378 | /* Check to see if there is a FPU context available for this TC. */ | ||
379 | if (!cp1contexts[vpe]) | ||
380 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; | ||
381 | else | ||
382 | cp1contexts[vpe]--; | ||
383 | |||
384 | /* Store the TC and VPE into the cpu_data structure. */ | ||
385 | cpu_data[cpu].vpe_id = vpe; | ||
386 | cpu_data[cpu].tc_id = tc; | ||
387 | |||
388 | /* FIXME: Multi-core SMTC hasn't been tested, but be prepared. */ | ||
389 | cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; | ||
390 | } | ||
391 | |||
392 | /* | ||
393 | * Tweak to get Count registers synced as closely as possible. The | ||
394 | * value seems good for 34K-class cores. | ||
395 | */ | ||
396 | |||
397 | #define CP0_SKEW 8 | ||
398 | |||
399 | void smtc_prepare_cpus(int cpus) | ||
400 | { | ||
401 | int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; | ||
402 | unsigned long flags; | ||
403 | unsigned long val; | ||
404 | int nipi; | ||
405 | struct smtc_ipi *pipi; | ||
406 | |||
407 | /* disable interrupts so we can disable MT */ | ||
408 | local_irq_save(flags); | ||
409 | /* disable MT so we can configure */ | ||
410 | dvpe(); | ||
411 | dmt(); | ||
412 | |||
413 | spin_lock_init(&freeIPIq.lock); | ||
414 | |||
415 | /* | ||
416 | * We probably don't have as many VPEs as we do SMP "CPUs", | ||
417 | * but it's possible - and in any case we'll never use more! | ||
418 | */ | ||
419 | for (i=0; i<NR_CPUS; i++) { | ||
420 | IPIQ[i].head = IPIQ[i].tail = NULL; | ||
421 | spin_lock_init(&IPIQ[i].lock); | ||
422 | IPIQ[i].depth = 0; | ||
423 | IPIQ[i].resched_flag = 0; /* No reschedules queued initially */ | ||
424 | } | ||
425 | |||
426 | /* cpu_data index starts at zero */ | ||
427 | cpu = 0; | ||
428 | cpu_data[cpu].vpe_id = 0; | ||
429 | cpu_data[cpu].tc_id = 0; | ||
430 | cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; | ||
431 | cpu++; | ||
432 | |||
433 | /* Report on boot-time options */ | ||
434 | mips_mt_set_cpuoptions(); | ||
435 | if (vpelimit > 0) | ||
436 | printk("Limit of %d VPEs set\n", vpelimit); | ||
437 | if (tclimit > 0) | ||
438 | printk("Limit of %d TCs set\n", tclimit); | ||
439 | if (nostlb) { | ||
440 | printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n"); | ||
441 | } | ||
442 | if (asidmask) | ||
443 | printk("ASID mask value override to 0x%x\n", asidmask); | ||
444 | |||
445 | /* Temporary */ | ||
446 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG | ||
447 | if (hang_trig) | ||
448 | printk("Logic Analyser Trigger on suspected TC hang\n"); | ||
449 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ | ||
450 | |||
451 | /* Put MVPE's into 'configuration state' */ | ||
452 | write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC ); | ||
453 | |||
454 | val = read_c0_mvpconf0(); | ||
455 | nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
456 | if (vpelimit > 0 && nvpe > vpelimit) | ||
457 | nvpe = vpelimit; | ||
458 | ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||
459 | if (ntc > NR_CPUS) | ||
460 | ntc = NR_CPUS; | ||
461 | if (tclimit > 0 && ntc > tclimit) | ||
462 | ntc = tclimit; | ||
463 | slop = ntc % nvpe; | ||
464 | for (i = 0; i < nvpe; i++) { | ||
465 | tcpervpe[i] = ntc / nvpe; | ||
466 | if (slop) { | ||
467 | if((slop - i) > 0) tcpervpe[i]++; | ||
468 | } | ||
469 | } | ||
470 | /* Handle command line override for VPE0 */ | ||
471 | if (vpe0limit > ntc) vpe0limit = ntc; | ||
472 | if (vpe0limit > 0) { | ||
473 | int slopslop; | ||
474 | if (vpe0limit < tcpervpe[0]) { | ||
475 | /* Reducing TC count - distribute to others */ | ||
476 | slop = tcpervpe[0] - vpe0limit; | ||
477 | slopslop = slop % (nvpe - 1); | ||
478 | tcpervpe[0] = vpe0limit; | ||
479 | for (i = 1; i < nvpe; i++) { | ||
480 | tcpervpe[i] += slop / (nvpe - 1); | ||
481 | if(slopslop && ((slopslop - (i - 1) > 0))) | ||
482 | tcpervpe[i]++; | ||
483 | } | ||
484 | } else if (vpe0limit > tcpervpe[0]) { | ||
485 | /* Increasing TC count - steal from others */ | ||
486 | slop = vpe0limit - tcpervpe[0]; | ||
487 | slopslop = slop % (nvpe - 1); | ||
488 | tcpervpe[0] = vpe0limit; | ||
489 | for (i = 1; i < nvpe; i++) { | ||
490 | tcpervpe[i] -= slop / (nvpe - 1); | ||
491 | if(slopslop && ((slopslop - (i - 1) > 0))) | ||
492 | tcpervpe[i]--; | ||
493 | } | ||
494 | } | ||
495 | } | ||
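To make the TC distribution arithmetic above concrete, a small standalone sketch of the even-split-plus-slop step (values hypothetical): with ntc = 7 and nvpe = 2, slop = 1, so VPE 0 gets 4 TCs and VPE 1 gets 3 -- low-numbered VPEs absorb the remainder.

	#include <stdio.h>

	int main(void)
	{
		int ntc = 7, nvpe = 2;		/* hypothetical TC/VPE counts */
		int tcpervpe[2];
		int slop = ntc % nvpe;		/* remainder after the even split */
		int i;

		for (i = 0; i < nvpe; i++) {
			tcpervpe[i] = ntc / nvpe;
			if (slop && (slop - i) > 0)	/* hand out the remainder */
				tcpervpe[i]++;
		}
		for (i = 0; i < nvpe; i++)
			printf("VPE %d: %d TCs\n", i, tcpervpe[i]);
		return 0;
	}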
496 | |||
497 | /* Set up shared TLB */ | ||
498 | smtc_configure_tlb(); | ||
499 | |||
500 | for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) { | ||
501 | /* Get number of CP1 contexts for each VPE. */ | ||
502 | if (tc == 0) | ||
503 | { | ||
504 | /* | ||
505 | * Do not call settc() for TC0 or the FPU context | ||
506 | * value will be incorrect. Besides, we know that | ||
507 | * we are TC0 anyway. | ||
508 | */ | ||
509 | smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() & | ||
510 | VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT); | ||
511 | if (nvpe == 2) | ||
512 | { | ||
513 | settc(1); | ||
514 | smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() & | ||
515 | VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT); | ||
516 | settc(0); | ||
517 | } | ||
518 | } | ||
519 | if (tcpervpe[vpe] == 0) | ||
520 | continue; | ||
521 | if (vpe != 0) | ||
522 | printk(", "); | ||
523 | printk("VPE %d: TC", vpe); | ||
524 | for (i = 0; i < tcpervpe[vpe]; i++) { | ||
525 | /* | ||
526 | * TC 0 is bound to VPE 0 at reset, | ||
527 | * and is presumably executing this | ||
528 | * code. Leave it alone! | ||
529 | */ | ||
530 | if (tc != 0) { | ||
531 | smtc_tc_setup(vpe, tc, cpu); | ||
532 | if (vpe != 0) { | ||
533 | /* | ||
534 | * Set MVP bit (possibly again). Do it | ||
535 | * here to catch CPUs that have no TCs | ||
536 | * bound to the VPE at reset. In that | ||
537 | * case, a TC must be bound to the VPE | ||
538 | * before we can set VPEControl[MVP] | ||
539 | */ | ||
540 | write_vpe_c0_vpeconf0( | ||
541 | read_vpe_c0_vpeconf0() | | ||
542 | VPECONF0_MVP); | ||
543 | } | ||
544 | cpu++; | ||
545 | } | ||
546 | printk(" %d", tc); | ||
547 | tc++; | ||
548 | } | ||
549 | if (vpe != 0) { | ||
550 | /* | ||
551 | * Allow this VPE to control others. | ||
552 | */ | ||
553 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | | ||
554 | VPECONF0_MVP); | ||
555 | |||
556 | /* | ||
557 | * Clear any stale software interrupts from VPE's Cause | ||
558 | */ | ||
559 | write_vpe_c0_cause(0); | ||
560 | |||
561 | /* | ||
562 | * Clear ERL/EXL of VPEs other than 0 | ||
563 | * and set restricted interrupt enable/mask. | ||
564 | */ | ||
565 | write_vpe_c0_status((read_vpe_c0_status() | ||
566 | & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM)) | ||
567 | | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7 | ||
568 | | ST0_IE)); | ||
569 | /* | ||
570 | * set config to be the same as vpe0, | ||
571 | * particularly kseg0 coherency alg | ||
572 | */ | ||
573 | write_vpe_c0_config(read_c0_config()); | ||
574 | /* Clear any pending timer interrupt */ | ||
575 | write_vpe_c0_compare(0); | ||
576 | /* Propagate Config7 */ | ||
577 | write_vpe_c0_config7(read_c0_config7()); | ||
578 | write_vpe_c0_count(read_c0_count() + CP0_SKEW); | ||
579 | ehb(); | ||
580 | } | ||
581 | /* enable multi-threading within VPE */ | ||
582 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); | ||
583 | /* enable the VPE */ | ||
584 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); | ||
585 | } | ||
586 | |||
587 | /* | ||
588 | * Pull any physically present but unused TCs out of circulation. | ||
589 | */ | ||
590 | while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { | ||
591 | set_cpu_possible(tc, false); | ||
592 | set_cpu_present(tc, false); | ||
593 | tc++; | ||
594 | } | ||
595 | |||
596 | /* release config state */ | ||
597 | write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); | ||
598 | |||
599 | printk("\n"); | ||
600 | |||
601 | /* Set up coprocessor affinity CPU mask(s) */ | ||
602 | |||
603 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
604 | for (tc = 0; tc < ntc; tc++) { | ||
605 | if (cpu_data[tc].options & MIPS_CPU_FPU) | ||
606 | cpu_set(tc, mt_fpu_cpumask); | ||
607 | } | ||
608 | #endif | ||
609 | |||
610 | /* set up ipi interrupts... */ | ||
611 | |||
612 | /* If we have multiple VPEs running, set up the cross-VPE interrupt */ | ||
613 | |||
614 | setup_cross_vpe_interrupts(nvpe); | ||
615 | |||
616 | /* Set up queue of free IPI "messages". */ | ||
617 | nipi = NR_CPUS * IPIBUF_PER_CPU; | ||
618 | if (ipibuffers > 0) | ||
619 | nipi = ipibuffers; | ||
620 | |||
621 | pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL); | ||
622 | if (pipi == NULL) | ||
623 | panic("kmalloc of IPI message buffers failed"); | ||
624 | else | ||
625 | printk("IPI buffer pool of %d buffers\n", nipi); | ||
626 | for (i = 0; i < nipi; i++) { | ||
627 | smtc_ipi_nq(&freeIPIq, pipi); | ||
628 | pipi++; | ||
629 | } | ||
630 | |||
631 | /* Arm multithreading and enable other VPEs - but all TCs are Halted */ | ||
632 | emt(EMT_ENABLE); | ||
633 | evpe(EVPE_ENABLE); | ||
634 | local_irq_restore(flags); | ||
635 | /* Initialize SMTC /proc statistics/diagnostics */ | ||
636 | init_smtc_stats(); | ||
637 | } | ||
638 | |||
639 | |||
640 | /* | ||
641 | * Set up the PC, SP, and GP of a secondary processor and start it | ||
642 | * running! | ||
643 | * smp_bootstrap is the place to resume from | ||
644 | * __KSTK_TOS(idle) is apparently the stack pointer | ||
645 | * (unsigned long)idle->thread_info is the gp | ||
646 | * | ||
647 | */ | ||
648 | void smtc_boot_secondary(int cpu, struct task_struct *idle) | ||
649 | { | ||
650 | extern u32 kernelsp[NR_CPUS]; | ||
651 | unsigned long flags; | ||
652 | int mtflags; | ||
653 | |||
654 | LOCK_MT_PRA(); | ||
655 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | ||
656 | dvpe(); | ||
657 | } | ||
658 | settc(cpu_data[cpu].tc_id); | ||
659 | |||
660 | /* pc */ | ||
661 | write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); | ||
662 | |||
663 | /* stack pointer */ | ||
664 | kernelsp[cpu] = __KSTK_TOS(idle); | ||
665 | write_tc_gpr_sp(__KSTK_TOS(idle)); | ||
666 | |||
667 | /* global pointer */ | ||
668 | write_tc_gpr_gp((unsigned long)task_thread_info(idle)); | ||
669 | |||
670 | smtc_status |= SMTC_MTC_ACTIVE; | ||
671 | write_tc_c0_tchalt(0); | ||
672 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | ||
673 | evpe(EVPE_ENABLE); | ||
674 | } | ||
675 | UNLOCK_MT_PRA(); | ||
676 | } | ||
677 | |||
678 | void smtc_init_secondary(void) | ||
679 | { | ||
680 | } | ||
681 | |||
682 | void smtc_smp_finish(void) | ||
683 | { | ||
684 | int cpu = smp_processor_id(); | ||
685 | |||
686 | /* | ||
687 | * Lowest-numbered CPU per VPE starts a clock tick. | ||
688 | * Like per_cpu_trap_init() hack, this assumes that | ||
689 | * SMTC init code assigns TCs consecutively and | ||
690 | * in ascending order across available VPEs. | ||
691 | */ | ||
692 | if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) | ||
693 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); | ||
694 | |||
695 | local_irq_enable(); | ||
696 | |||
697 | printk("TC %d going on-line as CPU %d\n", | ||
698 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); | ||
699 | } | ||
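For concreteness, the Compare arithmetic above: with a hypothetical mips_hpt_frequency of 100 MHz and HZ = 100, Compare is programmed 100000000 / 100 = 1000000 Count cycles ahead, i.e. the first tick fires 10 ms after this CPU comes online.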
700 | |||
701 | void smtc_cpus_done(void) | ||
702 | { | ||
703 | } | ||
704 | |||
705 | /* | ||
706 | * Support for SMTC-optimized driver IRQ registration | ||
707 | */ | ||
708 | |||
709 | /* | ||
710 | * SMTC Kernel needs to manipulate low-level CPU interrupt mask | ||
711 | * in do_IRQ. These are passed in setup_irq_smtc() and stored | ||
712 | * in this table. | ||
713 | */ | ||
714 | |||
715 | int setup_irq_smtc(unsigned int irq, struct irqaction * new, | ||
716 | unsigned long hwmask) | ||
717 | { | ||
718 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG | ||
719 | unsigned int vpe = current_cpu_data.vpe_id; | ||
720 | |||
721 | vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1; | ||
722 | #endif | ||
723 | irq_hwmask[irq] = hwmask; | ||
724 | |||
725 | return setup_irq(irq, new); | ||
726 | } | ||
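A sketch of a typical caller, to show what the extra hwmask argument carries: the Status/Cause IM bit for the line, in the same 0x100 << n form used for the IPI IRQ below. The handler and IRQ number here are illustrative, not taken from this patch:

	static struct irqaction example_timer_irqaction = {
		.handler = example_timer_interrupt,	/* hypothetical handler */
		.flags	 = IRQF_PERCPU,
		.name	 = "timer",
	};

	/* Register CPU IRQ 7 and record its IM bit so that do_IRQ can
	 * manipulate the low-level interrupt mask, per the comment above. */
	setup_irq_smtc(MIPS_CPU_IRQ_BASE + 7, &example_timer_irqaction, 0x100 << 7);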
727 | |||
728 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | ||
729 | /* | ||
730 | * Support for IRQ affinity to TCs | ||
731 | */ | ||
732 | |||
733 | void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity) | ||
734 | { | ||
735 | /* | ||
736 | * If a "fast path" cache of quickly decodable affinity state | ||
737 | * is maintained, this is where it gets done, on a call up | ||
738 | * from the platform affinity code. | ||
739 | */ | ||
740 | } | ||
741 | |||
742 | void smtc_forward_irq(struct irq_data *d) | ||
743 | { | ||
744 | unsigned int irq = d->irq; | ||
745 | int target; | ||
746 | |||
747 | /* | ||
748 | * OK wise guy, now figure out how to get the IRQ | ||
749 | * to be serviced on an authorized "CPU". | ||
750 | * | ||
751 | * Ideally, to handle the situation where an IRQ has multiple | ||
752 | * eligible CPUS, we would maintain state per IRQ that would | ||
753 | * allow a fair distribution of service requests. Since the | ||
754 | * expected use model is any-or-only-one, for simplicity | ||
755 | * and efficiency, we just pick the easiest one to find. | ||
756 | */ | ||
757 | |||
758 | target = cpumask_first(d->affinity); | ||
759 | |||
760 | /* | ||
761 | * We depend on the platform code to have correctly processed | ||
762 | * IRQ affinity change requests to ensure that the IRQ affinity | ||
763 | * mask has been purged of bits corresponding to nonexistent and | ||
764 | * offline "CPUs", and to TCs bound to VPEs other than the VPE | ||
765 | * connected to the physical interrupt input for the interrupt | ||
766 | * in question. Otherwise we have a nasty problem with interrupt | ||
767 | * mask management. This is best handled in non-performance-critical | ||
768 | * platform IRQ affinity setting code, to minimize interrupt-time | ||
769 | * checks. | ||
770 | */ | ||
771 | |||
772 | /* If no one is eligible, service locally */ | ||
773 | if (target >= NR_CPUS) | ||
774 | do_IRQ_no_affinity(irq); | ||
775 | else | ||
776 | smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq); | ||
777 | } | ||
778 | |||
779 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | ||
780 | |||
781 | /* | ||
782 | * IPI model for SMTC is tricky, because interrupts aren't TC-specific. | ||
783 | * Within a VPE one TC can interrupt another by different approaches. | ||
784 | * The easiest to get right would probably be to make all TCs except | ||
785 | * the target IXMT and set a software interrupt, but an IXMT-based | ||
786 | * scheme requires that a handler must run before a new IPI could | ||
787 | * be sent, which would break the "broadcast" loops in MIPS MT. | ||
788 | * A more gonzo approach within a VPE is to halt the TC, extract | ||
789 | * its Restart, Status, and a couple of GPRs, and program the Restart | ||
790 | * address to emulate an interrupt. | ||
791 | * | ||
792 | * Within a VPE, one can be confident that the target TC isn't in | ||
793 | * a critical EXL state when halted, since the write to the Halt | ||
794 | * register could not have issued on the writing thread if the | ||
795 | * halting thread had EXL set. So k0 and k1 of the target TC | ||
796 | * can be used by the injection code. Across VPEs, one can't | ||
797 | * be certain that the target TC isn't in a critical exception | ||
798 | * state. So we try a two-step process of sending a software | ||
799 | * interrupt to the target VPE, which either handles the event | ||
800 | * itself (if it was the target) or injects the event within | ||
801 | * the VPE. | ||
802 | */ | ||
803 | |||
804 | static void smtc_ipi_qdump(void) | ||
805 | { | ||
806 | int i; | ||
807 | struct smtc_ipi *temp; | ||
808 | |||
809 | for (i = 0; i < NR_CPUS ;i++) { | ||
810 | pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n", | ||
811 | i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail, | ||
812 | IPIQ[i].depth); | ||
813 | temp = IPIQ[i].head; | ||
814 | |||
815 | while (temp != IPIQ[i].tail) { | ||
816 | pr_debug("%d %d %d: ", temp->type, temp->dest, | ||
817 | (int)temp->arg); | ||
818 | #ifdef SMTC_IPI_DEBUG | ||
819 | pr_debug("%u %lu\n", temp->sender, temp->stamp); | ||
820 | #else | ||
821 | pr_debug("\n"); | ||
822 | #endif | ||
823 | temp = temp->flink; | ||
824 | } | ||
825 | } | ||
826 | } | ||
827 | |||
828 | /* | ||
829 | * The standard atomic.h primitives don't quite do what we want | ||
830 | * here: We need an atomic add-and-return-previous-value (which | ||
831 | * could be done with atomic_add_return and a decrement) and an | ||
832 | * atomic set/zero-and-return-previous-value (which can't really | ||
833 | * be done with the atomic.h primitives). And since this is | ||
834 | * MIPS MT, we can assume that we have LL/SC. | ||
835 | */ | ||
836 | static inline int atomic_postincrement(atomic_t *v) | ||
837 | { | ||
838 | unsigned long result; | ||
839 | |||
840 | unsigned long temp; | ||
841 | |||
842 | __asm__ __volatile__( | ||
843 | "1: ll %0, %2 \n" | ||
844 | " addu %1, %0, 1 \n" | ||
845 | " sc %1, %2 \n" | ||
846 | " beqz %1, 1b \n" | ||
847 | __WEAK_LLSC_MB | ||
848 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | ||
849 | : "m" (v->counter) | ||
850 | : "memory"); | ||
851 | |||
852 | return result; | ||
853 | } | ||
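For readers more used to portable atomics: the LL/SC loop above is a fetch-and-increment, equivalent in effect to this C11 sketch (an illustration, not the kernel's primitive):

	#include <stdatomic.h>

	/* Return the prior value of *v while incrementing it atomically --
	 * the add-and-return-previous-value the LL/SC loop implements. */
	static inline int atomic_postincrement_c11(atomic_int *v)
	{
		return atomic_fetch_add_explicit(v, 1, memory_order_seq_cst);
	}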
854 | |||
855 | void smtc_send_ipi(int cpu, int type, unsigned int action) | ||
856 | { | ||
857 | int tcstatus; | ||
858 | struct smtc_ipi *pipi; | ||
859 | unsigned long flags; | ||
860 | int mtflags; | ||
861 | unsigned long tcrestart; | ||
862 | int set_resched_flag = (type == LINUX_SMP_IPI && | ||
863 | action == SMP_RESCHEDULE_YOURSELF); | ||
864 | |||
865 | if (cpu == smp_processor_id()) { | ||
866 | printk("Cannot Send IPI to self!\n"); | ||
867 | return; | ||
868 | } | ||
869 | if (set_resched_flag && IPIQ[cpu].resched_flag != 0) | ||
870 | return; /* There is a reschedule queued already */ | ||
871 | |||
872 | /* Set up a descriptor, to be delivered either promptly or queued */ | ||
873 | pipi = smtc_ipi_dq(&freeIPIq); | ||
874 | if (pipi == NULL) { | ||
875 | bust_spinlocks(1); | ||
876 | mips_mt_regdump(dvpe()); | ||
877 | panic("IPI Msg. Buffers Depleted"); | ||
878 | } | ||
879 | pipi->type = type; | ||
880 | pipi->arg = (void *)action; | ||
881 | pipi->dest = cpu; | ||
882 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | ||
883 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ | ||
884 | IPIQ[cpu].resched_flag |= set_resched_flag; | ||
885 | smtc_ipi_nq(&IPIQ[cpu], pipi); | ||
886 | LOCK_CORE_PRA(); | ||
887 | settc(cpu_data[cpu].tc_id); | ||
888 | write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1); | ||
889 | UNLOCK_CORE_PRA(); | ||
890 | } else { | ||
891 | /* | ||
892 | * Not sufficient to do a LOCK_MT_PRA (dmt) here, | ||
893 | * since ASID shootdown on the other VPE may | ||
894 | * collide with this operation. | ||
895 | */ | ||
896 | LOCK_CORE_PRA(); | ||
897 | settc(cpu_data[cpu].tc_id); | ||
898 | /* Halt the targeted TC */ | ||
899 | write_tc_c0_tchalt(TCHALT_H); | ||
900 | mips_ihb(); | ||
901 | |||
902 | /* | ||
903 | * Inspect TCStatus - if IXMT is set, we have to queue | ||
904 | * a message. Otherwise, we set up the "interrupt" | ||
905 | * of the other TC | ||
906 | */ | ||
907 | tcstatus = read_tc_c0_tcstatus(); | ||
908 | |||
909 | if ((tcstatus & TCSTATUS_IXMT) != 0) { | ||
910 | /* | ||
911 | * If we're in the irq-off version of the wait | ||
912 | * loop, we need to force exit from the wait and | ||
913 | * do a direct post of the IPI. | ||
914 | */ | ||
915 | if (cpu_wait == r4k_wait_irqoff) { | ||
916 | tcrestart = read_tc_c0_tcrestart(); | ||
917 | if (address_is_in_r4k_wait_irqoff(tcrestart)) { | ||
918 | write_tc_c0_tcrestart(__pastwait); | ||
919 | tcstatus &= ~TCSTATUS_IXMT; | ||
920 | write_tc_c0_tcstatus(tcstatus); | ||
921 | goto postdirect; | ||
922 | } | ||
923 | } | ||
924 | /* | ||
925 | * Otherwise we queue the message for the target TC | ||
926 | * to pick up when it does a local_irq_restore() | ||
927 | */ | ||
928 | write_tc_c0_tchalt(0); | ||
929 | UNLOCK_CORE_PRA(); | ||
930 | IPIQ[cpu].resched_flag |= set_resched_flag; | ||
931 | smtc_ipi_nq(&IPIQ[cpu], pipi); | ||
932 | } else { | ||
933 | postdirect: | ||
934 | post_direct_ipi(cpu, pipi); | ||
935 | write_tc_c0_tchalt(0); | ||
936 | UNLOCK_CORE_PRA(); | ||
937 | } | ||
938 | } | ||
939 | } | ||
940 | |||
941 | /* | ||
942 | * Send IPI message to Halted TC, TargTC/TargVPE already having been set | ||
943 | */ | ||
944 | static void post_direct_ipi(int cpu, struct smtc_ipi *pipi) | ||
945 | { | ||
946 | struct pt_regs *kstack; | ||
947 | unsigned long tcstatus; | ||
948 | unsigned long tcrestart; | ||
949 | extern u32 kernelsp[NR_CPUS]; | ||
950 | extern void __smtc_ipi_vector(void); | ||
951 | //printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu); | ||
952 | |||
953 | /* Extract Status, EPC from halted TC */ | ||
954 | tcstatus = read_tc_c0_tcstatus(); | ||
955 | tcrestart = read_tc_c0_tcrestart(); | ||
956 | /* If TCRestart indicates a WAIT instruction, advance the PC */ | ||
957 | if ((tcrestart & 0x80000000) | ||
958 | && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) { | ||
959 | tcrestart += 4; | ||
960 | } | ||
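(On the magic numbers in the WAIT test above: the 0x80000000 check ensures TCRestart points into a kernel segment before it is dereferenced; masking the fetched instruction with 0xfe00003f keeps the opcode, the CO bit, and the function field; and 0x42000020 is the MIPS32 WAIT encoding, so a halted TC parked on a WAIT has its PC advanced past it.)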
961 | /* | ||
962 | * Save on TC's future kernel stack | ||
963 | * | ||
964 | * CU bit of Status is an indicator that TC was | ||
965 | * already running on a kernel stack... | ||
966 | */ | ||
967 | if (tcstatus & ST0_CU0) { | ||
968 | /* Note that this "- 1" is pointer arithmetic */ | ||
969 | kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1; | ||
970 | } else { | ||
971 | kstack = ((struct pt_regs *)kernelsp[cpu]) - 1; | ||
972 | } | ||
973 | |||
974 | kstack->cp0_epc = (long)tcrestart; | ||
975 | /* Save TCStatus */ | ||
976 | kstack->cp0_tcstatus = tcstatus; | ||
977 | /* Pass token of operation to be performed in the kernel stack pad area */ | ||
978 | kstack->pad0[4] = (unsigned long)pipi; | ||
979 | /* Pass address of function to be called likewise */ | ||
980 | kstack->pad0[5] = (unsigned long)&ipi_decode; | ||
981 | /* Set interrupt exempt and kernel mode */ | ||
982 | tcstatus |= TCSTATUS_IXMT; | ||
983 | tcstatus &= ~TCSTATUS_TKSU; | ||
984 | write_tc_c0_tcstatus(tcstatus); | ||
985 | ehb(); | ||
986 | /* Set TC Restart address to be SMTC IPI vector */ | ||
987 | write_tc_c0_tcrestart(__smtc_ipi_vector); | ||
988 | } | ||
989 | |||
990 | static void ipi_resched_interrupt(void) | ||
991 | { | ||
992 | scheduler_ipi(); | ||
993 | } | ||
994 | |||
995 | static void ipi_call_interrupt(void) | ||
996 | { | ||
997 | /* Invoke generic function invocation code in smp.c */ | ||
998 | smp_call_function_interrupt(); | ||
999 | } | ||
1000 | |||
1001 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); | ||
1002 | |||
1003 | static void __irq_entry smtc_clock_tick_interrupt(void) | ||
1004 | { | ||
1005 | unsigned int cpu = smp_processor_id(); | ||
1006 | struct clock_event_device *cd; | ||
1007 | int irq = MIPS_CPU_IRQ_BASE + 1; | ||
1008 | |||
1009 | irq_enter(); | ||
1010 | kstat_incr_irq_this_cpu(irq); | ||
1011 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
1012 | cd->event_handler(cd); | ||
1013 | irq_exit(); | ||
1014 | } | ||
1015 | |||
1016 | void ipi_decode(struct smtc_ipi *pipi) | ||
1017 | { | ||
1018 | void *arg_copy = pipi->arg; | ||
1019 | int type_copy = pipi->type; | ||
1020 | |||
1021 | smtc_ipi_nq(&freeIPIq, pipi); | ||
1022 | |||
1023 | switch (type_copy) { | ||
1024 | case SMTC_CLOCK_TICK: | ||
1025 | smtc_clock_tick_interrupt(); | ||
1026 | break; | ||
1027 | |||
1028 | case LINUX_SMP_IPI: | ||
1029 | switch ((int)arg_copy) { | ||
1030 | case SMP_RESCHEDULE_YOURSELF: | ||
1031 | ipi_resched_interrupt(); | ||
1032 | break; | ||
1033 | case SMP_CALL_FUNCTION: | ||
1034 | ipi_call_interrupt(); | ||
1035 | break; | ||
1036 | default: | ||
1037 | printk("Impossible SMTC IPI Argument %p\n", arg_copy); | ||
1038 | break; | ||
1039 | } | ||
1040 | break; | ||
1041 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | ||
1042 | case IRQ_AFFINITY_IPI: | ||
1043 | /* | ||
1044 | * Accept a "forwarded" interrupt that was initially | ||
1045 | * taken by a TC who doesn't have affinity for the IRQ. | ||
1046 | */ | ||
1047 | do_IRQ_no_affinity((int)arg_copy); | ||
1048 | break; | ||
1049 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | ||
1050 | default: | ||
1051 | printk("Impossible SMTC IPI Type 0x%x\n", type_copy); | ||
1052 | break; | ||
1053 | } | ||
1054 | } | ||
1055 | |||
1056 | /* | ||
1057 | * Similar to smtc_ipi_replay(), but invoked from context restore, | ||
1058 | * so it reuses the current exception frame rather than set up a | ||
1059 | * new one with self_ipi. | ||
1060 | */ | ||
1061 | |||
1062 | void deferred_smtc_ipi(void) | ||
1063 | { | ||
1064 | int cpu = smp_processor_id(); | ||
1065 | |||
1066 | /* | ||
1067 | * Test is not atomic, but much faster than a dequeue, | ||
1068 | * and the vast majority of invocations will have a null queue. | ||
1069 | * If irqs were disabled when this was called, then any IPIs queued | ||
1070 | * after we test last will be taken on the next irq_enable/restore. | ||
1071 | * If interrupts were enabled, then any IPIs added after the | ||
1072 | * last test will be taken directly. | ||
1073 | */ | ||
1074 | |||
1075 | while (IPIQ[cpu].head != NULL) { | ||
1076 | struct smtc_ipi_q *q = &IPIQ[cpu]; | ||
1077 | struct smtc_ipi *pipi; | ||
1078 | unsigned long flags; | ||
1079 | |||
1080 | /* | ||
1081 | * It may be possible we'll come in with interrupts | ||
1082 | * already enabled. | ||
1083 | */ | ||
1084 | local_irq_save(flags); | ||
1085 | spin_lock(&q->lock); | ||
1086 | pipi = __smtc_ipi_dq(q); | ||
1087 | spin_unlock(&q->lock); | ||
1088 | if (pipi != NULL) { | ||
1089 | if (pipi->type == LINUX_SMP_IPI && | ||
1090 | (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) | ||
1091 | IPIQ[cpu].resched_flag = 0; | ||
1092 | ipi_decode(pipi); | ||
1093 | } | ||
1094 | /* | ||
1095 | * The use of the __raw_local restore isn't | ||
1096 | * as obviously necessary here as in smtc_ipi_replay(), | ||
1097 | * but it's more efficient, given that we're already | ||
1098 | * running down the IPI queue. | ||
1099 | */ | ||
1100 | __arch_local_irq_restore(flags); | ||
1101 | } | ||
1102 | } | ||
1103 | |||
1104 | /* | ||
1105 | * Cross-VPE interrupts in the SMTC prototype use "software interrupts" | ||
1106 | * set via cross-VPE MTTR manipulation of the Cause register. It would be | ||
1107 | * in some regards preferable to have external logic for "doorbell" hardware | ||
1108 | * interrupts. | ||
1109 | */ | ||
1110 | |||
1111 | static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ; | ||
1112 | |||
1113 | static irqreturn_t ipi_interrupt(int irq, void *dev_idm) | ||
1114 | { | ||
1115 | int my_vpe = cpu_data[smp_processor_id()].vpe_id; | ||
1116 | int my_tc = cpu_data[smp_processor_id()].tc_id; | ||
1117 | int cpu; | ||
1118 | struct smtc_ipi *pipi; | ||
1119 | unsigned long tcstatus; | ||
1120 | int sent; | ||
1121 | unsigned long flags; | ||
1122 | unsigned int mtflags; | ||
1123 | unsigned int vpflags; | ||
1124 | |||
1125 | /* | ||
1126 | * So long as cross-VPE interrupts are done via | ||
1127 | * MFTR/MTTR read-modify-writes of Cause, we need | ||
1128 | * to stop other VPEs whenever the local VPE does | ||
1129 | * anything similar. | ||
1130 | */ | ||
1131 | local_irq_save(flags); | ||
1132 | vpflags = dvpe(); | ||
1133 | clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ); | ||
1134 | set_c0_status(0x100 << MIPS_CPU_IPI_IRQ); | ||
1135 | irq_enable_hazard(); | ||
1136 | evpe(vpflags); | ||
1137 | local_irq_restore(flags); | ||
1138 | |||
1139 | /* | ||
1140 | * Cross-VPE Interrupt handler: Try to directly deliver IPIs | ||
1141 | * queued for TCs on this VPE other than the current one. | ||
1142 | * Return-from-interrupt should cause us to drain the queue | ||
1143 | * for the current TC, so we ought not to have to do it explicitly here. | ||
1144 | */ | ||
1145 | |||
1146 | for_each_online_cpu(cpu) { | ||
1147 | if (cpu_data[cpu].vpe_id != my_vpe) | ||
1148 | continue; | ||
1149 | |||
1150 | pipi = smtc_ipi_dq(&IPIQ[cpu]); | ||
1151 | if (pipi != NULL) { | ||
1152 | if (cpu_data[cpu].tc_id != my_tc) { | ||
1153 | sent = 0; | ||
1154 | LOCK_MT_PRA(); | ||
1155 | settc(cpu_data[cpu].tc_id); | ||
1156 | write_tc_c0_tchalt(TCHALT_H); | ||
1157 | mips_ihb(); | ||
1158 | tcstatus = read_tc_c0_tcstatus(); | ||
1159 | if ((tcstatus & TCSTATUS_IXMT) == 0) { | ||
1160 | post_direct_ipi(cpu, pipi); | ||
1161 | sent = 1; | ||
1162 | } | ||
1163 | write_tc_c0_tchalt(0); | ||
1164 | UNLOCK_MT_PRA(); | ||
1165 | if (!sent) { | ||
1166 | smtc_ipi_req(&IPIQ[cpu], pipi); | ||
1167 | } | ||
1168 | } else { | ||
1169 | /* | ||
1170 | * ipi_decode() should be called | ||
1171 | * with interrupts off | ||
1172 | */ | ||
1173 | local_irq_save(flags); | ||
1174 | if (pipi->type == LINUX_SMP_IPI && | ||
1175 | (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) | ||
1176 | IPIQ[cpu].resched_flag = 0; | ||
1177 | ipi_decode(pipi); | ||
1178 | local_irq_restore(flags); | ||
1179 | } | ||
1180 | } | ||
1181 | } | ||
1182 | |||
1183 | return IRQ_HANDLED; | ||
1184 | } | ||
1185 | |||
1186 | static void ipi_irq_dispatch(void) | ||
1187 | { | ||
1188 | do_IRQ(cpu_ipi_irq); | ||
1189 | } | ||
1190 | |||
1191 | static struct irqaction irq_ipi = { | ||
1192 | .handler = ipi_interrupt, | ||
1193 | .flags = IRQF_PERCPU, | ||
1194 | .name = "SMTC_IPI" | ||
1195 | }; | ||
1196 | |||
1197 | static void setup_cross_vpe_interrupts(unsigned int nvpe) | ||
1198 | { | ||
1199 | if (nvpe < 1) | ||
1200 | return; | ||
1201 | |||
1202 | if (!cpu_has_vint) | ||
1203 | panic("SMTC Kernel requires Vectored Interrupt support"); | ||
1204 | |||
1205 | set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch); | ||
1206 | |||
1207 | setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); | ||
1208 | |||
1209 | irq_set_handler(cpu_ipi_irq, handle_percpu_irq); | ||
1210 | } | ||
1211 | |||
1212 | /* | ||
1213 | * SMTC-specific hacks invoked from elsewhere in the kernel. | ||
1214 | */ | ||
1215 | |||
1216 | /* | ||
1217 | * smtc_ipi_replay is called from raw_local_irq_restore | ||
1218 | */ | ||
1219 | |||
1220 | void smtc_ipi_replay(void) | ||
1221 | { | ||
1222 | unsigned int cpu = smp_processor_id(); | ||
1223 | |||
1224 | /* | ||
1225 | * To the extent that we've ever turned interrupts off, | ||
1226 | * we may have accumulated deferred IPIs. This is subtle. | ||
1227 | * We should be OK: if we pick up something and dispatch | ||
1228 | * it here, that's great. If we see nothing, but concurrent | ||
1229 | * with this operation, another TC sends us an IPI, IXMT | ||
1230 | * is clear, and we'll handle it as a real pseudo-interrupt | ||
1231 | * and not a pseudo-pseudo interrupt. The important thing | ||
1232 | * is to do the last check for queued message *after* the | ||
1233 | * re-enabling of interrupts. | ||
1234 | */ | ||
1235 | while (IPIQ[cpu].head != NULL) { | ||
1236 | struct smtc_ipi_q *q = &IPIQ[cpu]; | ||
1237 | struct smtc_ipi *pipi; | ||
1238 | unsigned long flags; | ||
1239 | |||
1240 | /* | ||
1241 | * It's just possible we'll come in with interrupts | ||
1242 | * already enabled. | ||
1243 | */ | ||
1244 | local_irq_save(flags); | ||
1245 | |||
1246 | spin_lock(&q->lock); | ||
1247 | pipi = __smtc_ipi_dq(q); | ||
1248 | spin_unlock(&q->lock); | ||
1249 | /* | ||
1250 | * But use a raw restore here to avoid recursion. | ||
1251 | */ | ||
1252 | __arch_local_irq_restore(flags); | ||
1253 | |||
1254 | if (pipi) { | ||
1255 | self_ipi(pipi); | ||
1256 | smtc_cpu_stats[cpu].selfipis++; | ||
1257 | } | ||
1258 | } | ||
1259 | } | ||
1260 | |||
1261 | EXPORT_SYMBOL(smtc_ipi_replay); | ||
1262 | |||
1263 | void smtc_idle_loop_hook(void) | ||
1264 | { | ||
1265 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG | ||
1266 | int im; | ||
1267 | int flags; | ||
1268 | int mtflags; | ||
1269 | int bit; | ||
1270 | int vpe; | ||
1271 | int tc; | ||
1272 | int hook_ntcs; | ||
1273 | /* | ||
1274 | * printk within DMT-protected regions can deadlock, | ||
1275 | * so buffer diagnostic messages for later output. | ||
1276 | */ | ||
1277 | char *pdb_msg; | ||
1278 | char id_ho_db_msg[768]; /* worst-case use should be less than 700 */ | ||
1279 | |||
1280 | if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */ | ||
1281 | if (atomic_add_return(1, &idle_hook_initialized) == 1) { | ||
1282 | int mvpconf0; | ||
1283 | /* Tedious stuff to just do once */ | ||
1284 | mvpconf0 = read_c0_mvpconf0(); | ||
1285 | hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||
1286 | if (hook_ntcs > NR_CPUS) | ||
1287 | hook_ntcs = NR_CPUS; | ||
1288 | for (tc = 0; tc < hook_ntcs; tc++) { | ||
1289 | tcnoprog[tc] = 0; | ||
1290 | clock_hang_reported[tc] = 0; | ||
1291 | } | ||
1292 | for (vpe = 0; vpe < 2; vpe++) | ||
1293 | for (im = 0; im < 8; im++) | ||
1294 | imstuckcount[vpe][im] = 0; | ||
1295 | printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs); | ||
1296 | atomic_set(&idle_hook_initialized, 1000); | ||
1297 | } else { | ||
1298 | /* Someone else is initializing in parallel - let 'em finish */ | ||
1299 | while (atomic_read(&idle_hook_initialized) < 1000) | ||
1300 | ; | ||
1301 | } | ||
1302 | } | ||
1303 | |||
1304 | /* Have we stupidly left IXMT set somewhere? */ | ||
1305 | if (read_c0_tcstatus() & 0x400) { | ||
1306 | write_c0_tcstatus(read_c0_tcstatus() & ~0x400); | ||
1307 | ehb(); | ||
1308 | printk("Dangling IXMT in cpu_idle()\n"); | ||
1309 | } | ||
1310 | |||
1311 | /* Have we stupidly left an IM bit turned off? */ | ||
1312 | #define IM_LIMIT 2000 | ||
1313 | local_irq_save(flags); | ||
1314 | mtflags = dmt(); | ||
1315 | pdb_msg = &id_ho_db_msg[0]; | ||
1316 | im = read_c0_status(); | ||
1317 | vpe = current_cpu_data.vpe_id; | ||
1318 | for (bit = 0; bit < 8; bit++) { | ||
1319 | /* | ||
1320 | * In current prototype, I/O interrupts | ||
1321 | * are masked for VPE > 0 | ||
1322 | */ | ||
1323 | if (vpemask[vpe][bit]) { | ||
1324 | if (!(im & (0x100 << bit))) | ||
1325 | imstuckcount[vpe][bit]++; | ||
1326 | else | ||
1327 | imstuckcount[vpe][bit] = 0; | ||
1328 | if (imstuckcount[vpe][bit] > IM_LIMIT) { | ||
1329 | set_c0_status(0x100 << bit); | ||
1330 | ehb(); | ||
1331 | imstuckcount[vpe][bit] = 0; | ||
1332 | pdb_msg += sprintf(pdb_msg, | ||
1333 | "Dangling IM %d fixed for VPE %d\n", bit, | ||
1334 | vpe); | ||
1335 | } | ||
1336 | } | ||
1337 | } | ||
1338 | |||
1339 | emt(mtflags); | ||
1340 | local_irq_restore(flags); | ||
1341 | if (pdb_msg != &id_ho_db_msg[0]) | ||
1342 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); | ||
1343 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ | ||
1344 | |||
1345 | smtc_ipi_replay(); | ||
1346 | } | ||
1347 | |||
1348 | void smtc_soft_dump(void) | ||
1349 | { | ||
1350 | int i; | ||
1351 | |||
1352 | printk("Counter Interrupts taken per CPU (TC)\n"); | ||
1353 | for (i=0; i < NR_CPUS; i++) { | ||
1354 | printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints); | ||
1355 | } | ||
1356 | printk("Self-IPI invocations:\n"); | ||
1357 | for (i=0; i < NR_CPUS; i++) { | ||
1358 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); | ||
1359 | } | ||
1360 | smtc_ipi_qdump(); | ||
1361 | printk("%d Recoveries of \"stolen\" FPU\n", | ||
1362 | atomic_read(&smtc_fpu_recoveries)); | ||
1363 | } | ||
1364 | |||
1365 | |||
1366 | /* | ||
1367 | * TLB management routines special to SMTC | ||
1368 | */ | ||
1369 | |||
1370 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | ||
1371 | { | ||
1372 | unsigned long flags, mtflags, tcstat, prevhalt, asid; | ||
1373 | int tlb, i; | ||
1374 | |||
1375 | /* | ||
1376 | * It would be nice to be able to use a spinlock here, | ||
1377 | * but this is invoked from within TLB flush routines | ||
1378 | * that protect themselves with DVPE, so if a lock is | ||
1379 | * held by another TC, it'll never be freed. | ||
1380 | * | ||
1381 | * DVPE/DMT must not be done with interrupts enabled, | ||
1382 | * so even though most callers will already have disabled | ||
1383 | * them, let's be really careful... | ||
1384 | */ | ||
1385 | |||
1386 | local_irq_save(flags); | ||
1387 | if (smtc_status & SMTC_TLB_SHARED) { | ||
1388 | mtflags = dvpe(); | ||
1389 | tlb = 0; | ||
1390 | } else { | ||
1391 | mtflags = dmt(); | ||
1392 | tlb = cpu_data[cpu].vpe_id; | ||
1393 | } | ||
1394 | asid = asid_cache(cpu); | ||
1395 | |||
1396 | do { | ||
1397 | if (!((asid += ASID_INC) & ASID_MASK) ) { | ||
1398 | if (cpu_has_vtag_icache) | ||
1399 | flush_icache_all(); | ||
1400 | /* Traverse all online CPUs (hack requires contiguous range) */ | ||
1401 | for_each_online_cpu(i) { | ||
1402 | /* | ||
1403 | * We don't need to worry about our own CPU, nor those of | ||
1404 | * CPUs who don't share our TLB. | ||
1405 | */ | ||
1406 | if ((i != smp_processor_id()) && | ||
1407 | ((smtc_status & SMTC_TLB_SHARED) || | ||
1408 | (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) { | ||
1409 | settc(cpu_data[i].tc_id); | ||
1410 | prevhalt = read_tc_c0_tchalt() & TCHALT_H; | ||
1411 | if (!prevhalt) { | ||
1412 | write_tc_c0_tchalt(TCHALT_H); | ||
1413 | mips_ihb(); | ||
1414 | } | ||
1415 | tcstat = read_tc_c0_tcstatus(); | ||
1416 | smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); | ||
1417 | if (!prevhalt) | ||
1418 | write_tc_c0_tchalt(0); | ||
1419 | } | ||
1420 | } | ||
1421 | if (!asid) /* fix version if needed */ | ||
1422 | asid = ASID_FIRST_VERSION; | ||
1423 | local_flush_tlb_all(); /* start new asid cycle */ | ||
1424 | } | ||
1425 | } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); | ||
1426 | |||
1427 | /* | ||
1428 | * SMTC shares the TLB within VPEs and possibly across all VPEs. | ||
1429 | */ | ||
1430 | for_each_online_cpu(i) { | ||
1431 | if ((smtc_status & SMTC_TLB_SHARED) || | ||
1432 | (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) | ||
1433 | cpu_context(i, mm) = asid_cache(i) = asid; | ||
1434 | } | ||
1435 | |||
1436 | if (smtc_status & SMTC_TLB_SHARED) | ||
1437 | evpe(mtflags); | ||
1438 | else | ||
1439 | emt(mtflags); | ||
1440 | local_irq_restore(flags); | ||
1441 | } | ||
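
The do/while above is the SMTC extension of the classic MIPS ASID allocator: ASIDs are handed out sequentially, and when the hardware field wraps, the TLB is flushed and a generation counter in the upper bits is bumped; SMTC merely keeps retrying while smtc_live_asid[] shows the candidate still parked in some sharing TC's TCStatus. A minimal sketch of the underlying versioning scheme, assuming an 8-bit hardware ASID field (the real mask is CPU-dependent):

#include <stdint.h>

#define ASID_INC		0x1
#define ASID_MASK		0xffUL	/* hardware ASID bits (assumed 8 here) */
#define ASID_FIRST_VERSION	0x100UL	/* upper bits: generation counter */

static uint64_t asid_cache;		/* per-CPU in the real kernel */

static uint64_t get_new_asid_sketch(void)
{
	uint64_t asid = asid_cache + ASID_INC;

	if (!(asid & ASID_MASK)) {	/* hardware field wrapped around */
		/* local_flush_tlb_all() goes here: old ASIDs may linger */
		if (!asid)		/* the generation counter overflowed */
			asid = ASID_FIRST_VERSION;
	}
	return asid_cache = asid;
}
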
1442 | |||
1443 | /* | ||
1444 | * Invoked from macros defined in mmu_context.h | ||
1445 | * which must already have disabled interrupts | ||
1446 | * and done a DVPE or DMT as appropriate. | ||
1447 | */ | ||
1448 | |||
1449 | void smtc_flush_tlb_asid(unsigned long asid) | ||
1450 | { | ||
1451 | int entry; | ||
1452 | unsigned long ehi; | ||
1453 | |||
1454 | entry = read_c0_wired(); | ||
1455 | |||
1456 | /* Traverse all non-wired entries */ | ||
1457 | while (entry < current_cpu_data.tlbsize) { | ||
1458 | write_c0_index(entry); | ||
1459 | ehb(); | ||
1460 | tlb_read(); | ||
1461 | ehb(); | ||
1462 | ehi = read_c0_entryhi(); | ||
1463 | if ((ehi & ASID_MASK) == asid) { | ||
1464 | /* | ||
1465 | * Invalidate only entries with specified ASID, | ||
1466 | * making sure all entries differ. | ||
1467 | */ | ||
1468 | write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); | ||
1469 | write_c0_entrylo0(0); | ||
1470 | write_c0_entrylo1(0); | ||
1471 | mtc0_tlbw_hazard(); | ||
1472 | tlb_write_indexed(); | ||
1473 | } | ||
1474 | entry++; | ||
1475 | } | ||
1476 | write_c0_index(PARKED_INDEX); | ||
1477 | tlbw_use_hazard(); | ||
1478 | } | ||
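
The invalidation above relies on a standard MIPS idiom: a dead entry cannot simply be zeroed, because two TLB entries matching the same VPN2 raise a machine check, so each victim is parked at a distinct unmapped CKSEG0 address. As a sketch (the kernel's <asm/tlb.h> provides an equivalent UNIQUE_ENTRYHI() helper; CKSEG0 and PAGE_SHIFT come from the usual headers):

/* One even/odd page pair per TLB index keeps every parked VPN2 distinct. */
static unsigned long unique_entryhi(int entry)
{
	return CKSEG0 + (entry << (PAGE_SHIFT + 1));
}
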
1479 | |||
1480 | /* | ||
1481 | * Support for single-threading cache flush operations. | ||
1482 | */ | ||
1483 | |||
1484 | static int halt_state_save[NR_CPUS]; | ||
1485 | |||
1486 | /* | ||
1487 | * To really, really be sure that nothing is being done | ||
1488 | * by other TCs, halt them all. This code assumes that | ||
1489 | * a DVPE has already been done, so while their Halted | ||
1490 | * state is theoretically architecturally unstable, in | ||
1491 | * practice, it's not going to change while we're looking | ||
1492 | * at it. | ||
1493 | */ | ||
1494 | |||
1495 | void smtc_cflush_lockdown(void) | ||
1496 | { | ||
1497 | int cpu; | ||
1498 | |||
1499 | for_each_online_cpu(cpu) { | ||
1500 | if (cpu != smp_processor_id()) { | ||
1501 | settc(cpu_data[cpu].tc_id); | ||
1502 | halt_state_save[cpu] = read_tc_c0_tchalt(); | ||
1503 | write_tc_c0_tchalt(TCHALT_H); | ||
1504 | } | ||
1505 | } | ||
1506 | mips_ihb(); | ||
1507 | } | ||
1508 | |||
1509 | /* It would be cheating to change the cpu_online states during a flush! */ | ||
1510 | |||
1511 | void smtc_cflush_release(void) | ||
1512 | { | ||
1513 | int cpu; | ||
1514 | |||
1515 | /* | ||
1516 | * Start with a hazard barrier to ensure | ||
1517 | * that all CACHE ops have played through. | ||
1518 | */ | ||
1519 | mips_ihb(); | ||
1520 | |||
1521 | for_each_online_cpu(cpu) { | ||
1522 | if (cpu != smp_processor_id()) { | ||
1523 | settc(cpu_data[cpu].tc_id); | ||
1524 | write_tc_c0_tchalt(halt_state_save[cpu]); | ||
1525 | } | ||
1526 | } | ||
1527 | mips_ihb(); | ||
1528 | } | ||
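
Together the two functions bracket a stop-the-world cache operation: halt every other TC, run the CACHE instructions single-threaded, then restore each TC's saved TCHalt state. A hedged usage sketch, where cache_op_all() is a hypothetical stand-in for the actual flush routine and, per the comment above, a DVPE is assumed around the whole sequence:

static void single_threaded_cache_flush_sketch(void)
{
	unsigned int vpflags = dvpe();	/* quiesce the other VPEs first */

	smtc_cflush_lockdown();		/* halt every other TC */
	cache_op_all();			/* hypothetical: the CACHE ops proper */
	smtc_cflush_release();		/* put saved TCHalt states back */
	evpe(vpflags);
}
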
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c index c24ad5f4b324..2242bdd4370e 100644 --- a/arch/mips/kernel/sync-r4k.c +++ b/arch/mips/kernel/sync-r4k.c | |||
@@ -6,8 +6,6 @@ | |||
6 | * not have done anything significant (but they may have had interrupts | 6 | * not have done anything significant (but they may have had interrupts |
7 | * enabled briefly - prom_smp_finish() should not be responsible for enabling | 7 | * enabled briefly - prom_smp_finish() should not be responsible for enabling |
8 | * interrupts...) | 8 | * interrupts...) |
9 | * | ||
10 | * FIXME: broken for SMTC | ||
11 | */ | 9 | */ |
12 | 10 | ||
13 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
@@ -33,14 +31,6 @@ void synchronise_count_master(int cpu) | |||
33 | unsigned long flags; | 31 | unsigned long flags; |
34 | unsigned int initcount; | 32 | unsigned int initcount; |
35 | 33 | ||
36 | #ifdef CONFIG_MIPS_MT_SMTC | ||
37 | /* | ||
38 | * SMTC needs to synchronise per VPE, not per CPU | ||
39 | * ignore for now | ||
40 | */ | ||
41 | return; | ||
42 | #endif | ||
43 | |||
44 | printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); | 34 | printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); |
45 | 35 | ||
46 | local_irq_save(flags); | 36 | local_irq_save(flags); |
@@ -110,14 +100,6 @@ void synchronise_count_slave(int cpu) | |||
110 | int i; | 100 | int i; |
111 | unsigned int initcount; | 101 | unsigned int initcount; |
112 | 102 | ||
113 | #ifdef CONFIG_MIPS_MT_SMTC | ||
114 | /* | ||
115 | * SMTC needs to synchronise per VPE, not per CPU | ||
116 | * ignore for now | ||
117 | */ | ||
118 | return; | ||
119 | #endif | ||
120 | |||
121 | /* | 103 | /* |
122 | * Not every cpu is online at the time this gets called, | 104 | * Not every cpu is online at the time this gets called, |
123 | * so we first wait for the master to say everyone is ready | 105 | * so we first wait for the master to say everyone is ready |
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index dcb8e5d3bb8a..8d0170969e22 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <asm/cpu-features.h> | 26 | #include <asm/cpu-features.h> |
27 | #include <asm/cpu-type.h> | 27 | #include <asm/cpu-type.h> |
28 | #include <asm/div64.h> | 28 | #include <asm/div64.h> |
29 | #include <asm/smtc_ipi.h> | ||
30 | #include <asm/time.h> | 29 | #include <asm/time.h> |
31 | 30 | ||
32 | /* | 31 | /* |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 074e857ced28..3a2672907f80 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -370,9 +370,6 @@ void __noreturn die(const char *str, struct pt_regs *regs) | |||
370 | { | 370 | { |
371 | static int die_counter; | 371 | static int die_counter; |
372 | int sig = SIGSEGV; | 372 | int sig = SIGSEGV; |
373 | #ifdef CONFIG_MIPS_MT_SMTC | ||
374 | unsigned long dvpret; | ||
375 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
376 | 373 | ||
377 | oops_enter(); | 374 | oops_enter(); |
378 | 375 | ||
@@ -382,13 +379,7 @@ void __noreturn die(const char *str, struct pt_regs *regs) | |||
382 | 379 | ||
383 | console_verbose(); | 380 | console_verbose(); |
384 | raw_spin_lock_irq(&die_lock); | 381 | raw_spin_lock_irq(&die_lock); |
385 | #ifdef CONFIG_MIPS_MT_SMTC | ||
386 | dvpret = dvpe(); | ||
387 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
388 | bust_spinlocks(1); | 382 | bust_spinlocks(1); |
389 | #ifdef CONFIG_MIPS_MT_SMTC | ||
390 | mips_mt_regdump(dvpret); | ||
391 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
392 | 383 | ||
393 | printk("%s[#%d]:\n", str, ++die_counter); | 384 | printk("%s[#%d]:\n", str, ++die_counter); |
394 | show_registers(regs); | 385 | show_registers(regs); |
@@ -1759,19 +1750,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
1759 | extern char rollback_except_vec_vi; | 1750 | extern char rollback_except_vec_vi; |
1760 | char *vec_start = using_rollback_handler() ? | 1751 | char *vec_start = using_rollback_handler() ? |
1761 | &rollback_except_vec_vi : &except_vec_vi; | 1752 | &rollback_except_vec_vi : &except_vec_vi; |
1762 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1763 | /* | ||
1764 | * We need to provide the SMTC vectored interrupt handler | ||
1765 | * not only with the address of the handler, but with the | ||
1766 | * Status.IM bit to be masked before going there. | ||
1767 | */ | ||
1768 | extern char except_vec_vi_mori; | ||
1769 | #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) | ||
1770 | const int mori_offset = &except_vec_vi_mori - vec_start + 2; | ||
1771 | #else | ||
1772 | const int mori_offset = &except_vec_vi_mori - vec_start; | ||
1773 | #endif | ||
1774 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1775 | #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) | 1753 | #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) |
1776 | const int lui_offset = &except_vec_vi_lui - vec_start + 2; | 1754 | const int lui_offset = &except_vec_vi_lui - vec_start + 2; |
1777 | const int ori_offset = &except_vec_vi_ori - vec_start + 2; | 1755 | const int ori_offset = &except_vec_vi_ori - vec_start + 2; |
@@ -1795,12 +1773,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
1795 | #else | 1773 | #else |
1796 | handler_len); | 1774 | handler_len); |
1797 | #endif | 1775 | #endif |
1798 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1799 | BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ | ||
1800 | |||
1801 | h = (u16 *)(b + mori_offset); | ||
1802 | *h = (0x100 << n); | ||
1803 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1804 | h = (u16 *)(b + lui_offset); | 1776 | h = (u16 *)(b + lui_offset); |
1805 | *h = (handler >> 16) & 0xffff; | 1777 | *h = (handler >> 16) & 0xffff; |
1806 | h = (u16 *)(b + ori_offset); | 1778 | h = (u16 *)(b + ori_offset); |
@@ -1870,20 +1842,6 @@ void per_cpu_trap_init(bool is_boot_cpu) | |||
1870 | unsigned int cpu = smp_processor_id(); | 1842 | unsigned int cpu = smp_processor_id(); |
1871 | unsigned int status_set = ST0_CU0; | 1843 | unsigned int status_set = ST0_CU0; |
1872 | unsigned int hwrena = cpu_hwrena_impl_bits; | 1844 | unsigned int hwrena = cpu_hwrena_impl_bits; |
1873 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1874 | int secondaryTC = 0; | ||
1875 | int bootTC = (cpu == 0); | ||
1876 | |||
1877 | /* | ||
1878 | * Only do per_cpu_trap_init() for first TC of each VPE. | ||
1879 | * Note that this hack assumes that the SMTC init code | ||
1880 | * assigns TCs consecutively and in ascending order. | ||
1881 | */ | ||
1882 | |||
1883 | if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && | ||
1884 | ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) | ||
1885 | secondaryTC = 1; | ||
1886 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1887 | 1845 | ||
1888 | /* | 1846 | /* |
1889 | * Disable coprocessors and select 32-bit or 64-bit addressing | 1847 | * Disable coprocessors and select 32-bit or 64-bit addressing |
@@ -1911,10 +1869,6 @@ void per_cpu_trap_init(bool is_boot_cpu) | |||
1911 | if (hwrena) | 1869 | if (hwrena) |
1912 | write_c0_hwrena(hwrena); | 1870 | write_c0_hwrena(hwrena); |
1913 | 1871 | ||
1914 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1915 | if (!secondaryTC) { | ||
1916 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1917 | |||
1918 | if (cpu_has_veic || cpu_has_vint) { | 1872 | if (cpu_has_veic || cpu_has_vint) { |
1919 | unsigned long sr = set_c0_status(ST0_BEV); | 1873 | unsigned long sr = set_c0_status(ST0_BEV); |
1920 | write_c0_ebase(ebase); | 1874 | write_c0_ebase(ebase); |
@@ -1949,10 +1903,6 @@ void per_cpu_trap_init(bool is_boot_cpu) | |||
1949 | cp0_perfcount_irq = -1; | 1903 | cp0_perfcount_irq = -1; |
1950 | } | 1904 | } |
1951 | 1905 | ||
1952 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1953 | } | ||
1954 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1955 | |||
1956 | if (!cpu_data[cpu].asid_cache) | 1906 | if (!cpu_data[cpu].asid_cache) |
1957 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; | 1907 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; |
1958 | 1908 | ||
@@ -1961,23 +1911,10 @@ void per_cpu_trap_init(bool is_boot_cpu) | |||
1961 | BUG_ON(current->mm); | 1911 | BUG_ON(current->mm); |
1962 | enter_lazy_tlb(&init_mm, current); | 1912 | enter_lazy_tlb(&init_mm, current); |
1963 | 1913 | ||
1964 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1965 | if (bootTC) { | ||
1966 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1967 | /* Boot CPU's cache setup in setup_arch(). */ | 1914 | /* Boot CPU's cache setup in setup_arch(). */ |
1968 | if (!is_boot_cpu) | 1915 | if (!is_boot_cpu) |
1969 | cpu_cache_init(); | 1916 | cpu_cache_init(); |
1970 | tlb_init(); | 1917 | tlb_init(); |
1971 | #ifdef CONFIG_MIPS_MT_SMTC | ||
1972 | } else if (!secondaryTC) { | ||
1973 | /* | ||
1974 | * First TC in non-boot VPE must do subset of tlb_init() | ||
1975 | * for MMU control registers. | ||
1976 | */ | ||
1977 | write_c0_pagemask(PM_DEFAULT_MASK); | ||
1978 | write_c0_wired(0); | ||
1979 | } | ||
1980 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
1981 | TLBMISS_HANDLER_SETUP(); | 1918 | TLBMISS_HANDLER_SETUP(); |
1982 | } | 1919 | } |
1983 | 1920 | ||
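
The secondaryTC test deleted from per_cpu_trap_init() above encoded the assumption that SMTC assigned TCs consecutively: a CPU was a secondary TC if it was not TC0 and sat on the same VPE as the previous CPU number. Restated as a hedged helper, with TCBIND_* and cpu_data[] as in <asm/mipsregs.h> and the cpuinfo array:

static int is_secondary_tc_sketch(unsigned int cpu)
{
	unsigned int tcbind = read_c0_tcbind();

	return (tcbind & TCBIND_CURTC) != 0 &&
	       (tcbind & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id;
}
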
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c index 949ae0e17018..2e003b11a098 100644 --- a/arch/mips/kernel/vpe-mt.c +++ b/arch/mips/kernel/vpe-mt.c | |||
@@ -127,9 +127,8 @@ int vpe_run(struct vpe *v) | |||
127 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | 127 | clear_c0_mvpcontrol(MVPCONTROL_VPC); |
128 | 128 | ||
129 | /* | 129 | /* |
130 | * SMTC/SMVP kernels manage VPE enable independently, | 130 | * SMVP kernels manage VPE enable independently, but uniprocessor |
131 | * but uniprocessor kernels need to turn it on, even | 131 | * kernels need to turn it on, even if that wasn't the pre-dvpe() state. |
132 | * if that wasn't the pre-dvpe() state. | ||
133 | */ | 132 | */ |
134 | #ifdef CONFIG_SMP | 133 | #ifdef CONFIG_SMP |
135 | evpe(vpeflags); | 134 | evpe(vpeflags); |
@@ -454,12 +453,11 @@ int __init vpe_module_init(void) | |||
454 | 453 | ||
455 | settc(tc); | 454 | settc(tc); |
456 | 455 | ||
457 | /* Any TC that is bound to VPE0 gets left as is - in | 456 | /* |
458 | * case we are running SMTC on VPE0. A TC that is bound | 457 | * A TC that is bound to any other VPE gets bound to |
459 | * to any other VPE gets bound to VPE0, ideally I'd like | 458 | * VPE0, ideally I'd like to make it homeless but it |
460 | * to make it homeless but it doesn't appear to let me | 459 | * doesn't appear to let me bind a TC to a non-existent |
461 | * bind a TC to a non-existent VPE. Which is perfectly | 460 | * VPE. Which is perfectly reasonable. |
462 | * reasonable. | ||
463 | * | 461 | * |
464 | * The (un)bound state is visible to an EJTAG probe so | 462 | * The (un)bound state is visible to an EJTAG probe so |
465 | * may notify GDB... | 463 | * may notify GDB... |
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 85685e1cdb89..030568a70ac4 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c | |||
@@ -61,7 +61,7 @@ | |||
61 | /* we have a cascade of 8 irqs */ | 61 | /* we have a cascade of 8 irqs */ |
62 | #define MIPS_CPU_IRQ_CASCADE 8 | 62 | #define MIPS_CPU_IRQ_CASCADE 8 |
63 | 63 | ||
64 | #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) | 64 | #ifdef CONFIG_MIPS_MT_SMP |
65 | int gic_present; | 65 | int gic_present; |
66 | #endif | 66 | #endif |
67 | 67 | ||
@@ -440,7 +440,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) | |||
440 | arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call); | 440 | arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call); |
441 | #endif | 441 | #endif |
442 | 442 | ||
443 | #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) | 443 | #ifndef CONFIG_MIPS_MT_SMP |
444 | set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | | 444 | set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | |
445 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); | 445 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); |
446 | #else | 446 | #else |
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c index 6807f7172eaf..57bcdaf1f1c8 100644 --- a/arch/mips/lib/mips-atomic.c +++ b/arch/mips/lib/mips-atomic.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/export.h> | 15 | #include <linux/export.h> |
16 | #include <linux/stringify.h> | 16 | #include <linux/stringify.h> |
17 | 17 | ||
18 | #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) | 18 | #ifndef CONFIG_CPU_MIPSR2 |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * For cli() we have to insert nops to make sure that the new value | 21 | * For cli() we have to insert nops to make sure that the new value |
@@ -42,12 +42,7 @@ notrace void arch_local_irq_disable(void) | |||
42 | __asm__ __volatile__( | 42 | __asm__ __volatile__( |
43 | " .set push \n" | 43 | " .set push \n" |
44 | " .set noat \n" | 44 | " .set noat \n" |
45 | #ifdef CONFIG_MIPS_MT_SMTC | 45 | #if defined(CONFIG_CPU_MIPSR2) |
46 | " mfc0 $1, $2, 1 \n" | ||
47 | " ori $1, 0x400 \n" | ||
48 | " .set noreorder \n" | ||
49 | " mtc0 $1, $2, 1 \n" | ||
50 | #elif defined(CONFIG_CPU_MIPSR2) | ||
51 | /* see irqflags.h for inline function */ | 46 | /* see irqflags.h for inline function */ |
52 | #else | 47 | #else |
53 | " mfc0 $1,$12 \n" | 48 | " mfc0 $1,$12 \n" |
@@ -77,13 +72,7 @@ notrace unsigned long arch_local_irq_save(void) | |||
77 | " .set push \n" | 72 | " .set push \n" |
78 | " .set reorder \n" | 73 | " .set reorder \n" |
79 | " .set noat \n" | 74 | " .set noat \n" |
80 | #ifdef CONFIG_MIPS_MT_SMTC | 75 | #if defined(CONFIG_CPU_MIPSR2) |
81 | " mfc0 %[flags], $2, 1 \n" | ||
82 | " ori $1, %[flags], 0x400 \n" | ||
83 | " .set noreorder \n" | ||
84 | " mtc0 $1, $2, 1 \n" | ||
85 | " andi %[flags], %[flags], 0x400 \n" | ||
86 | #elif defined(CONFIG_CPU_MIPSR2) | ||
87 | /* see irqflags.h for inline function */ | 76 | /* see irqflags.h for inline function */ |
88 | #else | 77 | #else |
89 | " mfc0 %[flags], $12 \n" | 78 | " mfc0 %[flags], $12 \n" |
@@ -108,29 +97,13 @@ notrace void arch_local_irq_restore(unsigned long flags) | |||
108 | { | 97 | { |
109 | unsigned long __tmp1; | 98 | unsigned long __tmp1; |
110 | 99 | ||
111 | #ifdef CONFIG_MIPS_MT_SMTC | ||
112 | /* | ||
113 | * SMTC kernel needs to do a software replay of queued | ||
114 | * IPIs, at the cost of branch and call overhead on each | ||
115 | * local_irq_restore() | ||
116 | */ | ||
117 | if (unlikely(!(flags & 0x0400))) | ||
118 | smtc_ipi_replay(); | ||
119 | #endif | ||
120 | preempt_disable(); | 100 | preempt_disable(); |
121 | 101 | ||
122 | __asm__ __volatile__( | 102 | __asm__ __volatile__( |
123 | " .set push \n" | 103 | " .set push \n" |
124 | " .set noreorder \n" | 104 | " .set noreorder \n" |
125 | " .set noat \n" | 105 | " .set noat \n" |
126 | #ifdef CONFIG_MIPS_MT_SMTC | 106 | #if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) |
127 | " mfc0 $1, $2, 1 \n" | ||
128 | " andi %[flags], 0x400 \n" | ||
129 | " ori $1, 0x400 \n" | ||
130 | " xori $1, 0x400 \n" | ||
131 | " or %[flags], $1 \n" | ||
132 | " mtc0 %[flags], $2, 1 \n" | ||
133 | #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | ||
134 | /* see irqflags.h for inline function */ | 107 | /* see irqflags.h for inline function */ |
135 | #elif defined(CONFIG_CPU_MIPSR2) | 108 | #elif defined(CONFIG_CPU_MIPSR2) |
136 | /* see irqflags.h for inline function */ | 109 | /* see irqflags.h for inline function */ |
@@ -163,14 +136,7 @@ notrace void __arch_local_irq_restore(unsigned long flags) | |||
163 | " .set push \n" | 136 | " .set push \n" |
164 | " .set noreorder \n" | 137 | " .set noreorder \n" |
165 | " .set noat \n" | 138 | " .set noat \n" |
166 | #ifdef CONFIG_MIPS_MT_SMTC | 139 | #if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) |
167 | " mfc0 $1, $2, 1 \n" | ||
168 | " andi %[flags], 0x400 \n" | ||
169 | " ori $1, 0x400 \n" | ||
170 | " xori $1, 0x400 \n" | ||
171 | " or %[flags], $1 \n" | ||
172 | " mtc0 %[flags], $2, 1 \n" | ||
173 | #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | ||
174 | /* see irqflags.h for inline function */ | 140 | /* see irqflags.h for inline function */ |
175 | #elif defined(CONFIG_CPU_MIPSR2) | 141 | #elif defined(CONFIG_CPU_MIPSR2) |
176 | /* see irqflags.h for inline function */ | 142 | /* see irqflags.h for inline function */ |
@@ -192,4 +158,4 @@ notrace void __arch_local_irq_restore(unsigned long flags) | |||
192 | } | 158 | } |
193 | EXPORT_SYMBOL(__arch_local_irq_restore); | 159 | EXPORT_SYMBOL(__arch_local_irq_restore); |
194 | 160 | ||
195 | #endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */ | 161 | #endif /* !CONFIG_CPU_MIPSR2 */ |
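
The SMTC assembly deleted from this file masked interrupts per TC by setting the IXMT bit (0x400) in TCStatus — CP0 register 2, select 1 — instead of touching the global Status.IE. A hedged C restatement using the read_c0_tcstatus()/write_c0_tcstatus() accessors from <asm/mipsregs.h>:

#define TCSTATUS_IXMT	0x400	/* this TC is exempt from interrupts */

static inline unsigned long smtc_irq_save_sketch(void)
{
	unsigned long flags = read_c0_tcstatus();	/* mfc0 %[flags], $2, 1 */

	write_c0_tcstatus(flags | TCSTATUS_IXMT);	/* mask this TC only */
	return flags & TCSTATUS_IXMT;			/* caller keeps just IXMT */
}

static inline void smtc_irq_restore_sketch(unsigned long flags)
{
	unsigned long tcs = read_c0_tcstatus() & ~TCSTATUS_IXMT;

	write_c0_tcstatus(tcs | (flags & TCSTATUS_IXMT));
}

This is also why the removed arch_local_irq_restore() had to branch into smtc_ipi_replay() first: IPIs queued while IXMT was set would otherwise have been lost on unmask, at the cost of a branch and call on every restore.
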
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 7bc14ffc7a1c..5c2128283ba6 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -50,7 +50,7 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info) | |||
50 | { | 50 | { |
51 | preempt_disable(); | 51 | preempt_disable(); |
52 | 52 | ||
53 | #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) | 53 | #ifndef CONFIG_MIPS_MT_SMP |
54 | smp_call_function(func, info, 1); | 54 | smp_call_function(func, info, 1); |
55 | #endif | 55 | #endif |
56 | func(info); | 56 | func(info); |
@@ -427,7 +427,7 @@ static void r4k___flush_cache_all(void) | |||
427 | 427 | ||
428 | static inline int has_valid_asid(const struct mm_struct *mm) | 428 | static inline int has_valid_asid(const struct mm_struct *mm) |
429 | { | 429 | { |
430 | #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) | 430 | #ifdef CONFIG_MIPS_MT_SMP |
431 | int i; | 431 | int i; |
432 | 432 | ||
433 | for_each_online_cpu(i) | 433 | for_each_online_cpu(i) |
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 4fc74c78265a..609a0cd749ff 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -44,27 +44,6 @@ | |||
44 | #include <asm/tlb.h> | 44 | #include <asm/tlb.h> |
45 | #include <asm/fixmap.h> | 45 | #include <asm/fixmap.h> |
46 | 46 | ||
47 | /* Atomicity and interruptability */ | ||
48 | #ifdef CONFIG_MIPS_MT_SMTC | ||
49 | |||
50 | #include <asm/mipsmtregs.h> | ||
51 | |||
52 | #define ENTER_CRITICAL(flags) \ | ||
53 | { \ | ||
54 | unsigned int mvpflags; \ | ||
55 | local_irq_save(flags);\ | ||
56 | mvpflags = dvpe() | ||
57 | #define EXIT_CRITICAL(flags) \ | ||
58 | evpe(mvpflags); \ | ||
59 | local_irq_restore(flags); \ | ||
60 | } | ||
61 | #else | ||
62 | |||
63 | #define ENTER_CRITICAL(flags) local_irq_save(flags) | ||
64 | #define EXIT_CRITICAL(flags) local_irq_restore(flags) | ||
65 | |||
66 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
67 | |||
68 | /* | 47 | /* |
69 | * We have up to 8 empty zeroed pages so we can map one of the right colour | 48 | * We have up to 8 empty zeroed pages so we can map one of the right colour |
70 | * when needed. This is necessary only on R4000 / R4400 SC and MC versions | 49 | * when needed. This is necessary only on R4000 / R4400 SC and MC versions |
@@ -100,20 +79,6 @@ void setup_zero_pages(void) | |||
100 | zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; | 79 | zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; |
101 | } | 80 | } |
102 | 81 | ||
103 | #ifdef CONFIG_MIPS_MT_SMTC | ||
104 | static pte_t *kmap_coherent_pte; | ||
105 | static void __init kmap_coherent_init(void) | ||
106 | { | ||
107 | unsigned long vaddr; | ||
108 | |||
109 | /* cache the first coherent kmap pte */ | ||
110 | vaddr = __fix_to_virt(FIX_CMAP_BEGIN); | ||
111 | kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); | ||
112 | } | ||
113 | #else | ||
114 | static inline void kmap_coherent_init(void) {} | ||
115 | #endif | ||
116 | |||
117 | void *kmap_coherent(struct page *page, unsigned long addr) | 82 | void *kmap_coherent(struct page *page, unsigned long addr) |
118 | { | 83 | { |
119 | enum fixed_addresses idx; | 84 | enum fixed_addresses idx; |
@@ -126,12 +91,7 @@ void *kmap_coherent(struct page *page, unsigned long addr) | |||
126 | 91 | ||
127 | pagefault_disable(); | 92 | pagefault_disable(); |
128 | idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); | 93 | idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); |
129 | #ifdef CONFIG_MIPS_MT_SMTC | ||
130 | idx += FIX_N_COLOURS * smp_processor_id() + | ||
131 | (in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0); | ||
132 | #else | ||
133 | idx += in_interrupt() ? FIX_N_COLOURS : 0; | 94 | idx += in_interrupt() ? FIX_N_COLOURS : 0; |
134 | #endif | ||
135 | vaddr = __fix_to_virt(FIX_CMAP_END - idx); | 95 | vaddr = __fix_to_virt(FIX_CMAP_END - idx); |
136 | pte = mk_pte(page, PAGE_KERNEL); | 96 | pte = mk_pte(page, PAGE_KERNEL); |
137 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) | 97 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) |
@@ -140,44 +100,29 @@ void *kmap_coherent(struct page *page, unsigned long addr) | |||
140 | entrylo = pte_to_entrylo(pte_val(pte)); | 100 | entrylo = pte_to_entrylo(pte_val(pte)); |
141 | #endif | 101 | #endif |
142 | 102 | ||
143 | ENTER_CRITICAL(flags); | 103 | local_irq_save(flags); |
144 | old_ctx = read_c0_entryhi(); | 104 | old_ctx = read_c0_entryhi(); |
145 | write_c0_entryhi(vaddr & (PAGE_MASK << 1)); | 105 | write_c0_entryhi(vaddr & (PAGE_MASK << 1)); |
146 | write_c0_entrylo0(entrylo); | 106 | write_c0_entrylo0(entrylo); |
147 | write_c0_entrylo1(entrylo); | 107 | write_c0_entrylo1(entrylo); |
148 | #ifdef CONFIG_MIPS_MT_SMTC | ||
149 | set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); | ||
150 | /* preload TLB instead of local_flush_tlb_one() */ | ||
151 | mtc0_tlbw_hazard(); | ||
152 | tlb_probe(); | ||
153 | tlb_probe_hazard(); | ||
154 | tlbidx = read_c0_index(); | ||
155 | mtc0_tlbw_hazard(); | ||
156 | if (tlbidx < 0) | ||
157 | tlb_write_random(); | ||
158 | else | ||
159 | tlb_write_indexed(); | ||
160 | #else | ||
161 | tlbidx = read_c0_wired(); | 108 | tlbidx = read_c0_wired(); |
162 | write_c0_wired(tlbidx + 1); | 109 | write_c0_wired(tlbidx + 1); |
163 | write_c0_index(tlbidx); | 110 | write_c0_index(tlbidx); |
164 | mtc0_tlbw_hazard(); | 111 | mtc0_tlbw_hazard(); |
165 | tlb_write_indexed(); | 112 | tlb_write_indexed(); |
166 | #endif | ||
167 | tlbw_use_hazard(); | 113 | tlbw_use_hazard(); |
168 | write_c0_entryhi(old_ctx); | 114 | write_c0_entryhi(old_ctx); |
169 | EXIT_CRITICAL(flags); | 115 | local_irq_restore(flags); |
170 | 116 | ||
171 | return (void*) vaddr; | 117 | return (void*) vaddr; |
172 | } | 118 | } |
173 | 119 | ||
174 | void kunmap_coherent(void) | 120 | void kunmap_coherent(void) |
175 | { | 121 | { |
176 | #ifndef CONFIG_MIPS_MT_SMTC | ||
177 | unsigned int wired; | 122 | unsigned int wired; |
178 | unsigned long flags, old_ctx; | 123 | unsigned long flags, old_ctx; |
179 | 124 | ||
180 | ENTER_CRITICAL(flags); | 125 | local_irq_save(flags); |
181 | old_ctx = read_c0_entryhi(); | 126 | old_ctx = read_c0_entryhi(); |
182 | wired = read_c0_wired() - 1; | 127 | wired = read_c0_wired() - 1; |
183 | write_c0_wired(wired); | 128 | write_c0_wired(wired); |
@@ -189,8 +134,7 @@ void kunmap_coherent(void) | |||
189 | tlb_write_indexed(); | 134 | tlb_write_indexed(); |
190 | tlbw_use_hazard(); | 135 | tlbw_use_hazard(); |
191 | write_c0_entryhi(old_ctx); | 136 | write_c0_entryhi(old_ctx); |
192 | EXIT_CRITICAL(flags); | 137 | local_irq_restore(flags); |
193 | #endif | ||
194 | pagefault_enable(); | 138 | pagefault_enable(); |
195 | } | 139 | } |
196 | 140 | ||
@@ -256,7 +200,7 @@ EXPORT_SYMBOL_GPL(copy_from_user_page); | |||
256 | void __init fixrange_init(unsigned long start, unsigned long end, | 200 | void __init fixrange_init(unsigned long start, unsigned long end, |
257 | pgd_t *pgd_base) | 201 | pgd_t *pgd_base) |
258 | { | 202 | { |
259 | #if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC) | 203 | #ifdef CONFIG_HIGHMEM |
260 | pgd_t *pgd; | 204 | pgd_t *pgd; |
261 | pud_t *pud; | 205 | pud_t *pud; |
262 | pmd_t *pmd; | 206 | pmd_t *pmd; |
@@ -327,8 +271,6 @@ void __init paging_init(void) | |||
327 | #ifdef CONFIG_HIGHMEM | 271 | #ifdef CONFIG_HIGHMEM |
328 | kmap_init(); | 272 | kmap_init(); |
329 | #endif | 273 | #endif |
330 | kmap_coherent_init(); | ||
331 | |||
332 | #ifdef CONFIG_ZONE_DMA | 274 | #ifdef CONFIG_ZONE_DMA |
333 | max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; | 275 | max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; |
334 | #endif | 276 | #endif |
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index eeaf50f5df2b..403fa804e4f4 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c | |||
@@ -25,28 +25,6 @@ | |||
25 | 25 | ||
26 | extern void build_tlb_refill_handler(void); | 26 | extern void build_tlb_refill_handler(void); |
27 | 27 | ||
28 | /* Atomicity and interruptability */ | ||
29 | #ifdef CONFIG_MIPS_MT_SMTC | ||
30 | |||
31 | #include <asm/smtc.h> | ||
32 | #include <asm/mipsmtregs.h> | ||
33 | |||
34 | #define ENTER_CRITICAL(flags) \ | ||
35 | { \ | ||
36 | unsigned int mvpflags; \ | ||
37 | local_irq_save(flags);\ | ||
38 | mvpflags = dvpe() | ||
39 | #define EXIT_CRITICAL(flags) \ | ||
40 | evpe(mvpflags); \ | ||
41 | local_irq_restore(flags); \ | ||
42 | } | ||
43 | #else | ||
44 | |||
45 | #define ENTER_CRITICAL(flags) local_irq_save(flags) | ||
46 | #define EXIT_CRITICAL(flags) local_irq_restore(flags) | ||
47 | |||
48 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
49 | |||
50 | /* | 28 | /* |
51 | * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb, | 29 | * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb, |
52 | * unfortunately, itlb is not totally transparent to software. | 30 | * unfortunately, itlb is not totally transparent to software. |
@@ -75,7 +53,7 @@ void local_flush_tlb_all(void) | |||
75 | unsigned long old_ctx; | 53 | unsigned long old_ctx; |
76 | int entry, ftlbhighset; | 54 | int entry, ftlbhighset; |
77 | 55 | ||
78 | ENTER_CRITICAL(flags); | 56 | local_irq_save(flags); |
79 | /* Save old context and create impossible VPN2 value */ | 57 | /* Save old context and create impossible VPN2 value */ |
80 | old_ctx = read_c0_entryhi(); | 58 | old_ctx = read_c0_entryhi(); |
81 | write_c0_entrylo0(0); | 59 | write_c0_entrylo0(0); |
@@ -112,7 +90,7 @@ void local_flush_tlb_all(void) | |||
112 | tlbw_use_hazard(); | 90 | tlbw_use_hazard(); |
113 | write_c0_entryhi(old_ctx); | 91 | write_c0_entryhi(old_ctx); |
114 | flush_itlb(); | 92 | flush_itlb(); |
115 | EXIT_CRITICAL(flags); | 93 | local_irq_restore(flags); |
116 | } | 94 | } |
117 | EXPORT_SYMBOL(local_flush_tlb_all); | 95 | EXPORT_SYMBOL(local_flush_tlb_all); |
118 | 96 | ||
@@ -142,7 +120,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
142 | if (cpu_context(cpu, mm) != 0) { | 120 | if (cpu_context(cpu, mm) != 0) { |
143 | unsigned long size, flags; | 121 | unsigned long size, flags; |
144 | 122 | ||
145 | ENTER_CRITICAL(flags); | 123 | local_irq_save(flags); |
146 | start = round_down(start, PAGE_SIZE << 1); | 124 | start = round_down(start, PAGE_SIZE << 1); |
147 | end = round_up(end, PAGE_SIZE << 1); | 125 | end = round_up(end, PAGE_SIZE << 1); |
148 | size = (end - start) >> (PAGE_SHIFT + 1); | 126 | size = (end - start) >> (PAGE_SHIFT + 1); |
@@ -176,7 +154,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
176 | drop_mmu_context(mm, cpu); | 154 | drop_mmu_context(mm, cpu); |
177 | } | 155 | } |
178 | flush_itlb(); | 156 | flush_itlb(); |
179 | EXIT_CRITICAL(flags); | 157 | local_irq_restore(flags); |
180 | } | 158 | } |
181 | } | 159 | } |
182 | 160 | ||
@@ -184,7 +162,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
184 | { | 162 | { |
185 | unsigned long size, flags; | 163 | unsigned long size, flags; |
186 | 164 | ||
187 | ENTER_CRITICAL(flags); | 165 | local_irq_save(flags); |
188 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 166 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
189 | size = (size + 1) >> 1; | 167 | size = (size + 1) >> 1; |
190 | if (size <= (current_cpu_data.tlbsizeftlbsets ? | 168 | if (size <= (current_cpu_data.tlbsizeftlbsets ? |
@@ -220,7 +198,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
220 | local_flush_tlb_all(); | 198 | local_flush_tlb_all(); |
221 | } | 199 | } |
222 | flush_itlb(); | 200 | flush_itlb(); |
223 | EXIT_CRITICAL(flags); | 201 | local_irq_restore(flags); |
224 | } | 202 | } |
225 | 203 | ||
226 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | 204 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
@@ -233,7 +211,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
233 | 211 | ||
234 | newpid = cpu_asid(cpu, vma->vm_mm); | 212 | newpid = cpu_asid(cpu, vma->vm_mm); |
235 | page &= (PAGE_MASK << 1); | 213 | page &= (PAGE_MASK << 1); |
236 | ENTER_CRITICAL(flags); | 214 | local_irq_save(flags); |
237 | oldpid = read_c0_entryhi(); | 215 | oldpid = read_c0_entryhi(); |
238 | write_c0_entryhi(page | newpid); | 216 | write_c0_entryhi(page | newpid); |
239 | mtc0_tlbw_hazard(); | 217 | mtc0_tlbw_hazard(); |
@@ -253,7 +231,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
253 | finish: | 231 | finish: |
254 | write_c0_entryhi(oldpid); | 232 | write_c0_entryhi(oldpid); |
255 | flush_itlb_vm(vma); | 233 | flush_itlb_vm(vma); |
256 | EXIT_CRITICAL(flags); | 234 | local_irq_restore(flags); |
257 | } | 235 | } |
258 | } | 236 | } |
259 | 237 | ||
@@ -266,7 +244,7 @@ void local_flush_tlb_one(unsigned long page) | |||
266 | unsigned long flags; | 244 | unsigned long flags; |
267 | int oldpid, idx; | 245 | int oldpid, idx; |
268 | 246 | ||
269 | ENTER_CRITICAL(flags); | 247 | local_irq_save(flags); |
270 | oldpid = read_c0_entryhi(); | 248 | oldpid = read_c0_entryhi(); |
271 | page &= (PAGE_MASK << 1); | 249 | page &= (PAGE_MASK << 1); |
272 | write_c0_entryhi(page); | 250 | write_c0_entryhi(page); |
@@ -285,7 +263,7 @@ void local_flush_tlb_one(unsigned long page) | |||
285 | } | 263 | } |
286 | write_c0_entryhi(oldpid); | 264 | write_c0_entryhi(oldpid); |
287 | flush_itlb(); | 265 | flush_itlb(); |
288 | EXIT_CRITICAL(flags); | 266 | local_irq_restore(flags); |
289 | } | 267 | } |
290 | 268 | ||
291 | /* | 269 | /* |
@@ -308,7 +286,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
308 | if (current->active_mm != vma->vm_mm) | 286 | if (current->active_mm != vma->vm_mm) |
309 | return; | 287 | return; |
310 | 288 | ||
311 | ENTER_CRITICAL(flags); | 289 | local_irq_save(flags); |
312 | 290 | ||
313 | pid = read_c0_entryhi() & ASID_MASK; | 291 | pid = read_c0_entryhi() & ASID_MASK; |
314 | address &= (PAGE_MASK << 1); | 292 | address &= (PAGE_MASK << 1); |
@@ -358,7 +336,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
358 | } | 336 | } |
359 | tlbw_use_hazard(); | 337 | tlbw_use_hazard(); |
360 | flush_itlb_vm(vma); | 338 | flush_itlb_vm(vma); |
361 | EXIT_CRITICAL(flags); | 339 | local_irq_restore(flags); |
362 | } | 340 | } |
363 | 341 | ||
364 | void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | 342 | void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, |
@@ -369,7 +347,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
369 | unsigned long old_pagemask; | 347 | unsigned long old_pagemask; |
370 | unsigned long old_ctx; | 348 | unsigned long old_ctx; |
371 | 349 | ||
372 | ENTER_CRITICAL(flags); | 350 | local_irq_save(flags); |
373 | /* Save old context and create impossible VPN2 value */ | 351 | /* Save old context and create impossible VPN2 value */ |
374 | old_ctx = read_c0_entryhi(); | 352 | old_ctx = read_c0_entryhi(); |
375 | old_pagemask = read_c0_pagemask(); | 353 | old_pagemask = read_c0_pagemask(); |
@@ -389,7 +367,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
389 | tlbw_use_hazard(); /* What is the hazard here? */ | 367 | tlbw_use_hazard(); /* What is the hazard here? */ |
390 | write_c0_pagemask(old_pagemask); | 368 | write_c0_pagemask(old_pagemask); |
391 | local_flush_tlb_all(); | 369 | local_flush_tlb_all(); |
392 | EXIT_CRITICAL(flags); | 370 | local_irq_restore(flags); |
393 | } | 371 | } |
394 | 372 | ||
395 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 373 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
@@ -399,13 +377,13 @@ int __init has_transparent_hugepage(void) | |||
399 | unsigned int mask; | 377 | unsigned int mask; |
400 | unsigned long flags; | 378 | unsigned long flags; |
401 | 379 | ||
402 | ENTER_CRITICAL(flags); | 380 | local_irq_save(flags); |
403 | write_c0_pagemask(PM_HUGE_MASK); | 381 | write_c0_pagemask(PM_HUGE_MASK); |
404 | back_to_back_c0_hazard(); | 382 | back_to_back_c0_hazard(); |
405 | mask = read_c0_pagemask(); | 383 | mask = read_c0_pagemask(); |
406 | write_c0_pagemask(PM_DEFAULT_MASK); | 384 | write_c0_pagemask(PM_DEFAULT_MASK); |
407 | 385 | ||
408 | EXIT_CRITICAL(flags); | 386 | local_irq_restore(flags); |
409 | 387 | ||
410 | return mask == PM_HUGE_MASK; | 388 | return mask == PM_HUGE_MASK; |
411 | } | 389 | } |
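
The ENTER_CRITICAL()/EXIT_CRITICAL() pair folded back into plain local_irq_save()/local_irq_restore() here only worked as textual macros: ENTER_CRITICAL opened a block and a local variable for the dvpe() result, which EXIT_CRITICAL then consumed and closed. Expanded by hand, an SMTC-build critical section looked like this:

void example_critical_section(void)
{
	unsigned long flags;

	/* ENTER_CRITICAL(flags) expanded: */
	{
		unsigned int mvpflags;
		local_irq_save(flags);
		mvpflags = dvpe();	/* also stop the other VPEs */

		/* ...TLB registers poked here... */

		/* EXIT_CRITICAL(flags) expanded: */
		evpe(mvpflags);
		local_irq_restore(flags);
	}
}
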
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile index eae0ba3876d9..a85160137904 100644 --- a/arch/mips/mti-malta/Makefile +++ b/arch/mips/mti-malta/Makefile | |||
@@ -8,6 +8,3 @@ | |||
8 | obj-y := malta-amon.o malta-display.o malta-init.o \ | 8 | obj-y := malta-amon.o malta-display.o malta-init.o \ |
9 | malta-int.o malta-memory.o malta-platform.o \ | 9 | malta-int.o malta-memory.o malta-platform.o \ |
10 | malta-reset.o malta-setup.o malta-time.o | 10 | malta-reset.o malta-setup.o malta-time.o |
11 | |||
12 | # FIXME FIXME FIXME | ||
13 | obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o | ||
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 4f9e44d358b7..0f60256d3784 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c | |||
@@ -116,8 +116,6 @@ phys_t mips_cpc_default_phys_base(void) | |||
116 | return CPC_BASE_ADDR; | 116 | return CPC_BASE_ADDR; |
117 | } | 117 | } |
118 | 118 | ||
119 | extern struct plat_smp_ops msmtc_smp_ops; | ||
120 | |||
121 | void __init prom_init(void) | 119 | void __init prom_init(void) |
122 | { | 120 | { |
123 | mips_display_message("LINUX"); | 121 | mips_display_message("LINUX"); |
@@ -304,8 +302,4 @@ mips_pci_controller: | |||
304 | return; | 302 | return; |
305 | if (!register_vsmp_smp_ops()) | 303 | if (!register_vsmp_smp_ops()) |
306 | return; | 304 | return; |
307 | |||
308 | #ifdef CONFIG_MIPS_MT_SMTC | ||
309 | register_smp_ops(&msmtc_smp_ops); | ||
310 | #endif | ||
311 | } | 305 | } |
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index b71ee809191a..ecc2785f7858 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c | |||
@@ -504,28 +504,9 @@ void __init arch_init_irq(void) | |||
504 | } else if (cpu_has_vint) { | 504 | } else if (cpu_has_vint) { |
505 | set_vi_handler(MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); | 505 | set_vi_handler(MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); |
506 | set_vi_handler(MIPSCPU_INT_COREHI, corehi_irqdispatch); | 506 | set_vi_handler(MIPSCPU_INT_COREHI, corehi_irqdispatch); |
507 | #ifdef CONFIG_MIPS_MT_SMTC | ||
508 | setup_irq_smtc(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq, | ||
509 | (0x100 << MIPSCPU_INT_I8259A)); | ||
510 | setup_irq_smtc(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, | ||
511 | &corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI)); | ||
512 | /* | ||
513 | * Temporary hack to ensure that the subsidiary device | ||
514 | * interrupts coming in via the i8259A, but associated | ||
515 | * with low IRQ numbers, will restore the Status.IM | ||
516 | * value associated with the i8259A. | ||
517 | */ | ||
518 | { | ||
519 | int i; | ||
520 | |||
521 | for (i = 0; i < 16; i++) | ||
522 | irq_hwmask[i] = (0x100 << MIPSCPU_INT_I8259A); | ||
523 | } | ||
524 | #else /* Not SMTC */ | ||
525 | setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); | 507 | setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); |
526 | setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, | 508 | setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, |
527 | &corehi_irqaction); | 509 | &corehi_irqaction); |
528 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
529 | } else { | 510 | } else { |
530 | setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); | 511 | setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); |
531 | setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, | 512 | setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, |
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c index bf621516afff..db7c9e5826a6 100644 --- a/arch/mips/mti-malta/malta-setup.c +++ b/arch/mips/mti-malta/malta-setup.c | |||
@@ -77,11 +77,7 @@ const char *get_system_type(void) | |||
77 | return "MIPS Malta"; | 77 | return "MIPS Malta"; |
78 | } | 78 | } |
79 | 79 | ||
80 | #if defined(CONFIG_MIPS_MT_SMTC) | ||
81 | const char display_string[] = " SMTC LINUX ON MALTA "; | ||
82 | #else | ||
83 | const char display_string[] = " LINUX ON MALTA "; | 80 | const char display_string[] = " LINUX ON MALTA "; |
84 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
85 | 81 | ||
86 | #ifdef CONFIG_BLK_DEV_FD | 82 | #ifdef CONFIG_BLK_DEV_FD |
87 | static void __init fd_activate(void) | 83 | static void __init fd_activate(void) |
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c deleted file mode 100644 index c4849904f013..000000000000 --- a/arch/mips/mti-malta/malta-smtc.c +++ /dev/null | |||
@@ -1,162 +0,0 @@ | |||
1 | /* | ||
2 | * Malta Platform-specific hooks for SMP operation | ||
3 | */ | ||
4 | #include <linux/irq.h> | ||
5 | #include <linux/init.h> | ||
6 | |||
7 | #include <asm/mipsregs.h> | ||
8 | #include <asm/mipsmtregs.h> | ||
9 | #include <asm/smtc.h> | ||
10 | #include <asm/smtc_ipi.h> | ||
11 | |||
12 | /* VPE/SMP Prototype implements platform interfaces directly */ | ||
13 | |||
14 | /* | ||
15 | * Cause the specified action to be performed on a targeted "CPU" | ||
16 | */ | ||
17 | |||
18 | static void msmtc_send_ipi_single(int cpu, unsigned int action) | ||
19 | { | ||
20 | /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ | ||
21 | smtc_send_ipi(cpu, LINUX_SMP_IPI, action); | ||
22 | } | ||
23 | |||
24 | static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action) | ||
25 | { | ||
26 | unsigned int i; | ||
27 | |||
28 | for_each_cpu(i, mask) | ||
29 | msmtc_send_ipi_single(i, action); | ||
30 | } | ||
31 | |||
32 | /* | ||
33 | * Post-config but pre-boot cleanup entry point | ||
34 | */ | ||
35 | static void msmtc_init_secondary(void) | ||
36 | { | ||
37 | int myvpe; | ||
38 | |||
39 | /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */ | ||
40 | myvpe = read_c0_tcbind() & TCBIND_CURVPE; | ||
41 | if (myvpe != 0) { | ||
42 | /* Ideally, this should be done only once per VPE, but... */ | ||
43 | clear_c0_status(ST0_IM); | ||
44 | set_c0_status((0x100 << cp0_compare_irq) | ||
45 | | (0x100 << MIPS_CPU_IPI_IRQ)); | ||
46 | if (cp0_perfcount_irq >= 0) | ||
47 | set_c0_status(0x100 << cp0_perfcount_irq); | ||
48 | } | ||
49 | |||
50 | smtc_init_secondary(); | ||
51 | } | ||
52 | |||
53 | /* | ||
54 | * Platform "CPU" startup hook | ||
55 | */ | ||
56 | static void msmtc_boot_secondary(int cpu, struct task_struct *idle) | ||
57 | { | ||
58 | smtc_boot_secondary(cpu, idle); | ||
59 | } | ||
60 | |||
61 | /* | ||
62 | * SMP initialization finalization entry point | ||
63 | */ | ||
64 | static void msmtc_smp_finish(void) | ||
65 | { | ||
66 | smtc_smp_finish(); | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * Hook for after all CPUs are online | ||
71 | */ | ||
72 | |||
73 | static void msmtc_cpus_done(void) | ||
74 | { | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * Platform SMP pre-initialization | ||
79 | * | ||
80 | * As noted above, we can assume a single CPU for now | ||
81 | * but it may be multithreaded. | ||
82 | */ | ||
83 | |||
84 | static void __init msmtc_smp_setup(void) | ||
85 | { | ||
86 | /* | ||
87 | * we won't get the definitive value until | ||
88 | * we've run smtc_prepare_cpus later, but | ||
89 | * we would appear to need an upper bound now. | ||
90 | */ | ||
91 | smp_num_siblings = smtc_build_cpu_map(0); | ||
92 | } | ||
93 | |||
94 | static void __init msmtc_prepare_cpus(unsigned int max_cpus) | ||
95 | { | ||
96 | smtc_prepare_cpus(max_cpus); | ||
97 | } | ||
98 | |||
99 | struct plat_smp_ops msmtc_smp_ops = { | ||
100 | .send_ipi_single = msmtc_send_ipi_single, | ||
101 | .send_ipi_mask = msmtc_send_ipi_mask, | ||
102 | .init_secondary = msmtc_init_secondary, | ||
103 | .smp_finish = msmtc_smp_finish, | ||
104 | .cpus_done = msmtc_cpus_done, | ||
105 | .boot_secondary = msmtc_boot_secondary, | ||
106 | .smp_setup = msmtc_smp_setup, | ||
107 | .prepare_cpus = msmtc_prepare_cpus, | ||
108 | }; | ||
109 | |||
110 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | ||
111 | /* | ||
112 | * IRQ affinity hook | ||
113 | */ | ||
114 | |||
115 | |||
116 | int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, | ||
117 | bool force) | ||
118 | { | ||
119 | cpumask_t tmask; | ||
120 | int cpu = 0; | ||
121 | void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); | ||
122 | |||
123 | /* | ||
124 | * On the legacy Malta development board, all I/O interrupts | ||
125 | * are routed through the 8259 and combined in a single signal | ||
126 | * to the CPU daughterboard, and on the CoreFPGA2/3 34K models, | ||
127 | * that signal is brought to IP2 of both VPEs. To avoid racing | ||
128 | * concurrent interrupt service events, IP2 is enabled only on | ||
129 | * one VPE, by convention VPE0. So long as no bits are ever | ||
130 | * cleared in the affinity mask, there will never be any | ||
131 | * interrupt forwarding. But as soon as a program or operator | ||
132 | * sets affinity for one of the related IRQs, we need to make | ||
133 | * sure that we don't ever try to forward across the VPE boundary, | ||
134 | * at least not until we engineer a system where the interrupt | ||
135 | * _ack() or _end() function can somehow know that it corresponds | ||
136 | * to an interrupt taken on another VPE, and perform the appropriate | ||
137 | * restoration of Status.IM state using MFTR/MTTR instead of the | ||
138 | * normal local behavior. We also ensure that no attempt will | ||
139 | * be made to forward to an offline "CPU". | ||
140 | */ | ||
141 | |||
142 | cpumask_copy(&tmask, affinity); | ||
143 | for_each_cpu(cpu, affinity) { | ||
144 | if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) | ||
145 | cpu_clear(cpu, tmask); | ||
146 | } | ||
147 | cpumask_copy(d->affinity, &tmask); | ||
148 | |||
149 | if (cpus_empty(tmask)) | ||
150 | /* | ||
151 | * We could restore a default mask here, but the | ||
152 | * runtime code can anyway deal with the null set | ||
153 | */ | ||
154 | printk(KERN_WARNING | ||
155 | "IRQ affinity leaves no legal CPU for IRQ %d\n", d->irq); | ||
156 | |||
157 | /* Do any generic SMTC IRQ affinity setup */ | ||
158 | smtc_set_irq_affinity(d->irq, tmask); | ||
159 | |||
160 | return IRQ_SET_MASK_OK_NOCOPY; | ||
161 | } | ||
162 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | ||
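
Both deleted platform files (this one and msp_smtc.c below) follow the same shape: fill in a struct plat_smp_ops whose callbacks delegate to the smtc_* core, then register it at prom_init() time. A hedged skeleton of the pattern — the my_* names are hypothetical placeholders:

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <asm/smp-ops.h>

static void my_send_ipi_single(int cpu, unsigned int action) { }
static void my_send_ipi_mask(const struct cpumask *mask, unsigned int action) { }
static void my_init_secondary(void) { }
static void my_smp_finish(void) { }
static void my_cpus_done(void) { }
static void my_boot_secondary(int cpu, struct task_struct *idle) { }
static void __init my_smp_setup(void) { }
static void __init my_prepare_cpus(unsigned int max_cpus) { }

static struct plat_smp_ops my_smp_ops = {
	.send_ipi_single	= my_send_ipi_single,
	.send_ipi_mask		= my_send_ipi_mask,
	.init_secondary		= my_init_secondary,
	.smp_finish		= my_smp_finish,
	.cpus_done		= my_cpus_done,
	.boot_secondary		= my_boot_secondary,
	.smp_setup		= my_smp_setup,
	.prepare_cpus		= my_prepare_cpus,
};

void __init my_prom_init_sketch(void)
{
	register_smp_ops(&my_smp_ops);
}
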
diff --git a/arch/mips/pmcs-msp71xx/Makefile b/arch/mips/pmcs-msp71xx/Makefile index 9201c8b3858d..d4f7220f2485 100644 --- a/arch/mips/pmcs-msp71xx/Makefile +++ b/arch/mips/pmcs-msp71xx/Makefile | |||
@@ -10,4 +10,3 @@ obj-$(CONFIG_PCI) += msp_pci.o | |||
10 | obj-$(CONFIG_MSP_HAS_MAC) += msp_eth.o | 10 | obj-$(CONFIG_MSP_HAS_MAC) += msp_eth.o |
11 | obj-$(CONFIG_MSP_HAS_USB) += msp_usb.o | 11 | obj-$(CONFIG_MSP_HAS_USB) += msp_usb.o |
12 | obj-$(CONFIG_MIPS_MT_SMP) += msp_smp.o | 12 | obj-$(CONFIG_MIPS_MT_SMP) += msp_smp.o |
13 | obj-$(CONFIG_MIPS_MT_SMTC) += msp_smtc.o | ||
diff --git a/arch/mips/pmcs-msp71xx/msp_irq.c b/arch/mips/pmcs-msp71xx/msp_irq.c index 9da5619c00a5..941744aabb51 100644 --- a/arch/mips/pmcs-msp71xx/msp_irq.c +++ b/arch/mips/pmcs-msp71xx/msp_irq.c | |||
@@ -32,7 +32,7 @@ extern void msp_vsmp_int_init(void); | |||
32 | 32 | ||
33 | /* vectored interrupt implementation */ | 33 | /* vectored interrupt implementation */ |
34 | 34 | ||
35 | /* SW0/1 interrupts are used for SMP/SMTC */ | 35 | /* SW0/1 interrupts are used for SMP */ |
36 | static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); } | 36 | static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); } |
37 | static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); } | 37 | static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); } |
38 | static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); } | 38 | static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); } |
@@ -138,14 +138,6 @@ void __init arch_init_irq(void) | |||
138 | set_vi_handler(MSP_INT_SEC, sec_int_dispatch); | 138 | set_vi_handler(MSP_INT_SEC, sec_int_dispatch); |
139 | #ifdef CONFIG_MIPS_MT_SMP | 139 | #ifdef CONFIG_MIPS_MT_SMP |
140 | msp_vsmp_int_init(); | 140 | msp_vsmp_int_init(); |
141 | #elif defined CONFIG_MIPS_MT_SMTC | ||
142 | /*Set hwmask for all platform devices */ | ||
143 | irq_hwmask[MSP_INT_MAC0] = C_IRQ0; | ||
144 | irq_hwmask[MSP_INT_MAC1] = C_IRQ1; | ||
145 | irq_hwmask[MSP_INT_USB] = C_IRQ2; | ||
146 | irq_hwmask[MSP_INT_SAR] = C_IRQ3; | ||
147 | irq_hwmask[MSP_INT_SEC] = C_IRQ5; | ||
148 | |||
149 | #endif /* CONFIG_MIPS_MT_SMP */ | 141 | #endif /* CONFIG_MIPS_MT_SMP */ |
150 | #endif /* CONFIG_MIPS_MT */ | 142 | #endif /* CONFIG_MIPS_MT */ |
151 | /* setup the cascaded interrupts */ | 143 | /* setup the cascaded interrupts */ |
@@ -153,8 +145,10 @@ void __init arch_init_irq(void) | |||
153 | setup_irq(MSP_INT_PER, &per_cascade_msp); | 145 | setup_irq(MSP_INT_PER, &per_cascade_msp); |
154 | 146 | ||
155 | #else | 147 | #else |
156 | /* setup the 2nd-level SLP register based interrupt controller */ | 148 | /* |
157 | /* VSMP /SMTC support support is not enabled for SLP */ | 149 | * Setup the 2nd-level SLP register based interrupt controller. |
150 | * VSMP support is not enabled for SLP. | ||
151 | */ | ||
158 | msp_slp_irq_init(); | 152 | msp_slp_irq_init(); |
159 | 153 | ||
160 | /* setup the cascaded SLP/PER interrupts */ | 154 | /* setup the cascaded SLP/PER interrupts */ |
diff --git a/arch/mips/pmcs-msp71xx/msp_irq_cic.c b/arch/mips/pmcs-msp71xx/msp_irq_cic.c index e49b499f66db..b8df2f7b3328 100644 --- a/arch/mips/pmcs-msp71xx/msp_irq_cic.c +++ b/arch/mips/pmcs-msp71xx/msp_irq_cic.c | |||
@@ -120,10 +120,9 @@ static void msp_cic_irq_ack(struct irq_data *d) | |||
120 | * hurt for the others | 120 | * hurt for the others |
121 | */ | 121 | */ |
122 | *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE)); | 122 | *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE)); |
123 | smtc_im_ack_irq(d->irq); | ||
124 | } | 123 | } |
125 | 124 | ||
126 | /*Note: Limiting to VSMP . Not tested in SMTC */ | 125 | /* Note: Limiting to VSMP. */ |
127 | 126 | ||
128 | #ifdef CONFIG_MIPS_MT_SMP | 127 | #ifdef CONFIG_MIPS_MT_SMP |
129 | static int msp_cic_irq_set_affinity(struct irq_data *d, | 128 | static int msp_cic_irq_set_affinity(struct irq_data *d, |
@@ -183,10 +182,6 @@ void __init msp_cic_irq_init(void) | |||
183 | for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) { | 182 | for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) { |
184 | irq_set_chip_and_handler(i, &msp_cic_irq_controller, | 183 | irq_set_chip_and_handler(i, &msp_cic_irq_controller, |
185 | handle_level_irq); | 184 | handle_level_irq); |
186 | #ifdef CONFIG_MIPS_MT_SMTC | ||
187 | /* Mask of CIC interrupt */ | ||
188 | irq_hwmask[i] = C_IRQ4; | ||
189 | #endif | ||
190 | } | 185 | } |
191 | 186 | ||
192 | /* Initialize the PER interrupt sub-system */ | 187 | /* Initialize the PER interrupt sub-system */ |
diff --git a/arch/mips/pmcs-msp71xx/msp_irq_per.c b/arch/mips/pmcs-msp71xx/msp_irq_per.c index d1fd530479d4..a111836bcec2 100644 --- a/arch/mips/pmcs-msp71xx/msp_irq_per.c +++ b/arch/mips/pmcs-msp71xx/msp_irq_per.c | |||
@@ -113,9 +113,6 @@ void __init msp_per_irq_init(void) | |||
113 | /* initialize all the IRQ descriptors */ | 113 | /* initialize all the IRQ descriptors */ |
114 | for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) { | 114 | for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) { |
115 | irq_set_chip(i, &msp_per_irq_controller); | 115 | irq_set_chip(i, &msp_per_irq_controller); |
116 | #ifdef CONFIG_MIPS_MT_SMTC | ||
117 | irq_hwmask[i] = C_IRQ4; | ||
118 | #endif | ||
119 | } | 116 | } |
120 | } | 117 | } |
121 | 118 | ||
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c index ba9d518dc624..4f925e06c414 100644 --- a/arch/mips/pmcs-msp71xx/msp_setup.c +++ b/arch/mips/pmcs-msp71xx/msp_setup.c | |||
@@ -147,8 +147,6 @@ void __init plat_mem_setup(void) | |||
147 | pm_power_off = msp_power_off; | 147 | pm_power_off = msp_power_off; |
148 | } | 148 | } |
149 | 149 | ||
150 | extern struct plat_smp_ops msp_smtc_smp_ops; | ||
151 | |||
152 | void __init prom_init(void) | 150 | void __init prom_init(void) |
153 | { | 151 | { |
154 | unsigned long family; | 152 | unsigned long family; |
@@ -229,9 +227,5 @@ void __init prom_init(void) | |||
229 | */ | 227 | */ |
230 | msp_serial_setup(); | 228 | msp_serial_setup(); |
231 | 229 | ||
232 | if (register_vsmp_smp_ops()) { | 230 | register_vsmp_smp_ops(); |
233 | #ifdef CONFIG_MIPS_MT_SMTC | ||
234 | register_smp_ops(&msp_smtc_smp_ops); | ||
235 | #endif | ||
236 | } | ||
237 | } | 231 | } |
diff --git a/arch/mips/pmcs-msp71xx/msp_smtc.c b/arch/mips/pmcs-msp71xx/msp_smtc.c deleted file mode 100644 index 6b5607fce279..000000000000 --- a/arch/mips/pmcs-msp71xx/msp_smtc.c +++ /dev/null | |||
@@ -1,104 +0,0 @@ | |||
1 | /* | ||
2 | * MSP71xx Platform-specific hooks for SMP operation | ||
3 | */ | ||
4 | #include <linux/irq.h> | ||
5 | #include <linux/init.h> | ||
6 | |||
7 | #include <asm/mipsmtregs.h> | ||
8 | #include <asm/mipsregs.h> | ||
9 | #include <asm/smtc.h> | ||
10 | #include <asm/smtc_ipi.h> | ||
11 | |||
12 | /* VPE/SMP Prototype implements platform interfaces directly */ | ||
13 | |||
14 | /* | ||
15 | * Cause the specified action to be performed on a targeted "CPU" | ||
16 | */ | ||
17 | |||
18 | static void msp_smtc_send_ipi_single(int cpu, unsigned int action) | ||
19 | { | ||
20 | /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ | ||
21 | smtc_send_ipi(cpu, LINUX_SMP_IPI, action); | ||
22 | } | ||
23 | |||
24 | static void msp_smtc_send_ipi_mask(const struct cpumask *mask, | ||
25 | unsigned int action) | ||
26 | { | ||
27 | unsigned int i; | ||
28 | |||
29 | for_each_cpu(i, mask) | ||
30 | msp_smtc_send_ipi_single(i, action); | ||
31 | } | ||
32 | |||
33 | /* | ||
34 | * Post-config but pre-boot cleanup entry point | ||
35 | */ | ||
36 | static void msp_smtc_init_secondary(void) | ||
37 | { | ||
38 | int myvpe; | ||
39 | |||
40 | /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */ | ||
41 | myvpe = read_c0_tcbind() & TCBIND_CURVPE; | ||
42 | if (myvpe > 0) | ||
43 | change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 | | ||
44 | STATUSF_IP6 | STATUSF_IP7); | ||
45 | smtc_init_secondary(); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * Platform "CPU" startup hook | ||
50 | */ | ||
51 | static void msp_smtc_boot_secondary(int cpu, struct task_struct *idle) | ||
52 | { | ||
53 | smtc_boot_secondary(cpu, idle); | ||
54 | } | ||
55 | |||
56 | /* | ||
57 | * SMP initialization finalization entry point | ||
58 | */ | ||
59 | static void msp_smtc_smp_finish(void) | ||
60 | { | ||
61 | smtc_smp_finish(); | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * Hook for after all CPUs are online | ||
66 | */ | ||
67 | |||
68 | static void msp_smtc_cpus_done(void) | ||
69 | { | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * Platform SMP pre-initialization | ||
74 | * | ||
75 | * As noted above, we can assume a single CPU for now | ||
76 | * but it may be multithreaded. | ||
77 | */ | ||
78 | |||
79 | static void __init msp_smtc_smp_setup(void) | ||
80 | { | ||
81 | /* | ||
82 | * we won't get the definitive value until | ||
83 | * we've run smtc_prepare_cpus later, but we would appear to need an upper bound now. | ||
84 | */ | ||
85 | |||
86 | if (read_c0_config3() & (1 << 2)) | ||
87 | smp_num_siblings = smtc_build_cpu_map(0); | ||
88 | } | ||
89 | |||
90 | static void __init msp_smtc_prepare_cpus(unsigned int max_cpus) | ||
91 | { | ||
92 | smtc_prepare_cpus(max_cpus); | ||
93 | } | ||
94 | |||
95 | struct plat_smp_ops msp_smtc_smp_ops = { | ||
96 | .send_ipi_single = msp_smtc_send_ipi_single, | ||
97 | .send_ipi_mask = msp_smtc_send_ipi_mask, | ||
98 | .init_secondary = msp_smtc_init_secondary, | ||
99 | .smp_finish = msp_smtc_smp_finish, | ||
100 | .cpus_done = msp_smtc_cpus_done, | ||
101 | .boot_secondary = msp_smtc_boot_secondary, | ||
102 | .smp_setup = msp_smtc_smp_setup, | ||
103 | .prepare_cpus = msp_smtc_prepare_cpus, | ||
104 | }; | ||