author		Jim Quinlan <jim2101024@gmail.com>	2012-09-06 11:36:56 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2012-11-09 04:59:21 -0500
commit		e97c5b609880d97313b13eb71830fca62cee50c2 (patch)
tree		d43e3033f37cc9181dfe145b1019237429cfb418 /arch/mips/lib
parent		92d11594f688c8b55b51e80f2eac4417396237a4 (diff)
MIPS: Make irqflags.h functions preempt-safe for non-mipsr2 cpus
For non-MIPSr2 processors, such as the BMIPS 5000, calls to arch_local_irq_disable() and others may be preempted, and in doing so a stale value may be restored to c0_status. This fix disables preemption for such processors prior to the call and enables it after the call. Those functions that needed this fix have been "outlined" to mips-atomic.c, as they are no longer good candidates for inlining.

This bug was observed on a BMIPS 5000, occurring once every few hours in a continuous reboot test. It was traced to the write_lock_irq() function which was being invoked in release_task() in exit.c. By placing a number of "nops" in between the mfc0/mtc0 pair in arch_local_irq_disable(), which is called by write_lock_irq(), we were able to greatly increase the occurrence of this bug. Similarly, the application of this commit silenced the bug.

Signed-off-by: Jim Quinlan <jim2101024@gmail.com>
Cc: linux-mips@linux-mips.org
Cc: David Daney <ddaney.cavm@gmail.com>
Cc: Kevin Cernekee <cernekee@gmail.com>
Cc: Jim Quinlan <jim2101024@gmail.com>
Patchwork: https://patchwork.linux-mips.org/patch/4321/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
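[ Illustration, not part of the patch: a minimal C-level sketch of the pattern the patch applies. On a non-MIPSr2 CPU, disabling interrupts is a read-modify-write of CP0 Status; if the task is preempted between the read and the write, the value written back can be stale. The sketch below uses the generic <asm/mipsregs.h> accessors (read_c0_status(), write_c0_status(), ST0_IE) and irq_disable_hazard() from <asm/hazards.h> purely for illustration, and the function name is made up; the patch itself keeps the assembler macros and only wraps them in preempt_disable()/preempt_enable(). ]

	#include <linux/preempt.h>
	#include <asm/mipsregs.h>	/* read_c0_status(), write_c0_status(), ST0_IE */
	#include <asm/hazards.h>	/* irq_disable_hazard() */

	/* Hypothetical sketch of a preempt-safe IRQ disable on non-MIPSr2 CPUs. */
	static void sketch_arch_local_irq_disable(void)
	{
		unsigned long status;

		preempt_disable();			/* the fix: no preemption between    */
							/* the read and the write below      */
		status = read_c0_status();		/* mfc0: read CP0 Status             */
		write_c0_status(status & ~ST0_IE);	/* mtc0: clear IE, IRQs off          */
		irq_disable_hazard();			/* wait for the mtc0 to take effect  */
		preempt_enable();
	}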
Diffstat (limited to 'arch/mips/lib')
-rw-r--r--	arch/mips/lib/Makefile		3
-rw-r--r--	arch/mips/lib/mips-atomic.c	176
2 files changed, 178 insertions(+), 1 deletion(-)
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index a7b893714354..eeddc58802e1 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -3,7 +3,8 @@
 #
 
 lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
-	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+	   mips-atomic.o strlen_user.o strncpy_user.o \
+	   strnlen_user.o uncached.o
 
 obj-y	+= iomap.o
 obj-$(CONFIG_PCI)	+= iomap-pci.o
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644
index 000000000000..e091430dbeb1
--- /dev/null
+++ b/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+	" .macro arch_local_irq_disable\n"
+	" .set push \n"
+	" .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	" mfc0 $1, $2, 1 \n"
+	" ori $1, 0x400 \n"
+	" .set noreorder \n"
+	" mtc0 $1, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	" mfc0 $1,$12 \n"
+	" ori $1,0x1f \n"
+	" xori $1,0x1f \n"
+	" .set noreorder \n"
+	" mtc0 $1,$12 \n"
+#endif
+	" irq_disable_hazard \n"
+	" .set pop \n"
+	" .endm \n");
+
+void arch_local_irq_disable(void)
+{
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_disable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+	" .macro arch_local_irq_save result \n"
+	" .set push \n"
+	" .set reorder \n"
+	" .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	" mfc0 \\result, $2, 1 \n"
+	" ori $1, \\result, 0x400 \n"
+	" .set noreorder \n"
+	" mtc0 $1, $2, 1 \n"
+	" andi \\result, \\result, 0x400 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	" mfc0 \\result, $12 \n"
+	" ori $1, \\result, 0x1f \n"
+	" xori $1, 0x1f \n"
+	" .set noreorder \n"
+	" mtc0 $1, $12 \n"
+#endif
+	" irq_disable_hazard \n"
+	" .set pop \n"
+	" .endm \n");
+
+unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+	preempt_disable();
+	asm volatile("arch_local_irq_save\t%0"
+		     : "=r" (flags)
+		     : /* no inputs */
+		     : "memory");
+	preempt_enable();
+	return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+	" .macro arch_local_irq_restore flags \n"
+	" .set push \n"
+	" .set noreorder \n"
+	" .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	" mfc0 $1, $2, 1 \n"
+	" andi \\flags, 0x400 \n"
+	" ori $1, 0x400 \n"
+	" xori $1, 0x400 \n"
+	" or \\flags, $1 \n"
+	" mtc0 \\flags, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	" mfc0 $1, $12 \n"
+	" andi \\flags, 1 \n"
+	" ori $1, 0x1f \n"
+	" xori $1, 0x1f \n"
+	" or \\flags, $1 \n"
+	" mtc0 \\flags, $12 \n"
+#endif
+	" irq_disable_hazard \n"
+	" .set pop \n"
+	" .endm \n");
+
+void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+void __arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */