author    David Howells <dhowells@redhat.com>    2012-03-28 13:30:02 -0400
committer David Howells <dhowells@redhat.com>    2012-03-28 13:30:02 -0400
commit    c9034c3a1d67486284b3b569595a4bd377aaf8cc
tree      3411246adeaf5e7fdaa8a110a92d4afa0f87078c
parent    c140d87995b68b428f70635c2e4071e4e8b3256e

Disintegrate asm/system.h for M32R

Disintegrate asm/system.h for M32R.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: linux-m32r@ml.linux-m32r.org
 arch/m32r/include/asm/atomic.h        |   3
 arch/m32r/include/asm/barrier.h       |  94
 arch/m32r/include/asm/bitops.h        |   3
 arch/m32r/include/asm/cmpxchg.h       | 221
 arch/m32r/include/asm/dcache_clear.h  |  29
 arch/m32r/include/asm/exec.h          |  14
 arch/m32r/include/asm/local.h         |   1
 arch/m32r/include/asm/spinlock.h      |   1
 arch/m32r/include/asm/switch_to.h     |  51
 arch/m32r/include/asm/system.h        | 373
 arch/m32r/kernel/ptrace.c             |   1
 arch/m32r/kernel/traps.c              |   1
 arch/m32r/mm/fault-nommu.c            |   1
 arch/m32r/mm/fault.c                  |   1
 arch/m32r/platforms/m32104ut/setup.c  |   1
 arch/m32r/platforms/m32700ut/setup.c  |   1
 arch/m32r/platforms/mappi/setup.c     |   1
 arch/m32r/platforms/mappi2/setup.c    |   1
 arch/m32r/platforms/mappi3/setup.c    |   1
 arch/m32r/platforms/oaks32r/setup.c   |   1
 arch/m32r/platforms/opsput/setup.c    |   1
 arch/m32r/platforms/usrv/setup.c      |   1
 22 files changed, 420 insertions(+), 382 deletions(-)
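
The practical effect of the split is that M32R code stops pulling everything in through the catch-all <asm/system.h> and instead includes only the facility it uses. A rough before/after sketch for a hypothetical consumer file (illustrative only, not part of this patch):

	/* before: one catch-all header supplied barriers, xchg/cmpxchg, switch_to, ... */
	#include <asm/system.h>

	/* after: include only what the code actually needs */
	#include <asm/barrier.h>	/* nop(), mb(), rmb(), wmb(), smp_*() */
	#include <asm/cmpxchg.h>	/* xchg(), cmpxchg() and the *_local variants */
	#include <asm/dcache_clear.h>	/* DCACHE_CLEAR() errata workaround */
	#include <asm/exec.h>		/* arch_align_stack() */
	#include <asm/switch_to.h>	/* switch_to() */
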
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 1e7f29fb21f2..0d81697c326c 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -11,7 +11,8 @@
 
 #include <linux/types.h>
 #include <asm/assembler.h>
-#include <asm/system.h>
+#include <asm/cmpxchg.h>
+#include <asm/dcache_clear.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h
new file mode 100644
index 000000000000..6976621efd3f
--- /dev/null
+++ b/arch/m32r/include/asm/barrier.h
@@ -0,0 +1,94 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
+ * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
+ */
+#ifndef _ASM_M32R_BARRIER_H
+#define _ASM_M32R_BARRIER_H
+
+#define nop() __asm__ __volatile__ ("nop" : : )
+
+/*
+ * Memory barrier.
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ */
+#define mb() barrier()
+#define rmb() mb()
+#define wmb() mb()
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends() do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+#endif /* _ASM_M32R_BARRIER_H */
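
As a minimal sketch of how the smp_*() barriers defined above are typically paired (a generic producer/consumer flag pattern; this is illustrative code, not part of the patch, and the variable names are hypothetical):

	static int shared_data;
	static int data_ready;

	static void publish(int value)
	{
		shared_data = value;
		smp_wmb();		/* order the data store before the flag store */
		data_ready = 1;
	}

	static int consume(void)
	{
		if (!data_ready)
			return -1;
		smp_rmb();		/* order the flag load before the data load */
		return shared_data;
	}

On a UP build the same code compiles down to plain compiler barriers, matching the #else branch above.
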
diff --git a/arch/m32r/include/asm/bitops.h b/arch/m32r/include/asm/bitops.h
index 6300f22cdbdb..d3dea9ac7d4e 100644
--- a/arch/m32r/include/asm/bitops.h
+++ b/arch/m32r/include/asm/bitops.h
@@ -16,9 +16,10 @@
 #endif
 
 #include <linux/compiler.h>
+#include <linux/irqflags.h>
 #include <asm/assembler.h>
-#include <asm/system.h>
 #include <asm/byteorder.h>
+#include <asm/dcache_clear.h>
 #include <asm/types.h>
 
 /*
diff --git a/arch/m32r/include/asm/cmpxchg.h b/arch/m32r/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..de651db20b43
--- /dev/null
+++ b/arch/m32r/include/asm/cmpxchg.h
@@ -0,0 +1,221 @@
+#ifndef _ASM_M32R_CMPXCHG_H
+#define _ASM_M32R_CMPXCHG_H
+
+/*
+ * M32R version:
+ * Copyright (C) 2001, 2002 Hitoshi Yamamoto
+ * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#include <linux/irqflags.h>
+#include <asm/assembler.h>
+#include <asm/dcache_clear.h>
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+__xchg(unsigned long x, volatile void *ptr, int size)
+{
+	unsigned long flags;
+	unsigned long tmp = 0;
+
+	local_irq_save(flags);
+
+	switch (size) {
+#ifndef CONFIG_SMP
+	case 1:
+		__asm__ __volatile__ (
+			"ldb %0, @%2 \n\t"
+			"stb %1, @%2 \n\t"
+			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+		break;
+	case 2:
+		__asm__ __volatile__ (
+			"ldh %0, @%2 \n\t"
+			"sth %1, @%2 \n\t"
+			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+		break;
+	case 4:
+		__asm__ __volatile__ (
+			"ld %0, @%2 \n\t"
+			"st %1, @%2 \n\t"
+			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+		break;
+#else /* CONFIG_SMP */
+	case 4:
+		__asm__ __volatile__ (
+			DCACHE_CLEAR("%0", "r4", "%2")
+			"lock %0, @%2; \n\t"
+			"unlock %1, @%2; \n\t"
+			: "=&r" (tmp) : "r" (x), "r" (ptr)
+			: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+			, "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+		);
+		break;
+#endif /* CONFIG_SMP */
+	default:
+		__xchg_called_with_bad_pointer();
+	}
+
+	local_irq_restore(flags);
+
+	return (tmp);
+}
+
+#define xchg(ptr, x) \
+	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+
+static __always_inline unsigned long
+__xchg_local(unsigned long x, volatile void *ptr, int size)
+{
+	unsigned long flags;
+	unsigned long tmp = 0;
+
+	local_irq_save(flags);
+
+	switch (size) {
+	case 1:
+		__asm__ __volatile__ (
+			"ldb %0, @%2 \n\t"
+			"stb %1, @%2 \n\t"
+			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+		break;
+	case 2:
+		__asm__ __volatile__ (
+			"ldh %0, @%2 \n\t"
+			"sth %1, @%2 \n\t"
+			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+		break;
+	case 4:
+		__asm__ __volatile__ (
+			"ld %0, @%2 \n\t"
+			"st %1, @%2 \n\t"
+			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+		break;
+	default:
+		__xchg_called_with_bad_pointer();
+	}
+
+	local_irq_restore(flags);
+
+	return (tmp);
+}
+
+#define xchg_local(ptr, x) \
+	((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr), \
+			sizeof(*(ptr))))
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
+{
+	unsigned long flags;
+	unsigned int retval;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		DCACHE_CLEAR("%0", "r4", "%1")
+		M32R_LOCK" %0, @%1; \n"
+		" bne %0, %2, 1f; \n"
+		M32R_UNLOCK" %3, @%1; \n"
+		" bra 2f; \n"
+		" .fillinsn \n"
+		"1:"
+		M32R_UNLOCK" %0, @%1; \n"
+		" .fillinsn \n"
+		"2:"
+		: "=&r" (retval)
+		: "r" (p), "r" (old), "r" (new)
+		: "cbit", "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+static inline unsigned long
+__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old,
+		unsigned int new)
+{
+	unsigned long flags;
+	unsigned int retval;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		DCACHE_CLEAR("%0", "r4", "%1")
+		"ld %0, @%1; \n"
+		" bne %0, %2, 1f; \n"
+		"st %3, @%1; \n"
+		" bra 2f; \n"
+		" .fillinsn \n"
+		"1:"
+		"st %0, @%1; \n"
+		" .fillinsn \n"
+		"2:"
+		: "=&r" (retval)
+		: "r" (p), "r" (old), "r" (new)
+		: "cbit", "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+#if 0 /* we don't have __cmpxchg_u64 */
+	case 8:
+		return __cmpxchg_u64(ptr, old, new);
+#endif /* 0 */
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr, o, n) \
+	((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o), \
+			(unsigned long)(n), sizeof(*(ptr))))
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+		unsigned long old,
+		unsigned long new, int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_local_u32(ptr, old, new);
+	default:
+		return __cmpxchg_local_generic(ptr, old, new, size);
+	}
+
+	return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+			(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#endif /* _ASM_M32R_CMPXCHG_H */
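
A minimal sketch of the usual retry loop built on cmpxchg() as defined above (illustrative only; the counter and helper below are hypothetical, in the style of atomic_add_unless()):

	static unsigned int counter;

	/* Increment counter unless it already equals 'limit'; returns 1 on success. */
	static int inc_unless(unsigned int limit)
	{
		unsigned int old, new;

		do {
			old = counter;
			if (old == limit)
				return 0;
			new = old + 1;
			/*
			 * cmpxchg() returns the value actually found at &counter;
			 * if that is not 'old', another CPU updated it first, so retry.
			 */
		} while (cmpxchg(&counter, old, new) != old);

		return 1;
	}
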
diff --git a/arch/m32r/include/asm/dcache_clear.h b/arch/m32r/include/asm/dcache_clear.h
new file mode 100644
index 000000000000..a0ae06c2e9e7
--- /dev/null
+++ b/arch/m32r/include/asm/dcache_clear.h
@@ -0,0 +1,29 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
+ * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
+ */
+#ifndef _ASM_M32R_DCACHE_CLEAR_H
+#define _ASM_M32R_DCACHE_CLEAR_H
+
+#ifdef CONFIG_CHIP_M32700_TS1
+#define DCACHE_CLEAR(reg0, reg1, addr) \
+	"seth "reg1", #high(dcache_dummy); \n\t" \
+	"or3 "reg1", "reg1", #low(dcache_dummy); \n\t" \
+	"lock "reg0", @"reg1"; \n\t" \
+	"add3 "reg0", "addr", #0x1000; \n\t" \
+	"ld "reg0", @"reg0"; \n\t" \
+	"add3 "reg0", "addr", #0x2000; \n\t" \
+	"ld "reg0", @"reg0"; \n\t" \
+	"unlock "reg0", @"reg1"; \n\t"
+	/* FIXME: This workaround code cannot handle kernel modules
+	 * correctly under SMP environment.
+	 */
+#else /* CONFIG_CHIP_M32700_TS1 */
+#define DCACHE_CLEAR(reg0, reg1, addr)
+#endif /* CONFIG_CHIP_M32700_TS1 */
+
+#endif /* _ASM_M32R_DCACHE_CLEAR_H */
diff --git a/arch/m32r/include/asm/exec.h b/arch/m32r/include/asm/exec.h
new file mode 100644
index 000000000000..c805dbd75b5d
--- /dev/null
+++ b/arch/m32r/include/asm/exec.h
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
+ * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
+ */
+#ifndef _ASM_M32R_EXEC_H
+#define _ASM_M32R_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* _ASM_M32R_EXEC_H */
diff --git a/arch/m32r/include/asm/local.h b/arch/m32r/include/asm/local.h
index 734bca87018a..4045db3e4f65 100644
--- a/arch/m32r/include/asm/local.h
+++ b/arch/m32r/include/asm/local.h
@@ -12,7 +12,6 @@
 
 #include <linux/percpu.h>
 #include <asm/assembler.h>
-#include <asm/system.h>
 #include <asm/local.h>
 
 /*
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index b0ea2f26da3b..fa13694eaae3 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -11,6 +11,7 @@
 
 #include <linux/compiler.h>
 #include <linux/atomic.h>
+#include <asm/dcache_clear.h>
 #include <asm/page.h>
 
 /*
diff --git a/arch/m32r/include/asm/switch_to.h b/arch/m32r/include/asm/switch_to.h
new file mode 100644
index 000000000000..4b262f7a8fe9
--- /dev/null
+++ b/arch/m32r/include/asm/switch_to.h
@@ -0,0 +1,51 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
+ * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
+ */
+#ifndef _ASM_M32R_SWITCH_TO_H
+#define _ASM_M32R_SWITCH_TO_H
+
+/*
+ * switch_to(prev, next) should switch from task `prev' to `next'
+ * `prev' will never be the same as `next'.
+ *
+ * `next' and `prev' should be struct task_struct, but it isn't always defined
+ */
+
+#if defined(CONFIG_FRAME_POINTER) || \
+	!defined(CONFIG_SCHED_OMIT_FRAME_POINTER)
+#define M32R_PUSH_FP " push fp\n"
+#define M32R_POP_FP " pop fp\n"
+#else
+#define M32R_PUSH_FP ""
+#define M32R_POP_FP ""
+#endif
+
+#define switch_to(prev, next, last) do { \
+	__asm__ __volatile__ ( \
+		" seth lr, #high(1f) \n" \
+		" or3 lr, lr, #low(1f) \n" \
+		" st lr, @%4 ; store old LR \n" \
+		" ld lr, @%5 ; load new LR \n" \
+			M32R_PUSH_FP \
+		" st sp, @%2 ; store old SP \n" \
+		" ld sp, @%3 ; load new SP \n" \
+		" push %1 ; store `prev' on new stack \n" \
+		" jmp lr \n" \
+		" .fillinsn \n" \
+		"1: \n" \
+		" pop %0 ; restore `__last' from new stack \n" \
+			M32R_POP_FP \
+		: "=r" (last) \
+		: "0" (prev), \
+		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
+		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
+		: "memory", "lr" \
+	); \
+} while(0)
+
+#endif /* _ASM_M32R_SWITCH_TO_H */
diff --git a/arch/m32r/include/asm/system.h b/arch/m32r/include/asm/system.h
index 13c46794ccb1..a55c384fdcf3 100644
--- a/arch/m32r/include/asm/system.h
+++ b/arch/m32r/include/asm/system.h
@@ -1,367 +1,6 @@
-#ifndef _ASM_M32R_SYSTEM_H
-#define _ASM_M32R_SYSTEM_H
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
- * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
- */
-
-#include <linux/compiler.h>
-#include <linux/irqflags.h>
-#include <asm/assembler.h>
-
-#ifdef __KERNEL__
-
-/*
- * switch_to(prev, next) should switch from task `prev' to `next'
- * `prev' will never be the same as `next'.
- *
- * `next' and `prev' should be struct task_struct, but it isn't always defined
- */
-
-#if defined(CONFIG_FRAME_POINTER) || \
-	!defined(CONFIG_SCHED_OMIT_FRAME_POINTER)
-#define M32R_PUSH_FP " push fp\n"
-#define M32R_POP_FP " pop fp\n"
-#else
-#define M32R_PUSH_FP ""
-#define M32R_POP_FP ""
-#endif
-
-#define switch_to(prev, next, last) do { \
-	__asm__ __volatile__ ( \
-		" seth lr, #high(1f) \n" \
-		" or3 lr, lr, #low(1f) \n" \
-		" st lr, @%4 ; store old LR \n" \
-		" ld lr, @%5 ; load new LR \n" \
-			M32R_PUSH_FP \
-		" st sp, @%2 ; store old SP \n" \
-		" ld sp, @%3 ; load new SP \n" \
-		" push %1 ; store `prev' on new stack \n" \
-		" jmp lr \n" \
-		" .fillinsn \n" \
-		"1: \n" \
-		" pop %0 ; restore `__last' from new stack \n" \
-			M32R_POP_FP \
-		: "=r" (last) \
-		: "0" (prev), \
-		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
-		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
-		: "memory", "lr" \
-	); \
-} while(0)
-
-#define nop() __asm__ __volatile__ ("nop" : : )
-
-#define xchg(ptr, x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
-#define xchg_local(ptr, x) \
-	((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr), \
-			sizeof(*(ptr))))
-
-extern void __xchg_called_with_bad_pointer(void);
-
-#ifdef CONFIG_CHIP_M32700_TS1
-#define DCACHE_CLEAR(reg0, reg1, addr) \
-	"seth "reg1", #high(dcache_dummy); \n\t" \
-	"or3 "reg1", "reg1", #low(dcache_dummy); \n\t" \
-	"lock "reg0", @"reg1"; \n\t" \
-	"add3 "reg0", "addr", #0x1000; \n\t" \
-	"ld "reg0", @"reg0"; \n\t" \
-	"add3 "reg0", "addr", #0x2000; \n\t" \
-	"ld "reg0", @"reg0"; \n\t" \
-	"unlock "reg0", @"reg1"; \n\t"
-	/* FIXME: This workaround code cannot handle kernel modules
-	 * correctly under SMP environment.
-	 */
-#else /* CONFIG_CHIP_M32700_TS1 */
-#define DCACHE_CLEAR(reg0, reg1, addr)
-#endif /* CONFIG_CHIP_M32700_TS1 */
-
-static __always_inline unsigned long
-__xchg(unsigned long x, volatile void *ptr, int size)
-{
-	unsigned long flags;
-	unsigned long tmp = 0;
-
-	local_irq_save(flags);
-
-	switch (size) {
-#ifndef CONFIG_SMP
-	case 1:
-		__asm__ __volatile__ (
-			"ldb %0, @%2 \n\t"
-			"stb %1, @%2 \n\t"
-			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
-		break;
-	case 2:
-		__asm__ __volatile__ (
-			"ldh %0, @%2 \n\t"
-			"sth %1, @%2 \n\t"
-			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
-		break;
-	case 4:
-		__asm__ __volatile__ (
-			"ld %0, @%2 \n\t"
-			"st %1, @%2 \n\t"
-			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
-		break;
-#else /* CONFIG_SMP */
-	case 4:
-		__asm__ __volatile__ (
-			DCACHE_CLEAR("%0", "r4", "%2")
-			"lock %0, @%2; \n\t"
-			"unlock %1, @%2; \n\t"
-			: "=&r" (tmp) : "r" (x), "r" (ptr)
-			: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-			, "r4"
-#endif /* CONFIG_CHIP_M32700_TS1 */
-		);
-		break;
-#endif /* CONFIG_SMP */
-	default:
-		__xchg_called_with_bad_pointer();
-	}
-
-	local_irq_restore(flags);
-
-	return (tmp);
-}
-
-static __always_inline unsigned long
-__xchg_local(unsigned long x, volatile void *ptr, int size)
-{
-	unsigned long flags;
-	unsigned long tmp = 0;
-
-	local_irq_save(flags);
-
-	switch (size) {
-	case 1:
-		__asm__ __volatile__ (
-			"ldb %0, @%2 \n\t"
-			"stb %1, @%2 \n\t"
-			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
-		break;
-	case 2:
-		__asm__ __volatile__ (
-			"ldh %0, @%2 \n\t"
-			"sth %1, @%2 \n\t"
-			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
-		break;
-	case 4:
-		__asm__ __volatile__ (
-			"ld %0, @%2 \n\t"
-			"st %1, @%2 \n\t"
-			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
-		break;
-	default:
-		__xchg_called_with_bad_pointer();
-	}
-
-	local_irq_restore(flags);
-
-	return (tmp);
-}
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long
-__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
-{
-	unsigned long flags;
-	unsigned int retval;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		DCACHE_CLEAR("%0", "r4", "%1")
-		M32R_LOCK" %0, @%1; \n"
-		" bne %0, %2, 1f; \n"
-		M32R_UNLOCK" %3, @%1; \n"
-		" bra 2f; \n"
-		" .fillinsn \n"
-		"1:"
-		M32R_UNLOCK" %0, @%1; \n"
-		" .fillinsn \n"
-		"2:"
-		: "=&r" (retval)
-		: "r" (p), "r" (old), "r" (new)
-		: "cbit", "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r4"
-#endif /* CONFIG_CHIP_M32700_TS1 */
-	);
-	local_irq_restore(flags);
-
-	return retval;
-}
-
-static inline unsigned long
-__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old,
-		unsigned int new)
-{
-	unsigned long flags;
-	unsigned int retval;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		DCACHE_CLEAR("%0", "r4", "%1")
-		"ld %0, @%1; \n"
-		" bne %0, %2, 1f; \n"
-		"st %3, @%1; \n"
-		" bra 2f; \n"
-		" .fillinsn \n"
-		"1:"
-		"st %0, @%1; \n"
-		" .fillinsn \n"
-		"2:"
-		: "=&r" (retval)
-		: "r" (p), "r" (old), "r" (new)
-		: "cbit", "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r4"
-#endif /* CONFIG_CHIP_M32700_TS1 */
-	);
-	local_irq_restore(flags);
-
-	return retval;
-}
-
-/* This function doesn't exist, so you'll get a linker error
-   if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
-	switch (size) {
-	case 4:
-		return __cmpxchg_u32(ptr, old, new);
-#if 0 /* we don't have __cmpxchg_u64 */
-	case 8:
-		return __cmpxchg_u64(ptr, old, new);
-#endif /* 0 */
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-#define cmpxchg(ptr, o, n) \
-	((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o), \
-			(unsigned long)(n), sizeof(*(ptr))))
-
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-		unsigned long old,
-		unsigned long new, int size)
-{
-	switch (size) {
-	case 4:
-		return __cmpxchg_local_u32(ptr, old, new);
-	default:
-		return __cmpxchg_local_generic(ptr, old, new, size);
-	}
-
-	return old;
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
-			(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#endif /* __KERNEL__ */
-
-/*
- * Memory barrier.
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- */
-#define mb() barrier()
-#define rmb() mb()
-#define wmb() mb()
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-#define arch_align_stack(x) (x)
-
-#endif /* _ASM_M32R_SYSTEM_H */
+/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+#include <asm/dcache_clear.h>
+#include <asm/exec.h>
+#include <asm/switch_to.h>
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 20743754f2b2..4c03361537aa 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -29,7 +29,6 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/processor.h>
 #include <asm/mmu_context.h>
 
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index ee6a9199561c..3bcb207e5b6d 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -18,7 +18,6 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <linux/atomic.h>
diff --git a/arch/m32r/mm/fault-nommu.c b/arch/m32r/mm/fault-nommu.c
index 888aab1157ed..80f18cc6f547 100644
--- a/arch/m32r/mm/fault-nommu.c
+++ b/arch/m32r/mm/fault-nommu.c
@@ -22,7 +22,6 @@
 #include <linux/vt_kern.h> /* For unblank_screen() */
 
 #include <asm/m32r.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 2c9aeb453847..3cdfa9c1d091 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -26,7 +26,6 @@
 #include <linux/module.h>
 
 #include <asm/m32r.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c
index 34671d32cefc..e2dd778aeac7 100644
--- a/arch/m32r/platforms/m32104ut/setup.c
+++ b/arch/m32r/platforms/m32104ut/setup.c
@@ -13,7 +13,6 @@
 #include <linux/init.h>
 #include <linux/device.h>
 
-#include <asm/system.h>
 #include <asm/m32r.h>
 #include <asm/io.h>
 
diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c
index 1053e1cb7401..9a4ba8a8589d 100644
--- a/arch/m32r/platforms/m32700ut/setup.c
+++ b/arch/m32r/platforms/m32700ut/setup.c
@@ -16,7 +16,6 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 
-#include <asm/system.h>
 #include <asm/m32r.h>
 #include <asm/io.h>
 
diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c
index 35130ac3f8d1..767d2f4d6ded 100644
--- a/arch/m32r/platforms/mappi/setup.c
+++ b/arch/m32r/platforms/mappi/setup.c
@@ -12,7 +12,6 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 
-#include <asm/system.h>
 #include <asm/m32r.h>
 #include <asm/io.h>
 
diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c
index f3ed6b60a5f8..76d665abf51e 100644
--- a/arch/m32r/platforms/mappi2/setup.c
+++ b/arch/m32r/platforms/mappi2/setup.c
@@ -12,7 +12,6 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 
-#include <asm/system.h>
 #include <asm/m32r.h>
 #include <asm/io.h>
 
diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c
index 2408e356ad10..a3646d4b05bd 100644
--- a/arch/m32r/platforms/mappi3/setup.c
+++ b/arch/m32r/platforms/mappi3/setup.c
@@ -12,7 +12,6 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 
-#include <asm/system.h>
 #include <asm/m32r.h>
 #include <asm/io.h>
 
diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c
index 83b46b067a17..f8373c069524 100644
--- a/arch/m32r/platforms/oaks32r/setup.c
+++ b/arch/m32r/platforms/oaks32r/setup.c
@@ -11,7 +11,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 
-#include <asm/system.h>
 #include <asm/m32r.h>
 #include <asm/io.h>
 
diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c
index 32660705f5fd..cd0170483e83 100644
--- a/arch/m32r/platforms/opsput/setup.c
+++ b/arch/m32r/platforms/opsput/setup.c
@@ -17,7 +17,6 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 
-#include <asm/system.h>
 #include <asm/m32r.h>
 #include <asm/io.h>
 
diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c
index 0c7a1e8c77b0..dcde0ec777f6 100644
--- a/arch/m32r/platforms/usrv/setup.c
+++ b/arch/m32r/platforms/usrv/setup.c
@@ -11,7 +11,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 
-#include <asm/system.h>
 #include <asm/m32r.h>
 #include <asm/io.h>
 