path: root/arch/m32r/include/asm
author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-28 18:58:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-28 18:58:21 -0400
commit     0195c00244dc2e9f522475868fa278c473ba7339
tree       f97ca98ae64ede2c33ad3de05ed7bbfa4f4495ed /arch/m32r/include/asm
parent     f21ce8f8447c8be8847dadcfdbcc76b0d7365fa5
parent     141124c02059eee9dbc5c86ea797b1ca888e77f7
Merge tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system
Pull "Disintegrate and delete asm/system.h" from David Howells:

 "Here are a bunch of patches to disintegrate asm/system.h into a set of
  separate bits to relieve the problem of circular inclusion dependencies.
  I've built all the working defconfigs from all the arches that I can and
  made sure that they don't break.

  The reason for these patches is that I recently encountered a circular
  dependency problem that came about when I produced some patches to
  optimise get_order() by rewriting it to use ilog2(). This uses bitops -
  and on the SH arch asm/bitops.h drags in asm-generic/get_order.h by a
  circuitous route involving asm/system.h.

  The main difficulty seems to be asm/system.h. It holds a number of low
  level bits with no/few dependencies that are commonly used (eg. memory
  barriers) and a number of bits with more dependencies that aren't used
  in many places (eg. switch_to()).

  These patches break asm/system.h up into the following core pieces:

    (1) asm/barrier.h

        Move memory barriers here. This is already done for MIPS and Alpha.

    (2) asm/switch_to.h

        Move switch_to() and related stuff here.

    (3) asm/exec.h

        Move arch_align_stack() here. Other process execution related bits
        could perhaps go here from asm/processor.h.

    (4) asm/cmpxchg.h

        Move xchg() and cmpxchg() here as they're full word atomic ops and
        frequently used by atomic_xchg() and atomic_cmpxchg().

    (5) asm/bug.h

        Move die() and related bits.

    (6) asm/auxvec.h

        Move AT_VECTOR_SIZE_ARCH here.

  Other arch headers are created as needed on a per-arch basis."

Fixed up some conflicts from other header file cleanups and moving code
around that has happened in the meantime, so David's testing is somewhat
weakened by that. We'll find out anything that got broken and fix it.

* tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system: (38 commits)
  Delete all instances of asm/system.h
  Remove all #inclusions of asm/system.h
  Add #includes needed to permit the removal of asm/system.h
  Move all declarations of free_initmem() to linux/mm.h
  Disintegrate asm/system.h for OpenRISC
  Split arch_align_stack() out from asm-generic/system.h
  Split the switch_to() wrapper out of asm-generic/system.h
  Move the asm-generic/system.h xchg() implementation to asm-generic/cmpxchg.h
  Create asm-generic/barrier.h
  Make asm-generic/cmpxchg.h #include asm-generic/cmpxchg-local.h
  Disintegrate asm/system.h for Xtensa
  Disintegrate asm/system.h for Unicore32 [based on ver #3, changed by gxt]
  Disintegrate asm/system.h for Tile
  Disintegrate asm/system.h for Sparc
  Disintegrate asm/system.h for SH
  Disintegrate asm/system.h for Score
  Disintegrate asm/system.h for S390
  Disintegrate asm/system.h for PowerPC
  Disintegrate asm/system.h for PA-RISC
  Disintegrate asm/system.h for MN10300
  ...
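To make the shape of the split concrete, here is a minimal sketch of a barrier-only header in the spirit of the new asm/barrier.h files. It is illustrative only -- the file and macro set below are not part of the real patches -- but it shows the point: a header like this depends on nothing, so low-level headers such as asm/bitops.h can include it without indirectly re-including themselves.

/* sketch_barrier.h -- hypothetical, trimmed to the essentials */
#ifndef SKETCH_BARRIER_H
#define SKETCH_BARRIER_H

/* compiler barrier: stops the compiler reordering memory accesses
 * across this point; expands to no machine code at all */
#define barrier()	__asm__ __volatile__("" : : : "memory")

/* on a UP-oriented arch such as m32r these all collapse to barrier() */
#define mb()	barrier()
#define rmb()	mb()
#define wmb()	mb()

#endif /* SKETCH_BARRIER_H */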
Diffstat (limited to 'arch/m32r/include/asm')
-rw-r--r--  arch/m32r/include/asm/atomic.h       |    3
-rw-r--r--  arch/m32r/include/asm/barrier.h      |   94
-rw-r--r--  arch/m32r/include/asm/bitops.h       |    3
-rw-r--r--  arch/m32r/include/asm/cmpxchg.h      |  221
-rw-r--r--  arch/m32r/include/asm/dcache_clear.h |   29
-rw-r--r--  arch/m32r/include/asm/exec.h         |   14
-rw-r--r--  arch/m32r/include/asm/local.h        |    1
-rw-r--r--  arch/m32r/include/asm/spinlock.h     |    1
-rw-r--r--  arch/m32r/include/asm/switch_to.h    |   51
-rw-r--r--  arch/m32r/include/asm/system.h       |  367
10 files changed, 414 insertions, 370 deletions
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 1e7f29fb21f2..0d81697c326c 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -11,7 +11,8 @@
 
 #include <linux/types.h>
 #include <asm/assembler.h>
-#include <asm/system.h>
+#include <asm/cmpxchg.h>
+#include <asm/dcache_clear.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h
new file mode 100644
index 000000000000..6976621efd3f
--- /dev/null
+++ b/arch/m32r/include/asm/barrier.h
@@ -0,0 +1,94 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
 */
#ifndef _ASM_M32R_BARRIER_H
#define _ASM_M32R_BARRIER_H

#define nop()	__asm__ __volatile__ ("nop" : : )

/*
 * Memory barrier.
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb()	barrier()
#define rmb()	mb()
#define wmb()	mb()

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#endif /* _ASM_M32R_BARRIER_H */
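For illustration, the classic publish/consume idiom these macros support, mirroring the two-CPU example in the comment above (the struct, variable, and function names here are hypothetical):

struct msg {
	int data;
};

static struct msg m;
static struct msg *p;	/* NULL until published */

/* CPU 0: fill in the payload, then publish the pointer; smp_wmb()
 * keeps the payload store ordered before the pointer store. */
static void publish(void)
{
	m.data = 42;
	smp_wmb();
	p = &m;
}

/* CPU 1: the load of q->data depends on the load of p, so
 * smp_read_barrier_depends() -- a no-op here, as defined above --
 * suffices; only Alpha-like CPUs need a real barrier for this. */
static int consume(void)
{
	struct msg *q = p;

	if (!q)
		return -1;
	smp_read_barrier_depends();
	return q->data;
}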
diff --git a/arch/m32r/include/asm/bitops.h b/arch/m32r/include/asm/bitops.h
index 6300f22cdbdb..d3dea9ac7d4e 100644
--- a/arch/m32r/include/asm/bitops.h
+++ b/arch/m32r/include/asm/bitops.h
@@ -16,9 +16,10 @@
 #endif
 
 #include <linux/compiler.h>
+#include <linux/irqflags.h>
 #include <asm/assembler.h>
-#include <asm/system.h>
 #include <asm/byteorder.h>
+#include <asm/dcache_clear.h>
 #include <asm/types.h>
 
 /*
diff --git a/arch/m32r/include/asm/cmpxchg.h b/arch/m32r/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..de651db20b43
--- /dev/null
+++ b/arch/m32r/include/asm/cmpxchg.h
@@ -0,0 +1,221 @@
#ifndef _ASM_M32R_CMPXCHG_H
#define _ASM_M32R_CMPXCHG_H

/*
 * M32R version:
 *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/irqflags.h>
#include <asm/assembler.h>
#include <asm/dcache_clear.h>

extern void __xchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
#ifndef CONFIG_SMP
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
#else  /* CONFIG_SMP */
	case 4:
		__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%2")
			"lock	%0, @%2;	\n\t"
			"unlock	%1, @%2;	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr)
			: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
		break;
#endif  /* CONFIG_SMP */
	default:
		__xchg_called_with_bad_pointer();
	}

	local_irq_restore(flags);

	return (tmp);
}

#define xchg(ptr, x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

static __always_inline unsigned long
__xchg_local(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	default:
		__xchg_called_with_bad_pointer();
	}

	local_irq_restore(flags);

	return (tmp);
}

#define xchg_local(ptr, x) \
	((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr), \
			sizeof(*(ptr))))

#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			M32R_LOCK" %0, @%1;	\n"
		"	bne	%0, %2, 1f;	\n"
			M32R_UNLOCK" %3, @%1;	\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			M32R_UNLOCK" %0, @%1;	\n"
		"	.fillinsn		\n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
		);
	local_irq_restore(flags);

	return retval;
}

static inline unsigned long
__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old,
			unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			"ld %0, @%1;		\n"
		"	bne	%0, %2, 1f;	\n"
			"st %3, @%1;		\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			"st %0, @%1;		\n"
		"	.fillinsn		\n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
		);
	local_irq_restore(flags);

	return retval;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif /* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n) \
	((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o), \
			(unsigned long)(n), sizeof(*(ptr))))

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_local_u32(ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif /* _ASM_M32R_CMPXCHG_H */
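For illustration, the canonical retry loop built on cmpxchg(): read the current value, compute the update, and retry if another CPU changed the word in between. The counter and function names below are hypothetical:

static unsigned int counter;

static void counter_inc(void)
{
	unsigned int old, new;

	do {
		old = counter;
		new = old + 1;
		/* cmpxchg() returns the value it actually found at the
		 * address; anything other than `old` means another CPU
		 * raced us and we must retry with the fresh value. */
	} while (cmpxchg(&counter, old, new) != old);
}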
diff --git a/arch/m32r/include/asm/dcache_clear.h b/arch/m32r/include/asm/dcache_clear.h
new file mode 100644
index 000000000000..a0ae06c2e9e7
--- /dev/null
+++ b/arch/m32r/include/asm/dcache_clear.h
@@ -0,0 +1,29 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
 */
#ifndef _ASM_M32R_DCACHE_CLEAR_H
#define _ASM_M32R_DCACHE_CLEAR_H

#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr) \
	"seth	"reg1", #high(dcache_dummy);		\n\t" \
	"or3	"reg1", "reg1", #low(dcache_dummy);	\n\t" \
	"lock	"reg0", @"reg1";			\n\t" \
	"add3	"reg0", "addr", #0x1000;		\n\t" \
	"ld	"reg0", @"reg0";			\n\t" \
	"add3	"reg0", "addr", #0x2000;		\n\t" \
	"ld	"reg0", @"reg0";			\n\t" \
	"unlock	"reg0", @"reg1";			\n\t"
	/* FIXME: This workaround code cannot handle kernel modules
	 * correctly under SMP environment.
	 */
#else	/* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif	/* CONFIG_CHIP_M32700_TS1 */

#endif /* _ASM_M32R_DCACHE_CLEAR_H */
diff --git a/arch/m32r/include/asm/exec.h b/arch/m32r/include/asm/exec.h
new file mode 100644
index 000000000000..c805dbd75b5d
--- /dev/null
+++ b/arch/m32r/include/asm/exec.h
@@ -0,0 +1,14 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
 */
#ifndef _ASM_M32R_EXEC_H
#define _ASM_M32R_EXEC_H

#define arch_align_stack(x) (x)

#endif /* _ASM_M32R_EXEC_H */
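On m32r, arch_align_stack() simply returns the stack pointer unchanged. For contrast, a rough sketch of what architectures that randomize the initial user stack do with this hook -- modelled loosely on other arches of this era; the exact guards vary, and none of this is part of the patch above:

unsigned long arch_align_stack(unsigned long sp)
{
	/* slide the stack start down by up to 8 KiB, then re-align;
	 * ADDR_NO_RANDOMIZE lets a process opt out of randomization */
	if (!(current->personality & ADDR_NO_RANDOMIZE))
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}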
diff --git a/arch/m32r/include/asm/local.h b/arch/m32r/include/asm/local.h
index 734bca87018a..4045db3e4f65 100644
--- a/arch/m32r/include/asm/local.h
+++ b/arch/m32r/include/asm/local.h
@@ -12,7 +12,6 @@
 
 #include <linux/percpu.h>
 #include <asm/assembler.h>
-#include <asm/system.h>
 #include <asm/local.h>
 
 /*
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index b0ea2f26da3b..fa13694eaae3 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/compiler.h> 12#include <linux/compiler.h>
13#include <linux/atomic.h> 13#include <linux/atomic.h>
14#include <asm/dcache_clear.h>
14#include <asm/page.h> 15#include <asm/page.h>
15 16
16/* 17/*
diff --git a/arch/m32r/include/asm/switch_to.h b/arch/m32r/include/asm/switch_to.h
new file mode 100644
index 000000000000..4b262f7a8fe9
--- /dev/null
+++ b/arch/m32r/include/asm/switch_to.h
@@ -0,0 +1,51 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
 */
#ifndef _ASM_M32R_SWITCH_TO_H
#define _ASM_M32R_SWITCH_TO_H

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be struct task_struct, but it isn't always defined
 */

#if defined(CONFIG_FRAME_POINTER) || \
	!defined(CONFIG_SCHED_OMIT_FRAME_POINTER)
#define M32R_PUSH_FP "	push fp\n"
#define M32R_POP_FP  "	pop  fp\n"
#else
#define M32R_PUSH_FP ""
#define M32R_POP_FP  ""
#endif

#define switch_to(prev, next, last)  do { \
	__asm__ __volatile__ ( \
		"	seth	lr, #high(1f)				\n" \
		"	or3	lr, lr, #low(1f)			\n" \
		"	st	lr, @%4  ; store old LR			\n" \
		"	ld	lr, @%5  ; load new LR			\n" \
			M32R_PUSH_FP \
		"	st	sp, @%2  ; store old SP			\n" \
		"	ld	sp, @%3  ; load new SP			\n" \
		"	push	%1  ; store `prev' on new stack		\n" \
		"	jmp	lr					\n" \
		"	.fillinsn					\n" \
		"1:							\n" \
		"	pop	%0  ; restore `__last' from new stack	\n" \
			M32R_POP_FP \
		: "=r" (last) \
		: "0" (prev), \
		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
		: "memory", "lr" \
	); \
} while(0)

#endif /* _ASM_M32R_SWITCH_TO_H */
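A sketch of how the scheduler core uses this macro (simplified; the function below is illustrative, not kernel code). The third argument exists because, by the time this task runs again, the task it originally switched away from may no longer be `prev`:

static void context_switch_sketch(struct task_struct *prev,
				  struct task_struct *next)
{
	struct task_struct *last;

	/* after this line the CPU is running `next`; execution resumes
	 * here only when some CPU schedules this task back in */
	switch_to(prev, next, last);

	/* `last` now names the task we actually came from, which need
	 * not be the original `prev` */
	(void)last;
}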
diff --git a/arch/m32r/include/asm/system.h b/arch/m32r/include/asm/system.h
deleted file mode 100644
index 13c46794ccb1..000000000000
--- a/arch/m32r/include/asm/system.h
+++ /dev/null
@@ -1,367 +0,0 @@
#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/assembler.h>

#ifdef __KERNEL__

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be struct task_struct, but it isn't always defined
 */

#if defined(CONFIG_FRAME_POINTER) || \
	!defined(CONFIG_SCHED_OMIT_FRAME_POINTER)
#define M32R_PUSH_FP "	push fp\n"
#define M32R_POP_FP  "	pop  fp\n"
#else
#define M32R_PUSH_FP ""
#define M32R_POP_FP  ""
#endif

#define switch_to(prev, next, last)  do { \
	__asm__ __volatile__ ( \
		"	seth	lr, #high(1f)				\n" \
		"	or3	lr, lr, #low(1f)			\n" \
		"	st	lr, @%4  ; store old LR			\n" \
		"	ld	lr, @%5  ; load new LR			\n" \
			M32R_PUSH_FP \
		"	st	sp, @%2  ; store old SP			\n" \
		"	ld	sp, @%3  ; load new SP			\n" \
		"	push	%1  ; store `prev' on new stack		\n" \
		"	jmp	lr					\n" \
		"	.fillinsn					\n" \
		"1:							\n" \
		"	pop	%0  ; restore `__last' from new stack	\n" \
			M32R_POP_FP \
		: "=r" (last) \
		: "0" (prev), \
		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
		: "memory", "lr" \
	); \
} while(0)

#define nop()	__asm__ __volatile__ ("nop" : : )

#define xchg(ptr, x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define xchg_local(ptr, x) \
	((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr), \
			sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr) \
	"seth	"reg1", #high(dcache_dummy);		\n\t" \
	"or3	"reg1", "reg1", #low(dcache_dummy);	\n\t" \
	"lock	"reg0", @"reg1";			\n\t" \
	"add3	"reg0", "addr", #0x1000;		\n\t" \
	"ld	"reg0", @"reg0";			\n\t" \
	"add3	"reg0", "addr", #0x2000;		\n\t" \
	"ld	"reg0", @"reg0";			\n\t" \
	"unlock	"reg0", @"reg1";			\n\t"
	/* FIXME: This workaround code cannot handle kernel modules
	 * correctly under SMP environment.
	 */
#else	/* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif	/* CONFIG_CHIP_M32700_TS1 */

static __always_inline unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
#ifndef CONFIG_SMP
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
#else  /* CONFIG_SMP */
	case 4:
		__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%2")
			"lock	%0, @%2;	\n\t"
			"unlock	%1, @%2;	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr)
			: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
		break;
#endif  /* CONFIG_SMP */
	default:
		__xchg_called_with_bad_pointer();
	}

	local_irq_restore(flags);

	return (tmp);
}

static __always_inline unsigned long
__xchg_local(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	default:
		__xchg_called_with_bad_pointer();
	}

	local_irq_restore(flags);

	return (tmp);
}

#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			M32R_LOCK" %0, @%1;	\n"
		"	bne	%0, %2, 1f;	\n"
			M32R_UNLOCK" %3, @%1;	\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			M32R_UNLOCK" %0, @%1;	\n"
		"	.fillinsn		\n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
		);
	local_irq_restore(flags);

	return retval;
}

static inline unsigned long
__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old,
			unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			"ld %0, @%1;		\n"
		"	bne	%0, %2, 1f;	\n"
			"st %3, @%1;		\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			"st %0, @%1;		\n"
		"	.fillinsn		\n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
		);
	local_irq_restore(flags);

	return retval;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif /* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n) \
	((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o), \
			(unsigned long)(n), sizeof(*(ptr))))

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_local_u32(ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif /* __KERNEL__ */

/*
 * Memory barrier.
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb()	barrier()
#define rmb()	mb()
#define wmb()	mb()

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define arch_align_stack(x) (x)

#endif /* _ASM_M32R_SYSTEM_H */