author		Greg Ungerer <gerg@uclinux.org>	2011-05-05 08:32:12 -0400
committer	Greg Ungerer <gerg@uclinux.org>	2011-07-24 21:20:40 -0400
commit		f941f5caa0e947f4ed060a751ca4a3bf552af625 (patch)
tree		f6811757e568e74307abd39534b1d7e7813c8d0f /arch/m68k/include/asm
parent		10f939ff3ab80514da3bd96357bb54a8a59b9225 (diff)
m68k: merge MMU and non-MMU versions of system.h
The non-MMU m68k targets can use the same asm/system.h as the MMU
targets. So switch the current system_mm.h to be system.h, and remove
system_no.h.

The assembly support code for the non-MMU resume functions needs to be
modified to match the now common switch_to() macro. Specifically this
means correctly saving and restoring the status flags in the case of
the ColdFire resume, and some reordering of the code to not use
registers before they are saved or after they are restored.

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Diffstat (limited to 'arch/m68k/include/asm')
-rw-r--r--	arch/m68k/include/asm/entry_no.h	|  12
-rw-r--r--	arch/m68k/include/asm/system.h		| 194
-rw-r--r--	arch/m68k/include/asm/system_mm.h	| 193
-rw-r--r--	arch/m68k/include/asm/system_no.h	| 153
4 files changed, 197 insertions(+), 355 deletions(-)
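
A quick orientation before the diff: the now-common switch_to() macro
(added to system.h below) pins prev in %a0 and next in %a1, calls
resume(), and receives the task we actually switched away from back in
%d1. A minimal call-site sketch, hypothetical and not part of this
patch (at this point in history the real caller is the scheduler
core's context_switch()):

static struct task_struct *context_switch_sketch(struct task_struct *prev,
                                                 struct task_struct *next)
{
        struct task_struct *last;

        /* Expands to the inline asm in system.h below: prev/next pinned
         * to %a0/%a1, "jbsr resume", previous task returned in %d1. */
        switch_to(prev, next, last);
        return last;
}
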
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index 627d69bacc58..68611e3dbb1d 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -96,11 +96,11 @@
 .endm
 
 .macro RDUSP
-	movel	sw_usp,%a2
+	movel	sw_usp,%a3
 .endm
 
 .macro WRUSP
-	movel	%a0,sw_usp
+	movel	%a3,sw_usp
 .endm
 
 #else /* !CONFIG_COLDFIRE_SW_A7 */
@@ -127,13 +127,13 @@
 .endm
 
 .macro RDUSP
-	/*move	%usp,%a2*/
-	.word	0x4e6a
+	/*move	%usp,%a3*/
+	.word	0x4e6b
 .endm
 
 .macro WRUSP
-	/*move	%a0,%usp*/
-	.word	0x4e60
+	/*move	%a3,%usp*/
+	.word	0x4e63
 .endm
 
 #endif /* !CONFIG_COLDFIRE_SW_A7 */
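
A note on the hand-assembled opcodes above, decoded from the m68k
MOVE USP instruction encoding rather than stated in the patch:
"move %usp,%An" encodes as 0x4e68 | n and "move %An,%usp" as
0x4e60 | n, so retargeting the macros from %a2/%a0 to %a3 turns
0x4e6a into 0x4e6b and 0x4e60 into 0x4e63.
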
diff --git a/arch/m68k/include/asm/system.h b/arch/m68k/include/asm/system.h
index ccea925ff4f5..47b01f4726bc 100644
--- a/arch/m68k/include/asm/system.h
+++ b/arch/m68k/include/asm/system.h
@@ -1,5 +1,193 @@
-#ifdef __uClinux__
-#include "system_no.h"
+#ifndef _M68K_SYSTEM_H
+#define _M68K_SYSTEM_H
+
+#include <linux/linkage.h>
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+#include <asm/segment.h>
+#include <asm/entry.h>
+
+#ifdef __KERNEL__
+
+/*
+ * switch_to(n) should switch tasks to task ptr, first checking that
+ * ptr isn't the current task, in which case it does nothing. This
+ * also clears the TS-flag if the task we switched to has used the
+ * math co-processor latest.
+ */
+/*
+ * switch_to() saves the extra registers, that are not saved
+ * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
+ * a0-a1. Some of these are used by schedule() and its predecessors
+ * and so we might get see unexpected behaviors when a task returns
+ * with unexpected register values.
+ *
+ * syscall stores these registers itself and none of them are used
+ * by syscall after the function in the syscall has been called.
+ *
+ * Beware that resume now expects *next to be in d1 and the offset of
+ * tss to be in a1. This saves a few instructions as we no longer have
+ * to push them onto the stack and read them back right after.
+ *
+ * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
+ *
+ * Changed 96/09/19 by Andreas Schwab
+ * pass prev in a0, next in a1
+ */
+asmlinkage void resume(void);
+#define switch_to(prev,next,last) do { \
+	register void *_prev __asm__ ("a0") = (prev); \
+	register void *_next __asm__ ("a1") = (next); \
+	register void *_last __asm__ ("d1"); \
+	__asm__ __volatile__("jbsr resume" \
+			    : "=a" (_prev), "=a" (_next), "=d" (_last) \
+			    : "0" (_prev), "1" (_next) \
+			    : "d0", "d2", "d3", "d4", "d5"); \
+	(last) = _last; \
+} while (0)
+
+
+/*
+ * Force strict CPU ordering.
+ * Not really required on m68k...
+ */
+#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
+#define mb()		barrier()
+#define rmb()		barrier()
+#define wmb()		barrier()
+#define read_barrier_depends()	((void)0)
+#define set_mb(var, value)	({ (var) = (value); wmb(); })
+
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	((void)0)
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+
+#ifndef CONFIG_RMW_INSNS
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	unsigned long flags, tmp;
+
+	local_irq_save(flags);
+
+	switch (size) {
+	case 1:
+		tmp = *(u8 *)ptr;
+		*(u8 *)ptr = x;
+		x = tmp;
+		break;
+	case 2:
+		tmp = *(u16 *)ptr;
+		*(u16 *)ptr = x;
+		x = tmp;
+		break;
+	case 4:
+		tmp = *(u32 *)ptr;
+		*(u32 *)ptr = x;
+		x = tmp;
+		break;
+	default:
+		BUG();
+	}
+
+	local_irq_restore(flags);
+	return x;
+}
 #else
-#include "system_mm.h"
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__
+			("moveb %2,%0\n\t"
+			 "1:\n\t"
+			 "casb %0,%1,%2\n\t"
+			 "jne 1b"
+			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	case 2:
+		__asm__ __volatile__
+			("movew %2,%0\n\t"
+			 "1:\n\t"
+			 "casw %0,%1,%2\n\t"
+			 "jne 1b"
+			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	case 4:
+		__asm__ __volatile__
+			("movel %2,%0\n\t"
+			 "1:\n\t"
+			 "casl %0,%1,%2\n\t"
+			 "jne 1b"
+			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	}
+	return x;
+}
 #endif
+
+#include <asm-generic/cmpxchg-local.h>
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#ifdef CONFIG_RMW_INSNS
+#define __HAVE_ARCH_CMPXCHG	1
+
+static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
+				      unsigned long new, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__ ("casb %0,%2,%1"
+				      : "=d" (old), "=m" (*(char *)p)
+				      : "d" (new), "0" (old), "m" (*(char *)p));
+		break;
+	case 2:
+		__asm__ __volatile__ ("casw %0,%2,%1"
+				      : "=d" (old), "=m" (*(short *)p)
+				      : "d" (new), "0" (old), "m" (*(short *)p));
+		break;
+	case 4:
+		__asm__ __volatile__ ("casl %0,%2,%1"
+				      : "=d" (old), "=m" (*(int *)p)
+				      : "d" (new), "0" (old), "m" (*(int *)p));
+		break;
+	}
+	return old;
+}
+
+#define cmpxchg(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+			(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+			(unsigned long)(n), sizeof(*(ptr))))
+#else
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+			(unsigned long)(n), sizeof(*(ptr))))
+
+#include <asm-generic/cmpxchg.h>
+
+#endif
+
+#define arch_align_stack(x) (x)
+
+#endif /* __KERNEL__ */
+
+#endif /* _M68K_SYSTEM_H */
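
The cmpxchg() defined in the new system.h follows the contract spelled
out in its comment: it returns the value that was in memory, and the
swap succeeded iff that return value equals the old value passed in.
As a quick illustration, a hypothetical lock-free add built on top of
it (a sketch, not part of this patch):

static inline void atomic_add_sketch(volatile int *v, int inc)
{
        int old;

        /* Retry until no other CPU modified *v between our load
         * and the compare-and-swap. */
        do {
                old = *v;
        } while (cmpxchg(v, old, old + inc) != old);
}
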
diff --git a/arch/m68k/include/asm/system_mm.h b/arch/m68k/include/asm/system_mm.h
deleted file mode 100644
index 47b01f4726bc..000000000000
--- a/arch/m68k/include/asm/system_mm.h
+++ /dev/null
@@ -1,193 +0,0 @@
-#ifndef _M68K_SYSTEM_H
-#define _M68K_SYSTEM_H
-
-#include <linux/linkage.h>
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-#include <asm/segment.h>
-#include <asm/entry.h>
-
-#ifdef __KERNEL__
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing. This
- * also clears the TS-flag if the task we switched to has used the
- * math co-processor latest.
- */
-/*
- * switch_to() saves the extra registers, that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors
- * and so we might get see unexpected behaviors when a task returns
- * with unexpected register values.
- *
- * syscall stores these registers itself and none of them are used
- * by syscall after the function in the syscall has been called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1
- */
-asmlinkage void resume(void);
-#define switch_to(prev,next,last) do { \
-	register void *_prev __asm__ ("a0") = (prev); \
-	register void *_next __asm__ ("a1") = (next); \
-	register void *_last __asm__ ("d1"); \
-	__asm__ __volatile__("jbsr resume" \
-			    : "=a" (_prev), "=a" (_next), "=d" (_last) \
-			    : "0" (_prev), "1" (_next) \
-			    : "d0", "d2", "d3", "d4", "d5"); \
-	(last) = _last; \
-} while (0)
-
-
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
-#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
-#define mb()		barrier()
-#define rmb()		barrier()
-#define wmb()		barrier()
-#define read_barrier_depends()	((void)0)
-#define set_mb(var, value)	({ (var) = (value); wmb(); })
-
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	((void)0)
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-#ifndef CONFIG_RMW_INSNS
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	unsigned long flags, tmp;
-
-	local_irq_save(flags);
-
-	switch (size) {
-	case 1:
-		tmp = *(u8 *)ptr;
-		*(u8 *)ptr = x;
-		x = tmp;
-		break;
-	case 2:
-		tmp = *(u16 *)ptr;
-		*(u16 *)ptr = x;
-		x = tmp;
-		break;
-	case 4:
-		tmp = *(u32 *)ptr;
-		*(u32 *)ptr = x;
-		x = tmp;
-		break;
-	default:
-		BUG();
-	}
-
-	local_irq_restore(flags);
-	return x;
-}
-#else
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-	case 1:
-		__asm__ __volatile__
-			("moveb %2,%0\n\t"
-			 "1:\n\t"
-			 "casb %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 2:
-		__asm__ __volatile__
-			("movew %2,%0\n\t"
-			 "1:\n\t"
-			 "casw %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 4:
-		__asm__ __volatile__
-			("movel %2,%0\n\t"
-			 "1:\n\t"
-			 "casl %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	}
-	return x;
-}
-#endif
-
-#include <asm-generic/cmpxchg-local.h>
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-#ifdef CONFIG_RMW_INSNS
-#define __HAVE_ARCH_CMPXCHG	1
-
-static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
-				      unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-		__asm__ __volatile__ ("casb %0,%2,%1"
-				      : "=d" (old), "=m" (*(char *)p)
-				      : "d" (new), "0" (old), "m" (*(char *)p));
-		break;
-	case 2:
-		__asm__ __volatile__ ("casw %0,%2,%1"
-				      : "=d" (old), "=m" (*(short *)p)
-				      : "d" (new), "0" (old), "m" (*(short *)p));
-		break;
-	case 4:
-		__asm__ __volatile__ ("casl %0,%2,%1"
-				      : "=d" (old), "=m" (*(int *)p)
-				      : "d" (new), "0" (old), "m" (*(int *)p));
-		break;
-	}
-	return old;
-}
-
-#define cmpxchg(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
-			(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
-			(unsigned long)(n), sizeof(*(ptr))))
-#else
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
-
-#include <asm-generic/cmpxchg.h>
-
-#endif
-
-#define arch_align_stack(x) (x)
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_SYSTEM_H */
diff --git a/arch/m68k/include/asm/system_no.h b/arch/m68k/include/asm/system_no.h
deleted file mode 100644
index 6fe9f93bc3ff..000000000000
--- a/arch/m68k/include/asm/system_no.h
+++ /dev/null
@@ -1,153 +0,0 @@
-#ifndef _M68KNOMMU_SYSTEM_H
-#define _M68KNOMMU_SYSTEM_H
-
-#include <linux/linkage.h>
-#include <linux/irqflags.h>
-#include <asm/segment.h>
-#include <asm/entry.h>
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing. This
- * also clears the TS-flag if the task we switched to has used the
- * math co-processor latest.
- */
-/*
- * switch_to() saves the extra registers, that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors
- * and so we might get see unexpected behaviors when a task returns
- * with unexpected register values.
- *
- * syscall stores these registers itself and none of them are used
- * by syscall after the function in the syscall has been called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1, offset of tss in d1, and whether
- * the mm structures are shared in d2 (to avoid atc flushing).
- */
-asmlinkage void resume(void);
-#define switch_to(prev,next,last)				\
-{								\
-	void *_last;						\
-	__asm__ __volatile__(					\
-		"movel	%1, %%a0\n\t"				\
-		"movel	%2, %%a1\n\t"				\
-		"jbsr	resume\n\t"				\
-		"movel	%%d1, %0\n\t"				\
-		: "=d" (_last)					\
-		: "d" (prev), "d" (next)			\
-		: "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1");	\
-	(last) = _last;						\
-}
-
-#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
-
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
-#define nop()	asm volatile ("nop"::)
-#define mb()	asm volatile (""   : : :"memory")
-#define rmb()	asm volatile (""   : : :"memory")
-#define wmb()	asm volatile (""   : : :"memory")
-#define set_mb(var, value)	({ (var) = (value); wmb(); })
-
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
-
-#define read_barrier_depends()	((void)0)
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-#ifndef CONFIG_RMW_INSNS
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	unsigned long tmp, flags;
-
-	local_irq_save(flags);
-
-	switch (size) {
-	case 1:
-		__asm__ __volatile__
-		("moveb %2,%0\n\t"
-		 "moveb %1,%2"
-		 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 2:
-		__asm__ __volatile__
-		("movew %2,%0\n\t"
-		 "movew %1,%2"
-		 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 4:
-		__asm__ __volatile__
-		("movel %2,%0\n\t"
-		 "movel %1,%2"
-		 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	}
-	local_irq_restore(flags);
-	return tmp;
-}
-#else
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-	case 1:
-		__asm__ __volatile__
-			("moveb %2,%0\n\t"
-			 "1:\n\t"
-			 "casb %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 2:
-		__asm__ __volatile__
-			("movew %2,%0\n\t"
-			 "1:\n\t"
-			 "casw %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 4:
-		__asm__ __volatile__
-			("movel %2,%0\n\t"
-			 "1:\n\t"
-			 "casl %0,%1,%2\n\t"
-			 "jne 1b"
-			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	}
-	return x;
-}
-#endif
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#include <asm-generic/cmpxchg.h>
-
-#define arch_align_stack(x) (x)
-
-
-#endif /* _M68KNOMMU_SYSTEM_H */