author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 18:58:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 18:58:21 -0400
commit		0195c00244dc2e9f522475868fa278c473ba7339 (patch)
tree		f97ca98ae64ede2c33ad3de05ed7bbfa4f4495ed /arch/m68k/include/asm
parent		f21ce8f8447c8be8847dadcfdbcc76b0d7365fa5 (diff)
parent		141124c02059eee9dbc5c86ea797b1ca888e77f7 (diff)
Merge tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system
Pull "Disintegrate and delete asm/system.h" from David Howells:
"Here are a bunch of patches to disintegrate asm/system.h into a set of
separate bits to relieve the problem of circular inclusion
dependencies.
I've built all the working defconfigs from all the arches that I can
and made sure that they don't break.
The reason for these patches is that I recently encountered a circular
dependency problem that came about when I produced some patches to
optimise get_order() by rewriting it to use ilog2().
This uses bitops - and on the SH arch asm/bitops.h drags in
asm-generic/get_order.h by a circuitous route involving asm/system.h.
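For context, the rewrite in question looks roughly like the following -
a minimal sketch of a bitops-based get_order(), not the exact patch. It
assumes the usual kernel definitions of PAGE_SHIFT and fls_long() from
linux/bitops.h, which is precisely the inclusion that set off the cycle:

	/* Sketch only, not the exact patch.  get_order(size) is the
	 * smallest order such that (1 << order) pages hold 'size'
	 * bytes; fls_long(n) == ilog2(n) + 1 for n > 0, and
	 * fls_long(0) == 0 covers sizes up to one page. */
	static inline int get_order(unsigned long size)
	{
		size--;
		size >>= PAGE_SHIFT;
		return fls_long(size);
	}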
The main difficulty seems to be asm/system.h. It holds a number of
low-level bits with no/few dependencies that are commonly used (e.g.
memory barriers) and a number of bits with more dependencies that
aren't used in many places (e.g. switch_to()).
These patches break asm/system.h up into the following core pieces:
(1) asm/barrier.h
Move memory barriers here. This is already done for MIPS and Alpha.
(2) asm/switch_to.h
Move switch_to() and related stuff here.
(3) asm/exec.h
Move arch_align_stack() here. Other process execution related bits
could perhaps go here from asm/processor.h.
(4) asm/cmpxchg.h
Move xchg() and cmpxchg() here as they're full-word atomic ops and
frequently used by atomic_xchg() and atomic_cmpxchg() (see the
sketch after this list).
(5) asm/bug.h
Move die() and related bits.
(6) asm/auxvec.h
Move AT_VECTOR_SIZE_ARCH here.
Other arch headers are created as needed on a per-arch basis."
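As a sketch of the layering described in point (4): the atomic_t helpers
are conventionally thin wrappers around the full-word primitives that now
live in asm/cmpxchg.h. Simplified, and the per-arch definitions vary:

	/* Simplified sketch; the per-arch definitions vary. */
	#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
	#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))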
Fixed up some conflicts from other header file cleanups and code movement
that has happened in the meantime, so David's testing is somewhat weakened
by that. We'll find out about anything that got broken and fix it..
* tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system: (38 commits)
Delete all instances of asm/system.h
Remove all #inclusions of asm/system.h
Add #includes needed to permit the removal of asm/system.h
Move all declarations of free_initmem() to linux/mm.h
Disintegrate asm/system.h for OpenRISC
Split arch_align_stack() out from asm-generic/system.h
Split the switch_to() wrapper out of asm-generic/system.h
Move the asm-generic/system.h xchg() implementation to asm-generic/cmpxchg.h
Create asm-generic/barrier.h
Make asm-generic/cmpxchg.h #include asm-generic/cmpxchg-local.h
Disintegrate asm/system.h for Xtensa
Disintegrate asm/system.h for Unicore32 [based on ver #3, changed by gxt]
Disintegrate asm/system.h for Tile
Disintegrate asm/system.h for Sparc
Disintegrate asm/system.h for SH
Disintegrate asm/system.h for Score
Disintegrate asm/system.h for S390
Disintegrate asm/system.h for PowerPC
Disintegrate asm/system.h for PA-RISC
Disintegrate asm/system.h for MN10300
...
Diffstat (limited to 'arch/m68k/include/asm')
-rw-r--r--	arch/m68k/include/asm/atomic.h	2
-rw-r--r--	arch/m68k/include/asm/barrier.h	20
-rw-r--r--	arch/m68k/include/asm/cmpxchg.h (renamed from arch/m68k/include/asm/system.h)	95
-rw-r--r--	arch/m68k/include/asm/exec.h	6
-rw-r--r--	arch/m68k/include/asm/sun3xflop.h	1
-rw-r--r--	arch/m68k/include/asm/switch_to.h	41
6 files changed, 89 insertions, 76 deletions
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 4eba796c00d4..336e6173794f 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -2,7 +2,7 @@
 #define __ARCH_M68K_ATOMIC__
 
 #include <linux/types.h>
-#include <asm/system.h>
+#include <linux/irqflags.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h
new file mode 100644
index 000000000000..445ce22c23cb
--- /dev/null
+++ b/arch/m68k/include/asm/barrier.h
@@ -0,0 +1,20 @@
+#ifndef _M68K_BARRIER_H
+#define _M68K_BARRIER_H
+
+/*
+ * Force strict CPU ordering.
+ * Not really required on m68k...
+ */
+#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
+#define mb()		barrier()
+#define rmb()		barrier()
+#define wmb()		barrier()
+#define read_barrier_depends()	((void)0)
+#define set_mb(var, value)	({ (var) = (value); wmb(); })
+
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	((void)0)
+
+#endif /* _M68K_BARRIER_H */
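As the comment in the new file notes, the m68k CPUs here don't need
hardware fences, so everything above reduces to barrier(), a compiler-only
fence. A hypothetical usage sketch of the pairing these macros exist to
support (not part of the patch):

	/* Hypothetical sketch, not part of the patch: the classic
	 * publish/consume pairing.  On m68k both fences compile down
	 * to barrier(), constraining only compiler reordering. */
	static int data;
	static int ready;

	static void publish(int v)
	{
		data = v;
		smp_wmb();	/* order data store before flag store */
		ready = 1;
	}

	static int consume(int *out)
	{
		if (!ready)
			return 0;
		smp_rmb();	/* order flag load before data load */
		*out = data;
		return 1;
	}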
diff --git a/arch/m68k/include/asm/system.h b/arch/m68k/include/asm/cmpxchg.h
index 8dc68178716c..5c81d0eae5cf 100644
--- a/arch/m68k/include/asm/system.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -1,74 +1,13 @@
-#ifndef _M68K_SYSTEM_H
-#define _M68K_SYSTEM_H
+#ifndef __ARCH_M68K_CMPXCHG__
+#define __ARCH_M68K_CMPXCHG__
 
-#include <linux/linkage.h>
-#include <linux/kernel.h>
-#include <linux/bug.h>
 #include <linux/irqflags.h>
-#include <asm/segment.h>
-#include <asm/entry.h>
-
-#ifdef __KERNEL__
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing.  This
- * also clears the TS-flag if the task we switched to has used the
- * math co-processor latest.
- */
-/*
- * switch_to() saves the extra registers, that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
- * a0-a1.  Some of these are used by schedule() and its predecessors
- * and so we might get see unexpected behaviors when a task returns
- * with unexpected register values.
- *
- * syscall stores these registers itself and none of them are used
- * by syscall after the function in the syscall has been called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1
- */
-asmlinkage void resume(void);
-#define switch_to(prev,next,last) do { \
-  register void *_prev __asm__ ("a0") = (prev); \
-  register void *_next __asm__ ("a1") = (next); \
-  register void *_last __asm__ ("d1"); \
-  __asm__ __volatile__("jbsr resume" \
-		       : "=a" (_prev), "=a" (_next), "=d" (_last) \
-		       : "0" (_prev), "1" (_next) \
-		       : "d0", "d2", "d3", "d4", "d5"); \
-  (last) = _last; \
-} while (0)
-
-
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
-#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
-#define mb()		barrier()
-#define rmb()		barrier()
-#define wmb()		barrier()
-#define read_barrier_depends()	((void)0)
-#define set_mb(var, value)	({ (var) = (value); wmb(); })
-
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	((void)0)
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
 struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((volatile struct __xchg_dummy *)(x))
 
+extern unsigned long __invalid_xchg_size(unsigned long, volatile void *, int);
+
 #ifndef CONFIG_RMW_INSNS
 static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 {
@@ -93,7 +32,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 		x = tmp;
 		break;
 	default:
-		BUG();
+		tmp = __invalid_xchg_size(x, ptr, size);
+		break;
 	}
 
 	local_irq_restore(flags);
@@ -103,7 +43,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 {
 	switch (size) {
-	    case 1:
+	case 1:
 		__asm__ __volatile__
 			("moveb %2,%0\n\t"
 			 "1:\n\t"
@@ -111,7 +51,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 			 "jne 1b"
 			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
 		break;
-	    case 2:
+	case 2:
 		__asm__ __volatile__
 			("movew %2,%0\n\t"
 			 "1:\n\t"
@@ -119,7 +59,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 			 "jne 1b"
 			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
 		break;
-	    case 4:
+	case 4:
 		__asm__ __volatile__
 			("movel %2,%0\n\t"
 			 "1:\n\t"
@@ -127,15 +67,23 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 			 "jne 1b"
 			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
 		break;
+	default:
+		x = __invalid_xchg_size(x, ptr, size);
+		break;
 	}
 	return x;
 }
 #endif
 
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
 #include <asm-generic/cmpxchg-local.h>
 
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
+extern unsigned long __invalid_cmpxchg_size(volatile void *,
+					    unsigned long, unsigned long, int);
+
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
  * store NEW in MEM.  Return the initial value in MEM.  Success is
@@ -163,6 +111,9 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
 			      : "=d" (old), "=m" (*(int *)p)
 			      : "d" (new), "0" (old), "m" (*(int *)p));
 		break;
+	default:
+		old = __invalid_cmpxchg_size(p, old, new, size);
+		break;
 	}
 	return old;
 }
@@ -187,8 +138,4 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
 
 #endif
 
-#define arch_align_stack(x) (x)
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_SYSTEM_H */
+#endif /* __ARCH_M68K_CMPXCHG__ */
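The semantics documented in the comment above (the returned value equals
OLD exactly when the store happened) yield the usual compare-and-swap
retry loop. A hypothetical usage sketch, not part of the patch:

	/* Hypothetical usage sketch, not part of the patch: a lock-free
	 * increment built on cmpxchg().  The loop retries whenever some
	 * other CPU updated *p between the snapshot and the cmpxchg(). */
	static inline void cas_inc(volatile unsigned long *p)
	{
		unsigned long old;

		do {
			old = *p;	/* snapshot the current value */
		} while (cmpxchg(p, old, old + 1) != old);
	}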
diff --git a/arch/m68k/include/asm/exec.h b/arch/m68k/include/asm/exec.h
new file mode 100644
index 000000000000..0499adf90230
--- /dev/null
+++ b/arch/m68k/include/asm/exec.h
@@ -0,0 +1,6 @@
+#ifndef _M68K_EXEC_H
+#define _M68K_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* _M68K_EXEC_H */
diff --git a/arch/m68k/include/asm/sun3xflop.h b/arch/m68k/include/asm/sun3xflop.h
index 32c45f84ac60..95231e2f9d64 100644
--- a/arch/m68k/include/asm/sun3xflop.h
+++ b/arch/m68k/include/asm/sun3xflop.h
@@ -11,7 +11,6 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/irq.h>
 #include <asm/sun3x.h>
 
diff --git a/arch/m68k/include/asm/switch_to.h b/arch/m68k/include/asm/switch_to.h
new file mode 100644
index 000000000000..16fd6b634982
--- /dev/null
+++ b/arch/m68k/include/asm/switch_to.h
@@ -0,0 +1,41 @@
+#ifndef _M68K_SWITCH_TO_H
+#define _M68K_SWITCH_TO_H
+
+/*
+ * switch_to(n) should switch tasks to task ptr, first checking that
+ * ptr isn't the current task, in which case it does nothing.  This
+ * also clears the TS-flag if the task we switched to has used the
+ * math co-processor latest.
+ */
+/*
+ * switch_to() saves the extra registers, that are not saved
+ * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
+ * a0-a1.  Some of these are used by schedule() and its predecessors
+ * and so we might get see unexpected behaviors when a task returns
+ * with unexpected register values.
+ *
+ * syscall stores these registers itself and none of them are used
+ * by syscall after the function in the syscall has been called.
+ *
+ * Beware that resume now expects *next to be in d1 and the offset of
+ * tss to be in a1. This saves a few instructions as we no longer have
+ * to push them onto the stack and read them back right after.
+ *
+ * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
+ *
+ * Changed 96/09/19 by Andreas Schwab
+ * pass prev in a0, next in a1
+ */
+asmlinkage void resume(void);
+#define switch_to(prev,next,last) do { \
+  register void *_prev __asm__ ("a0") = (prev); \
+  register void *_next __asm__ ("a1") = (next); \
+  register void *_last __asm__ ("d1"); \
+  __asm__ __volatile__("jbsr resume" \
+		       : "=a" (_prev), "=a" (_next), "=d" (_last) \
+		       : "0" (_prev), "1" (_next) \
+		       : "d0", "d2", "d3", "d4", "d5"); \
+  (last) = _last; \
+} while (0)
+
+#endif /* _M68K_SWITCH_TO_H */
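For reference, the consumer of this macro is the scheduler: roughly, the
call site in context_switch() (kernel/sched/) looks like the simplified
sketch below, reusing 'prev' as the 'last' output so the code running
after the switch knows which task it came from:

	/* Simplified sketch of the call site; not part of the patch. */
	switch_to(prev, next, prev);
	barrier();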