author     Russell King <rmk+kernel@arm.linux.org.uk>  2011-03-16 19:35:26 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2011-03-16 19:35:26 -0400
commit     bd1274dc005c2cee41771a7cc616f4709a6e6323
tree       fcfe103a499ca9e3d8fa0ecbca5b7d0d274da5ca  /arch/arm
parent     1f0090a1eaa1b750a2fc5c99c91b790d5322a1fd
parent     3ba6e69ad887f8a814267ed36fd4bfbddf8855a9
Merge branch 'v6v7' into devel

Conflicts:
	arch/arm/include/asm/cacheflush.h
	arch/arm/include/asm/proc-fns.h
	arch/arm/mm/Kconfig
Diffstat (limited to 'arch/arm')
28 files changed, 191 insertions, 189 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1fd3f280b584..ababf41517d6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,7 +7,7 @@ config ARM
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
-	select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
+	select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
@@ -24,7 +24,7 @@ config ARM
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_REGS_AND_STACK_ACCESS_API
-	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7))
+	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_SPARSE_IRQ
@@ -456,6 +456,7 @@ config ARCH_IXP4XX
 
 config ARCH_DOVE
 	bool "Marvell Dove"
+	select CPU_V6K
 	select PCI
 	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_CLOCKEVENTS
@@ -1059,7 +1060,7 @@ config XSCALE_PMU
 	default y
 
 config CPU_HAS_PMU
-	depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \
+	depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \
 		   (!ARCH_OMAP3 || OMAP3_EMU)
 	default y
 	bool
@@ -1075,7 +1076,7 @@ endif
 
 config ARM_ERRATA_411920
 	bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
-	depends on CPU_V6
+	depends on CPU_V6 || CPU_V6K
 	help
 	  Invalidation of the Instruction Cache operation can
 	  fail. This erratum is present in 1136 (before r1p4), 1156 and 1176.
@@ -1318,6 +1319,7 @@ source "kernel/time/Kconfig"
 config SMP
 	bool "Symmetric Multi-Processing (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
+	depends on CPU_V6K || CPU_V7
 	depends on GENERIC_CLOCKEVENTS
 	depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
 		 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
@@ -1429,7 +1431,7 @@ config HZ
 
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
-	depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL
+	depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
 	select AEABI
 	select ARM_ASM_UNIFIED
 	help
@@ -1963,7 +1965,7 @@ config FPE_FASTFPE
 
 config VFP
 	bool "VFP-format floating point maths"
-	depends on CPU_V6 || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
+	depends on CPU_V6 || CPU_V6K || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
 	help
 	  Say Y to include VFP support code in the kernel. This is needed
 	  if your hardware includes a VFP unit.
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 55eca9ed7604..1e20c414d5cf 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -89,6 +89,7 @@ tune-$(CONFIG_CPU_XSCALE)	:=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
 tune-$(CONFIG_CPU_XSC3)		:=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
 tune-$(CONFIG_CPU_FEROCEON)	:=$(call cc-option,-mtune=marvell-f,-mtune=xscale)
 tune-$(CONFIG_CPU_V6)		:=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+tune-$(CONFIG_CPU_V6K)		:=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
 
 ifeq ($(CONFIG_AEABI),y)
 CFLAGS_ABI	:=-mabi=aapcs-linux -mno-thumb-interwork
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 39859216af00..84ac4d656310 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -21,7 +21,7 @@
 
 #if defined(CONFIG_DEBUG_ICEDCC)
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 		.macro	loadsp, rb, tmp
 		.endm
 		.macro	writeb, ch, rb
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index e653a6d3c8d9..4657e877bf8f 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -36,7 +36,7 @@ extern void error(char *x);
 
 #ifdef CONFIG_DEBUG_ICEDCC
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 
 static void icedcc_putc(int ch)
 {
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 7b1bb2bbaf88..af54ed102f5f 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -149,14 +149,18 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
  */
 
 /*
+ * Native endian assembly bitops.  nr = 0 -> word 0 bit 0.
+ */
+extern void _set_bit(int nr, volatile unsigned long * p);
+extern void _clear_bit(int nr, volatile unsigned long * p);
+extern void _change_bit(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit(int nr, volatile unsigned long * p);
+
+/*
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
  */
-extern void _set_bit_le(int nr, volatile unsigned long * p);
-extern void _clear_bit_le(int nr, volatile unsigned long * p);
-extern void _change_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
 extern int _find_first_zero_bit_le(const void * p, unsigned size);
 extern int _find_next_zero_bit_le(const void * p, int size, int offset);
 extern int _find_first_bit_le(const unsigned long *p, unsigned size);
@@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
 /*
  * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
  */
-extern void _set_bit_be(int nr, volatile unsigned long * p);
-extern void _clear_bit_be(int nr, volatile unsigned long * p);
-extern void _change_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);
 extern int _find_first_zero_bit_be(const void * p, unsigned size);
 extern int _find_next_zero_bit_be(const void * p, int size, int offset);
 extern int _find_first_bit_be(const unsigned long *p, unsigned size);
@@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 /*
  * The __* form of bitops are non-atomic and may be reordered.
  */
-#define ATOMIC_BITOP_LE(name,nr,p)		\
-	(__builtin_constant_p(nr) ?		\
-	 ____atomic_##name(nr, p) :		\
-	 _##name##_le(nr,p))
-
-#define ATOMIC_BITOP_BE(name,nr,p)		\
-	(__builtin_constant_p(nr) ?		\
-	 ____atomic_##name(nr, p) :		\
-	 _##name##_be(nr,p))
+#define ATOMIC_BITOP(name,nr,p)			\
+	(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
 #else
-#define ATOMIC_BITOP_LE(name,nr,p)	_##name##_le(nr,p)
-#define ATOMIC_BITOP_BE(name,nr,p)	_##name##_be(nr,p)
+#define ATOMIC_BITOP(name,nr,p)		_##name(nr,p)
 #endif
 
-#define NONATOMIC_BITOP(name,nr,p)	\
-	(____nonatomic_##name(nr, p))
+/*
+ * Native endian atomic definitions.
+ */
+#define set_bit(nr,p)			ATOMIC_BITOP(set_bit,nr,p)
+#define clear_bit(nr,p)			ATOMIC_BITOP(clear_bit,nr,p)
+#define change_bit(nr,p)		ATOMIC_BITOP(change_bit,nr,p)
+#define test_and_set_bit(nr,p)		ATOMIC_BITOP(test_and_set_bit,nr,p)
+#define test_and_clear_bit(nr,p)	ATOMIC_BITOP(test_and_clear_bit,nr,p)
+#define test_and_change_bit(nr,p)	ATOMIC_BITOP(test_and_change_bit,nr,p)
 
 #ifndef __ARMEB__
 /*
  * These are the little endian, atomic definitions.
  */
-#define set_bit(nr,p)			ATOMIC_BITOP_LE(set_bit,nr,p)
-#define clear_bit(nr,p)			ATOMIC_BITOP_LE(clear_bit,nr,p)
-#define change_bit(nr,p)		ATOMIC_BITOP_LE(change_bit,nr,p)
-#define test_and_set_bit(nr,p)		ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p)	ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
 #define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
@@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 #define WORD_BITOFF_TO_LE(x)		((x))
 
 #else
-
 /*
  * These are the big endian, atomic definitions.
  */
-#define set_bit(nr,p)			ATOMIC_BITOP_BE(set_bit,nr,p)
-#define clear_bit(nr,p)			ATOMIC_BITOP_BE(clear_bit,nr,p)
-#define change_bit(nr,p)		ATOMIC_BITOP_BE(change_bit,nr,p)
-#define test_and_set_bit(nr,p)		ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p)	ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_be(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_be(p,sz,off)
 #define find_first_bit(p,sz)		_find_first_bit_be(p,sz)
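The header change above collapses the separate _le/_be families into one native-endian set. As an illustration only (not part of the patch), the consolidated ATOMIC_BITOP() dispatch shown in the hunk behaves like this in C:

```c
/*
 * Illustration only: with the consolidated macros, set_bit() expands the
 * same way on little- and big-endian builds.  In the __builtin_constant_p()
 * branch shown above, a compile-time-constant bit number uses the inline
 * ____atomic_set_bit() helper, while a variable bit number calls the
 * out-of-line assembly routine _set_bit().
 */
static unsigned long flags[2];

static void bitop_dispatch_example(int nr)
{
	set_bit(5, flags);	/* constant nr -> ____atomic_set_bit(5, flags) */
	set_bit(nr, flags);	/* variable nr -> _set_bit(nr, flags) */
}
```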
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 18a56640d97d..d5d8d5c72682 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -187,7 +187,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
  * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
  * will fall through to use __flush_icache_all_generic.
  */
-#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \
+#if (defined(CONFIG_CPU_V7) && \
+     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
 	defined(CONFIG_SMP_ON_UP)
 #define __flush_icache_preferred	__cpuc_flush_icache_all
 #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 0591d35001e5..c7afbc552c7f 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -109,7 +109,7 @@
 # define MULTI_CACHE 1
 #endif
 
-#if defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 //# ifdef _CACHE
 # define MULTI_CACHE 1
 //# else
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index 6469521d092f..e2be7f142668 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -230,7 +230,7 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 # ifdef CPU_NAME
 #  undef  MULTI_CPU
 #  define MULTI_CPU
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 17eb355707dd..fdd3820edff8 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,17 +5,52 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
+/*
+ * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
+ * extensions, so when running on UP, we have to patch these instructions away.
+ */
+#define ALT_SMP(smp, up)					\
+	"9998:	" smp "\n"					\
+	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
+	"	.long	9998b\n"				\
+	"	" up "\n"					\
+	"	.popsection\n"
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEV		ALT_SMP("sev.w", "nop.w")
+/*
+ * For Thumb-2, special care is needed to ensure that the conditional WFE
+ * instruction really does assemble to exactly 4 bytes (as required by
+ * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
+ * assembler to insert an extra (16-bit) IT instruction, depending on the
+ * presence or absence of neighbouring conditional instructions.
+ *
+ * To avoid this unpredictableness, an appropriate IT is inserted explicitly:
+ * the assembler won't change IT instructions which are explicitly present
+ * in the input.
+ */
+#define WFE(cond)	ALT_SMP(		\
+	"it " cond "\n\t"			\
+	"wfe" cond ".n",			\
+						\
+	"nop.w"					\
+)
+#else
+#define SEV		ALT_SMP("sev", "nop")
+#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
+#endif
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
 	__asm__ __volatile__ (
 		"dsb\n"
-		"sev"
+		SEV
 	);
-#elif defined(CONFIG_CPU_32v6K)
+#else
 	__asm__ __volatile__ (
 		"mcr p15, 0, %0, c7, c10, 4\n"
-		"sev"
+		SEV
 		: : "r" (0)
 	);
 #endif
@@ -46,9 +81,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfene\n"
-#endif
+	WFE("ne")
 "	strexeq	%0, %2, [%1]\n"
 "	teqeq	%0, #0\n"
 "	bne	1b"
@@ -107,9 +140,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfene\n"
-#endif
+	WFE("ne")
 "	strexeq	%0, %2, [%1]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
@@ -176,9 +207,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 "1:	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
 "	strexpl	%1, %0, [%2]\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfemi\n"
-#endif
+	WFE("mi")
 "	rsbpls	%0, %1, #0\n"
 "	bmi	1b"
 	: "=&r" (tmp), "=&r" (tmp2)
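The ALT_SMP()/SEV/WFE macros added above record each SMP-only instruction in a .alt.smp.init table so it can be replaced when the kernel finds itself on a uniprocessor. A conceptual C sketch of that fixup step, with hypothetical struct and symbol names (the real fixup lives in the boot assembly, not shown here):

```c
/*
 * Conceptual sketch only -- names are hypothetical.  Each ALT_SMP() site
 * emits the address of its SMP instruction followed by the encoding of the
 * UP replacement into .alt.smp.init; on a uniprocessor every recorded site
 * is rewritten in place before it is ever executed.
 */
struct alt_smp_entry {
	unsigned long insn_addr;	/* address of the "smp" instruction */
	unsigned long up_insn;		/* replacement encoding, e.g. a nop */
};

extern struct alt_smp_entry __alt_smp_begin[], __alt_smp_end[];

static void fixup_smp_on_up(void)
{
	struct alt_smp_entry *e;

	for (e = __alt_smp_begin; e < __alt_smp_end; e++)
		*(unsigned long *)e->insn_addr = e->up_insn;
	/* the real code also cleans the caches over the patched range */
}
```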
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 97f6d60297d5..9a87823642d0 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -347,6 +347,7 @@ void cpu_idle_wait(void);
 #include <asm-generic/cmpxchg-local.h>
 
 #if __LINUX_ARM_ARCH__ < 6
+/* min ARCH < ARMv6 */
 
 #ifdef CONFIG_SMP
 #error "SMP is not supported on this platform"
@@ -365,7 +366,7 @@ void cpu_idle_wait(void);
 #include <asm-generic/cmpxchg.h>
 #endif
 
-#else	/* __LINUX_ARM_ARCH__ >= 6 */
+#else	/* min ARCH >= ARMv6 */
 
 extern void __bad_cmpxchg(volatile void *ptr, int size);
 
@@ -379,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	unsigned long oldval, res;
 
 	switch (size) {
-#ifdef CONFIG_CPU_32v6K
+#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 	case 1:
 		do {
 			asm volatile("@ __cmpxchg1\n"
@@ -404,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 			: "memory", "cc");
 		} while (res);
 		break;
-#endif /* CONFIG_CPU_32v6K */
+#endif
 	case 4:
 		do {
 			asm volatile("@ __cmpxchg4\n"
@@ -450,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	unsigned long ret;
 
 	switch (size) {
-#ifndef CONFIG_CPU_32v6K
+#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
 	case 1:
 	case 2:
 		ret = __cmpxchg_local_generic(ptr, old, new, size);
 		break;
-#endif /* !CONFIG_CPU_32v6K */
+#endif
 	default:
 		ret = __cmpxchg(ptr, old, new, size);
 	}
@@ -469,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 				       (unsigned long)(n),		\
 				       sizeof(*(ptr))))
 
-#ifdef CONFIG_CPU_32v6K
+#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 
 /*
  * Note : ARMv7-M (currently unsupported by Linux) does not support
@@ -524,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
 					 (unsigned long long)(o),	\
 					 (unsigned long long)(n)))
 
-#else /* !CONFIG_CPU_32v6K */
+#else /* min ARCH = ARMv6 */
 
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
-#endif /* CONFIG_CPU_32v6K */
+#endif
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 
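The cmpxchg() hunks above only move the preprocessor guards from CONFIG_CPU_32v6K to the new CPU_V6/V6K split; the calling convention is unchanged. A usage sketch of that convention, with a hypothetical helper for illustration:

```c
/*
 * Usage sketch only: cmpxchg(ptr, old, new) stores "new" at *ptr iff the
 * current value still equals "old", and always returns the value it found
 * there.  A typical retry loop built on it looks like this:
 */
static unsigned long saturating_inc(unsigned long *ctr, unsigned long limit)
{
	unsigned long old, seen;

	do {
		old = *ctr;
		if (old == limit)
			return old;		/* already at the limit */
		seen = cmpxchg(ctr, old, old + 1);
	} while (seen != old);			/* lost a race, try again */

	return old + 1;
}
```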
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index e71d6ff8d104..60843eb0f61c 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -28,15 +28,14 @@
 #define tls_emu		1
 #define has_tls_reg		1
 #define set_tls		set_tls_none
-#elif __LINUX_ARM_ARCH__ >= 7 ||		\
-	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
-#define tls_emu		0
-#define has_tls_reg		1
-#define set_tls		set_tls_v6k
-#elif __LINUX_ARM_ARCH__ == 6
+#elif defined(CONFIG_CPU_V6)
 #define tls_emu		0
 #define has_tls_reg		(elf_hwcap & HWCAP_TLS)
 #define set_tls		set_tls_v6
+#elif defined(CONFIG_CPU_32v6K)
+#define tls_emu		0
+#define has_tls_reg		1
+#define set_tls		set_tls_v6k
 #else
 #define tls_emu		0
 #define has_tls_reg		0
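The reordered tls.h cases pick set_tls_v6k whenever the build guarantees the CP15 thread registers, and keep the runtime hwcap test only for plain CONFIG_CPU_V6 builds. For context, reading the user-visible TLS register on a V6K/V7 CPU looks roughly like this (sketch, not part of the patch):

```c
/*
 * Sketch only: TPIDRURO (CP15 c13, c0, 3) is the read-only-from-user thread
 * register that set_tls_v6k() maintains on context switch.
 */
static inline unsigned long read_tls_pointer(void)
{
	unsigned long tp;

	__asm__("mrc	p15, 0, %0, c13, c0, 3" : "=r" (tp));
	return tp;
}
```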
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index e5e1e5387678..d5d4185f0c24 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -140,24 +140,18 @@ EXPORT_SYMBOL(__aeabi_ulcmp);
 #endif
 
 	/* bitops */
-EXPORT_SYMBOL(_set_bit_le);
-EXPORT_SYMBOL(_test_and_set_bit_le);
-EXPORT_SYMBOL(_clear_bit_le);
-EXPORT_SYMBOL(_test_and_clear_bit_le);
-EXPORT_SYMBOL(_change_bit_le);
-EXPORT_SYMBOL(_test_and_change_bit_le);
+EXPORT_SYMBOL(_set_bit);
+EXPORT_SYMBOL(_test_and_set_bit);
+EXPORT_SYMBOL(_clear_bit);
+EXPORT_SYMBOL(_test_and_clear_bit);
+EXPORT_SYMBOL(_change_bit);
+EXPORT_SYMBOL(_test_and_change_bit);
 EXPORT_SYMBOL(_find_first_zero_bit_le);
 EXPORT_SYMBOL(_find_next_zero_bit_le);
 EXPORT_SYMBOL(_find_first_bit_le);
 EXPORT_SYMBOL(_find_next_bit_le);
 
 #ifdef __ARMEB__
-EXPORT_SYMBOL(_set_bit_be);
-EXPORT_SYMBOL(_test_and_set_bit_be);
-EXPORT_SYMBOL(_clear_bit_be);
-EXPORT_SYMBOL(_test_and_clear_bit_be);
-EXPORT_SYMBOL(_change_bit_be);
-EXPORT_SYMBOL(_test_and_change_bit_be);
 EXPORT_SYMBOL(_find_first_zero_bit_be);
 EXPORT_SYMBOL(_find_next_zero_bit_be);
 EXPORT_SYMBOL(_find_first_bit_be);
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index a0f07521ca8a..d2d983be096d 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -25,7 +25,7 @@
 		.macro	addruart, rp, rv
 		.endm
 
-#if defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 
 		.macro	senduart, rd, rx
 		mcr	p14, 0, \rd, c0, c5, 0
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index ae9464900168..051166c2a932 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -76,13 +76,13 @@
 #ifndef CONFIG_THUMB2_KERNEL
 	.macro	svc_exit, rpsr
 	msr	spsr_cxsf, \rpsr
-#if defined(CONFIG_CPU_32v6K)
-	clrex				@ clear the exclusive monitor
-	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
-#elif defined (CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6)
 	ldr	r0, [sp]
 	strex	r1, r2, [sp]		@ clear the exclusive monitor
 	ldmib	sp, {r1 - pc}^		@ load r1 - pc, cpsr
+#elif defined(CONFIG_CPU_32v6K)
+	clrex				@ clear the exclusive monitor
+	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
 #else
 	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
 #endif
@@ -92,10 +92,10 @@
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
-#if defined(CONFIG_CPU_32v6K)
-	clrex					@ clear the exclusive monitor
-#elif defined (CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6)
 	strex	r1, r2, [sp]			@ clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
 #endif
 	.if	\fast
 	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index c058bfc8532b..6fc2d228db55 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -30,7 +30,7 @@
  * enable the interrupt.
  */
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 enum armv6_perf_types {
 	ARMV6_PERFCTR_ICACHE_MISS	= 0x0,
 	ARMV6_PERFCTR_IBUF_STALL	= 0x1,
@@ -669,4 +669,4 @@ static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return NULL;
 }
-#endif	/* CONFIG_CPU_V6 */
+#endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index d42252918bfb..10d868a5a481 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,44 +1,52 @@
-
-#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K)
+#if __LINUX_ARM_ARCH__ >= 6
 	.macro	bitop, instr
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	mov	r2, #1
-	and	r3, r0, #7		@ Get bit offset
-	add	r1, r1, r0, lsr #3	@ Get byte offset
+	and	r3, r0, #31		@ Get bit offset
+	mov	r0, r0, lsr #5
+	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3
-1:	ldrexb	r2, [r1]
+1:	ldrex	r2, [r1]
 	\instr	r2, r2, r3
-	strexb	r0, r2, [r1]
+	strex	r0, r2, [r1]
 	cmp	r0, #0
 	bne	1b
-	mov	pc, lr
+	bx	lr
 	.endm
 
 	.macro	testop, instr, store
-	and	r3, r0, #7		@ Get bit offset
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	mov	r2, #1
-	add	r1, r1, r0, lsr #3	@ Get byte offset
+	and	r3, r0, #31		@ Get bit offset
+	mov	r0, r0, lsr #5
+	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3		@ create mask
 	smp_dmb
-1:	ldrexb	r2, [r1]
+1:	ldrex	r2, [r1]
 	ands	r0, r2, r3		@ save old value of bit
 	\instr	r2, r2, r3		@ toggle bit
-	strexb	ip, r2, [r1]
+	strex	ip, r2, [r1]
 	cmp	ip, #0
 	bne	1b
 	smp_dmb
 	cmp	r0, #0
 	movne	r0, #1
-2:	mov	pc, lr
+2:	bx	lr
 	.endm
 #else
 	.macro	bitop, instr
-	and	r2, r0, #7
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
+	and	r2, r0, #31
+	mov	r0, r0, lsr #5
 	mov	r3, #1
 	mov	r3, r3, lsl r2
 	save_and_disable_irqs ip
-	ldrb	r2, [r1, r0, lsr #3]
+	ldr	r2, [r1, r0, lsl #2]
 	\instr	r2, r2, r3
-	strb	r2, [r1, r0, lsr #3]
+	str	r2, [r1, r0, lsl #2]
 	restore_irqs ip
 	mov	pc, lr
 	.endm
@@ -52,11 +60,13 @@
  * to avoid dirtying the data cache.
  */
 	.macro	testop, instr, store
-	add	r1, r1, r0, lsr #3
-	and	r3, r0, #7
-	mov	r0, #1
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
+	and	r3, r0, #31
+	mov	r0, r0, lsr #5
 	save_and_disable_irqs ip
-	ldrb	r2, [r1]
+	ldr	r2, [r1, r0, lsl #2]!
+	mov	r0, #1
 	tst	r2, r0, lsl r3
 	\instr	r2, r2, r0, lsl r3
 	\store	r2, [r1]
diff --git a/arch/arm/lib/changebit.S b/arch/arm/lib/changebit.S
index 80f3115cbee2..68ed5b62e839 100644
--- a/arch/arm/lib/changebit.S
+++ b/arch/arm/lib/changebit.S
@@ -12,12 +12,6 @@
 #include "bitops.h"
 		.text
 
-/* Purpose  : Function to change a bit
- * Prototype: int change_bit(int bit, void *addr)
- */
-ENTRY(_change_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_change_bit_le)
+ENTRY(_change_bit)
 	bitop	eor
-ENDPROC(_change_bit_be)
-ENDPROC(_change_bit_le)
+ENDPROC(_change_bit)
diff --git a/arch/arm/lib/clearbit.S b/arch/arm/lib/clearbit.S
index 1a63e43a1df0..4c04c3b51eeb 100644
--- a/arch/arm/lib/clearbit.S
+++ b/arch/arm/lib/clearbit.S
@@ -12,13 +12,6 @@
 #include "bitops.h"
 		.text
 
-/*
- * Purpose  : Function to clear a bit
- * Prototype: int clear_bit(int bit, void *addr)
- */
-ENTRY(_clear_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_clear_bit_le)
+ENTRY(_clear_bit)
 	bitop	bic
-ENDPROC(_clear_bit_be)
-ENDPROC(_clear_bit_le)
+ENDPROC(_clear_bit)
diff --git a/arch/arm/lib/setbit.S b/arch/arm/lib/setbit.S
index 1dd7176c4b2b..bbee5c66a23e 100644
--- a/arch/arm/lib/setbit.S
+++ b/arch/arm/lib/setbit.S
@@ -12,13 +12,6 @@
 #include "bitops.h"
 		.text
 
-/*
- * Purpose  : Function to set a bit
- * Prototype: int set_bit(int bit, void *addr)
- */
-ENTRY(_set_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_set_bit_le)
+ENTRY(_set_bit)
 	bitop	orr
-ENDPROC(_set_bit_be)
-ENDPROC(_set_bit_le)
+ENDPROC(_set_bit)
diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
index 5c98dc567f0f..15a4d431f229 100644
--- a/arch/arm/lib/testchangebit.S
+++ b/arch/arm/lib/testchangebit.S
@@ -12,9 +12,6 @@
 #include "bitops.h"
 		.text
 
-ENTRY(_test_and_change_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_test_and_change_bit_le)
-	testop	eor, strb
-ENDPROC(_test_and_change_bit_be)
-ENDPROC(_test_and_change_bit_le)
+ENTRY(_test_and_change_bit)
+	testop	eor, str
+ENDPROC(_test_and_change_bit)
diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
index 543d7094d18e..521b66b5b95d 100644
--- a/arch/arm/lib/testclearbit.S
+++ b/arch/arm/lib/testclearbit.S
@@ -12,9 +12,6 @@
 #include "bitops.h"
 		.text
 
-ENTRY(_test_and_clear_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_test_and_clear_bit_le)
-	testop	bicne, strneb
-ENDPROC(_test_and_clear_bit_be)
-ENDPROC(_test_and_clear_bit_le)
+ENTRY(_test_and_clear_bit)
+	testop	bicne, strne
+ENDPROC(_test_and_clear_bit)
diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
index 0b3f390401ce..1c98cc2185bb 100644
--- a/arch/arm/lib/testsetbit.S
+++ b/arch/arm/lib/testsetbit.S
@@ -12,9 +12,6 @@
 #include "bitops.h"
 		.text
 
-ENTRY(_test_and_set_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_test_and_set_bit_le)
-	testop	orreq, streqb
-ENDPROC(_test_and_set_bit_be)
-ENDPROC(_test_and_set_bit_le)
+ENTRY(_test_and_set_bit)
+	testop	orreq, streq
+ENDPROC(_test_and_set_bit)
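All six out-of-line bitops now share the word-based addressing set up in lib/bitops.h above, so the big-endian byte-swizzling entry points disappear. A C model of that addressing for 32-bit ARM, where unsigned long is 32 bits (illustration only):

```c
/*
 * Illustration only: bit "nr" of a bitmap lives in 32-bit word nr / 32 at
 * bit position nr % 32, matching the "lsr #5" / "and #31" in the assembly.
 * The strneb to a near-NULL address in the macros is a deliberate fault
 * that asserts the bitmap pointer is word-aligned.
 */
static int test_bit_model(int nr, const unsigned long *p)
{
	unsigned long word = p[nr >> 5];		/* word offset: nr / 32 */
	unsigned long mask = 1UL << (nr & 31);		/* bit offset:  nr % 32 */

	return (word & mask) != 0;
}
```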
diff --git a/arch/arm/mach-dove/Kconfig b/arch/arm/mach-dove/Kconfig
index a4ed3900912a..dd937c526a45 100644
--- a/arch/arm/mach-dove/Kconfig
+++ b/arch/arm/mach-dove/Kconfig
@@ -9,7 +9,7 @@ config MACH_DOVE_DB
 	  Say 'Y' here if you want your kernel to support the
 	  Marvell DB-MV88AP510 Development Board.
 
-config MACH_CM_A510
+config MACH_CM_A510
 	bool "CompuLab CM-A510 Board"
 	help
 	  Say 'Y' here if you want your kernel to support the
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index 7ca138a943a9..b9a9805e4828 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -19,7 +19,7 @@ config REALVIEW_EB_A9MP
 config REALVIEW_EB_ARM11MP
 	bool "Support ARM11MPCore Tile"
 	depends on MACH_REALVIEW_EB
-	select CPU_V6
+	select CPU_V6K
 	select ARCH_HAS_BARRIERS if SMP
 	help
 	  Enable support for the ARM11MPCore tile fitted to the Realview(R)
@@ -36,7 +36,7 @@ config REALVIEW_EB_ARM11MP_REVB
 
 config MACH_REALVIEW_PB11MP
 	bool "Support RealView(R) Platform Baseboard for ARM11MPCore"
-	select CPU_V6
+	select CPU_V6K
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
 	select ARCH_HAS_BARRIERS if SMP
@@ -45,6 +45,7 @@ config MACH_REALVIEW_PB11MP
 	  the ARM11MPCore. This platform has an on-board ARM11MPCore and has
 	  support for PCI-E and Compact Flash.
 
+# ARMv6 CPU without K extensions, but does have the new exclusive ops
 config MACH_REALVIEW_PB1176
 	bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S"
 	select CPU_V6
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 05b26a03c209..89266382b536 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -390,7 +390,7 @@ config CPU_PJ4
 
 # ARMv6
 config CPU_V6
-	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE
+	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
 	select CPU_32v6
 	select CPU_ABRT_EV6
 	select CPU_PABRT_V6
@@ -402,16 +402,18 @@ config CPU_V6
 	select CPU_TLB_V6 if MMU
 
 # ARMv6k
-config CPU_32v6K
-	bool "Support ARM V6K processor extensions" if !SMP
-	depends on CPU_V6 || CPU_V7
-	default y if SMP
-	help
-	  Say Y here if your ARMv6 processor supports the 'K' extension.
-	  This enables the kernel to use some instructions not present
-	  on previous processors, and as such a kernel build with this
-	  enabled will not boot on processors with do not support these
-	  instructions.
+config CPU_V6K
+	bool "Support ARM V6K processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
+	select CPU_32v6
+	select CPU_32v6K
+	select CPU_ABRT_EV6
+	select CPU_PABRT_V6
+	select CPU_CACHE_V6
+	select CPU_CACHE_VIPT
+	select CPU_CP15_MMU
+	select CPU_HAS_ASID if MMU
+	select CPU_COPY_V6 if MMU
+	select CPU_TLB_V6 if MMU
 
 # ARMv7
 config CPU_V7
@@ -433,25 +435,33 @@ config CPU_32v3
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+	select CPU_USE_DOMAINS if MMU
 
 config CPU_32v4
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+	select CPU_USE_DOMAINS if MMU
 
 config CPU_32v4T
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+	select CPU_USE_DOMAINS if MMU
 
 config CPU_32v5
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+	select CPU_USE_DOMAINS if MMU
 
 config CPU_32v6
 	bool
 	select TLS_REG_EMUL if !CPU_32v6K && !MMU
+	select CPU_USE_DOMAINS if CPU_V6 && MMU
+
+config CPU_32v6K
+	bool
 
 config CPU_32v7
 	bool
@@ -607,8 +617,6 @@ config CPU_CP15_MPU
 
 config CPU_USE_DOMAINS
 	bool
-	depends on MMU
-	default y if !CPU_32v6K
 	help
 	  This option enables or disables the use of domain switching
 	  via the set_fs() function.
@@ -623,7 +631,7 @@ comment "Processor Features"
 
 config ARM_THUMB
 	bool "Support Thumb user binaries"
-	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V7 || CPU_FEROCEON
+	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
 	default y
 	help
 	  Say Y if you want to include kernel support for running user space
@@ -644,7 +652,7 @@ config ARM_THUMBEE
 
 config SWP_EMULATE
 	bool "Emulate SWP/SWPB instructions"
-	depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6
+	depends on !CPU_USE_DOMAINS && CPU_V7
 	select HAVE_PROC_CPU if PROC_FS
 	default y if SMP
 	help
@@ -681,7 +689,7 @@ config CPU_BIG_ENDIAN
 config CPU_ENDIAN_BE8
 	bool
 	depends on CPU_BIG_ENDIAN
-	default CPU_V6 || CPU_V7
+	default CPU_V6 || CPU_V6K || CPU_V7
 	help
 	  Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors.
 
@@ -747,7 +755,7 @@ config CPU_CACHE_ROUND_ROBIN
 
 config CPU_BPREDICT_DISABLE
 	bool "Disable branch prediction"
-	depends on CPU_ARM1020 || CPU_V6 || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526
+	depends on CPU_ARM1020 || CPU_V6 || CPU_V6K || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526
 	help
 	  Say Y here to disable branch prediction. If unsure, say N.
 
@@ -767,7 +775,7 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 
 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
-	depends on CPU_V6 && SMP
+	depends on CPU_V6K && SMP
 	default y
 	help
 	  The Snoop Control Unit on ARM11MPCore does not detect the
@@ -823,7 +831,7 @@ config CACHE_L2X0
 config CACHE_PL310
 	bool
 	depends on CACHE_L2X0
-	default y if CPU_V7 && !CPU_V6
+	default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
 	help
 	  This option enables optimisations for the PL310 cache
 	  controller.
@@ -856,10 +864,10 @@ config ARM_L1_CACHE_SHIFT
 	default 5
 
 config ARM_DMA_MEM_BUFFERABLE
-	bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+	bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7
 	depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \
 		     MACH_REALVIEW_PB11MP)
-	default y if CPU_V6 || CPU_V7
+	default y if CPU_V6 || CPU_V6K || CPU_V7
 	help
 	  Historically, the kernel has used strongly ordered mappings to
 	  provide DMA coherent memory. With the advent of ARMv7, mapping
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 00d74a04af3a..bca7e61928c7 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -90,6 +90,7 @@ obj-$(CONFIG_CPU_XSC3)		+= proc-xsc3.o
 obj-$(CONFIG_CPU_MOHAWK)	+= proc-mohawk.o
 obj-$(CONFIG_CPU_FEROCEON)	+= proc-feroceon.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
+obj-$(CONFIG_CPU_V6K)		+= proc-v6.o
 obj-$(CONFIG_CPU_V7)		+= proc-v7.o
 
 AFLAGS_proc-v6.o	:=-Wa,-march=armv6
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index f332df7f0d37..1478aa522144 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -20,11 +20,11 @@
  */
 	.align	5
 ENTRY(v6_early_abort)
-#ifdef CONFIG_CPU_32v6K
-	clrex
-#else
+#ifdef CONFIG_CPU_V6
 	sub	r1, sp, #4		@ Get unused stack location
 	strex	r0, r1, [r1]		@ Clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+	clrex
 #endif
 	mrc	p15, 0, r1, c5, c0, 0	@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0	@ get FAR
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index b0a98305055c..afe209e1e1f8 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -31,7 +31,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 	unsigned int cache_type;
 	int do_align = 0, aliasing = 0;
 