author     Russell King <rmk+kernel@arm.linux.org.uk>   2010-02-25 17:09:41 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2010-02-25 17:09:41 -0500
commit     2741ecb4ce5c2d430b5c44b0a169038338c21df5
tree       4aa71d7551184ee88f32c7f3660d821133058c32 /arch/arm
parent     bc85e585c6d0fab4bde12d60964b2f25802c3163
parent     5de813b6cd06460b337f9da9afe316823cf3ef45
Merge branch 'misc2' into devel
Diffstat (limited to 'arch/arm')
70 files changed, 1068 insertions, 561 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 100b90f3778a..e932da033499 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -12,6 +12,7 @@ config ARM | |||
12 | select HAVE_IDE | 12 | select HAVE_IDE |
13 | select RTC_LIB | 13 | select RTC_LIB |
14 | select SYS_SUPPORTS_APM_EMULATION | 14 | select SYS_SUPPORTS_APM_EMULATION |
15 | select GENERIC_ATOMIC64 if (!CPU_32v6K) | ||
15 | select HAVE_OPROFILE | 16 | select HAVE_OPROFILE |
16 | select HAVE_ARCH_KGDB | 17 | select HAVE_ARCH_KGDB |
17 | select HAVE_KPROBES if (!XIP_KERNEL) | 18 | select HAVE_KPROBES if (!XIP_KERNEL) |
@@ -54,6 +55,9 @@ config HAVE_TCM | |||
54 | bool | 55 | bool |
55 | select GENERIC_ALLOCATOR | 56 | select GENERIC_ALLOCATOR |
56 | 57 | ||
58 | config HAVE_PROC_CPU | ||
59 | bool | ||
60 | |||
57 | config NO_IOPORT | 61 | config NO_IOPORT |
58 | bool | 62 | bool |
59 | 63 | ||
@@ -163,6 +167,11 @@ config ARCH_MTD_XIP | |||
163 | config GENERIC_HARDIRQS_NO__DO_IRQ | 167 | config GENERIC_HARDIRQS_NO__DO_IRQ |
164 | def_bool y | 168 | def_bool y |
165 | 169 | ||
170 | config ARM_L1_CACHE_SHIFT_6 | ||
171 | bool | ||
172 | help | ||
173 | Setting ARM L1 cache line size to 64 Bytes. | ||
174 | |||
166 | if OPROFILE | 175 | if OPROFILE |
167 | 176 | ||
168 | config OPROFILE_ARMV6 | 177 | config OPROFILE_ARMV6 |
@@ -649,6 +658,7 @@ config ARCH_S5PC1XX | |||
649 | select GENERIC_GPIO | 658 | select GENERIC_GPIO |
650 | select HAVE_CLK | 659 | select HAVE_CLK |
651 | select CPU_V7 | 660 | select CPU_V7 |
661 | select ARM_L1_CACHE_SHIFT_6 | ||
652 | help | 662 | help |
653 | Samsung S5PC1XX series based systems | 663 | Samsung S5PC1XX series based systems |
654 | 664 | ||
@@ -938,6 +948,19 @@ config ARM_ERRATA_460075 | |||
938 | ACTLR register. Note that setting specific bits in the ACTLR register | 948 | ACTLR register. Note that setting specific bits in the ACTLR register |
939 | may not be available in non-secure mode. | 949 | may not be available in non-secure mode. |
940 | 950 | ||
951 | config PL310_ERRATA_588369 | ||
952 | bool "Clean & Invalidate maintenance operations do not invalidate clean lines" | ||
953 | depends on CACHE_L2X0 && ARCH_OMAP4 | ||
954 | help | ||
955 | The PL310 L2 cache controller implements three types of Clean & | ||
956 | Invalidate maintenance operations: by Physical Address | ||
957 | (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC). | ||
958 | They are architecturally defined to behave as the execution of a | ||
959 | clean operation followed immediately by an invalidate operation, | ||
960 | both performing to the same memory location. This functionality | ||
961 | is not correctly implemented in PL310 as clean lines are not | ||
962 | invalidated as a result of these operations. Note that this errata | ||
963 | uses Texas Instrument's secure monitor api. | ||
941 | endmenu | 964 | endmenu |
942 | 965 | ||
943 | source "arch/arm/common/Kconfig" | 966 | source "arch/arm/common/Kconfig" |
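The PL310_ERRATA_588369 help text above says the combined Clean & Invalidate operations leave clean lines valid in the L2 cache. The usual shape of a software workaround is to split the combined operation into an explicit clean followed by an explicit invalidate. The sketch below is illustrative only: the separate clean-by-PA and invalidate-by-PA register offsets and the writel()/base mapping are assumptions, and the real OMAP4 workaround goes through the TI secure monitor mentioned in the help text rather than direct register writes.

#include <linux/io.h>

/*
 * Illustrative sketch, not the actual workaround code.  Only 0x7F0
 * (combined clean+invalidate by PA) comes from the help text above;
 * the separate-operation offsets are assumed for illustration.
 */
#define L2X0_INV_LINE_PA	0x770	/* assumed */
#define L2X0_CLEAN_LINE_PA	0x7B0	/* assumed */
#define L2X0_CLEAN_INV_LINE_PA	0x7F0

static inline void l2x0_flush_line(void __iomem *l2_base, unsigned long pa)
{
#ifdef CONFIG_PL310_ERRATA_588369
	/* erratum 588369: the combined op leaves the line valid, so split it */
	writel(pa, l2_base + L2X0_CLEAN_LINE_PA);
	writel(pa, l2_base + L2X0_INV_LINE_PA);
#else
	writel(pa, l2_base + L2X0_CLEAN_INV_LINE_PA);
#endif
}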
@@ -1255,6 +1278,7 @@ config ALIGNMENT_TRAP | |||
1255 | bool | 1278 | bool |
1256 | depends on CPU_CP15_MMU | 1279 | depends on CPU_CP15_MMU |
1257 | default y if !ARCH_EBSA110 | 1280 | default y if !ARCH_EBSA110 |
1281 | select HAVE_PROC_CPU if PROC_FS | ||
1258 | help | 1282 | help |
1259 | ARM processors cannot fetch/store information which is not | 1283 | ARM processors cannot fetch/store information which is not |
1260 | naturally aligned on the bus, i.e., a 4 byte fetch must start at an | 1284 | naturally aligned on the bus, i.e., a 4 byte fetch must start at an |
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 2d4d88ba73bf..97c89e7de7d3 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -5,7 +5,7 @@ | |||
5 | # | 5 | # |
6 | 6 | ||
7 | HEAD = head.o | 7 | HEAD = head.o |
8 | OBJS = misc.o | 8 | OBJS = misc.o decompress.o |
9 | FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c | 9 | FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c |
10 | 10 | ||
11 | # | 11 | # |
@@ -106,10 +106,6 @@ lib1funcs = $(obj)/lib1funcs.o | |||
106 | $(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S FORCE | 106 | $(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S FORCE |
107 | $(call cmd,shipped) | 107 | $(call cmd,shipped) |
108 | 108 | ||
109 | # Don't allow any static data in misc.o, which | ||
110 | # would otherwise mess up our GOT table | ||
111 | CFLAGS_misc.o := -Dstatic= | ||
112 | |||
113 | $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \ | 109 | $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \ |
114 | $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE | 110 | $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE |
115 | $(call if_changed,ld) | 111 | $(call if_changed,ld) |
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
new file mode 100644
index 000000000000..0da382f33157
--- /dev/null
+++ b/arch/arm/boot/compressed/decompress.c
@@ -0,0 +1,45 @@ | |||
1 | #define _LINUX_STRING_H_ | ||
2 | |||
3 | #include <linux/compiler.h> /* for inline */ | ||
4 | #include <linux/types.h> /* for size_t */ | ||
5 | #include <linux/stddef.h> /* for NULL */ | ||
6 | #include <linux/linkage.h> | ||
7 | #include <asm/string.h> | ||
8 | |||
9 | extern unsigned long free_mem_ptr; | ||
10 | extern unsigned long free_mem_end_ptr; | ||
11 | extern void error(char *); | ||
12 | |||
13 | #define STATIC static | ||
14 | |||
15 | #define ARCH_HAS_DECOMP_WDOG | ||
16 | |||
17 | /* Diagnostic functions */ | ||
18 | #ifdef DEBUG | ||
19 | # define Assert(cond,msg) {if(!(cond)) error(msg);} | ||
20 | # define Trace(x) fprintf x | ||
21 | # define Tracev(x) {if (verbose) fprintf x ;} | ||
22 | # define Tracevv(x) {if (verbose>1) fprintf x ;} | ||
23 | # define Tracec(c,x) {if (verbose && (c)) fprintf x ;} | ||
24 | # define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} | ||
25 | #else | ||
26 | # define Assert(cond,msg) | ||
27 | # define Trace(x) | ||
28 | # define Tracev(x) | ||
29 | # define Tracevv(x) | ||
30 | # define Tracec(c,x) | ||
31 | # define Tracecv(c,x) | ||
32 | #endif | ||
33 | |||
34 | #ifdef CONFIG_KERNEL_GZIP | ||
35 | #include "../../../../lib/decompress_inflate.c" | ||
36 | #endif | ||
37 | |||
38 | #ifdef CONFIG_KERNEL_LZO | ||
39 | #include "../../../../lib/decompress_unlzo.c" | ||
40 | #endif | ||
41 | |||
42 | void do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) | ||
43 | { | ||
44 | decompress(input, len, NULL, NULL, output, NULL, error); | ||
45 | } | ||
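do_decompress() above is the only entry point misc.c needs; whichever lib/decompress_*.c file is included supplies the decompress() routine it calls. Judging from the seven-argument call, the shared entry-point shape is roughly the prototype sketched below (treat it as inferred, not authoritative); the fill/flush callbacks and the consumed-bytes pointer are unused here, hence the NULL arguments passed by do_decompress().

/*
 * Inferred shape of the decompressor entry point provided by the included
 * lib/decompress_*.c file -- a sketch for orientation, not the official
 * prototype.
 */
int decompress(unsigned char *inbuf, int len,
	       int (*fill)(void *buf, unsigned int size),   /* refill input; NULL here */
	       int (*flush)(void *buf, unsigned int size),  /* drain output; NULL here */
	       unsigned char *outbuf,                        /* decompressed kernel image */
	       int *posp,                                    /* bytes consumed; NULL here */
	       void (*error)(char *x));                      /* fatal error callback */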
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index 56a0d116d271..d32bc71c1f78 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -23,8 +23,8 @@ unsigned int __machine_arch_type; | |||
23 | #include <linux/compiler.h> /* for inline */ | 23 | #include <linux/compiler.h> /* for inline */ |
24 | #include <linux/types.h> /* for size_t */ | 24 | #include <linux/types.h> /* for size_t */ |
25 | #include <linux/stddef.h> /* for NULL */ | 25 | #include <linux/stddef.h> /* for NULL */ |
26 | #include <asm/string.h> | ||
27 | #include <linux/linkage.h> | 26 | #include <linux/linkage.h> |
27 | #include <asm/string.h> | ||
28 | 28 | ||
29 | #include <asm/unaligned.h> | 29 | #include <asm/unaligned.h> |
30 | 30 | ||
@@ -117,57 +117,7 @@ static void putstr(const char *ptr) | |||
117 | 117 | ||
118 | #endif | 118 | #endif |
119 | 119 | ||
120 | #define __ptr_t void * | 120 | void *memcpy(void *__dest, __const void *__src, size_t __n) |
121 | |||
122 | #define memzero(s,n) __memzero(s,n) | ||
123 | |||
124 | /* | ||
125 | * Optimised C version of memzero for the ARM. | ||
126 | */ | ||
127 | void __memzero (__ptr_t s, size_t n) | ||
128 | { | ||
129 | union { void *vp; unsigned long *ulp; unsigned char *ucp; } u; | ||
130 | int i; | ||
131 | |||
132 | u.vp = s; | ||
133 | |||
134 | for (i = n >> 5; i > 0; i--) { | ||
135 | *u.ulp++ = 0; | ||
136 | *u.ulp++ = 0; | ||
137 | *u.ulp++ = 0; | ||
138 | *u.ulp++ = 0; | ||
139 | *u.ulp++ = 0; | ||
140 | *u.ulp++ = 0; | ||
141 | *u.ulp++ = 0; | ||
142 | *u.ulp++ = 0; | ||
143 | } | ||
144 | |||
145 | if (n & 1 << 4) { | ||
146 | *u.ulp++ = 0; | ||
147 | *u.ulp++ = 0; | ||
148 | *u.ulp++ = 0; | ||
149 | *u.ulp++ = 0; | ||
150 | } | ||
151 | |||
152 | if (n & 1 << 3) { | ||
153 | *u.ulp++ = 0; | ||
154 | *u.ulp++ = 0; | ||
155 | } | ||
156 | |||
157 | if (n & 1 << 2) | ||
158 | *u.ulp++ = 0; | ||
159 | |||
160 | if (n & 1 << 1) { | ||
161 | *u.ucp++ = 0; | ||
162 | *u.ucp++ = 0; | ||
163 | } | ||
164 | |||
165 | if (n & 1) | ||
166 | *u.ucp++ = 0; | ||
167 | } | ||
168 | |||
169 | static inline __ptr_t memcpy(__ptr_t __dest, __const __ptr_t __src, | ||
170 | size_t __n) | ||
171 | { | 121 | { |
172 | int i = 0; | 122 | int i = 0; |
173 | unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src; | 123 | unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src; |
@@ -204,59 +154,20 @@ static inline __ptr_t memcpy(__ptr_t __dest, __const __ptr_t __src, | |||
204 | /* | 154 | /* |
205 | * gzip delarations | 155 | * gzip delarations |
206 | */ | 156 | */ |
207 | #define STATIC static | ||
208 | |||
209 | /* Diagnostic functions */ | ||
210 | #ifdef DEBUG | ||
211 | # define Assert(cond,msg) {if(!(cond)) error(msg);} | ||
212 | # define Trace(x) fprintf x | ||
213 | # define Tracev(x) {if (verbose) fprintf x ;} | ||
214 | # define Tracevv(x) {if (verbose>1) fprintf x ;} | ||
215 | # define Tracec(c,x) {if (verbose && (c)) fprintf x ;} | ||
216 | # define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} | ||
217 | #else | ||
218 | # define Assert(cond,msg) | ||
219 | # define Trace(x) | ||
220 | # define Tracev(x) | ||
221 | # define Tracevv(x) | ||
222 | # define Tracec(c,x) | ||
223 | # define Tracecv(c,x) | ||
224 | #endif | ||
225 | |||
226 | static void error(char *m); | ||
227 | |||
228 | extern char input_data[]; | 157 | extern char input_data[]; |
229 | extern char input_data_end[]; | 158 | extern char input_data_end[]; |
230 | 159 | ||
231 | static unsigned char *output_data; | 160 | unsigned char *output_data; |
232 | static unsigned long output_ptr; | 161 | unsigned long output_ptr; |
233 | |||
234 | static void error(char *m); | ||
235 | 162 | ||
236 | static void putstr(const char *); | 163 | unsigned long free_mem_ptr; |
237 | 164 | unsigned long free_mem_end_ptr; | |
238 | static unsigned long free_mem_ptr; | ||
239 | static unsigned long free_mem_end_ptr; | ||
240 | |||
241 | #ifdef STANDALONE_DEBUG | ||
242 | #define NO_INFLATE_MALLOC | ||
243 | #endif | ||
244 | |||
245 | #define ARCH_HAS_DECOMP_WDOG | ||
246 | |||
247 | #ifdef CONFIG_KERNEL_GZIP | ||
248 | #include "../../../../lib/decompress_inflate.c" | ||
249 | #endif | ||
250 | |||
251 | #ifdef CONFIG_KERNEL_LZO | ||
252 | #include "../../../../lib/decompress_unlzo.c" | ||
253 | #endif | ||
254 | 165 | ||
255 | #ifndef arch_error | 166 | #ifndef arch_error |
256 | #define arch_error(x) | 167 | #define arch_error(x) |
257 | #endif | 168 | #endif |
258 | 169 | ||
259 | static void error(char *x) | 170 | void error(char *x) |
260 | { | 171 | { |
261 | arch_error(x); | 172 | arch_error(x); |
262 | 173 | ||
@@ -272,6 +183,8 @@ asmlinkage void __div0(void) | |||
272 | error("Attempting division by 0!"); | 183 | error("Attempting division by 0!"); |
273 | } | 184 | } |
274 | 185 | ||
186 | extern void do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)); | ||
187 | |||
275 | #ifndef STANDALONE_DEBUG | 188 | #ifndef STANDALONE_DEBUG |
276 | 189 | ||
277 | unsigned long | 190 | unsigned long |
@@ -292,8 +205,8 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p, | |||
292 | output_ptr = get_unaligned_le32(tmp); | 205 | output_ptr = get_unaligned_le32(tmp); |
293 | 206 | ||
294 | putstr("Uncompressing Linux..."); | 207 | putstr("Uncompressing Linux..."); |
295 | decompress(input_data, input_data_end - input_data, | 208 | do_decompress(input_data, input_data_end - input_data, |
296 | NULL, NULL, output_data, NULL, error); | 209 | output_data, error); |
297 | putstr(" done, booting the kernel.\n"); | 210 | putstr(" done, booting the kernel.\n"); |
298 | return output_ptr; | 211 | return output_ptr; |
299 | } | 212 | } |
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index a5924b9b88bd..7ca9ecff652f 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -14,6 +14,13 @@ SECTIONS | |||
14 | /DISCARD/ : { | 14 | /DISCARD/ : { |
15 | *(.ARM.exidx*) | 15 | *(.ARM.exidx*) |
16 | *(.ARM.extab*) | 16 | *(.ARM.extab*) |
17 | /* | ||
18 | * Discard any r/w data - this produces a link error if we have any, | ||
19 | * which is required for PIC decompression. Local data generates | ||
20 | * GOTOFF relocations, which prevents it being relocated independently | ||
21 | * of the text/got segments. | ||
22 | */ | ||
23 | *(.data) | ||
17 | } | 24 | } |
18 | 25 | ||
19 | . = TEXT_START; | 26 | . = TEXT_START; |
@@ -40,7 +47,6 @@ SECTIONS | |||
40 | .got : { *(.got) } | 47 | .got : { *(.got) } |
41 | _got_end = .; | 48 | _got_end = .; |
42 | .got.plt : { *(.got.plt) } | 49 | .got.plt : { *(.got.plt) } |
43 | .data : { *(.data) } | ||
44 | _edata = .; | 50 | _edata = .; |
45 | 51 | ||
46 | . = BSS_START; | 52 | . = BSS_START; |
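The new /DISCARD/ rule, together with dropping the '.data : { *(.data) }' output section, means any writable data in the decompressor now fails at link time instead of being silently mis-relocated; that is also why the '-Dstatic=' hack could be removed from the Makefile above. As a purely hypothetical illustration, adding something like the following to misc.c would now be rejected by the linker:

/* Hypothetical addition to misc.c -- not part of this patch.  The static
 * counter lives in .data and is referenced through a GOTOFF relocation
 * tied to the text segment, so with the /DISCARD/ rule above the build
 * stops with a link error instead of producing a broken PIC decompressor. */
static int putstr_calls;

static void count_putstr(void)
{
	putstr_calls++;
}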
diff --git a/arch/arm/configs/omap_4430sdp_defconfig b/arch/arm/configs/omap_4430sdp_defconfig
index 3de640ac294b..c48d7b893869 100644
--- a/arch/arm/configs/omap_4430sdp_defconfig
+++ b/arch/arm/configs/omap_4430sdp_defconfig
@@ -242,10 +242,13 @@ CONFIG_CPU_CP15_MMU=y | |||
242 | # CONFIG_CPU_DCACHE_DISABLE is not set | 242 | # CONFIG_CPU_DCACHE_DISABLE is not set |
243 | # CONFIG_CPU_BPREDICT_DISABLE is not set | 243 | # CONFIG_CPU_BPREDICT_DISABLE is not set |
244 | CONFIG_HAS_TLS_REG=y | 244 | CONFIG_HAS_TLS_REG=y |
245 | CONFIG_OUTER_CACHE=y | ||
246 | CONFIG_CACHE_L2X0=y | ||
245 | CONFIG_ARM_L1_CACHE_SHIFT=5 | 247 | CONFIG_ARM_L1_CACHE_SHIFT=5 |
246 | # CONFIG_ARM_ERRATA_430973 is not set | 248 | # CONFIG_ARM_ERRATA_430973 is not set |
247 | # CONFIG_ARM_ERRATA_458693 is not set | 249 | # CONFIG_ARM_ERRATA_458693 is not set |
248 | # CONFIG_ARM_ERRATA_460075 is not set | 250 | # CONFIG_ARM_ERRATA_460075 is not set |
251 | CONFIG_PL310_ERRATA_588369=y | ||
249 | CONFIG_ARM_GIC=y | 252 | CONFIG_ARM_GIC=y |
250 | 253 | ||
251 | # | 254 | # |
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index d0daeab2234e..e8ddec2cb158 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) | |||
235 | #define smp_mb__before_atomic_inc() smp_mb() | 235 | #define smp_mb__before_atomic_inc() smp_mb() |
236 | #define smp_mb__after_atomic_inc() smp_mb() | 236 | #define smp_mb__after_atomic_inc() smp_mb() |
237 | 237 | ||
238 | #ifndef CONFIG_GENERIC_ATOMIC64 | ||
239 | typedef struct { | ||
240 | u64 __aligned(8) counter; | ||
241 | } atomic64_t; | ||
242 | |||
243 | #define ATOMIC64_INIT(i) { (i) } | ||
244 | |||
245 | static inline u64 atomic64_read(atomic64_t *v) | ||
246 | { | ||
247 | u64 result; | ||
248 | |||
249 | __asm__ __volatile__("@ atomic64_read\n" | ||
250 | " ldrexd %0, %H0, [%1]" | ||
251 | : "=&r" (result) | ||
252 | : "r" (&v->counter) | ||
253 | ); | ||
254 | |||
255 | return result; | ||
256 | } | ||
257 | |||
258 | static inline void atomic64_set(atomic64_t *v, u64 i) | ||
259 | { | ||
260 | u64 tmp; | ||
261 | |||
262 | __asm__ __volatile__("@ atomic64_set\n" | ||
263 | "1: ldrexd %0, %H0, [%1]\n" | ||
264 | " strexd %0, %2, %H2, [%1]\n" | ||
265 | " teq %0, #0\n" | ||
266 | " bne 1b" | ||
267 | : "=&r" (tmp) | ||
268 | : "r" (&v->counter), "r" (i) | ||
269 | : "cc"); | ||
270 | } | ||
271 | |||
272 | static inline void atomic64_add(u64 i, atomic64_t *v) | ||
273 | { | ||
274 | u64 result; | ||
275 | unsigned long tmp; | ||
276 | |||
277 | __asm__ __volatile__("@ atomic64_add\n" | ||
278 | "1: ldrexd %0, %H0, [%2]\n" | ||
279 | " adds %0, %0, %3\n" | ||
280 | " adc %H0, %H0, %H3\n" | ||
281 | " strexd %1, %0, %H0, [%2]\n" | ||
282 | " teq %1, #0\n" | ||
283 | " bne 1b" | ||
284 | : "=&r" (result), "=&r" (tmp) | ||
285 | : "r" (&v->counter), "r" (i) | ||
286 | : "cc"); | ||
287 | } | ||
288 | |||
289 | static inline u64 atomic64_add_return(u64 i, atomic64_t *v) | ||
290 | { | ||
291 | u64 result; | ||
292 | unsigned long tmp; | ||
293 | |||
294 | smp_mb(); | ||
295 | |||
296 | __asm__ __volatile__("@ atomic64_add_return\n" | ||
297 | "1: ldrexd %0, %H0, [%2]\n" | ||
298 | " adds %0, %0, %3\n" | ||
299 | " adc %H0, %H0, %H3\n" | ||
300 | " strexd %1, %0, %H0, [%2]\n" | ||
301 | " teq %1, #0\n" | ||
302 | " bne 1b" | ||
303 | : "=&r" (result), "=&r" (tmp) | ||
304 | : "r" (&v->counter), "r" (i) | ||
305 | : "cc"); | ||
306 | |||
307 | smp_mb(); | ||
308 | |||
309 | return result; | ||
310 | } | ||
311 | |||
312 | static inline void atomic64_sub(u64 i, atomic64_t *v) | ||
313 | { | ||
314 | u64 result; | ||
315 | unsigned long tmp; | ||
316 | |||
317 | __asm__ __volatile__("@ atomic64_sub\n" | ||
318 | "1: ldrexd %0, %H0, [%2]\n" | ||
319 | " subs %0, %0, %3\n" | ||
320 | " sbc %H0, %H0, %H3\n" | ||
321 | " strexd %1, %0, %H0, [%2]\n" | ||
322 | " teq %1, #0\n" | ||
323 | " bne 1b" | ||
324 | : "=&r" (result), "=&r" (tmp) | ||
325 | : "r" (&v->counter), "r" (i) | ||
326 | : "cc"); | ||
327 | } | ||
328 | |||
329 | static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) | ||
330 | { | ||
331 | u64 result; | ||
332 | unsigned long tmp; | ||
333 | |||
334 | smp_mb(); | ||
335 | |||
336 | __asm__ __volatile__("@ atomic64_sub_return\n" | ||
337 | "1: ldrexd %0, %H0, [%2]\n" | ||
338 | " subs %0, %0, %3\n" | ||
339 | " sbc %H0, %H0, %H3\n" | ||
340 | " strexd %1, %0, %H0, [%2]\n" | ||
341 | " teq %1, #0\n" | ||
342 | " bne 1b" | ||
343 | : "=&r" (result), "=&r" (tmp) | ||
344 | : "r" (&v->counter), "r" (i) | ||
345 | : "cc"); | ||
346 | |||
347 | smp_mb(); | ||
348 | |||
349 | return result; | ||
350 | } | ||
351 | |||
352 | static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) | ||
353 | { | ||
354 | u64 oldval; | ||
355 | unsigned long res; | ||
356 | |||
357 | smp_mb(); | ||
358 | |||
359 | do { | ||
360 | __asm__ __volatile__("@ atomic64_cmpxchg\n" | ||
361 | "ldrexd %1, %H1, [%2]\n" | ||
362 | "mov %0, #0\n" | ||
363 | "teq %1, %3\n" | ||
364 | "teqeq %H1, %H3\n" | ||
365 | "strexdeq %0, %4, %H4, [%2]" | ||
366 | : "=&r" (res), "=&r" (oldval) | ||
367 | : "r" (&ptr->counter), "r" (old), "r" (new) | ||
368 | : "cc"); | ||
369 | } while (res); | ||
370 | |||
371 | smp_mb(); | ||
372 | |||
373 | return oldval; | ||
374 | } | ||
375 | |||
376 | static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) | ||
377 | { | ||
378 | u64 result; | ||
379 | unsigned long tmp; | ||
380 | |||
381 | smp_mb(); | ||
382 | |||
383 | __asm__ __volatile__("@ atomic64_xchg\n" | ||
384 | "1: ldrexd %0, %H0, [%2]\n" | ||
385 | " strexd %1, %3, %H3, [%2]\n" | ||
386 | " teq %1, #0\n" | ||
387 | " bne 1b" | ||
388 | : "=&r" (result), "=&r" (tmp) | ||
389 | : "r" (&ptr->counter), "r" (new) | ||
390 | : "cc"); | ||
391 | |||
392 | smp_mb(); | ||
393 | |||
394 | return result; | ||
395 | } | ||
396 | |||
397 | static inline u64 atomic64_dec_if_positive(atomic64_t *v) | ||
398 | { | ||
399 | u64 result; | ||
400 | unsigned long tmp; | ||
401 | |||
402 | smp_mb(); | ||
403 | |||
404 | __asm__ __volatile__("@ atomic64_dec_if_positive\n" | ||
405 | "1: ldrexd %0, %H0, [%2]\n" | ||
406 | " subs %0, %0, #1\n" | ||
407 | " sbc %H0, %H0, #0\n" | ||
408 | " teq %H0, #0\n" | ||
409 | " bmi 2f\n" | ||
410 | " strexd %1, %0, %H0, [%2]\n" | ||
411 | " teq %1, #0\n" | ||
412 | " bne 1b\n" | ||
413 | "2:" | ||
414 | : "=&r" (result), "=&r" (tmp) | ||
415 | : "r" (&v->counter) | ||
416 | : "cc"); | ||
417 | |||
418 | smp_mb(); | ||
419 | |||
420 | return result; | ||
421 | } | ||
422 | |||
423 | static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) | ||
424 | { | ||
425 | u64 val; | ||
426 | unsigned long tmp; | ||
427 | int ret = 1; | ||
428 | |||
429 | smp_mb(); | ||
430 | |||
431 | __asm__ __volatile__("@ atomic64_add_unless\n" | ||
432 | "1: ldrexd %0, %H0, [%3]\n" | ||
433 | " teq %0, %4\n" | ||
434 | " teqeq %H0, %H4\n" | ||
435 | " moveq %1, #0\n" | ||
436 | " beq 2f\n" | ||
437 | " adds %0, %0, %5\n" | ||
438 | " adc %H0, %H0, %H5\n" | ||
439 | " strexd %2, %0, %H0, [%3]\n" | ||
440 | " teq %2, #0\n" | ||
441 | " bne 1b\n" | ||
442 | "2:" | ||
443 | : "=&r" (val), "=&r" (ret), "=&r" (tmp) | ||
444 | : "r" (&v->counter), "r" (u), "r" (a) | ||
445 | : "cc"); | ||
446 | |||
447 | if (ret) | ||
448 | smp_mb(); | ||
449 | |||
450 | return ret; | ||
451 | } | ||
452 | |||
453 | #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) | ||
454 | #define atomic64_inc(v) atomic64_add(1LL, (v)) | ||
455 | #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) | ||
456 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) | ||
457 | #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) | ||
458 | #define atomic64_dec(v) atomic64_sub(1LL, (v)) | ||
459 | #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) | ||
460 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) | ||
461 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) | ||
462 | |||
463 | #else /* !CONFIG_GENERIC_ATOMIC64 */ | ||
464 | #include <asm-generic/atomic64.h> | ||
465 | #endif | ||
238 | #include <asm-generic/atomic-long.h> | 466 | #include <asm-generic/atomic-long.h> |
239 | #endif | 467 | #endif |
240 | #endif | 468 | #endif |
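The atomic64_t operations above are built on LDREXD/STREXD exclusive pairs, which is why the Kconfig hunk earlier selects GENERIC_ATOMIC64 (the generic fallback pulled in via asm-generic/atomic64.h) when the CPU is not 32v6K-capable. A small, hypothetical usage sketch of the API as declared above:

#include <linux/types.h>
#include <asm/atomic.h>

/* Hypothetical 64-bit byte counter using the interface defined above. */
static atomic64_t bytes_rx = ATOMIC64_INIT(0);

static void account_rx(u64 len)
{
	atomic64_add(len, &bytes_rx);	/* lock-free on CPU_32v6K and later */
}

static u64 read_and_reset_rx(void)
{
	/* atomically fetch the running total and restart it from zero */
	return atomic64_xchg(&bytes_rx, 0);
}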
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d2a59cfc30ce..c980156f3263 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); | |||
69 | /* | 69 | /* |
70 | * __arm_ioremap takes CPU physical address. | 70 | * __arm_ioremap takes CPU physical address. |
71 | * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page | 71 | * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page |
72 | * The _caller variety takes a __builtin_return_address(0) value for | ||
73 | * /proc/vmalloc to use - and should only be used in non-inline functions. | ||
72 | */ | 74 | */ |
73 | extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); | 75 | extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, |
74 | extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int); | 76 | size_t, unsigned int, void *); |
77 | extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int, | ||
78 | void *); | ||
79 | |||
80 | extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); | ||
81 | extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int); | ||
75 | extern void __iounmap(volatile void __iomem *addr); | 82 | extern void __iounmap(volatile void __iomem *addr); |
76 | 83 | ||
77 | /* | 84 | /* |
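The new _caller variants let ioremap wrappers record who actually created a mapping, via __builtin_return_address(0), for the /proc reporting mentioned in the comment; that is why they must only be called from non-inline functions. A hedged sketch of such a wrapper follows; the MT_DEVICE mapping type is an assumption for illustration.

#include <linux/types.h>
#include <asm/io.h>
#include <asm/mach/map.h>	/* MT_DEVICE, assumed mapping type */

/* Hypothetical platform wrapper.  Must not be inlined: the point is that
 * __builtin_return_address(0) identifies my_plat_ioremap's caller as the
 * owner of the mapping. */
void __iomem *my_plat_ioremap(unsigned long phys, size_t size)
{
	return __arm_ioremap_caller(phys, size, MT_DEVICE,
				    __builtin_return_address(0));
}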
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index b2cc1fcd0400..8bffc3ff3acf 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -46,12 +46,4 @@ struct sys_timer { | |||
46 | extern struct sys_timer *system_timer; | 46 | extern struct sys_timer *system_timer; |
47 | extern void timer_tick(void); | 47 | extern void timer_tick(void); |
48 | 48 | ||
49 | /* | ||
50 | * Kernel time keeping support. | ||
51 | */ | ||
52 | struct timespec; | ||
53 | extern int (*set_rtc)(void); | ||
54 | extern void save_time_delta(struct timespec *delta, struct timespec *rtc); | ||
55 | extern void restore_time_delta(struct timespec *delta, struct timespec *rtc); | ||
56 | |||
57 | #endif | 49 | #endif |
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 5421d82a2572..4312ee5e3d0b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -76,6 +76,17 @@ | |||
76 | */ | 76 | */ |
77 | #define IOREMAP_MAX_ORDER 24 | 77 | #define IOREMAP_MAX_ORDER 24 |
78 | 78 | ||
79 | /* | ||
80 | * Size of DMA-consistent memory region. Must be multiple of 2M, | ||
81 | * between 2MB and 14MB inclusive. | ||
82 | */ | ||
83 | #ifndef CONSISTENT_DMA_SIZE | ||
84 | #define CONSISTENT_DMA_SIZE SZ_2M | ||
85 | #endif | ||
86 | |||
87 | #define CONSISTENT_END (0xffe00000UL) | ||
88 | #define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) | ||
89 | |||
79 | #else /* CONFIG_MMU */ | 90 | #else /* CONFIG_MMU */ |
80 | 91 | ||
81 | /* | 92 | /* |
@@ -93,11 +104,11 @@ | |||
93 | #endif | 104 | #endif |
94 | 105 | ||
95 | #ifndef PHYS_OFFSET | 106 | #ifndef PHYS_OFFSET |
96 | #define PHYS_OFFSET (CONFIG_DRAM_BASE) | 107 | #define PHYS_OFFSET UL(CONFIG_DRAM_BASE) |
97 | #endif | 108 | #endif |
98 | 109 | ||
99 | #ifndef END_MEM | 110 | #ifndef END_MEM |
100 | #define END_MEM (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE) | 111 | #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) |
101 | #endif | 112 | #endif |
102 | 113 | ||
103 | #ifndef PAGE_OFFSET | 114 | #ifndef PAGE_OFFSET |
@@ -113,14 +124,6 @@ | |||
113 | #endif /* !CONFIG_MMU */ | 124 | #endif /* !CONFIG_MMU */ |
114 | 125 | ||
115 | /* | 126 | /* |
116 | * Size of DMA-consistent memory region. Must be multiple of 2M, | ||
117 | * between 2MB and 14MB inclusive. | ||
118 | */ | ||
119 | #ifndef CONSISTENT_DMA_SIZE | ||
120 | #define CONSISTENT_DMA_SIZE SZ_2M | ||
121 | #endif | ||
122 | |||
123 | /* | ||
124 | * Physical vs virtual RAM address space conversion. These are | 127 | * Physical vs virtual RAM address space conversion. These are |
125 | * private definitions which should NOT be used outside memory.h | 128 | * private definitions which should NOT be used outside memory.h |
126 | * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. | 129 | * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. |
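With this move the DMA-consistent region only exists for MMU builds and always ends at 0xffe00000, with its size defaulting to 2MB. A platform that needs a larger coherent pool overrides CONSISTENT_DMA_SIZE before the #ifndef fallback above is reached, typically from its mach/memory.h. A hypothetical example (the mach-foo platform and its values are made up):

/* arch/arm/mach-foo/include/mach/memory.h -- hypothetical platform */
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H

#include <asm/sizes.h>

#define PHYS_OFFSET		UL(0x80000000)

/*
 * Use an 8MB DMA-consistent region instead of the 2MB default.  Per the
 * comment above it must stay a multiple of 2MB, between 2MB and 14MB.
 */
#define CONSISTENT_DMA_SIZE	SZ_8M

#endif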
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b561584d04a1..68870c776671 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,6 +6,7 @@ | |||
6 | typedef struct { | 6 | typedef struct { |
7 | #ifdef CONFIG_CPU_HAS_ASID | 7 | #ifdef CONFIG_CPU_HAS_ASID |
8 | unsigned int id; | 8 | unsigned int id; |
9 | spinlock_t id_lock; | ||
9 | #endif | 10 | #endif |
10 | unsigned int kvm_seq; | 11 | unsigned int kvm_seq; |
11 | } mm_context_t; | 12 | } mm_context_t; |
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index de6cefb329dd..a0b3cac0547c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm); | |||
43 | #define ASID_FIRST_VERSION (1 << ASID_BITS) | 43 | #define ASID_FIRST_VERSION (1 << ASID_BITS) |
44 | 44 | ||
45 | extern unsigned int cpu_last_asid; | 45 | extern unsigned int cpu_last_asid; |
46 | #ifdef CONFIG_SMP | ||
47 | DECLARE_PER_CPU(struct mm_struct *, current_mm); | ||
48 | #endif | ||
46 | 49 | ||
47 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); | 50 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); |
48 | void __new_context(struct mm_struct *mm); | 51 | void __new_context(struct mm_struct *mm); |
49 | 52 | ||
50 | static inline void check_context(struct mm_struct *mm) | 53 | static inline void check_context(struct mm_struct *mm) |
51 | { | 54 | { |
55 | /* | ||
56 | * This code is executed with interrupts enabled. Therefore, | ||
57 | * mm->context.id cannot be updated to the latest ASID version | ||
58 | * on a different CPU (and condition below not triggered) | ||
59 | * without first getting an IPI to reset the context. The | ||
60 | * alternative is to take a read_lock on mm->context.id_lock | ||
61 | * (after changing its type to rwlock_t). | ||
62 | */ | ||
52 | if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) | 63 | if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) |
53 | __new_context(mm); | 64 | __new_context(mm); |
54 | 65 | ||
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
108 | __flush_icache_all(); | 119 | __flush_icache_all(); |
109 | #endif | 120 | #endif |
110 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { | 121 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { |
122 | #ifdef CONFIG_SMP | ||
123 | struct mm_struct **crt_mm = &per_cpu(current_mm, cpu); | ||
124 | *crt_mm = next; | ||
125 | #endif | ||
111 | check_context(next); | 126 | check_context(next); |
112 | cpu_switch_mm(next->pgd, next); | 127 | cpu_switch_mm(next->pgd, next); |
113 | if (cache_is_vivt()) | 128 | if (cache_is_vivt()) |
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index b011f2e939aa..013cfcdc4839 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -86,8 +86,8 @@ extern unsigned int kobjsize(const void *objp); | |||
86 | * All 32bit addresses are effectively valid for vmalloc... | 86 | * All 32bit addresses are effectively valid for vmalloc... |
87 | * Sort of meaningless for non-VM targets. | 87 | * Sort of meaningless for non-VM targets. |
88 | */ | 88 | */ |
89 | #define VMALLOC_START 0 | 89 | #define VMALLOC_START 0UL |
90 | #define VMALLOC_END 0xffffffff | 90 | #define VMALLOC_END 0xffffffffUL |
91 | 91 | ||
92 | #define FIRST_USER_ADDRESS (0) | 92 | #define FIRST_USER_ADDRESS (0) |
93 | 93 | ||
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 5ccce0a9b03c..f392fb4437af 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -223,18 +223,6 @@ extern struct meminfo meminfo; | |||
223 | #define bank_phys_end(bank) ((bank)->start + (bank)->size) | 223 | #define bank_phys_end(bank) ((bank)->start + (bank)->size) |
224 | #define bank_phys_size(bank) (bank)->size | 224 | #define bank_phys_size(bank) (bank)->size |
225 | 225 | ||
226 | /* | ||
227 | * Early command line parameters. | ||
228 | */ | ||
229 | struct early_params { | ||
230 | const char *arg; | ||
231 | void (*fn)(char **p); | ||
232 | }; | ||
233 | |||
234 | #define __early_param(name,fn) \ | ||
235 | static struct early_params __early_##fn __used \ | ||
236 | __attribute__((__section__(".early_param.init"))) = { name, fn } | ||
237 | |||
238 | #endif /* __KERNEL__ */ | 226 | #endif /* __KERNEL__ */ |
239 | 227 | ||
240 | #endif | 228 | #endif |
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index c91c64cab922..17eb355707dd 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,6 +5,22 @@ | |||
5 | #error SMP not supported on pre-ARMv6 CPUs | 5 | #error SMP not supported on pre-ARMv6 CPUs |
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | static inline void dsb_sev(void) | ||
9 | { | ||
10 | #if __LINUX_ARM_ARCH__ >= 7 | ||
11 | __asm__ __volatile__ ( | ||
12 | "dsb\n" | ||
13 | "sev" | ||
14 | ); | ||
15 | #elif defined(CONFIG_CPU_32v6K) | ||
16 | __asm__ __volatile__ ( | ||
17 | "mcr p15, 0, %0, c7, c10, 4\n" | ||
18 | "sev" | ||
19 | : : "r" (0) | ||
20 | ); | ||
21 | #endif | ||
22 | } | ||
23 | |||
8 | /* | 24 | /* |
9 | * ARMv6 Spin-locking. | 25 | * ARMv6 Spin-locking. |
10 | * | 26 | * |
@@ -69,13 +85,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |||
69 | 85 | ||
70 | __asm__ __volatile__( | 86 | __asm__ __volatile__( |
71 | " str %1, [%0]\n" | 87 | " str %1, [%0]\n" |
72 | #ifdef CONFIG_CPU_32v6K | ||
73 | " mcr p15, 0, %1, c7, c10, 4\n" /* DSB */ | ||
74 | " sev" | ||
75 | #endif | ||
76 | : | 88 | : |
77 | : "r" (&lock->lock), "r" (0) | 89 | : "r" (&lock->lock), "r" (0) |
78 | : "cc"); | 90 | : "cc"); |
91 | |||
92 | dsb_sev(); | ||
79 | } | 93 | } |
80 | 94 | ||
81 | /* | 95 | /* |
@@ -132,13 +146,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
132 | 146 | ||
133 | __asm__ __volatile__( | 147 | __asm__ __volatile__( |
134 | "str %1, [%0]\n" | 148 | "str %1, [%0]\n" |
135 | #ifdef CONFIG_CPU_32v6K | ||
136 | " mcr p15, 0, %1, c7, c10, 4\n" /* DSB */ | ||
137 | " sev\n" | ||
138 | #endif | ||
139 | : | 149 | : |
140 | : "r" (&rw->lock), "r" (0) | 150 | : "r" (&rw->lock), "r" (0) |
141 | : "cc"); | 151 | : "cc"); |
152 | |||
153 | dsb_sev(); | ||
142 | } | 154 | } |
143 | 155 | ||
144 | /* write_can_lock - would write_trylock() succeed? */ | 156 | /* write_can_lock - would write_trylock() succeed? */ |
@@ -188,14 +200,12 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) | |||
188 | " strex %1, %0, [%2]\n" | 200 | " strex %1, %0, [%2]\n" |
189 | " teq %1, #0\n" | 201 | " teq %1, #0\n" |
190 | " bne 1b" | 202 | " bne 1b" |
191 | #ifdef CONFIG_CPU_32v6K | ||
192 | "\n cmp %0, #0\n" | ||
193 | " mcreq p15, 0, %0, c7, c10, 4\n" | ||
194 | " seveq" | ||
195 | #endif | ||
196 | : "=&r" (tmp), "=&r" (tmp2) | 203 | : "=&r" (tmp), "=&r" (tmp2) |
197 | : "r" (&rw->lock) | 204 | : "r" (&rw->lock) |
198 | : "cc"); | 205 | : "cc"); |
206 | |||
207 | if (tmp == 0) | ||
208 | dsb_sev(); | ||
199 | } | 209 | } |
200 | 210 | ||
201 | static inline int arch_read_trylock(arch_rwlock_t *rw) | 211 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 058e7e90881d..ca88e6a84707 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -73,8 +73,7 @@ extern unsigned int mem_fclk_21285; | |||
73 | 73 | ||
74 | struct pt_regs; | 74 | struct pt_regs; |
75 | 75 | ||
76 | void die(const char *msg, struct pt_regs *regs, int err) | 76 | void die(const char *msg, struct pt_regs *regs, int err); |
77 | __attribute__((noreturn)); | ||
78 | 77 | ||
79 | struct siginfo; | 78 | struct siginfo; |
80 | void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, | 79 | void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, |
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 2dfb7d7a66e9..b74970ec02c4 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *); | |||
115 | extern void iwmmxt_task_release(struct thread_info *); | 115 | extern void iwmmxt_task_release(struct thread_info *); |
116 | extern void iwmmxt_task_switch(struct thread_info *); | 116 | extern void iwmmxt_task_switch(struct thread_info *); |
117 | 117 | ||
118 | extern void vfp_sync_state(struct thread_info *thread); | 118 | extern void vfp_sync_hwstate(struct thread_info *); |
119 | extern void vfp_flush_hwstate(struct thread_info *); | ||
119 | 120 | ||
120 | #endif | 121 | #endif |
121 | 122 | ||
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index c76e6d2679b8..26d302c28e13 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,6 +17,7 @@ obj-y := compat.o elf.o entry-armv.o entry-common.o irq.o \ | |||
17 | process.o ptrace.o return_address.o setup.o signal.o \ | 17 | process.o ptrace.o return_address.o setup.o signal.o \ |
18 | sys_arm.o stacktrace.o time.o traps.o | 18 | sys_arm.o stacktrace.o time.o traps.o |
19 | 19 | ||
20 | obj-$(CONFIG_LEDS) += leds.o | ||
20 | obj-$(CONFIG_OC_ETM) += etm.o | 21 | obj-$(CONFIG_OC_ETM) += etm.o |
21 | 22 | ||
22 | obj-$(CONFIG_ISA_DMA_API) += dma.o | 23 | obj-$(CONFIG_ISA_DMA_API) += dma.o |
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c
new file mode 100644
index 000000000000..31a316c1777b
--- /dev/null
+++ b/arch/arm/kernel/leds.c
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * LED support code, ripped out of arch/arm/kernel/time.c | ||
3 | * | ||
4 | * Copyright (C) 1994-2001 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/sysdev.h> | ||
13 | |||
14 | #include <asm/leds.h> | ||
15 | |||
16 | static void dummy_leds_event(led_event_t evt) | ||
17 | { | ||
18 | } | ||
19 | |||
20 | void (*leds_event)(led_event_t) = dummy_leds_event; | ||
21 | |||
22 | struct leds_evt_name { | ||
23 | const char name[8]; | ||
24 | int on; | ||
25 | int off; | ||
26 | }; | ||
27 | |||
28 | static const struct leds_evt_name evt_names[] = { | ||
29 | { "amber", led_amber_on, led_amber_off }, | ||
30 | { "blue", led_blue_on, led_blue_off }, | ||
31 | { "green", led_green_on, led_green_off }, | ||
32 | { "red", led_red_on, led_red_off }, | ||
33 | }; | ||
34 | |||
35 | static ssize_t leds_store(struct sys_device *dev, | ||
36 | struct sysdev_attribute *attr, | ||
37 | const char *buf, size_t size) | ||
38 | { | ||
39 | int ret = -EINVAL, len = strcspn(buf, " "); | ||
40 | |||
41 | if (len > 0 && buf[len] == '\0') | ||
42 | len--; | ||
43 | |||
44 | if (strncmp(buf, "claim", len) == 0) { | ||
45 | leds_event(led_claim); | ||
46 | ret = size; | ||
47 | } else if (strncmp(buf, "release", len) == 0) { | ||
48 | leds_event(led_release); | ||
49 | ret = size; | ||
50 | } else { | ||
51 | int i; | ||
52 | |||
53 | for (i = 0; i < ARRAY_SIZE(evt_names); i++) { | ||
54 | if (strlen(evt_names[i].name) != len || | ||
55 | strncmp(buf, evt_names[i].name, len) != 0) | ||
56 | continue; | ||
57 | if (strncmp(buf+len, " on", 3) == 0) { | ||
58 | leds_event(evt_names[i].on); | ||
59 | ret = size; | ||
60 | } else if (strncmp(buf+len, " off", 4) == 0) { | ||
61 | leds_event(evt_names[i].off); | ||
62 | ret = size; | ||
63 | } | ||
64 | break; | ||
65 | } | ||
66 | } | ||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | static SYSDEV_ATTR(event, 0200, NULL, leds_store); | ||
71 | |||
72 | static int leds_suspend(struct sys_device *dev, pm_message_t state) | ||
73 | { | ||
74 | leds_event(led_stop); | ||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | static int leds_resume(struct sys_device *dev) | ||
79 | { | ||
80 | leds_event(led_start); | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static int leds_shutdown(struct sys_device *dev) | ||
85 | { | ||
86 | leds_event(led_halted); | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | static struct sysdev_class leds_sysclass = { | ||
91 | .name = "leds", | ||
92 | .shutdown = leds_shutdown, | ||
93 | .suspend = leds_suspend, | ||
94 | .resume = leds_resume, | ||
95 | }; | ||
96 | |||
97 | static struct sys_device leds_device = { | ||
98 | .id = 0, | ||
99 | .cls = &leds_sysclass, | ||
100 | }; | ||
101 | |||
102 | static int __init leds_init(void) | ||
103 | { | ||
104 | int ret; | ||
105 | ret = sysdev_class_register(&leds_sysclass); | ||
106 | if (ret == 0) | ||
107 | ret = sysdev_register(&leds_device); | ||
108 | if (ret == 0) | ||
109 | ret = sysdev_create_file(&leds_device, &attr_event); | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | device_initcall(leds_init); | ||
114 | |||
115 | EXPORT_SYMBOL(leds_event); | ||
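This file is a straight move of the LED event plumbing out of time.c: boards still hook in by assigning their handler to the exported leds_event pointer, and events then arrive both from the sysfs 'event' attribute defined above and from the timer/idle LED triggers. A hypothetical board handler (the GPIO number and initcall placement are made up for illustration, and the GPIO is assumed to have been requested during board init):

#include <linux/init.h>
#include <linux/gpio.h>
#include <asm/leds.h>

#define MYBOARD_LED_GPIO	42	/* hypothetical GPIO driving an LED */

static void myboard_leds_event(led_event_t evt)
{
	switch (evt) {
	case led_amber_on:
		gpio_set_value(MYBOARD_LED_GPIO, 1);
		break;
	case led_amber_off:
		gpio_set_value(MYBOARD_LED_GPIO, 0);
		break;
	case led_halted:
		gpio_set_value(MYBOARD_LED_GPIO, 1);	/* leave lit on halt */
		break;
	default:
		break;
	}
}

static int __init myboard_leds_init(void)
{
	leds_event = myboard_leds_event;	/* exported above */
	return 0;
}
arch_initcall(myboard_leds_init);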
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index a2ea3854cb3c..08f899fb76a6 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -499,10 +499,41 @@ static struct undef_hook thumb_break_hook = { | |||
499 | .fn = break_trap, | 499 | .fn = break_trap, |
500 | }; | 500 | }; |
501 | 501 | ||
502 | static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr) | ||
503 | { | ||
504 | unsigned int instr2; | ||
505 | void __user *pc; | ||
506 | |||
507 | /* Check the second half of the instruction. */ | ||
508 | pc = (void __user *)(instruction_pointer(regs) + 2); | ||
509 | |||
510 | if (processor_mode(regs) == SVC_MODE) { | ||
511 | instr2 = *(u16 *) pc; | ||
512 | } else { | ||
513 | get_user(instr2, (u16 __user *)pc); | ||
514 | } | ||
515 | |||
516 | if (instr2 == 0xa000) { | ||
517 | ptrace_break(current, regs); | ||
518 | return 0; | ||
519 | } else { | ||
520 | return 1; | ||
521 | } | ||
522 | } | ||
523 | |||
524 | static struct undef_hook thumb2_break_hook = { | ||
525 | .instr_mask = 0xffff, | ||
526 | .instr_val = 0xf7f0, | ||
527 | .cpsr_mask = PSR_T_BIT, | ||
528 | .cpsr_val = PSR_T_BIT, | ||
529 | .fn = thumb2_break_trap, | ||
530 | }; | ||
531 | |||
502 | static int __init ptrace_break_init(void) | 532 | static int __init ptrace_break_init(void) |
503 | { | 533 | { |
504 | register_undef_hook(&arm_break_hook); | 534 | register_undef_hook(&arm_break_hook); |
505 | register_undef_hook(&thumb_break_hook); | 535 | register_undef_hook(&thumb_break_hook); |
536 | register_undef_hook(&thumb2_break_hook); | ||
506 | return 0; | 537 | return 0; |
507 | } | 538 | } |
508 | 539 | ||
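thumb2_break_hook above matches on the first halfword of the 32-bit Thumb-2 breakpoint (0xf7f0) and lets the handler verify the second halfword (0xa000). The same undef_hook interface can trap other undefined encodings; the sketch below is a made-up example (the 0xde01 encoding and the handler body are illustrative, not anything the kernel actually registers):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

/* Hypothetical hook for a 16-bit Thumb undefined encoding. */
static int my_trap(struct pt_regs *regs, unsigned int instr)
{
	pr_info("custom trap at %08lx\n", instruction_pointer(regs));
	regs->ARM_pc += 2;		/* step over the 16-bit instruction */
	return 0;			/* 0 = handled, non-zero = not ours */
}

static struct undef_hook my_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xde01,	/* made-up encoding */
	.cpsr_mask	= PSR_T_BIT,	/* only in Thumb state */
	.cpsr_val	= PSR_T_BIT,
	.fn		= my_trap,
};

static int __init my_hook_init(void)
{
	register_undef_hook(&my_hook);
	return 0;
}
core_initcall(my_hook_init);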
@@ -669,7 +700,7 @@ static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data) | |||
669 | union vfp_state *vfp = &thread->vfpstate; | 700 | union vfp_state *vfp = &thread->vfpstate; |
670 | struct user_vfp __user *ufp = data; | 701 | struct user_vfp __user *ufp = data; |
671 | 702 | ||
672 | vfp_sync_state(thread); | 703 | vfp_sync_hwstate(thread); |
673 | 704 | ||
674 | /* copy the floating point registers */ | 705 | /* copy the floating point registers */ |
675 | if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs, | 706 | if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs, |
@@ -692,7 +723,7 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data) | |||
692 | union vfp_state *vfp = &thread->vfpstate; | 723 | union vfp_state *vfp = &thread->vfpstate; |
693 | struct user_vfp __user *ufp = data; | 724 | struct user_vfp __user *ufp = data; |
694 | 725 | ||
695 | vfp_sync_state(thread); | 726 | vfp_sync_hwstate(thread); |
696 | 727 | ||
697 | /* copy the floating point registers */ | 728 | /* copy the floating point registers */ |
698 | if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs, | 729 | if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs, |
@@ -703,6 +734,8 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data) | |||
703 | if (get_user(vfp->hard.fpscr, &ufp->fpscr)) | 734 | if (get_user(vfp->hard.fpscr, &ufp->fpscr)) |
704 | return -EFAULT; | 735 | return -EFAULT; |
705 | 736 | ||
737 | vfp_flush_hwstate(thread); | ||
738 | |||
706 | return 0; | 739 | return 0; |
707 | } | 740 | } |
708 | #endif | 741 | #endif |
@@ -712,26 +745,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
712 | int ret; | 745 | int ret; |
713 | 746 | ||
714 | switch (request) { | 747 | switch (request) { |
715 | /* | ||
716 | * read word at location "addr" in the child process. | ||
717 | */ | ||
718 | case PTRACE_PEEKTEXT: | ||
719 | case PTRACE_PEEKDATA: | ||
720 | ret = generic_ptrace_peekdata(child, addr, data); | ||
721 | break; | ||
722 | |||
723 | case PTRACE_PEEKUSR: | 748 | case PTRACE_PEEKUSR: |
724 | ret = ptrace_read_user(child, addr, (unsigned long __user *)data); | 749 | ret = ptrace_read_user(child, addr, (unsigned long __user *)data); |
725 | break; | 750 | break; |
726 | 751 | ||
727 | /* | ||
728 | * write the word at location addr. | ||
729 | */ | ||
730 | case PTRACE_POKETEXT: | ||
731 | case PTRACE_POKEDATA: | ||
732 | ret = generic_ptrace_pokedata(child, addr, data); | ||
733 | break; | ||
734 | |||
735 | case PTRACE_POKEUSR: | 752 | case PTRACE_POKEUSR: |
736 | ret = ptrace_write_user(child, addr, data); | 753 | ret = ptrace_write_user(child, addr, data); |
737 | break; | 754 | break; |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c6c57b640b6b..baf5959d639a 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/proc_fs.h> | ||
27 | 28 | ||
28 | #include <asm/unified.h> | 29 | #include <asm/unified.h> |
29 | #include <asm/cpu.h> | 30 | #include <asm/cpu.h> |
@@ -117,7 +118,7 @@ EXPORT_SYMBOL(elf_platform); | |||
117 | 118 | ||
118 | static const char *cpu_name; | 119 | static const char *cpu_name; |
119 | static const char *machine_name; | 120 | static const char *machine_name; |
120 | static char __initdata command_line[COMMAND_LINE_SIZE]; | 121 | static char __initdata cmd_line[COMMAND_LINE_SIZE]; |
121 | 122 | ||
122 | static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; | 123 | static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; |
123 | static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; | 124 | static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; |
@@ -417,10 +418,11 @@ static int __init arm_add_memory(unsigned long start, unsigned long size) | |||
417 | * Pick out the memory size. We look for mem=size@start, | 418 | * Pick out the memory size. We look for mem=size@start, |
418 | * where start and size are "size[KkMm]" | 419 | * where start and size are "size[KkMm]" |
419 | */ | 420 | */ |
420 | static void __init early_mem(char **p) | 421 | static int __init early_mem(char *p) |
421 | { | 422 | { |
422 | static int usermem __initdata = 0; | 423 | static int usermem __initdata = 0; |
423 | unsigned long size, start; | 424 | unsigned long size, start; |
425 | char *endp; | ||
424 | 426 | ||
425 | /* | 427 | /* |
426 | * If the user specifies memory size, we | 428 | * If the user specifies memory size, we |
@@ -433,52 +435,15 @@ static void __init early_mem(char **p) | |||
433 | } | 435 | } |
434 | 436 | ||
435 | start = PHYS_OFFSET; | 437 | start = PHYS_OFFSET; |
436 | size = memparse(*p, p); | 438 | size = memparse(p, &endp); |
437 | if (**p == '@') | 439 | if (*endp == '@') |
438 | start = memparse(*p + 1, p); | 440 | start = memparse(endp + 1, NULL); |
439 | 441 | ||
440 | arm_add_memory(start, size); | 442 | arm_add_memory(start, size); |
441 | } | ||
442 | __early_param("mem=", early_mem); | ||
443 | 443 | ||
444 | /* | 444 | return 0; |
445 | * Initial parsing of the command line. | ||
446 | */ | ||
447 | static void __init parse_cmdline(char **cmdline_p, char *from) | ||
448 | { | ||
449 | char c = ' ', *to = command_line; | ||
450 | int len = 0; | ||
451 | |||
452 | for (;;) { | ||
453 | if (c == ' ') { | ||
454 | extern struct early_params __early_begin, __early_end; | ||
455 | struct early_params *p; | ||
456 | |||
457 | for (p = &__early_begin; p < &__early_end; p++) { | ||
458 | int arglen = strlen(p->arg); | ||
459 | |||
460 | if (memcmp(from, p->arg, arglen) == 0) { | ||
461 | if (to != command_line) | ||
462 | to -= 1; | ||
463 | from += arglen; | ||
464 | p->fn(&from); | ||
465 | |||
466 | while (*from != ' ' && *from != '\0') | ||
467 | from++; | ||
468 | break; | ||
469 | } | ||
470 | } | ||
471 | } | ||
472 | c = *from++; | ||
473 | if (!c) | ||
474 | break; | ||
475 | if (COMMAND_LINE_SIZE <= ++len) | ||
476 | break; | ||
477 | *to++ = c; | ||
478 | } | ||
479 | *to = '\0'; | ||
480 | *cmdline_p = command_line; | ||
481 | } | 445 | } |
446 | early_param("mem", early_mem); | ||
482 | 447 | ||
483 | static void __init | 448 | static void __init |
484 | setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) | 449 | setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) |
@@ -739,9 +704,15 @@ void __init setup_arch(char **cmdline_p) | |||
739 | init_mm.end_data = (unsigned long) _edata; | 704 | init_mm.end_data = (unsigned long) _edata; |
740 | init_mm.brk = (unsigned long) _end; | 705 | init_mm.brk = (unsigned long) _end; |
741 | 706 | ||
742 | memcpy(boot_command_line, from, COMMAND_LINE_SIZE); | 707 | /* parse_early_param needs a boot_command_line */ |
743 | boot_command_line[COMMAND_LINE_SIZE-1] = '\0'; | 708 | strlcpy(boot_command_line, from, COMMAND_LINE_SIZE); |
744 | parse_cmdline(cmdline_p, from); | 709 | |
710 | /* populate cmd_line too for later use, preserving boot_command_line */ | ||
711 | strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); | ||
712 | *cmdline_p = cmd_line; | ||
713 | |||
714 | parse_early_param(); | ||
715 | |||
745 | paging_init(mdesc); | 716 | paging_init(mdesc); |
746 | request_standard_resources(&meminfo, mdesc); | 717 | request_standard_resources(&meminfo, mdesc); |
747 | 718 | ||
@@ -782,9 +753,21 @@ static int __init topology_init(void) | |||
782 | 753 | ||
783 | return 0; | 754 | return 0; |
784 | } | 755 | } |
785 | |||
786 | subsys_initcall(topology_init); | 756 | subsys_initcall(topology_init); |
787 | 757 | ||
758 | #ifdef CONFIG_HAVE_PROC_CPU | ||
759 | static int __init proc_cpu_init(void) | ||
760 | { | ||
761 | struct proc_dir_entry *res; | ||
762 | |||
763 | res = proc_mkdir("cpu", NULL); | ||
764 | if (!res) | ||
765 | return -ENOMEM; | ||
766 | return 0; | ||
767 | } | ||
768 | fs_initcall(proc_cpu_init); | ||
769 | #endif | ||
770 | |||
788 | static const char *hwcap_str[] = { | 771 | static const char *hwcap_str[] = { |
789 | "swp", | 772 | "swp", |
790 | "half", | 773 | "half", |
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index d38cdf2c8276..28753805d2d1 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -10,11 +10,6 @@ | |||
10 | * | 10 | * |
11 | * This file contains the ARM-specific time handling details: | 11 | * This file contains the ARM-specific time handling details: |
12 | * reading the RTC at bootup, etc... | 12 | * reading the RTC at bootup, etc... |
13 | * | ||
14 | * 1994-07-02 Alan Modra | ||
15 | * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime | ||
16 | * 1998-12-20 Updated NTP code according to technical memorandum Jan '96 | ||
17 | * "A Kernel Model for Precision Timekeeping" by Dave Mills | ||
18 | */ | 13 | */ |
19 | #include <linux/module.h> | 14 | #include <linux/module.h> |
20 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
@@ -77,11 +72,6 @@ unsigned long profile_pc(struct pt_regs *regs) | |||
77 | EXPORT_SYMBOL(profile_pc); | 72 | EXPORT_SYMBOL(profile_pc); |
78 | #endif | 73 | #endif |
79 | 74 | ||
80 | /* | ||
81 | * hook for setting the RTC's idea of the current time. | ||
82 | */ | ||
83 | int (*set_rtc)(void); | ||
84 | |||
85 | #ifndef CONFIG_GENERIC_TIME | 75 | #ifndef CONFIG_GENERIC_TIME |
86 | static unsigned long dummy_gettimeoffset(void) | 76 | static unsigned long dummy_gettimeoffset(void) |
87 | { | 77 | { |
@@ -89,140 +79,6 @@ static unsigned long dummy_gettimeoffset(void) | |||
89 | } | 79 | } |
90 | #endif | 80 | #endif |
91 | 81 | ||
92 | static unsigned long next_rtc_update; | ||
93 | |||
94 | /* | ||
95 | * If we have an externally synchronized linux clock, then update | ||
96 | * CMOS clock accordingly every ~11 minutes. set_rtc() has to be | ||
97 | * called as close as possible to 500 ms before the new second | ||
98 | * starts. | ||
99 | */ | ||
100 | static inline void do_set_rtc(void) | ||
101 | { | ||
102 | if (!ntp_synced() || set_rtc == NULL) | ||
103 | return; | ||
104 | |||
105 | if (next_rtc_update && | ||
106 | time_before((unsigned long)xtime.tv_sec, next_rtc_update)) | ||
107 | return; | ||
108 | |||
109 | if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) && | ||
110 | xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1)) | ||
111 | return; | ||
112 | |||
113 | if (set_rtc()) | ||
114 | /* | ||
115 | * rtc update failed. Try again in 60s | ||
116 | */ | ||
117 | next_rtc_update = xtime.tv_sec + 60; | ||
118 | else | ||
119 | next_rtc_update = xtime.tv_sec + 660; | ||
120 | } | ||
121 | |||
122 | #ifdef CONFIG_LEDS | ||
123 | |||
124 | static void dummy_leds_event(led_event_t evt) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | void (*leds_event)(led_event_t) = dummy_leds_event; | ||
129 | |||
130 | struct leds_evt_name { | ||
131 | const char name[8]; | ||
132 | int on; | ||
133 | int off; | ||
134 | }; | ||
135 | |||
136 | static const struct leds_evt_name evt_names[] = { | ||
137 | { "amber", led_amber_on, led_amber_off }, | ||
138 | { "blue", led_blue_on, led_blue_off }, | ||
139 | { "green", led_green_on, led_green_off }, | ||
140 | { "red", led_red_on, led_red_off }, | ||
141 | }; | ||
142 | |||
143 | static ssize_t leds_store(struct sys_device *dev, | ||
144 | struct sysdev_attribute *attr, | ||
145 | const char *buf, size_t size) | ||
146 | { | ||
147 | int ret = -EINVAL, len = strcspn(buf, " "); | ||
148 | |||
149 | if (len > 0 && buf[len] == '\0') | ||
150 | len--; | ||
151 | |||
152 | if (strncmp(buf, "claim", len) == 0) { | ||
153 | leds_event(led_claim); | ||
154 | ret = size; | ||
155 | } else if (strncmp(buf, "release", len) == 0) { | ||
156 | leds_event(led_release); | ||
157 | ret = size; | ||
158 | } else { | ||
159 | int i; | ||
160 | |||
161 | for (i = 0; i < ARRAY_SIZE(evt_names); i++) { | ||
162 | if (strlen(evt_names[i].name) != len || | ||
163 | strncmp(buf, evt_names[i].name, len) != 0) | ||
164 | continue; | ||
165 | if (strncmp(buf+len, " on", 3) == 0) { | ||
166 | leds_event(evt_names[i].on); | ||
167 | ret = size; | ||
168 | } else if (strncmp(buf+len, " off", 4) == 0) { | ||
169 | leds_event(evt_names[i].off); | ||
170 | ret = size; | ||
171 | } | ||
172 | break; | ||
173 | } | ||
174 | } | ||
175 | return ret; | ||
176 | } | ||
177 | |||
178 | static SYSDEV_ATTR(event, 0200, NULL, leds_store); | ||
179 | |||
180 | static int leds_suspend(struct sys_device *dev, pm_message_t state) | ||
181 | { | ||
182 | leds_event(led_stop); | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static int leds_resume(struct sys_device *dev) | ||
187 | { | ||
188 | leds_event(led_start); | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | static int leds_shutdown(struct sys_device *dev) | ||
193 | { | ||
194 | leds_event(led_halted); | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static struct sysdev_class leds_sysclass = { | ||
199 | .name = "leds", | ||
200 | .shutdown = leds_shutdown, | ||
201 | .suspend = leds_suspend, | ||
202 | .resume = leds_resume, | ||
203 | }; | ||
204 | |||
205 | static struct sys_device leds_device = { | ||
206 | .id = 0, | ||
207 | .cls = &leds_sysclass, | ||
208 | }; | ||
209 | |||
210 | static int __init leds_init(void) | ||
211 | { | ||
212 | int ret; | ||
213 | ret = sysdev_class_register(&leds_sysclass); | ||
214 | if (ret == 0) | ||
215 | ret = sysdev_register(&leds_device); | ||
216 | if (ret == 0) | ||
217 | ret = sysdev_create_file(&leds_device, &attr_event); | ||
218 | return ret; | ||
219 | } | ||
220 | |||
221 | device_initcall(leds_init); | ||
222 | |||
223 | EXPORT_SYMBOL(leds_event); | ||
224 | #endif | ||
225 | |||
226 | #ifdef CONFIG_LEDS_TIMER | 82 | #ifdef CONFIG_LEDS_TIMER |
227 | static inline void do_leds(void) | 83 | static inline void do_leds(void) |
228 | { | 84 | { |
@@ -295,39 +151,6 @@ int do_settimeofday(struct timespec *tv) | |||
295 | EXPORT_SYMBOL(do_settimeofday); | 151 | EXPORT_SYMBOL(do_settimeofday); |
296 | #endif /* !CONFIG_GENERIC_TIME */ | 152 | #endif /* !CONFIG_GENERIC_TIME */ |
297 | 153 | ||
298 | /** | ||
299 | * save_time_delta - Save the offset between system time and RTC time | ||
300 | * @delta: pointer to timespec to store delta | ||
301 | * @rtc: pointer to timespec for current RTC time | ||
302 | * | ||
303 | * Return a delta between the system time and the RTC time, such | ||
304 | * that system time can be restored later with restore_time_delta() | ||
305 | */ | ||
306 | void save_time_delta(struct timespec *delta, struct timespec *rtc) | ||
307 | { | ||
308 | set_normalized_timespec(delta, | ||
309 | xtime.tv_sec - rtc->tv_sec, | ||
310 | xtime.tv_nsec - rtc->tv_nsec); | ||
311 | } | ||
312 | EXPORT_SYMBOL(save_time_delta); | ||
313 | |||
314 | /** | ||
315 | * restore_time_delta - Restore the current system time | ||
316 | * @delta: delta returned by save_time_delta() | ||
317 | * @rtc: pointer to timespec for current RTC time | ||
318 | */ | ||
319 | void restore_time_delta(struct timespec *delta, struct timespec *rtc) | ||
320 | { | ||
321 | struct timespec ts; | ||
322 | |||
323 | set_normalized_timespec(&ts, | ||
324 | delta->tv_sec + rtc->tv_sec, | ||
325 | delta->tv_nsec + rtc->tv_nsec); | ||
326 | |||
327 | do_settimeofday(&ts); | ||
328 | } | ||
329 | EXPORT_SYMBOL(restore_time_delta); | ||
330 | |||
331 | #ifndef CONFIG_GENERIC_CLOCKEVENTS | 154 | #ifndef CONFIG_GENERIC_CLOCKEVENTS |
332 | /* | 155 | /* |
333 | * Kernel system timer support. | 156 | * Kernel system timer support. |
@@ -336,7 +159,6 @@ void timer_tick(void) | |||
336 | { | 159 | { |
337 | profile_tick(CPU_PROFILING); | 160 | profile_tick(CPU_PROFILING); |
338 | do_leds(); | 161 | do_leds(); |
339 | do_set_rtc(); | ||
340 | write_seqlock(&xtime_lock); | 162 | write_seqlock(&xtime_lock); |
341 | do_timer(1); | 163 | do_timer(1); |
342 | write_sequnlock(&xtime_lock); | 164 | write_sequnlock(&xtime_lock); |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 3f361a783f43..1621e5327b2a 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -12,15 +12,17 @@ | |||
12 | * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably | 12 | * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably |
13 | * kill the offending process. | 13 | * kill the offending process. |
14 | */ | 14 | */ |
15 | #include <linux/module.h> | ||
16 | #include <linux/signal.h> | 15 | #include <linux/signal.h> |
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/personality.h> | 16 | #include <linux/personality.h> |
19 | #include <linux/kallsyms.h> | 17 | #include <linux/kallsyms.h> |
20 | #include <linux/delay.h> | 18 | #include <linux/spinlock.h> |
19 | #include <linux/uaccess.h> | ||
21 | #include <linux/hardirq.h> | 20 | #include <linux/hardirq.h> |
21 | #include <linux/kdebug.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/kexec.h> | ||
24 | #include <linux/delay.h> | ||
22 | #include <linux/init.h> | 25 | #include <linux/init.h> |
23 | #include <linux/uaccess.h> | ||
24 | 26 | ||
25 | #include <asm/atomic.h> | 27 | #include <asm/atomic.h> |
26 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
@@ -224,14 +226,21 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) | |||
224 | #define S_SMP "" | 226 | #define S_SMP "" |
225 | #endif | 227 | #endif |
226 | 228 | ||
227 | static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) | 229 | static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) |
228 | { | 230 | { |
229 | struct task_struct *tsk = thread->task; | 231 | struct task_struct *tsk = thread->task; |
230 | static int die_counter; | 232 | static int die_counter; |
233 | int ret; | ||
231 | 234 | ||
232 | printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", | 235 | printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", |
233 | str, err, ++die_counter); | 236 | str, err, ++die_counter); |
234 | sysfs_printk_last_file(); | 237 | sysfs_printk_last_file(); |
238 | |||
239 | /* trap and error numbers are mostly meaningless on ARM */ | ||
240 | ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); | ||
241 | if (ret == NOTIFY_STOP) | ||
242 | return ret; | ||
243 | |||
235 | print_modules(); | 244 | print_modules(); |
236 | __show_regs(regs); | 245 | __show_regs(regs); |
237 | printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", | 246 | printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", |
@@ -243,6 +252,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p | |||
243 | dump_backtrace(regs, tsk); | 252 | dump_backtrace(regs, tsk); |
244 | dump_instr(KERN_EMERG, regs); | 253 | dump_instr(KERN_EMERG, regs); |
245 | } | 254 | } |
255 | |||
256 | return ret; | ||
246 | } | 257 | } |
247 | 258 | ||
248 | DEFINE_SPINLOCK(die_lock); | 259 | DEFINE_SPINLOCK(die_lock); |
@@ -250,16 +261,21 @@ DEFINE_SPINLOCK(die_lock); | |||
250 | /* | 261 | /* |
251 | * This function is protected against re-entrancy. | 262 | * This function is protected against re-entrancy. |
252 | */ | 263 | */ |
253 | NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) | 264 | void die(const char *str, struct pt_regs *regs, int err) |
254 | { | 265 | { |
255 | struct thread_info *thread = current_thread_info(); | 266 | struct thread_info *thread = current_thread_info(); |
267 | int ret; | ||
256 | 268 | ||
257 | oops_enter(); | 269 | oops_enter(); |
258 | 270 | ||
259 | spin_lock_irq(&die_lock); | 271 | spin_lock_irq(&die_lock); |
260 | console_verbose(); | 272 | console_verbose(); |
261 | bust_spinlocks(1); | 273 | bust_spinlocks(1); |
262 | __die(str, err, thread, regs); | 274 | ret = __die(str, err, thread, regs); |
275 | |||
276 | if (regs && kexec_should_crash(thread->task)) | ||
277 | crash_kexec(regs); | ||
278 | |||
263 | bust_spinlocks(0); | 279 | bust_spinlocks(0); |
264 | add_taint(TAINT_DIE); | 280 | add_taint(TAINT_DIE); |
265 | spin_unlock_irq(&die_lock); | 281 | spin_unlock_irq(&die_lock); |
@@ -267,11 +283,10 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) | |||
267 | 283 | ||
268 | if (in_interrupt()) | 284 | if (in_interrupt()) |
269 | panic("Fatal exception in interrupt"); | 285 | panic("Fatal exception in interrupt"); |
270 | |||
271 | if (panic_on_oops) | 286 | if (panic_on_oops) |
272 | panic("Fatal exception"); | 287 | panic("Fatal exception"); |
273 | 288 | if (ret != NOTIFY_STOP) | |
274 | do_exit(SIGSEGV); | 289 | do_exit(SIGSEGV); |
275 | } | 290 | } |
276 | 291 | ||
277 | void arm_notify_die(const char *str, struct pt_regs *regs, | 292 | void arm_notify_die(const char *str, struct pt_regs *regs, |
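The change above routes ARM oopses through the generic notify_die() chain and honours NOTIFY_STOP, so in-kernel debuggers such as kgdb can intercept a die before do_exit(SIGSEGV). A minimal sketch of a consumer of that hook, using only the generic kdebug notifier API; the handler and module names are illustrative and not part of this patch:

#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>

/* Hypothetical example: log ARM oopses before the usual die() output. */
static int example_die_handler(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_OOPS)
		printk(KERN_EMERG "oops trapped: %s (err %lx)\n",
		       args->str, args->err);

	/* Returning NOTIFY_STOP here would suppress do_exit(SIGSEGV). */
	return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
	.notifier_call	= example_die_handler,
};

static int __init example_init(void)
{
	return register_die_notifier(&example_die_nb);
}

static void __exit example_exit(void)
{
	unregister_die_notifier(&example_die_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");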
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 4957e13ef55b..b16c07914b55 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -43,10 +43,6 @@ SECTIONS | |||
43 | 43 | ||
44 | INIT_SETUP(16) | 44 | INIT_SETUP(16) |
45 | 45 | ||
46 | __early_begin = .; | ||
47 | *(.early_param.init) | ||
48 | __early_end = .; | ||
49 | |||
50 | INIT_CALLS | 46 | INIT_CALLS |
51 | CON_INITCALL | 47 | CON_INITCALL |
52 | SECURITY_INITCALL | 48 | SECURITY_INITCALL |
diff --git a/arch/arm/mach-davinci/include/mach/hardware.h b/arch/arm/mach-davinci/include/mach/hardware.h index 41c89386e39b..c45ba1f62a11 100644 --- a/arch/arm/mach-davinci/include/mach/hardware.h +++ b/arch/arm/mach-davinci/include/mach/hardware.h | |||
@@ -27,7 +27,7 @@ | |||
27 | /* | 27 | /* |
28 | * I/O mapping | 28 | * I/O mapping |
29 | */ | 29 | */ |
30 | #define IO_PHYS 0x01c00000 | 30 | #define IO_PHYS 0x01c00000UL |
31 | #define IO_OFFSET 0xfd000000 /* Virtual IO = 0xfec00000 */ | 31 | #define IO_OFFSET 0xfd000000 /* Virtual IO = 0xfec00000 */ |
32 | #define IO_SIZE 0x00400000 | 32 | #define IO_SIZE 0x00400000 |
33 | #define IO_VIRT (IO_PHYS + IO_OFFSET) | 33 | #define IO_VIRT (IO_PHYS + IO_OFFSET) |
diff --git a/arch/arm/mach-davinci/io.c b/arch/arm/mach-davinci/io.c index 49912b48b1b0..a1c0b6b99edf 100644 --- a/arch/arm/mach-davinci/io.c +++ b/arch/arm/mach-davinci/io.c | |||
@@ -24,7 +24,7 @@ void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type) | |||
24 | if (BETWEEN(p, IO_PHYS, IO_SIZE)) | 24 | if (BETWEEN(p, IO_PHYS, IO_SIZE)) |
25 | return XLATE(p, IO_PHYS, IO_VIRT); | 25 | return XLATE(p, IO_PHYS, IO_VIRT); |
26 | 26 | ||
27 | return __arm_ioremap(p, size, type); | 27 | return __arm_ioremap_caller(p, size, type, __builtin_return_address(0)); |
28 | } | 28 | } |
29 | EXPORT_SYMBOL(davinci_ioremap); | 29 | EXPORT_SYMBOL(davinci_ioremap); |
30 | 30 | ||
diff --git a/arch/arm/mach-dove/include/mach/vmalloc.h b/arch/arm/mach-dove/include/mach/vmalloc.h index 8b2c974755c6..a28792cf761e 100644 --- a/arch/arm/mach-dove/include/mach/vmalloc.h +++ b/arch/arm/mach-dove/include/mach/vmalloc.h | |||
@@ -2,4 +2,4 @@ | |||
2 | * arch/arm/mach-dove/include/mach/vmalloc.h | 2 | * arch/arm/mach-dove/include/mach/vmalloc.h |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #define VMALLOC_END 0xfd800000 | 5 | #define VMALLOC_END 0xfd800000UL |
diff --git a/arch/arm/mach-ep93xx/include/mach/vmalloc.h b/arch/arm/mach-ep93xx/include/mach/vmalloc.h index aed21cd3fe2d..1b3f25d03d39 100644 --- a/arch/arm/mach-ep93xx/include/mach/vmalloc.h +++ b/arch/arm/mach-ep93xx/include/mach/vmalloc.h | |||
@@ -2,4 +2,4 @@ | |||
2 | * arch/arm/mach-ep93xx/include/mach/vmalloc.h | 2 | * arch/arm/mach-ep93xx/include/mach/vmalloc.h |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #define VMALLOC_END 0xfe800000 | 5 | #define VMALLOC_END 0xfe800000UL |
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c index 41febc796b1c..e3bc3f6f6b10 100644 --- a/arch/arm/mach-footbridge/common.c +++ b/arch/arm/mach-footbridge/common.c | |||
@@ -32,12 +32,13 @@ unsigned int mem_fclk_21285 = 50000000; | |||
32 | 32 | ||
33 | EXPORT_SYMBOL(mem_fclk_21285); | 33 | EXPORT_SYMBOL(mem_fclk_21285); |
34 | 34 | ||
35 | static void __init early_fclk(char **arg) | 35 | static int __init early_fclk(char *arg) |
36 | { | 36 | { |
37 | mem_fclk_21285 = simple_strtoul(*arg, arg, 0); | 37 | mem_fclk_21285 = simple_strtoul(arg, NULL, 0); |
38 | return 0; | ||
38 | } | 39 | } |
39 | 40 | ||
40 | __early_param("mem_fclk_21285=", early_fclk); | 41 | early_param("mem_fclk_21285", early_fclk); |
41 | 42 | ||
42 | static int __init parse_tag_memclk(const struct tag *tag) | 43 | static int __init parse_tag_memclk(const struct tag *tag) |
43 | { | 44 | { |
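This is the same __early_param() to early_param() conversion that the rest of the series applies to initrd=, vmalloc=, cachepolicy=, ecc= and friends: the option name loses its trailing '=', the handler receives the value string directly rather than a char ** cursor, and it returns 0 on success. A generic sketch of the new-style handler, with the option name and variable invented purely for illustration:

#include <linux/init.h>
#include <linux/kernel.h>

static unsigned long example_clk;	/* illustrative only */

/* Parses "example_clk=<value>" from the kernel command line. */
static int __init early_example_clk(char *arg)
{
	example_clk = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("example_clk", early_example_clk);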
diff --git a/arch/arm/mach-gemini/include/mach/vmalloc.h b/arch/arm/mach-gemini/include/mach/vmalloc.h index 83e536d9436c..45371eb86fcb 100644 --- a/arch/arm/mach-gemini/include/mach/vmalloc.h +++ b/arch/arm/mach-gemini/include/mach/vmalloc.h | |||
@@ -7,4 +7,4 @@ | |||
7 | * (at your option) any later version. | 7 | * (at your option) any later version. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define VMALLOC_END 0xF0000000 | 10 | #define VMALLOC_END 0xf0000000UL |
diff --git a/arch/arm/mach-iop13xx/io.c b/arch/arm/mach-iop13xx/io.c index 529580997814..48642e66c566 100644 --- a/arch/arm/mach-iop13xx/io.c +++ b/arch/arm/mach-iop13xx/io.c | |||
@@ -61,9 +61,9 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size, | |||
61 | (cookie - IOP13XX_PCIE_LOWER_MEM_RA)); | 61 | (cookie - IOP13XX_PCIE_LOWER_MEM_RA)); |
62 | break; | 62 | break; |
63 | case IOP13XX_PBI_LOWER_MEM_RA ... IOP13XX_PBI_UPPER_MEM_RA: | 63 | case IOP13XX_PBI_LOWER_MEM_RA ... IOP13XX_PBI_UPPER_MEM_RA: |
64 | retval = __arm_ioremap(IOP13XX_PBI_LOWER_MEM_PA + | 64 | retval = __arm_ioremap_caller(IOP13XX_PBI_LOWER_MEM_PA + |
65 | (cookie - IOP13XX_PBI_LOWER_MEM_RA), | 65 | (cookie - IOP13XX_PBI_LOWER_MEM_RA), |
66 | size, mtype); | 66 | size, mtype, __builtin_return_address(0)); |
67 | break; | 67 | break; |
68 | case IOP13XX_PCIE_LOWER_IO_PA ... IOP13XX_PCIE_UPPER_IO_PA: | 68 | case IOP13XX_PCIE_LOWER_IO_PA ... IOP13XX_PCIE_UPPER_IO_PA: |
69 | retval = (void *) IOP13XX_PCIE_IO_PHYS_TO_VIRT(cookie); | 69 | retval = (void *) IOP13XX_PCIE_IO_PHYS_TO_VIRT(cookie); |
@@ -75,7 +75,8 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size, | |||
75 | retval = (void *) IOP13XX_PMMR_PHYS_TO_VIRT(cookie); | 75 | retval = (void *) IOP13XX_PMMR_PHYS_TO_VIRT(cookie); |
76 | break; | 76 | break; |
77 | default: | 77 | default: |
78 | retval = __arm_ioremap(cookie, size, mtype); | 78 | retval = __arm_ioremap_caller(cookie, size, mtype, |
79 | __builtin_return_address(0)); | ||
79 | } | 80 | } |
80 | 81 | ||
81 | return retval; | 82 | return retval; |
diff --git a/arch/arm/mach-iop32x/include/mach/vmalloc.h b/arch/arm/mach-iop32x/include/mach/vmalloc.h index 85ceb09d85f0..c4862d48e583 100644 --- a/arch/arm/mach-iop32x/include/mach/vmalloc.h +++ b/arch/arm/mach-iop32x/include/mach/vmalloc.h | |||
@@ -2,4 +2,4 @@ | |||
2 | * arch/arm/mach-iop32x/include/mach/vmalloc.h | 2 | * arch/arm/mach-iop32x/include/mach/vmalloc.h |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #define VMALLOC_END 0xfe000000 | 5 | #define VMALLOC_END 0xfe000000UL |
diff --git a/arch/arm/mach-iop33x/include/mach/vmalloc.h b/arch/arm/mach-iop33x/include/mach/vmalloc.h index f9f99dea9bc4..48331dc23704 100644 --- a/arch/arm/mach-iop33x/include/mach/vmalloc.h +++ b/arch/arm/mach-iop33x/include/mach/vmalloc.h | |||
@@ -2,4 +2,4 @@ | |||
2 | * arch/arm/mach-iop33x/include/mach/vmalloc.h | 2 | * arch/arm/mach-iop33x/include/mach/vmalloc.h |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #define VMALLOC_END 0xfe000000 | 5 | #define VMALLOC_END 0xfe000000UL |
diff --git a/arch/arm/mach-ixp2000/include/mach/vmalloc.h b/arch/arm/mach-ixp2000/include/mach/vmalloc.h index d195e35aed3b..61c8dae24f95 100644 --- a/arch/arm/mach-ixp2000/include/mach/vmalloc.h +++ b/arch/arm/mach-ixp2000/include/mach/vmalloc.h | |||
@@ -17,4 +17,4 @@ | |||
17 | * The vmalloc() routines leave a hole of 4kB between each vmalloced | 17 | * The vmalloc() routines leave a hole of 4kB between each vmalloced |
18 | * area for the same reason. ;) | 18 | * area for the same reason. ;) |
19 | */ | 19 | */ |
20 | #define VMALLOC_END 0xfb000000 | 20 | #define VMALLOC_END 0xfb000000UL |
diff --git a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h b/arch/arm/mach-ixp23xx/include/mach/vmalloc.h index dd519f678d10..896c56a1c00e 100644 --- a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h +++ b/arch/arm/mach-ixp23xx/include/mach/vmalloc.h | |||
@@ -7,4 +7,4 @@ | |||
7 | * specific static I/O. | 7 | * specific static I/O. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define VMALLOC_END (0xec000000) | 10 | #define VMALLOC_END (0xec000000UL) |
diff --git a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h b/arch/arm/mach-ixp4xx/include/mach/vmalloc.h index 7b3580b53adf..9bcd64d59854 100644 --- a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h +++ b/arch/arm/mach-ixp4xx/include/mach/vmalloc.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * arch/arm/mach-ixp4xx/include/mach/vmalloc.h | 2 | * arch/arm/mach-ixp4xx/include/mach/vmalloc.h |
3 | */ | 3 | */ |
4 | #define VMALLOC_END (0xFF000000) | 4 | #define VMALLOC_END (0xff000000UL) |
5 | 5 | ||
diff --git a/arch/arm/mach-kirkwood/include/mach/vmalloc.h b/arch/arm/mach-kirkwood/include/mach/vmalloc.h index 8f48260dcdad..bf162ca3d2c1 100644 --- a/arch/arm/mach-kirkwood/include/mach/vmalloc.h +++ b/arch/arm/mach-kirkwood/include/mach/vmalloc.h | |||
@@ -2,4 +2,4 @@ | |||
2 | * arch/arm/mach-kirkwood/include/mach/vmalloc.h | 2 | * arch/arm/mach-kirkwood/include/mach/vmalloc.h |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #define VMALLOC_END 0xfe800000 | 5 | #define VMALLOC_END 0xfe800000UL |
diff --git a/arch/arm/mach-lh7a40x/include/mach/vmalloc.h b/arch/arm/mach-lh7a40x/include/mach/vmalloc.h index 3fbd49490bb9..d62da7358b16 100644 --- a/arch/arm/mach-lh7a40x/include/mach/vmalloc.h +++ b/arch/arm/mach-lh7a40x/include/mach/vmalloc.h | |||
@@ -7,4 +7,4 @@ | |||
7 | * version 2 as published by the Free Software Foundation. | 7 | * version 2 as published by the Free Software Foundation. |
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | #define VMALLOC_END (0xe8000000) | 10 | #define VMALLOC_END (0xe8000000UL) |
diff --git a/arch/arm/mach-loki/include/mach/vmalloc.h b/arch/arm/mach-loki/include/mach/vmalloc.h index 8dc3bfcbf9f0..5dcbd865443f 100644 --- a/arch/arm/mach-loki/include/mach/vmalloc.h +++ b/arch/arm/mach-loki/include/mach/vmalloc.h | |||
@@ -2,4 +2,4 @@ | |||
2 | * arch/arm/mach-loki/include/mach/vmalloc.h | 2 | * arch/arm/mach-loki/include/mach/vmalloc.h |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #define VMALLOC_END 0xfe800000 | 5 | #define VMALLOC_END 0xfe800000UL |
diff --git a/arch/arm/mach-mmp/include/mach/vmalloc.h b/arch/arm/mach-mmp/include/mach/vmalloc.h index b60ccaf9fee7..1d0bac003ad0 100644 --- a/arch/arm/mach-mmp/include/mach/vmalloc.h +++ b/arch/arm/mach-mmp/include/mach/vmalloc.h | |||
@@ -2,4 +2,4 @@ | |||
2 | * linux/arch/arm/mach-mmp/include/mach/vmalloc.h | 2 | * linux/arch/arm/mach-mmp/include/mach/vmalloc.h |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #define VMALLOC_END 0xfe000000 | 5 | #define VMALLOC_END 0xfe000000UL |
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c index 1c5e7dac086f..05f96b780aa6 100644 --- a/arch/arm/mach-msm/io.c +++ b/arch/arm/mach-msm/io.c | |||
@@ -76,5 +76,6 @@ __msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | |||
76 | mtype = MT_DEVICE_NONSHARED; | 76 | mtype = MT_DEVICE_NONSHARED; |
77 | } | 77 | } |
78 | 78 | ||
79 | return __arm_ioremap(phys_addr, size, mtype); | 79 | return __arm_ioremap_caller(phys_addr, size, mtype, |
80 | __builtin_return_address(0)); | ||
80 | } | 81 | } |
diff --git a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h b/arch/arm/mach-mv78xx0/include/mach/vmalloc.h index 1c4954386a84..ba26fe98e640 100644 --- a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h +++ b/arch/arm/mach-mv78xx0/include/mach/vmalloc.h | |||
@@ -2,4 +2,4 @@ | |||
2 | * arch/arm/mach-mv78xx0/include/mach/vmalloc.h | 2 | * arch/arm/mach-mv78xx0/include/mach/vmalloc.h |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #define VMALLOC_END 0xfe000000 | 5 | #define VMALLOC_END 0xfe000000UL |
diff --git a/arch/arm/mach-nomadik/include/mach/vmalloc.h b/arch/arm/mach-nomadik/include/mach/vmalloc.h index be12e31ea528..f83d574d9445 100644 --- a/arch/arm/mach-nomadik/include/mach/vmalloc.h +++ b/arch/arm/mach-nomadik/include/mach/vmalloc.h | |||
@@ -1,2 +1,2 @@ | |||
1 | 1 | ||
2 | #define VMALLOC_END 0xe8000000 | 2 | #define VMALLOC_END 0xe8000000UL |
diff --git a/arch/arm/mach-ns9xxx/include/mach/vmalloc.h b/arch/arm/mach-ns9xxx/include/mach/vmalloc.h index fe964d3bcc47..c8651974c4b0 100644 --- a/arch/arm/mach-ns9xxx/include/mach/vmalloc.h +++ b/arch/arm/mach-ns9xxx/include/mach/vmalloc.h | |||
@@ -11,6 +11,6 @@ | |||
11 | #ifndef __ASM_ARCH_VMALLOC_H | 11 | #ifndef __ASM_ARCH_VMALLOC_H |
12 | #define __ASM_ARCH_VMALLOC_H | 12 | #define __ASM_ARCH_VMALLOC_H |
13 | 13 | ||
14 | #define VMALLOC_END (0xf0000000) | 14 | #define VMALLOC_END (0xf0000000UL) |
15 | 15 | ||
16 | #endif /* ifndef __ASM_ARCH_VMALLOC_H */ | 16 | #endif /* ifndef __ASM_ARCH_VMALLOC_H */ |
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index 0c6be6b4a7e2..8ba8fb5b2514 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <plat/control.h> | 28 | #include <plat/control.h> |
29 | #include <plat/timer-gp.h> | 29 | #include <plat/timer-gp.h> |
30 | #include <asm/hardware/gic.h> | 30 | #include <asm/hardware/gic.h> |
31 | #include <asm/hardware/cache-l2x0.h> | ||
31 | 32 | ||
32 | static struct platform_device sdp4430_lcd_device = { | 33 | static struct platform_device sdp4430_lcd_device = { |
33 | .name = "sdp4430_lcd", | 34 | .name = "sdp4430_lcd", |
@@ -50,6 +51,59 @@ static struct omap_board_config_kernel sdp4430_config[] __initdata = { | |||
50 | { OMAP_TAG_LCD, &sdp4430_lcd_config }, | 51 | { OMAP_TAG_LCD, &sdp4430_lcd_config }, |
51 | }; | 52 | }; |
52 | 53 | ||
54 | #ifdef CONFIG_CACHE_L2X0 | ||
55 | noinline void omap_smc1(u32 fn, u32 arg) | ||
56 | { | ||
57 | register u32 r12 asm("r12") = fn; | ||
58 | register u32 r0 asm("r0") = arg; | ||
59 | |||
60 | /* This is the common secure monitor API routine used to | ||
61 | * modify the PL310 secure registers. | ||
62 | * r0 contains the value to be written and "r12" contains | ||
63 | * the monitor API number. The monitor clobbers a few CPU | ||
64 | * registers internally, so they need to be backed up, | ||
65 | * including the link register "lr". | ||
66 | * r11 and r12 are saved explicitly because the compiler- | ||
67 | * generated code won't preserve them. | ||
68 | */ | ||
69 | asm volatile( | ||
70 | "stmfd r13!, {r11,r12}\n" | ||
71 | "dsb\n" | ||
72 | "smc\n" | ||
73 | "ldmfd r13!, {r11,r12}\n" | ||
74 | : "+r" (r0), "+r" (r12) | ||
75 | : | ||
76 | : "r4", "r5", "r10", "lr", "cc"); | ||
77 | } | ||
78 | EXPORT_SYMBOL(omap_smc1); | ||
79 | |||
80 | static int __init omap_l2_cache_init(void) | ||
81 | { | ||
82 | void __iomem *l2cache_base; | ||
83 | |||
84 | /* Avoid running this code on other OMAPs in | ||
85 | * multi-OMAP builds. | ||
86 | */ | ||
87 | if (!cpu_is_omap44xx()) | ||
88 | return -ENODEV; | ||
89 | |||
90 | /* Static mapping, never released */ | ||
91 | l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K); | ||
92 | BUG_ON(!l2cache_base); | ||
93 | |||
94 | /* Enable PL310 L2 Cache controller */ | ||
95 | omap_smc1(0x102, 0x1); | ||
96 | |||
97 | /* 32KB way size, 16-way associativity, | ||
98 | * parity disabled | ||
99 | */ | ||
100 | l2x0_init(l2cache_base, 0x0e050000, 0xc0000fff); | ||
101 | |||
102 | return 0; | ||
103 | } | ||
104 | early_initcall(omap_l2_cache_init); | ||
105 | #endif | ||
106 | |||
53 | static void __init gic_init_irq(void) | 107 | static void __init gic_init_irq(void) |
54 | { | 108 | { |
55 | void __iomem *base; | 109 | void __iomem *base; |
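For context, l2x0_init() (declared in asm/hardware/cache-l2x0.h) merges the caller's auxiliary-control value into the PL310 AUX_CTRL register under the given mask before invalidating and enabling the cache. A simplified sketch of that merge is below; it assumes the behaviour of this era's cache-l2x0.c, and the helper itself is illustrative rather than part of the patch:

#include <linux/types.h>
#include <linux/io.h>
#include <asm/hardware/cache-l2x0.h>

/*
 * Illustrative only: how the aux_val/aux_mask pair passed by
 * omap_l2_cache_init() combines with the reset value of AUX_CTRL.
 * 0x0e050000 requests a 32KB way size (bits 19:17) and 16-way
 * associativity (bit 16); 0xc0000fff preserves only bits 31:30
 * and 11:0 of the existing register value.
 */
static u32 example_aux_ctrl(void __iomem *l2_base, u32 aux_val, u32 aux_mask)
{
	u32 aux = readl(l2_base + L2X0_AUX_CTRL);

	aux &= aux_mask;	/* keep only the bits the caller allows */
	aux |= aux_val;		/* OR in the requested configuration */
	return aux;
}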
diff --git a/arch/arm/mach-orion5x/include/mach/vmalloc.h b/arch/arm/mach-orion5x/include/mach/vmalloc.h index 7147a297e97f..06b50aeff7b9 100644 --- a/arch/arm/mach-orion5x/include/mach/vmalloc.h +++ b/arch/arm/mach-orion5x/include/mach/vmalloc.h | |||
@@ -2,4 +2,4 @@ | |||
2 | * arch/arm/mach-orion5x/include/mach/vmalloc.h | 2 | * arch/arm/mach-orion5x/include/mach/vmalloc.h |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #define VMALLOC_END 0xfd800000 | 5 | #define VMALLOC_END 0xfd800000UL |
diff --git a/arch/arm/mach-pxa/include/mach/vmalloc.h b/arch/arm/mach-pxa/include/mach/vmalloc.h index e90c5eeb81dd..bfecfbf5f460 100644 --- a/arch/arm/mach-pxa/include/mach/vmalloc.h +++ b/arch/arm/mach-pxa/include/mach/vmalloc.h | |||
@@ -8,4 +8,4 @@ | |||
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #define VMALLOC_END (0xe8000000) | 11 | #define VMALLOC_END (0xe8000000UL) |
diff --git a/arch/arm/mach-realview/include/mach/vmalloc.h b/arch/arm/mach-realview/include/mach/vmalloc.h index fe0de1b507ac..a2a4c6861407 100644 --- a/arch/arm/mach-realview/include/mach/vmalloc.h +++ b/arch/arm/mach-realview/include/mach/vmalloc.h | |||
@@ -18,4 +18,4 @@ | |||
18 | * along with this program; if not, write to the Free Software | 18 | * along with this program; if not, write to the Free Software |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
20 | */ | 20 | */ |
21 | #define VMALLOC_END 0xf8000000 | 21 | #define VMALLOC_END 0xf8000000UL |
diff --git a/arch/arm/mach-s3c24a0/include/mach/vmalloc.h b/arch/arm/mach-s3c24a0/include/mach/vmalloc.h index 4d4fe4849589..914656820794 100644 --- a/arch/arm/mach-s3c24a0/include/mach/vmalloc.h +++ b/arch/arm/mach-s3c24a0/include/mach/vmalloc.h | |||
@@ -12,6 +12,6 @@ | |||
12 | #ifndef __ASM_ARCH_VMALLOC_H | 12 | #ifndef __ASM_ARCH_VMALLOC_H |
13 | #define __ASM_ARCH_VMALLOC_H | 13 | #define __ASM_ARCH_VMALLOC_H |
14 | 14 | ||
15 | #define VMALLOC_END (0xE0000000) | 15 | #define VMALLOC_END (0xe0000000UL) |
16 | 16 | ||
17 | #endif /* __ASM_ARCH_VMALLOC_H */ | 17 | #endif /* __ASM_ARCH_VMALLOC_H */ |
diff --git a/arch/arm/mach-sa1100/include/mach/vmalloc.h b/arch/arm/mach-sa1100/include/mach/vmalloc.h index ec8fdc5a3606..b3d002398480 100644 --- a/arch/arm/mach-sa1100/include/mach/vmalloc.h +++ b/arch/arm/mach-sa1100/include/mach/vmalloc.h | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/arm/mach-sa1100/include/mach/vmalloc.h | 2 | * arch/arm/mach-sa1100/include/mach/vmalloc.h |
3 | */ | 3 | */ |
4 | #define VMALLOC_END (0xe8000000) | 4 | #define VMALLOC_END (0xe8000000UL) |
diff --git a/arch/arm/mach-u300/include/mach/vmalloc.h b/arch/arm/mach-u300/include/mach/vmalloc.h index b00c51a66fbe..ec423b92b81d 100644 --- a/arch/arm/mach-u300/include/mach/vmalloc.h +++ b/arch/arm/mach-u300/include/mach/vmalloc.h | |||
@@ -9,4 +9,4 @@ | |||
9 | * End must be above the I/O registers and on an even 2MiB boundary. | 9 | * End must be above the I/O registers and on an even 2MiB boundary. |
10 | * Author: Linus Walleij <linus.walleij@stericsson.com> | 10 | * Author: Linus Walleij <linus.walleij@stericsson.com> |
11 | */ | 11 | */ |
12 | #define VMALLOC_END 0xfe800000 | 12 | #define VMALLOC_END 0xfe800000UL |
diff --git a/arch/arm/mach-ux500/include/mach/vmalloc.h b/arch/arm/mach-ux500/include/mach/vmalloc.h index 86cdbbce1842..a4945cb41172 100644 --- a/arch/arm/mach-ux500/include/mach/vmalloc.h +++ b/arch/arm/mach-ux500/include/mach/vmalloc.h | |||
@@ -15,4 +15,4 @@ | |||
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | */ | 17 | */ |
18 | #define VMALLOC_END 0xf0000000 | 18 | #define VMALLOC_END 0xf0000000UL |
diff --git a/arch/arm/mach-w90x900/include/mach/vmalloc.h b/arch/arm/mach-w90x900/include/mach/vmalloc.h index 2f9dfb928533..b067e44500a4 100644 --- a/arch/arm/mach-w90x900/include/mach/vmalloc.h +++ b/arch/arm/mach-w90x900/include/mach/vmalloc.h | |||
@@ -18,6 +18,6 @@ | |||
18 | #ifndef __ASM_ARCH_VMALLOC_H | 18 | #ifndef __ASM_ARCH_VMALLOC_H |
19 | #define __ASM_ARCH_VMALLOC_H | 19 | #define __ASM_ARCH_VMALLOC_H |
20 | 20 | ||
21 | #define VMALLOC_END (0xE0000000) | 21 | #define VMALLOC_END (0xe0000000UL) |
22 | 22 | ||
23 | #endif /* __ASM_ARCH_VMALLOC_H */ | 23 | #endif /* __ASM_ARCH_VMALLOC_H */ |
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index baf638487a2d..c4ed9f93f646 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -399,7 +399,7 @@ config CPU_V6 | |||
399 | config CPU_32v6K | 399 | config CPU_32v6K |
400 | bool "Support ARM V6K processor extensions" if !SMP | 400 | bool "Support ARM V6K processor extensions" if !SMP |
401 | depends on CPU_V6 | 401 | depends on CPU_V6 |
402 | default y if SMP && !ARCH_MX3 | 402 | default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) |
403 | help | 403 | help |
404 | Say Y here if your ARMv6 processor supports the 'K' extension. | 404 | Say Y here if your ARMv6 processor supports the 'K' extension. |
405 | This enables the kernel to use some instructions not present | 405 | This enables the kernel to use some instructions not present |
@@ -410,7 +410,7 @@ config CPU_32v6K | |||
410 | # ARMv7 | 410 | # ARMv7 |
411 | config CPU_V7 | 411 | config CPU_V7 |
412 | bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX | 412 | bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX |
413 | select CPU_32v6K | 413 | select CPU_32v6K if !ARCH_OMAP2 |
414 | select CPU_32v7 | 414 | select CPU_32v7 |
415 | select CPU_ABRT_EV7 | 415 | select CPU_ABRT_EV7 |
416 | select CPU_PABRT_V7 | 416 | select CPU_PABRT_V7 |
@@ -754,7 +754,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH | |||
754 | config CACHE_L2X0 | 754 | config CACHE_L2X0 |
755 | bool "Enable the L2x0 outer cache controller" | 755 | bool "Enable the L2x0 outer cache controller" |
756 | depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ | 756 | depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ |
757 | REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK | 757 | REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4 |
758 | default y | 758 | default y |
759 | select OUTER_CACHE | 759 | select OUTER_CACHE |
760 | help | 760 | help |
@@ -779,5 +779,5 @@ config CACHE_XSC3L2 | |||
779 | 779 | ||
780 | config ARM_L1_CACHE_SHIFT | 780 | config ARM_L1_CACHE_SHIFT |
781 | int | 781 | int |
782 | default 6 if ARCH_OMAP3 || ARCH_S5PC1XX | 782 | default 6 if ARM_L1_CACHE_SHIFT_6 |
783 | default 5 | 783 | default 5 |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index b270d6228fe2..0c5eb6983cef 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -898,11 +898,7 @@ static int __init alignment_init(void) | |||
898 | #ifdef CONFIG_PROC_FS | 898 | #ifdef CONFIG_PROC_FS |
899 | struct proc_dir_entry *res; | 899 | struct proc_dir_entry *res; |
900 | 900 | ||
901 | res = proc_mkdir("cpu", NULL); | 901 | res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL); |
902 | if (!res) | ||
903 | return -ENOMEM; | ||
904 | |||
905 | res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, res); | ||
906 | if (!res) | 902 | if (!res) |
907 | return -ENOMEM; | 903 | return -ENOMEM; |
908 | 904 | ||
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index cb8fc6573b1b..07334632d3e2 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -42,6 +42,57 @@ static inline void cache_sync(void) | |||
42 | cache_wait(base + L2X0_CACHE_SYNC, 1); | 42 | cache_wait(base + L2X0_CACHE_SYNC, 1); |
43 | } | 43 | } |
44 | 44 | ||
45 | static inline void l2x0_clean_line(unsigned long addr) | ||
46 | { | ||
47 | void __iomem *base = l2x0_base; | ||
48 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); | ||
49 | writel(addr, base + L2X0_CLEAN_LINE_PA); | ||
50 | } | ||
51 | |||
52 | static inline void l2x0_inv_line(unsigned long addr) | ||
53 | { | ||
54 | void __iomem *base = l2x0_base; | ||
55 | cache_wait(base + L2X0_INV_LINE_PA, 1); | ||
56 | writel(addr, base + L2X0_INV_LINE_PA); | ||
57 | } | ||
58 | |||
59 | #ifdef CONFIG_PL310_ERRATA_588369 | ||
60 | static void debug_writel(unsigned long val) | ||
61 | { | ||
62 | extern void omap_smc1(u32 fn, u32 arg); | ||
63 | |||
64 | /* | ||
65 | * Texas Instruments secure monitor API used to modify the | ||
66 | * PL310 Debug Control Register. | ||
67 | */ | ||
68 | omap_smc1(0x100, val); | ||
69 | } | ||
70 | |||
71 | static inline void l2x0_flush_line(unsigned long addr) | ||
72 | { | ||
73 | void __iomem *base = l2x0_base; | ||
74 | |||
75 | /* Clean by PA followed by Invalidate by PA */ | ||
76 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); | ||
77 | writel(addr, base + L2X0_CLEAN_LINE_PA); | ||
78 | cache_wait(base + L2X0_INV_LINE_PA, 1); | ||
79 | writel(addr, base + L2X0_INV_LINE_PA); | ||
80 | } | ||
81 | #else | ||
82 | |||
83 | /* Optimised out for the non-errata case */ | ||
84 | static inline void debug_writel(unsigned long val) | ||
85 | { | ||
86 | } | ||
87 | |||
88 | static inline void l2x0_flush_line(unsigned long addr) | ||
89 | { | ||
90 | void __iomem *base = l2x0_base; | ||
91 | cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); | ||
92 | writel(addr, base + L2X0_CLEAN_INV_LINE_PA); | ||
93 | } | ||
94 | #endif | ||
95 | |||
45 | static inline void l2x0_inv_all(void) | 96 | static inline void l2x0_inv_all(void) |
46 | { | 97 | { |
47 | unsigned long flags; | 98 | unsigned long flags; |
@@ -62,23 +113,24 @@ static void l2x0_inv_range(unsigned long start, unsigned long end) | |||
62 | spin_lock_irqsave(&l2x0_lock, flags); | 113 | spin_lock_irqsave(&l2x0_lock, flags); |
63 | if (start & (CACHE_LINE_SIZE - 1)) { | 114 | if (start & (CACHE_LINE_SIZE - 1)) { |
64 | start &= ~(CACHE_LINE_SIZE - 1); | 115 | start &= ~(CACHE_LINE_SIZE - 1); |
65 | cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); | 116 | debug_writel(0x03); |
66 | writel(start, base + L2X0_CLEAN_INV_LINE_PA); | 117 | l2x0_flush_line(start); |
118 | debug_writel(0x00); | ||
67 | start += CACHE_LINE_SIZE; | 119 | start += CACHE_LINE_SIZE; |
68 | } | 120 | } |
69 | 121 | ||
70 | if (end & (CACHE_LINE_SIZE - 1)) { | 122 | if (end & (CACHE_LINE_SIZE - 1)) { |
71 | end &= ~(CACHE_LINE_SIZE - 1); | 123 | end &= ~(CACHE_LINE_SIZE - 1); |
72 | cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); | 124 | debug_writel(0x03); |
73 | writel(end, base + L2X0_CLEAN_INV_LINE_PA); | 125 | l2x0_flush_line(end); |
126 | debug_writel(0x00); | ||
74 | } | 127 | } |
75 | 128 | ||
76 | while (start < end) { | 129 | while (start < end) { |
77 | unsigned long blk_end = start + min(end - start, 4096UL); | 130 | unsigned long blk_end = start + min(end - start, 4096UL); |
78 | 131 | ||
79 | while (start < blk_end) { | 132 | while (start < blk_end) { |
80 | cache_wait(base + L2X0_INV_LINE_PA, 1); | 133 | l2x0_inv_line(start); |
81 | writel(start, base + L2X0_INV_LINE_PA); | ||
82 | start += CACHE_LINE_SIZE; | 134 | start += CACHE_LINE_SIZE; |
83 | } | 135 | } |
84 | 136 | ||
@@ -103,8 +155,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end) | |||
103 | unsigned long blk_end = start + min(end - start, 4096UL); | 155 | unsigned long blk_end = start + min(end - start, 4096UL); |
104 | 156 | ||
105 | while (start < blk_end) { | 157 | while (start < blk_end) { |
106 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); | 158 | l2x0_clean_line(start); |
107 | writel(start, base + L2X0_CLEAN_LINE_PA); | ||
108 | start += CACHE_LINE_SIZE; | 159 | start += CACHE_LINE_SIZE; |
109 | } | 160 | } |
110 | 161 | ||
@@ -128,11 +179,12 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) | |||
128 | while (start < end) { | 179 | while (start < end) { |
129 | unsigned long blk_end = start + min(end - start, 4096UL); | 180 | unsigned long blk_end = start + min(end - start, 4096UL); |
130 | 181 | ||
182 | debug_writel(0x03); | ||
131 | while (start < blk_end) { | 183 | while (start < blk_end) { |
132 | cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); | 184 | l2x0_flush_line(start); |
133 | writel(start, base + L2X0_CLEAN_INV_LINE_PA); | ||
134 | start += CACHE_LINE_SIZE; | 185 | start += CACHE_LINE_SIZE; |
135 | } | 186 | } |
187 | debug_writel(0x00); | ||
136 | 188 | ||
137 | if (blk_end < end) { | 189 | if (blk_end < end) { |
138 | spin_unlock_irqrestore(&l2x0_lock, flags); | 190 | spin_unlock_irqrestore(&l2x0_lock, flags); |
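The 0x03/0x00 values written through debug_writel() above target the PL310 Debug Control Register via the TI secure monitor; per the PL310 TRM (stated here as an assumption, not something this patch documents), bit 0 disables cache linefills and bit 1 disables write-back, which is what makes the split clean-by-PA then invalidate-by-PA sequence safe on affected revisions. A small illustrative wrapper over the pattern used around unaligned lines and whole flush ranges:

/* Assumed PL310 Debug Control Register bits (from the PL310 TRM). */
#define PL310_DEBUG_DCL		(1 << 0)	/* disable cache linefill */
#define PL310_DEBUG_DWB		(1 << 1)	/* disable write-back (force WT) */

/*
 * Illustrative only: perform one L2 line operation with linefills and
 * write-back disabled, mirroring how l2x0_inv_range()/l2x0_flush_range()
 * bracket their calls to l2x0_flush_line() above.
 */
static void l2x0_op_with_debug_disabled(void (*op)(unsigned long),
					unsigned long addr)
{
	debug_writel(PL310_DEBUG_DCL | PL310_DEBUG_DWB);
	op(addr);
	debug_writel(0x00);
}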
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index a9e22e31eaa1..b0ee9ba3cfab 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -10,12 +10,17 @@ | |||
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/smp.h> | ||
14 | #include <linux/percpu.h> | ||
13 | 15 | ||
14 | #include <asm/mmu_context.h> | 16 | #include <asm/mmu_context.h> |
15 | #include <asm/tlbflush.h> | 17 | #include <asm/tlbflush.h> |
16 | 18 | ||
17 | static DEFINE_SPINLOCK(cpu_asid_lock); | 19 | static DEFINE_SPINLOCK(cpu_asid_lock); |
18 | unsigned int cpu_last_asid = ASID_FIRST_VERSION; | 20 | unsigned int cpu_last_asid = ASID_FIRST_VERSION; |
21 | #ifdef CONFIG_SMP | ||
22 | DEFINE_PER_CPU(struct mm_struct *, current_mm); | ||
23 | #endif | ||
19 | 24 | ||
20 | /* | 25 | /* |
21 | * We fork()ed a process, and we need a new context for the child | 26 | * We fork()ed a process, and we need a new context for the child |
@@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION; | |||
26 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) | 31 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) |
27 | { | 32 | { |
28 | mm->context.id = 0; | 33 | mm->context.id = 0; |
34 | spin_lock_init(&mm->context.id_lock); | ||
29 | } | 35 | } |
30 | 36 | ||
37 | static void flush_context(void) | ||
38 | { | ||
39 | /* set the reserved ASID before flushing the TLB */ | ||
40 | asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0)); | ||
41 | isb(); | ||
42 | local_flush_tlb_all(); | ||
43 | if (icache_is_vivt_asid_tagged()) { | ||
44 | __flush_icache_all(); | ||
45 | dsb(); | ||
46 | } | ||
47 | } | ||
48 | |||
49 | #ifdef CONFIG_SMP | ||
50 | |||
51 | static void set_mm_context(struct mm_struct *mm, unsigned int asid) | ||
52 | { | ||
53 | unsigned long flags; | ||
54 | |||
55 | /* | ||
56 | * Locking needed for multi-threaded applications where the | ||
57 | * same mm->context.id could be set from different CPUs during | ||
58 | * the broadcast. This function is also called via IPI so the | ||
59 | * mm->context.id_lock has to be IRQ-safe. | ||
60 | */ | ||
61 | spin_lock_irqsave(&mm->context.id_lock, flags); | ||
62 | if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { | ||
63 | /* | ||
64 | * Old version of ASID found. Set the new one and | ||
65 | * reset mm_cpumask(mm). | ||
66 | */ | ||
67 | mm->context.id = asid; | ||
68 | cpumask_clear(mm_cpumask(mm)); | ||
69 | } | ||
70 | spin_unlock_irqrestore(&mm->context.id_lock, flags); | ||
71 | |||
72 | /* | ||
73 | * Set the mm_cpumask(mm) bit for the current CPU. | ||
74 | */ | ||
75 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * Reset the ASID on the current CPU. This function call is broadcast | ||
80 | * from the CPU handling the ASID rollover and holding cpu_asid_lock. | ||
81 | */ | ||
82 | static void reset_context(void *info) | ||
83 | { | ||
84 | unsigned int asid; | ||
85 | unsigned int cpu = smp_processor_id(); | ||
86 | struct mm_struct *mm = per_cpu(current_mm, cpu); | ||
87 | |||
88 | /* | ||
89 | * Check if a current_mm was set on this CPU as it might still | ||
90 | * be in the early booting stages and using the reserved ASID. | ||
91 | */ | ||
92 | if (!mm) | ||
93 | return; | ||
94 | |||
95 | smp_rmb(); | ||
96 | asid = cpu_last_asid + cpu + 1; | ||
97 | |||
98 | flush_context(); | ||
99 | set_mm_context(mm, asid); | ||
100 | |||
101 | /* set the new ASID */ | ||
102 | asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id)); | ||
103 | isb(); | ||
104 | } | ||
105 | |||
106 | #else | ||
107 | |||
108 | static inline void set_mm_context(struct mm_struct *mm, unsigned int asid) | ||
109 | { | ||
110 | mm->context.id = asid; | ||
111 | cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); | ||
112 | } | ||
113 | |||
114 | #endif | ||
115 | |||
31 | void __new_context(struct mm_struct *mm) | 116 | void __new_context(struct mm_struct *mm) |
32 | { | 117 | { |
33 | unsigned int asid; | 118 | unsigned int asid; |
34 | 119 | ||
35 | spin_lock(&cpu_asid_lock); | 120 | spin_lock(&cpu_asid_lock); |
121 | #ifdef CONFIG_SMP | ||
122 | /* | ||
123 | * Check the ASID again, in case the change was broadcast from | ||
124 | * another CPU before we acquired the lock. | ||
125 | */ | ||
126 | if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { | ||
127 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | ||
128 | spin_unlock(&cpu_asid_lock); | ||
129 | return; | ||
130 | } | ||
131 | #endif | ||
132 | /* | ||
133 | * At this point, it is guaranteed that the current mm (with | ||
134 | * an old ASID) isn't active on any other CPU since the ASIDs | ||
135 | * are changed simultaneously via IPI. | ||
136 | */ | ||
36 | asid = ++cpu_last_asid; | 137 | asid = ++cpu_last_asid; |
37 | if (asid == 0) | 138 | if (asid == 0) |
38 | asid = cpu_last_asid = ASID_FIRST_VERSION; | 139 | asid = cpu_last_asid = ASID_FIRST_VERSION; |
@@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm) | |||
42 | * to start a new version and flush the TLB. | 143 | * to start a new version and flush the TLB. |
43 | */ | 144 | */ |
44 | if (unlikely((asid & ~ASID_MASK) == 0)) { | 145 | if (unlikely((asid & ~ASID_MASK) == 0)) { |
45 | asid = ++cpu_last_asid; | 146 | asid = cpu_last_asid + smp_processor_id() + 1; |
46 | /* set the reserved ASID before flushing the TLB */ | 147 | flush_context(); |
47 | asm("mcr p15, 0, %0, c13, c0, 1 @ set reserved context ID\n" | 148 | #ifdef CONFIG_SMP |
48 | : | 149 | smp_wmb(); |
49 | : "r" (0)); | 150 | smp_call_function(reset_context, NULL, 1); |
50 | isb(); | 151 | #endif |
51 | flush_tlb_all(); | 152 | cpu_last_asid += NR_CPUS; |
52 | if (icache_is_vivt_asid_tagged()) { | ||
53 | __flush_icache_all(); | ||
54 | dsb(); | ||
55 | } | ||
56 | } | 153 | } |
57 | spin_unlock(&cpu_asid_lock); | ||
58 | 154 | ||
59 | cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); | 155 | set_mm_context(mm, asid); |
60 | mm->context.id = asid; | 156 | spin_unlock(&cpu_asid_lock); |
61 | } | 157 | } |
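The rollover logic above treats the bits of context.id above the hardware ASID as a generation counter: cpu_last_asid starts at ASID_FIRST_VERSION and, on rollover, each CPU re-tags its current mm with cpu_last_asid + cpu + 1 before the counter advances by NR_CPUS. The test (mm->context.id ^ cpu_last_asid) >> ASID_BITS is non-zero exactly when an mm still holds an ASID from an older generation. A small illustration of that check, using the ASID_BITS/ASID_FIRST_VERSION values from this era's asm/mmu_context.h (quoted here as an assumption):

#define ASID_BITS		8
#define ASID_MASK		((~0) << ASID_BITS)
#define ASID_FIRST_VERSION	(1 << ASID_BITS)

/*
 * Illustrative only: true when 'id' was allocated in an older ASID
 * generation than 'last' and must therefore be replaced; this is the
 * same test set_mm_context() and __new_context() perform above.
 */
static inline int asid_is_stale(unsigned int id, unsigned int last)
{
	return ((id ^ last) >> ASID_BITS) != 0;
}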
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 64daef2173bd..0da7eccf7749 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -29,9 +29,6 @@ | |||
29 | #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB" | 29 | #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB" |
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | #define CONSISTENT_END (0xffe00000) | ||
33 | #define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) | ||
34 | |||
35 | #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) | 32 | #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) |
36 | #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) | 33 | #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) |
37 | #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) | 34 | #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index a04ffbbbe253..7829cb5425f5 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/setup.h> | 23 | #include <asm/setup.h> |
24 | #include <asm/sizes.h> | 24 | #include <asm/sizes.h> |
25 | #include <asm/tlb.h> | 25 | #include <asm/tlb.h> |
26 | #include <asm/fixmap.h> | ||
26 | 27 | ||
27 | #include <asm/mach/arch.h> | 28 | #include <asm/mach/arch.h> |
28 | #include <asm/mach/map.h> | 29 | #include <asm/mach/map.h> |
@@ -32,19 +33,21 @@ | |||
32 | static unsigned long phys_initrd_start __initdata = 0; | 33 | static unsigned long phys_initrd_start __initdata = 0; |
33 | static unsigned long phys_initrd_size __initdata = 0; | 34 | static unsigned long phys_initrd_size __initdata = 0; |
34 | 35 | ||
35 | static void __init early_initrd(char **p) | 36 | static int __init early_initrd(char *p) |
36 | { | 37 | { |
37 | unsigned long start, size; | 38 | unsigned long start, size; |
39 | char *endp; | ||
38 | 40 | ||
39 | start = memparse(*p, p); | 41 | start = memparse(p, &endp); |
40 | if (**p == ',') { | 42 | if (*endp == ',') { |
41 | size = memparse((*p) + 1, p); | 43 | size = memparse(endp + 1, NULL); |
42 | 44 | ||
43 | phys_initrd_start = start; | 45 | phys_initrd_start = start; |
44 | phys_initrd_size = size; | 46 | phys_initrd_size = size; |
45 | } | 47 | } |
48 | return 0; | ||
46 | } | 49 | } |
47 | __early_param("initrd=", early_initrd); | 50 | early_param("initrd", early_initrd); |
48 | 51 | ||
49 | static int __init parse_tag_initrd(const struct tag *tag) | 52 | static int __init parse_tag_initrd(const struct tag *tag) |
50 | { | 53 | { |
@@ -560,7 +563,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) | |||
560 | */ | 563 | */ |
561 | void __init mem_init(void) | 564 | void __init mem_init(void) |
562 | { | 565 | { |
563 | unsigned int codesize, datasize, initsize; | 566 | unsigned long reserved_pages, free_pages; |
564 | int i, node; | 567 | int i, node; |
565 | 568 | ||
566 | #ifndef CONFIG_DISCONTIGMEM | 569 | #ifndef CONFIG_DISCONTIGMEM |
@@ -596,6 +599,33 @@ void __init mem_init(void) | |||
596 | totalram_pages += totalhigh_pages; | 599 | totalram_pages += totalhigh_pages; |
597 | #endif | 600 | #endif |
598 | 601 | ||
602 | reserved_pages = free_pages = 0; | ||
603 | |||
604 | for_each_online_node(node) { | ||
605 | pg_data_t *n = NODE_DATA(node); | ||
606 | struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn; | ||
607 | |||
608 | for_each_nodebank(i, &meminfo, node) { | ||
609 | struct membank *bank = &meminfo.bank[i]; | ||
610 | unsigned int pfn1, pfn2; | ||
611 | struct page *page, *end; | ||
612 | |||
613 | pfn1 = bank_pfn_start(bank); | ||
614 | pfn2 = bank_pfn_end(bank); | ||
615 | |||
616 | page = map + pfn1; | ||
617 | end = map + pfn2; | ||
618 | |||
619 | do { | ||
620 | if (PageReserved(page)) | ||
621 | reserved_pages++; | ||
622 | else if (!page_count(page)) | ||
623 | free_pages++; | ||
624 | page++; | ||
625 | } while (page < end); | ||
626 | } | ||
627 | } | ||
628 | |||
599 | /* | 629 | /* |
600 | * Since our memory may not be contiguous, calculate the | 630 | * Since our memory may not be contiguous, calculate the |
601 | * real number of pages we have in this system | 631 | * real number of pages we have in this system |
@@ -608,16 +638,71 @@ void __init mem_init(void) | |||
608 | } | 638 | } |
609 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); | 639 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); |
610 | 640 | ||
611 | codesize = _etext - _text; | 641 | printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", |
612 | datasize = _end - _data; | 642 | nr_free_pages() << (PAGE_SHIFT-10), |
613 | initsize = __init_end - __init_begin; | 643 | free_pages << (PAGE_SHIFT-10), |
614 | 644 | reserved_pages << (PAGE_SHIFT-10), | |
615 | printk(KERN_NOTICE "Memory: %luKB available (%dK code, " | ||
616 | "%dK data, %dK init, %luK highmem)\n", | ||
617 | nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10, | ||
618 | datasize >> 10, initsize >> 10, | ||
619 | totalhigh_pages << (PAGE_SHIFT-10)); | 645 | totalhigh_pages << (PAGE_SHIFT-10)); |
620 | 646 | ||
647 | #define MLK(b, t) b, t, ((t) - (b)) >> 10 | ||
648 | #define MLM(b, t) b, t, ((t) - (b)) >> 20 | ||
649 | #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) | ||
650 | |||
651 | printk(KERN_NOTICE "Virtual kernel memory layout:\n" | ||
652 | " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
653 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
654 | #ifdef CONFIG_MMU | ||
655 | " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
656 | #endif | ||
657 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
658 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
659 | #ifdef CONFIG_HIGHMEM | ||
660 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
661 | #endif | ||
662 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
663 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
664 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
665 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n", | ||
666 | |||
667 | MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + | ||
668 | (PAGE_SIZE)), | ||
669 | MLK(FIXADDR_START, FIXADDR_TOP), | ||
670 | #ifdef CONFIG_MMU | ||
671 | MLM(CONSISTENT_BASE, CONSISTENT_END), | ||
672 | #endif | ||
673 | MLM(VMALLOC_START, VMALLOC_END), | ||
674 | MLM(PAGE_OFFSET, (unsigned long)high_memory), | ||
675 | #ifdef CONFIG_HIGHMEM | ||
676 | MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * | ||
677 | (PAGE_SIZE)), | ||
678 | #endif | ||
679 | MLM(MODULES_VADDR, MODULES_END), | ||
680 | |||
681 | MLK_ROUNDUP(__init_begin, __init_end), | ||
682 | MLK_ROUNDUP(_text, _etext), | ||
683 | MLK_ROUNDUP(_data, _edata)); | ||
684 | |||
685 | #undef MLK | ||
686 | #undef MLM | ||
687 | #undef MLK_ROUNDUP | ||
688 | |||
689 | /* | ||
690 | * Check boundaries twice: Some fundamental inconsistencies can | ||
691 | * be detected at build time already. | ||
692 | */ | ||
693 | #ifdef CONFIG_MMU | ||
694 | BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE); | ||
695 | BUG_ON(VMALLOC_END > CONSISTENT_BASE); | ||
696 | |||
697 | BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); | ||
698 | BUG_ON(TASK_SIZE > MODULES_VADDR); | ||
699 | #endif | ||
700 | |||
701 | #ifdef CONFIG_HIGHMEM | ||
702 | BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); | ||
703 | BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); | ||
704 | #endif | ||
705 | |||
621 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { | 706 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { |
622 | extern int sysctl_overcommit_memory; | 707 | extern int sysctl_overcommit_memory; |
623 | /* | 708 | /* |
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 0ab75c60f7cf..28c8b950ef04 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm) | |||
139 | * which requires the new ioremap'd region to be referenced, the CPU will | 139 | * which requires the new ioremap'd region to be referenced, the CPU will |
140 | * reference the _old_ region. | 140 | * reference the _old_ region. |
141 | * | 141 | * |
142 | * Note that get_vm_area() allocates a guard 4K page, so we need to mask | 142 | * Note that get_vm_area_caller() allocates a guard 4K page, so we need to |
143 | * the size back to 1MB aligned or we will overflow in the loop below. | 143 | * mask the size back to 1MB aligned or we will overflow in the loop below. |
144 | */ | 144 | */ |
145 | static void unmap_area_sections(unsigned long virt, unsigned long size) | 145 | static void unmap_area_sections(unsigned long virt, unsigned long size) |
146 | { | 146 | { |
@@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, | |||
254 | } | 254 | } |
255 | #endif | 255 | #endif |
256 | 256 | ||
257 | 257 | void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |
258 | /* | 258 | unsigned long offset, size_t size, unsigned int mtype, void *caller) |
259 | * Remap an arbitrary physical address space into the kernel virtual | ||
260 | * address space. Needed when the kernel wants to access high addresses | ||
261 | * directly. | ||
262 | * | ||
263 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
264 | * have to convert them into an offset in a page-aligned mapping, but the | ||
265 | * caller shouldn't need to know that small detail. | ||
266 | * | ||
267 | * 'flags' are the extra L_PTE_ flags that you want to specify for this | ||
268 | * mapping. See <asm/pgtable.h> for more information. | ||
269 | */ | ||
270 | void __iomem * | ||
271 | __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | ||
272 | unsigned int mtype) | ||
273 | { | 259 | { |
274 | const struct mem_type *type; | 260 | const struct mem_type *type; |
275 | int err; | 261 | int err; |
@@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | |||
291 | */ | 277 | */ |
292 | size = PAGE_ALIGN(offset + size); | 278 | size = PAGE_ALIGN(offset + size); |
293 | 279 | ||
294 | area = get_vm_area(size, VM_IOREMAP); | 280 | area = get_vm_area_caller(size, VM_IOREMAP, caller); |
295 | if (!area) | 281 | if (!area) |
296 | return NULL; | 282 | return NULL; |
297 | addr = (unsigned long)area->addr; | 283 | addr = (unsigned long)area->addr; |
@@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | |||
318 | flush_cache_vmap(addr, addr + size); | 304 | flush_cache_vmap(addr, addr + size); |
319 | return (void __iomem *) (offset + addr); | 305 | return (void __iomem *) (offset + addr); |
320 | } | 306 | } |
321 | EXPORT_SYMBOL(__arm_ioremap_pfn); | ||
322 | 307 | ||
323 | void __iomem * | 308 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, |
324 | __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | 309 | unsigned int mtype, void *caller) |
325 | { | 310 | { |
326 | unsigned long last_addr; | 311 | unsigned long last_addr; |
327 | unsigned long offset = phys_addr & ~PAGE_MASK; | 312 | unsigned long offset = phys_addr & ~PAGE_MASK; |
@@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | |||
334 | if (!size || last_addr < phys_addr) | 319 | if (!size || last_addr < phys_addr) |
335 | return NULL; | 320 | return NULL; |
336 | 321 | ||
337 | return __arm_ioremap_pfn(pfn, offset, size, mtype); | 322 | return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, |
323 | caller); | ||
324 | } | ||
325 | |||
326 | /* | ||
327 | * Remap an arbitrary physical address space into the kernel virtual | ||
328 | * address space. Needed when the kernel wants to access high addresses | ||
329 | * directly. | ||
330 | * | ||
331 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
332 | * have to convert them into an offset in a page-aligned mapping, but the | ||
333 | * caller shouldn't need to know that small detail. | ||
334 | */ | ||
335 | void __iomem * | ||
336 | __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | ||
337 | unsigned int mtype) | ||
338 | { | ||
339 | return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, | ||
340 | __builtin_return_address(0)); | ||
341 | } | ||
342 | EXPORT_SYMBOL(__arm_ioremap_pfn); | ||
343 | |||
344 | void __iomem * | ||
345 | __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | ||
346 | { | ||
347 | return __arm_ioremap_caller(phys_addr, size, mtype, | ||
348 | __builtin_return_address(0)); | ||
338 | } | 349 | } |
339 | EXPORT_SYMBOL(__arm_ioremap); | 350 | EXPORT_SYMBOL(__arm_ioremap); |
340 | 351 | ||
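The *_caller variants exist so that platform ioremap hooks can report the address of their own caller rather than the wrapper: get_vm_area_caller() stores that pointer, and it is what /proc/vmallocinfo prints for each mapping. A minimal sketch of a platform hook following the same pattern as the davinci/iop13xx/msm conversions in this series; the window addresses and the hook name are invented for illustration:

#include <linux/io.h>

/* Hypothetical fixed I/O window covered by a static mapping. */
#define EXAMPLE_IO_PHYS		0x01c00000UL
#define EXAMPLE_IO_SIZE		0x00400000UL
#define EXAMPLE_IO_VIRT		0xfec00000UL

void __iomem *example_ioremap(unsigned long p, size_t size, unsigned int mtype)
{
	/* Statically mapped region: translate directly, no vmalloc entry. */
	if (p >= EXAMPLE_IO_PHYS && p + size <= EXAMPLE_IO_PHYS + EXAMPLE_IO_SIZE)
		return (void __iomem *)(p - EXAMPLE_IO_PHYS + EXAMPLE_IO_VIRT);

	/* Dynamic mapping: attribute it to whoever called us. */
	return __arm_ioremap_caller(p, size, mtype,
				    __builtin_return_address(0));
}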
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 1708da82da96..88f5d71248d9 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -100,18 +100,17 @@ static struct cachepolicy cache_policies[] __initdata = { | |||
100 | * writebuffer to be turned off. (Note: the write | 100 | * writebuffer to be turned off. (Note: the write |
101 | * buffer should not be on and the cache off). | 101 | * buffer should not be on and the cache off). |
102 | */ | 102 | */ |
103 | static void __init early_cachepolicy(char **p) | 103 | static int __init early_cachepolicy(char *p) |
104 | { | 104 | { |
105 | int i; | 105 | int i; |
106 | 106 | ||
107 | for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { | 107 | for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { |
108 | int len = strlen(cache_policies[i].policy); | 108 | int len = strlen(cache_policies[i].policy); |
109 | 109 | ||
110 | if (memcmp(*p, cache_policies[i].policy, len) == 0) { | 110 | if (memcmp(p, cache_policies[i].policy, len) == 0) { |
111 | cachepolicy = i; | 111 | cachepolicy = i; |
112 | cr_alignment &= ~cache_policies[i].cr_mask; | 112 | cr_alignment &= ~cache_policies[i].cr_mask; |
113 | cr_no_alignment &= ~cache_policies[i].cr_mask; | 113 | cr_no_alignment &= ~cache_policies[i].cr_mask; |
114 | *p += len; | ||
115 | break; | 114 | break; |
116 | } | 115 | } |
117 | } | 116 | } |
@@ -130,36 +129,37 @@ static void __init early_cachepolicy(char **p) | |||
130 | } | 129 | } |
131 | flush_cache_all(); | 130 | flush_cache_all(); |
132 | set_cr(cr_alignment); | 131 | set_cr(cr_alignment); |
132 | return 0; | ||
133 | } | 133 | } |
134 | __early_param("cachepolicy=", early_cachepolicy); | 134 | early_param("cachepolicy", early_cachepolicy); |
135 | 135 | ||
136 | static void __init early_nocache(char **__unused) | 136 | static int __init early_nocache(char *__unused) |
137 | { | 137 | { |
138 | char *p = "buffered"; | 138 | char *p = "buffered"; |
139 | printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); | 139 | printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); |
140 | early_cachepolicy(&p); | 140 | early_cachepolicy(p); |
141 | return 0; | ||
141 | } | 142 | } |
142 | __early_param("nocache", early_nocache); | 143 | early_param("nocache", early_nocache); |
143 | 144 | ||
144 | static void __init early_nowrite(char **__unused) | 145 | static int __init early_nowrite(char *__unused) |
145 | { | 146 | { |
146 | char *p = "uncached"; | 147 | char *p = "uncached"; |
147 | printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); | 148 | printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); |
148 | early_cachepolicy(&p); | 149 | early_cachepolicy(p); |
150 | return 0; | ||
149 | } | 151 | } |
150 | __early_param("nowb", early_nowrite); | 152 | early_param("nowb", early_nowrite); |
151 | 153 | ||
152 | static void __init early_ecc(char **p) | 154 | static int __init early_ecc(char *p) |
153 | { | 155 | { |
154 | if (memcmp(*p, "on", 2) == 0) { | 156 | if (memcmp(p, "on", 2) == 0) |
155 | ecc_mask = PMD_PROTECTION; | 157 | ecc_mask = PMD_PROTECTION; |
156 | *p += 2; | 158 | else if (memcmp(p, "off", 3) == 0) |
157 | } else if (memcmp(*p, "off", 3) == 0) { | ||
158 | ecc_mask = 0; | 159 | ecc_mask = 0; |
159 | *p += 3; | 160 | return 0; |
160 | } | ||
161 | } | 161 | } |
162 | __early_param("ecc=", early_ecc); | 162 | early_param("ecc", early_ecc); |
163 | 163 | ||
164 | static int __init noalign_setup(char *__unused) | 164 | static int __init noalign_setup(char *__unused) |
165 | { | 165 | { |
@@ -670,9 +670,9 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M; | |||
670 | * bytes. This can be used to increase (or decrease) the vmalloc | 670 | * bytes. This can be used to increase (or decrease) the vmalloc |
671 | * area - the default is 128m. | 671 | * area - the default is 128m. |
672 | */ | 672 | */ |
673 | static void __init early_vmalloc(char **arg) | 673 | static int __init early_vmalloc(char *arg) |
674 | { | 674 | { |
675 | vmalloc_reserve = memparse(*arg, arg); | 675 | vmalloc_reserve = memparse(arg, NULL); |
676 | 676 | ||
677 | if (vmalloc_reserve < SZ_16M) { | 677 | if (vmalloc_reserve < SZ_16M) { |
678 | vmalloc_reserve = SZ_16M; | 678 | vmalloc_reserve = SZ_16M; |
@@ -687,8 +687,9 @@ static void __init early_vmalloc(char **arg) | |||
687 | "vmalloc area is too big, limiting to %luMB\n", | 687 | "vmalloc area is too big, limiting to %luMB\n", |
688 | vmalloc_reserve >> 20); | 688 | vmalloc_reserve >> 20); |
689 | } | 689 | } |
690 | return 0; | ||
690 | } | 691 | } |
691 | __early_param("vmalloc=", early_vmalloc); | 692 | early_param("vmalloc", early_vmalloc); |
692 | 693 | ||
693 | #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) | 694 | #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) |
694 | 695 | ||
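The hunks above convert the old ARM-private __early_param() handlers, which took a char ** and advanced it past the consumed value, to the generic early_param() convention: the handler receives the value string directly and returns an int status. A minimal sketch of the new-style handler, using a made-up "myopt=" option purely for illustration:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>

static unsigned long myopt_size __initdata;

/* Hypothetical "myopt=" handler sketching the early_param() convention
 * used above: 'p' points at the option value, and a non-zero return makes
 * the core warn about a malformed early option. */
static int __init early_myopt(char *p)
{
	if (!p)
		return -EINVAL;
	myopt_size = memparse(p, NULL);
	return 0;
}
early_param("myopt", early_myopt);

Note how early_vmalloc above follows exactly this shape: memparse() is given NULL instead of the old retptr, since the handler no longer needs to advance the command-line cursor itself.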
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 374a8311bc84..9bfeb6b9509a 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -74,6 +74,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, | |||
74 | } | 74 | } |
75 | EXPORT_SYMBOL(__arm_ioremap_pfn); | 75 | EXPORT_SYMBOL(__arm_ioremap_pfn); |
76 | 76 | ||
77 | void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, | ||
78 | size_t size, unsigned int mtype, void *caller) | ||
79 | { | ||
80 | return __arm_ioremap_pfn(pfn, offset, size, mtype); | ||
81 | } | ||
82 | |||
77 | void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, | 83 | void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, |
78 | unsigned int mtype) | 84 | unsigned int mtype) |
79 | { | 85 | { |
@@ -81,6 +87,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, | |||
81 | } | 87 | } |
82 | EXPORT_SYMBOL(__arm_ioremap); | 88 | EXPORT_SYMBOL(__arm_ioremap); |
83 | 89 | ||
90 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, | ||
91 | unsigned int mtype, void *caller) | ||
92 | { | ||
93 | return __arm_ioremap(phys_addr, size, mtype); | ||
94 | } | ||
95 | |||
84 | void __iounmap(volatile void __iomem *addr) | 96 | void __iounmap(volatile void __iomem *addr) |
85 | { | 97 | { |
86 | } | 98 | } |
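The nommu stubs above accept (and ignore) the new caller argument so that !MMU builds keep linking against the caller-tracking API while still falling back to the plain mappings. A hedged sketch of how a physical address splits into the pfn/offset pair the _pfn variant expects, assuming the prototypes this series adds to asm/io.h; map_phys() is a hypothetical helper, not part of the patch:

#include <linux/io.h>
#include <asm/page.h>

/* Hypothetical helper: split a physical address into pfn + page offset and
 * record the immediate caller. On nommu kernels the caller argument is
 * simply dropped by the stubs above. */
static void __iomem *map_phys(unsigned long phys, size_t size,
			      unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(phys >> PAGE_SHIFT,
					phys & ~PAGE_MASK, size, mtype,
					__builtin_return_address(0));
}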
diff --git a/arch/arm/plat-iop/io.c b/arch/arm/plat-iop/io.c index ed0bbece0d61..e15bc17db90b 100644 --- a/arch/arm/plat-iop/io.c +++ b/arch/arm/plat-iop/io.c | |||
@@ -34,7 +34,8 @@ void * __iomem __iop3xx_ioremap(unsigned long cookie, size_t size, | |||
34 | retval = (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie); | 34 | retval = (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie); |
35 | break; | 35 | break; |
36 | default: | 36 | default: |
37 | retval = __arm_ioremap(cookie, size, mtype); | 37 | retval = __arm_ioremap_caller(cookie, size, mtype, |
38 | __builtin_return_address(0)); | ||
38 | } | 39 | } |
39 | 40 | ||
40 | return retval; | 41 | return retval; |
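The same one-line conversion appears in plat-omap/io.c further down: the platform wrapper forwards __builtin_return_address(0) so the mapping is attributed to its real caller rather than to the wrapper itself. A minimal sketch of that call-site pattern, with wrapped_ioremap() as a made-up name:

#include <linux/io.h>

/* Made-up wrapper showing the pattern used in this hunk: pass the
 * immediate return address down so debugging tools can attribute the
 * mapping to the driver that asked for it, not to this helper. */
static void __iomem *wrapped_ioremap(unsigned long phys, size_t size,
				     unsigned int mtype)
{
	return __arm_ioremap_caller(phys, size, mtype,
				    __builtin_return_address(0));
}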
diff --git a/arch/arm/plat-mxc/include/mach/vmalloc.h b/arch/arm/plat-mxc/include/mach/vmalloc.h index 62d97623412f..44243a278434 100644 --- a/arch/arm/plat-mxc/include/mach/vmalloc.h +++ b/arch/arm/plat-mxc/include/mach/vmalloc.h | |||
@@ -21,6 +21,6 @@ | |||
21 | #define __ASM_ARCH_MXC_VMALLOC_H__ | 21 | #define __ASM_ARCH_MXC_VMALLOC_H__ |
22 | 22 | ||
23 | /* vmalloc ending address */ | 23 | /* vmalloc ending address */ |
24 | #define VMALLOC_END 0xF4000000 | 24 | #define VMALLOC_END 0xf4000000UL |
25 | 25 | ||
26 | #endif /* __ASM_ARCH_MXC_VMALLOC_H__ */ | 26 | #endif /* __ASM_ARCH_MXC_VMALLOC_H__ */ |
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index e2ea04a4c8a1..2e3eec660864 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig | |||
@@ -22,6 +22,7 @@ config ARCH_OMAP3 | |||
22 | bool "TI OMAP3" | 22 | bool "TI OMAP3" |
23 | select CPU_V7 | 23 | select CPU_V7 |
24 | select COMMON_CLKDEV | 24 | select COMMON_CLKDEV |
25 | select ARM_L1_CACHE_SHIFT_6 | ||
25 | 26 | ||
26 | config ARCH_OMAP4 | 27 | config ARCH_OMAP4 |
27 | bool "TI OMAP4" | 28 | bool "TI OMAP4" |
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h index ef870de43c29..c7d628ecb467 100644 --- a/arch/arm/plat-omap/include/plat/omap44xx.h +++ b/arch/arm/plat-omap/include/plat/omap44xx.h | |||
@@ -40,6 +40,7 @@ | |||
40 | #define OMAP44XX_GIC_CPU_BASE 0x48240100 | 40 | #define OMAP44XX_GIC_CPU_BASE 0x48240100 |
41 | #define OMAP44XX_SCU_BASE 0x48240000 | 41 | #define OMAP44XX_SCU_BASE 0x48240000 |
42 | #define OMAP44XX_LOCAL_TWD_BASE 0x48240600 | 42 | #define OMAP44XX_LOCAL_TWD_BASE 0x48240600 |
43 | #define OMAP44XX_L2CACHE_BASE 0x48242000 | ||
43 | #define OMAP44XX_WKUPGEN_BASE 0x48281000 | 44 | #define OMAP44XX_WKUPGEN_BASE 0x48281000 |
44 | 45 | ||
45 | #define OMAP44XX_MAILBOX_BASE (L4_44XX_BASE + 0xF4000) | 46 | #define OMAP44XX_MAILBOX_BASE (L4_44XX_BASE + 0xF4000) |
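OMAP44XX_L2CACHE_BASE gives the physical address of the OMAP4 PL310 controller, to be mapped and handed to the common l2x0 code. Hedged sketch only; the auxiliary-control value and mask below are placeholders, not whatever the OMAP4 port actually programs:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/sizes.h>
#include <plat/omap44xx.h>

/* Placeholder init sketch: map the new OMAP44XX_L2CACHE_BASE and register
 * it with the generic l2x0 driver. The aux value/mask here are dummies. */
static int __init omap4_l2cache_init(void)
{
	void __iomem *l2_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);

	if (!l2_base)
		return -ENOMEM;

	l2x0_init(l2_base, 0x0, ~0x0);
	return 0;
}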
diff --git a/arch/arm/plat-omap/io.c b/arch/arm/plat-omap/io.c index 0cfd54f519c4..4cbd4fb3232c 100644 --- a/arch/arm/plat-omap/io.c +++ b/arch/arm/plat-omap/io.c | |||
@@ -128,7 +128,7 @@ void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type) | |||
128 | return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT); | 128 | return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT); |
129 | } | 129 | } |
130 | #endif | 130 | #endif |
131 | return __arm_ioremap(p, size, type); | 131 | return __arm_ioremap_caller(p, size, type, __builtin_return_address(0)); |
132 | } | 132 | } |
133 | EXPORT_SYMBOL(omap_ioremap); | 133 | EXPORT_SYMBOL(omap_ioremap); |
134 | 134 | ||
diff --git a/arch/arm/plat-s3c/include/mach/vmalloc.h b/arch/arm/plat-s3c/include/mach/vmalloc.h index bfd2ca6e3074..299d95f365c9 100644 --- a/arch/arm/plat-s3c/include/mach/vmalloc.h +++ b/arch/arm/plat-s3c/include/mach/vmalloc.h | |||
@@ -15,6 +15,6 @@ | |||
15 | #ifndef __ASM_ARCH_VMALLOC_H | 15 | #ifndef __ASM_ARCH_VMALLOC_H |
16 | #define __ASM_ARCH_VMALLOC_H | 16 | #define __ASM_ARCH_VMALLOC_H |
17 | 17 | ||
18 | #define VMALLOC_END (0xE0000000) | 18 | #define VMALLOC_END (0xe0000000UL) |
19 | 19 | ||
20 | #endif /* __ASM_ARCH_VMALLOC_H */ | 20 | #endif /* __ASM_ARCH_VMALLOC_H */ |
diff --git a/arch/arm/plat-stmp3xxx/include/mach/vmalloc.h b/arch/arm/plat-stmp3xxx/include/mach/vmalloc.h index 541b880c1863..943c1a29d641 100644 --- a/arch/arm/plat-stmp3xxx/include/mach/vmalloc.h +++ b/arch/arm/plat-stmp3xxx/include/mach/vmalloc.h | |||
@@ -9,4 +9,4 @@ | |||
9 | * http://www.opensource.org/licenses/gpl-license.html | 9 | * http://www.opensource.org/licenses/gpl-license.html |
10 | * http://www.gnu.org/copyleft/gpl.html | 10 | * http://www.gnu.org/copyleft/gpl.html |
11 | */ | 11 | */ |
12 | #define VMALLOC_END (0xF0000000) | 12 | #define VMALLOC_END 0xf0000000UL |
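The VMALLOC_END tweaks above (mxc, s3c and stmp3xxx) only add a UL suffix, but it matters: without it the constant is an unsigned int on 32-bit builds, which trips -Wformat when printed with %lx and forces implicit conversions in unsigned long comparisons. A tiny hosted illustration, not kernel code:

#include <stdio.h>

#define VMALLOC_END_OLD	(0xf0000000)	/* unsigned int on 32-bit targets */
#define VMALLOC_END_NEW	(0xf0000000UL)	/* always unsigned long */

int main(void)
{
	/* With the old definition, "%lx" mismatches the argument type on a
	 * 32-bit build and gcc -Wformat warns; the UL form is clean. */
	printf("vmalloc ends at 0x%lx\n", VMALLOC_END_NEW);
	return 0;
}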
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index f60a5400a25b..def19f83d812 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c | |||
@@ -430,7 +430,11 @@ static inline void vfp_pm_init(void) { } | |||
430 | * saved one. This function is used by the ptrace mechanism. | 430 | * saved one. This function is used by the ptrace mechanism. |
431 | */ | 431 | */ |
432 | #ifdef CONFIG_SMP | 432 | #ifdef CONFIG_SMP |
433 | void vfp_sync_state(struct thread_info *thread) | 433 | void vfp_sync_hwstate(struct thread_info *thread) |
434 | { | ||
435 | } | ||
436 | |||
437 | void vfp_flush_hwstate(struct thread_info *thread) | ||
434 | { | 438 | { |
435 | /* | 439 | /* |
436 | * On SMP systems, the VFP state is automatically saved at every | 440 | * On SMP systems, the VFP state is automatically saved at every |
@@ -441,35 +445,48 @@ void vfp_sync_state(struct thread_info *thread) | |||
441 | thread->vfpstate.hard.cpu = NR_CPUS; | 445 | thread->vfpstate.hard.cpu = NR_CPUS; |
442 | } | 446 | } |
443 | #else | 447 | #else |
444 | void vfp_sync_state(struct thread_info *thread) | 448 | void vfp_sync_hwstate(struct thread_info *thread) |
445 | { | 449 | { |
446 | unsigned int cpu = get_cpu(); | 450 | unsigned int cpu = get_cpu(); |
447 | u32 fpexc = fmrx(FPEXC); | ||
448 | 451 | ||
449 | /* | 452 | /* |
450 | * If VFP is enabled, the previous state was already saved and | 453 | * If the thread we're interested in is the current owner of the |
451 | * last_VFP_context updated. | 454 | * hardware VFP state, then we need to save its state. |
452 | */ | 455 | */ |
453 | if (fpexc & FPEXC_EN) | 456 | if (last_VFP_context[cpu] == &thread->vfpstate) { |
454 | goto out; | 457 | u32 fpexc = fmrx(FPEXC); |
455 | 458 | ||
456 | if (!last_VFP_context[cpu]) | 459 | /* |
457 | goto out; | 460 | * Save the last VFP state on this CPU. |
461 | */ | ||
462 | fmxr(FPEXC, fpexc | FPEXC_EN); | ||
463 | vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN); | ||
464 | fmxr(FPEXC, fpexc); | ||
465 | } | ||
458 | 466 | ||
459 | /* | 467 | put_cpu(); |
460 | * Save the last VFP state on this CPU. | 468 | } |
461 | */ | 469 | |
462 | fmxr(FPEXC, fpexc | FPEXC_EN); | 470 | void vfp_flush_hwstate(struct thread_info *thread) |
463 | vfp_save_state(last_VFP_context[cpu], fpexc); | 471 | { |
464 | fmxr(FPEXC, fpexc); | 472 | unsigned int cpu = get_cpu(); |
465 | 473 | ||
466 | /* | 474 | /* |
467 | * Set the context to NULL to force a reload the next time the thread | 475 | * If the thread we're interested in is the current owner of the |
468 | * uses the VFP. | 476 | * hardware VFP state, then we need to save its state. |
469 | */ | 477 | */ |
470 | last_VFP_context[cpu] = NULL; | 478 | if (last_VFP_context[cpu] == &thread->vfpstate) { |
479 | u32 fpexc = fmrx(FPEXC); | ||
480 | |||
481 | fmxr(FPEXC, fpexc & ~FPEXC_EN); | ||
482 | |||
483 | /* | ||
484 | * Set the context to NULL to force a reload the next time | ||
485 | * the thread uses the VFP. | ||
486 | */ | ||
487 | last_VFP_context[cpu] = NULL; | ||
488 | } | ||
471 | 489 | ||
472 | out: | ||
473 | put_cpu(); | 490 | put_cpu(); |
474 | } | 491 | } |
475 | #endif | 492 | #endif |
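The old vfp_sync_state() entry point is split in two: vfp_sync_hwstate() flushes live hardware registers into the saved thread state, and vfp_flush_hwstate() invalidates the hardware copy so the thread reloads the (possibly modified) saved state on its next VFP use. A hedged sketch of how a ptrace-style reader/writer would pair them, assuming the prototypes this series exports; inspect_vfp() is illustrative only:

#include <linux/sched.h>
#include <asm/thread_info.h>

/* Illustrative only: bracket access to a traced task's VFP registers with
 * the new pair of calls introduced above. */
static void inspect_vfp(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);

	vfp_sync_hwstate(ti);		/* pull live FPU state into memory */
	/* ... read or modify ti->vfpstate here ... */
	vfp_flush_hwstate(ti);		/* force a reload of the saved copy */
}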