Diffstat (limited to 'arch')
 arch/arm/Kconfig                                 |  56
 arch/arm/Makefile                                |   7
 arch/arm/boot/Makefile                           |   4
 arch/arm/boot/compressed/Makefile                |   4
 arch/arm/boot/compressed/head.S                  | 249
 arch/arm/boot/compressed/vmlinux.lds.in          |   3
 arch/arm/common/gic.c                            |  72
 arch/arm/include/asm/a.out-core.h                |   6
 arch/arm/include/asm/cputype.h                   |   3
 arch/arm/include/asm/hardware/gic.h              |   1
 arch/arm/include/asm/highmem.h                   |  29
 arch/arm/include/asm/mach/irq.h                  |  31
 arch/arm/include/asm/outercache.h                |   1
 arch/arm/include/asm/processor.h                 |  12
 arch/arm/include/asm/ptrace.h                    |   2
 arch/arm/include/asm/traps.h                     |   1
 arch/arm/include/asm/user.h                      |   2
 arch/arm/kernel/bios32.c                         |   5
 arch/arm/kernel/head-common.S                    |  90
 arch/arm/kernel/head-nommu.S                     |   3
 arch/arm/kernel/head.S                           |   9
 arch/arm/kernel/irq.c                            |  50
 arch/arm/kernel/module.c                         |  27
 arch/arm/kernel/ptrace.c                         | 383
 arch/arm/kernel/ptrace.h                         |  37
 arch/arm/kernel/return_address.c                 |   1
 arch/arm/kernel/setup.c                          |  39
 arch/arm/kernel/signal.c                         |   9
 arch/arm/kernel/traps.c                          |   4
 arch/arm/mach-footbridge/include/mach/hardware.h |  21
 arch/arm/mach-footbridge/include/mach/io.h       |  10
 arch/arm/mach-omap2/Kconfig                      |   1
 arch/arm/mach-omap2/omap4-common.c               |   7
 arch/arm/mm/Kconfig                              |   5
 arch/arm/mm/cache-l2x0.c                         |  32
 arch/arm/mm/mmu.c                                |  10
 arch/arm/mm/vmregion.c                           |  17
 arch/arm/vfp/vfpmodule.c                         |   9
 38 files changed, 461 insertions(+), 791 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 38bf684448e7..1fd3f280b584 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -63,6 +63,10 @@ config GENERIC_CLOCKEVENTS_BROADCAST
 	depends on GENERIC_CLOCKEVENTS
 	default y if SMP
 
+config KTIME_SCALAR
+	bool
+	default y
+
 config HAVE_TCM
 	bool
 	select GENERIC_ALLOCATOR
@@ -178,11 +182,6 @@ config FIQ
 config ARCH_MTD_XIP
 	bool
 
-config ARM_L1_CACHE_SHIFT_6
-	bool
-	help
-	  Setting ARM L1 cache line size to 64 Bytes.
-
 config VECTORS_BASE
 	hex
 	default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@@ -1152,7 +1151,7 @@ config ARM_ERRATA_742231
 
 config PL310_ERRATA_588369
 	bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
-	depends on CACHE_L2X0 && ARCH_OMAP4
+	depends on CACHE_L2X0
 	help
 	  The PL310 L2 cache controller implements three types of Clean &
 	  Invalidate maintenance operations: by Physical Address
@@ -1161,8 +1160,7 @@ config PL310_ERRATA_588369
 	  clean operation followed immediately by an invalidate operation,
 	  both performing to the same memory location. This functionality
 	  is not correctly implemented in PL310 as clean lines are not
-	  invalidated as a result of these operations. Note that this errata
-	  uses Texas Instrument's secure monitor api.
+	  invalidated as a result of these operations.
 
 config ARM_ERRATA_720789
 	bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
@@ -1176,6 +1174,17 @@ config ARM_ERRATA_720789
 	  tables. The workaround changes the TLB flushing routines to invalidate
 	  entries regardless of the ASID.
 
+config PL310_ERRATA_727915
+	bool "Background Clean & Invalidate by Way operation can cause data corruption"
+	depends on CACHE_L2X0
+	help
+	  PL310 implements the Clean & Invalidate by Way L2 cache maintenance
+	  operation (offset 0x7FC). This operation runs in background so that
+	  PL310 can handle normal accesses while it is in progress. Under very
+	  rare circumstances, due to this erratum, write data can be lost when
+	  PL310 treats a cacheable write transaction during a Clean &
+	  Invalidate by Way operation.
+
 config ARM_ERRATA_743622
 	bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
 	depends on CPU_V7
1181 depends on CPU_V7 1190 depends on CPU_V7
@@ -1430,6 +1439,37 @@ config THUMB2_KERNEL
 
 	  If unsure, say N.
 
+config THUMB2_AVOID_R_ARM_THM_JUMP11
+	bool "Work around buggy Thumb-2 short branch relocations in gas"
+	depends on THUMB2_KERNEL && MODULES
+	default y
+	help
+	  Various binutils versions can resolve Thumb-2 branches to
+	  locally-defined, preemptible global symbols as short-range "b.n"
+	  branch instructions.
+
+	  This is a problem, because there's no guarantee the final
+	  destination of the symbol, or any candidate locations for a
+	  trampoline, are within range of the branch. For this reason, the
+	  kernel does not support fixing up the R_ARM_THM_JUMP11 (102)
+	  relocation in modules at all, and it makes little sense to add
+	  support.
+
+	  The symptom is that the kernel fails with an "unsupported
+	  relocation" error when loading some modules.
+
+	  Until fixed tools are available, passing
+	  -fno-optimize-sibling-calls to gcc should prevent gcc generating
+	  code which hits this problem, at the cost of a bit of extra runtime
+	  stack usage in some cases.
+
+	  The problem is described in more detail at:
+	      https://bugs.launchpad.net/binutils-linaro/+bug/725126
+
+	  Only Thumb-2 kernels are affected.
+
+	  Unless you are sure your tools don't have this problem, say Y.
+
 config ARM_ASM_UNIFIED
 	bool
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index cd56c9129a1a..55eca9ed7604 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -105,6 +105,10 @@ AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mau
 AFLAGS_NOWARN	:=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
 CFLAGS_THUMB2	:=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
 AFLAGS_THUMB2	:=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
+# Work around buggy relocation from gas if requested:
+ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
+CFLAGS_MODULE	+=-fno-optimize-sibling-calls
+endif
 endif
 
 # Need -Uarm for gcc < 3.x
@@ -281,7 +285,7 @@ bzImage: zImage
 zImage Image xipImage bootpImage uImage: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-zinstall install: vmlinux
+zinstall uinstall install: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
@@ -302,6 +306,7 @@ define archhelp
 	echo  '                  (supply initrd image via make variable INITRD=<path>)'
 	echo  '  install       - Install uncompressed kernel'
 	echo  '  zinstall      - Install compressed kernel'
+	echo  '  uinstall      - Install U-Boot wrapped compressed kernel'
 	echo  '                  Install using (your) ~/bin/$(INSTALLKERNEL) or'
 	echo  '                  (distribution) /sbin/$(INSTALLKERNEL) or'
 	echo  '                  install to $$(INSTALL_PATH) and run lilo'
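
For context: the pattern CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11 guards against is an ordinary C tail call. A minimal sketch (hypothetical module code, not part of this patch): with -foptimize-sibling-calls in effect, as it is by default at -O2, gcc may compile the call below to a bare branch, which affected gas versions can resolve to a short-range "b.n" carrying the unsupported R_ARM_THM_JUMP11 relocation.

/* Hypothetical module code illustrating a sibling (tail) call. */
extern int helper(int x);

int entry_point(int x)
{
	/*
	 * Tail call: gcc may emit this as a single branch to helper().
	 * With -fno-optimize-sibling-calls it becomes a full call and
	 * return, avoiding the short-branch relocation at a small
	 * runtime stack cost.
	 */
	return helper(x + 1);
}
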
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 4d26f2c52a75..9128fddf1109 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -99,6 +99,10 @@ zinstall: $(obj)/zImage
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
 
+uinstall: $(obj)/uImage
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(obj)/uImage System.map "$(INSTALL_PATH)"
+
 zi:
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 5f3a1614dc63..f9f77c65dff3 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -98,9 +98,11 @@ endif
 EXTRA_CFLAGS  := -fpic -fno-builtin
 EXTRA_AFLAGS  := -Wa,-march=all
 
+# Provide size of uncompressed kernel to the decompressor via a linker symbol.
+LDFLAGS_vmlinux = --defsym _image_size=$(shell stat -c "%s" $(obj)/../Image)
 # Supply ZRELADDR to the decompressor via a linker symbol.
 ifneq ($(CONFIG_AUTO_ZRELADDR),y)
-LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
+LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR)
 endif
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux += --be8
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 7193884ed8b0..39859216af00 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -128,14 +128,14 @@ wait: mrc p14, 0, pc, c0, c1, 0
 		.arm				@ Always enter in ARM state
 start:
 		.type	start,#function
-THUMB(		adr	r12, BSYM(1f)	)
-THUMB(		bx	r12		)
-THUMB(		.rept	6		)
-ARM(		.rept	8		)
+		.rept	7
 		mov	r0, r0
 		.endr
+ARM(		mov	r0, r0		)
+ARM(		b	1f		)
+THUMB(		adr	r12, BSYM(1f)	)
+THUMB(		bx	r12		)
 
-		b	1f
 		.word	0x016f2818		@ Magic numbers to help the loader
 		.word	start			@ absolute load/run zImage address
 		.word	_edata			@ zImage end address
@@ -174,9 +174,7 @@ not_angel:
 	 */
 
 		.text
-		adr	r0, LC0
-		ldmia	r0, {r1, r2, r3, r5, r6, r11, ip}
-		ldr	sp, [r0, #28]
+
 #ifdef CONFIG_AUTO_ZRELADDR
 		@ determine final kernel image address
 		mov	r4, pc
@@ -185,35 +183,108 @@ not_angel:
 #else
 		ldr	r4, =zreladdr
 #endif
-		subs	r0, r0, r1		@ calculate the delta offset
 
-		@ if delta is zero, we are
-		beq	not_relocated		@ running at the address we
-		@ were linked at.
+		bl	cache_on
+
+restart:	adr	r0, LC0
+		ldmia	r0, {r1, r2, r3, r5, r6, r9, r11, r12}
+		ldr	sp, [r0, #32]
+
+		/*
+		 * We might be running at a different address.  We need
+		 * to fix up various pointers.
+		 */
+		sub	r0, r0, r1		@ calculate the delta offset
+		add	r5, r5, r0		@ _start
+		add	r6, r6, r0		@ _edata
 
+#ifndef CONFIG_ZBOOT_ROM
+		/* malloc space is above the relocated stack (64k max) */
+		add	sp, sp, r0
+		add	r10, sp, #0x10000
+#else
 		/*
-		 * We're running at a different address.  We need to fix
-		 * up various pointers:
-		 *   r5 - zImage base address (_start)
-		 *   r6 - size of decompressed image
-		 *   r11 - GOT start
-		 *   ip - GOT end
+		 * With ZBOOT_ROM the bss/stack is non relocatable,
+		 * but someone could still run this code from RAM,
+		 * in which case our reference is _edata.
 		 */
-		add	r5, r5, r0
+		mov	r10, r6
+#endif
+
+/*
+ * Check to see if we will overwrite ourselves.
+ *   r4  = final kernel address
+ *   r5  = start of this image
+ *   r9  = size of decompressed image
+ *   r10 = end of this image, including bss/stack/malloc space if non XIP
+ * We basically want:
+ *   r4 >= r10 -> OK
+ *   r4 + image length <= r5 -> OK
+ */
+		cmp	r4, r10
+		bhs	wont_overwrite
+		add	r10, r4, r9
+		cmp	r10, r5
+		bls	wont_overwrite
+
+/*
+ * Relocate ourselves past the end of the decompressed kernel.
+ *   r5  = start of this image
+ *   r6  = _edata
+ *   r10 = end of the decompressed kernel
+ * Because we always copy ahead, we need to do it from the end and go
+ * backward in case the source and destination overlap.
+ */
+		/* Round up to next 256-byte boundary. */
+		add	r10, r10, #256
+		bic	r10, r10, #255
+
+		sub	r9, r6, r5		@ size to copy
+		add	r9, r9, #31		@ rounded up to a multiple
+		bic	r9, r9, #31		@ ... of 32 bytes
+		add	r6, r9, r5
+		add	r9, r9, r10
+
+1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
+		cmp	r6, r5
+		stmdb	r9!, {r0 - r3, r10 - r12, lr}
+		bhi	1b
+
+		/* Preserve offset to relocated code. */
+		sub	r6, r9, r6
+
+		bl	cache_clean_flush
+
+		adr	r0, BSYM(restart)
+		add	r0, r0, r6
+		mov	pc, r0
+
+wont_overwrite:
+/*
+ * If delta is zero, we are running at the address we were linked at.
+ *   r0  = delta
+ *   r2  = BSS start
+ *   r3  = BSS end
+ *   r4  = kernel execution address
+ *   r7  = architecture ID
+ *   r8  = atags pointer
+ *   r11 = GOT start
+ *   r12 = GOT end
+ *   sp  = stack pointer
+ */
+		teq	r0, #0
+		beq	not_relocated
 		add	r11, r11, r0
-		add	ip, ip, r0
+		add	r12, r12, r0
 
 #ifndef CONFIG_ZBOOT_ROM
 		/*
 		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
 		 * we need to fix up pointers into the BSS region.
-		 *   r2 - BSS start
-		 *   r3 - BSS end
-		 *   sp - stack pointer
+		 * Note that the stack pointer has already been fixed up.
 		 */
 		add	r2, r2, r0
 		add	r3, r3, r0
-		add	sp, sp, r0
 
 		/*
 		 * Relocate all entries in the GOT table.
@@ -221,7 +292,7 @@ not_angel:
 1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
 		add	r1, r1, r0		@ table. This fixes up the
 		str	r1, [r11], #4		@ C references.
-		cmp	r11, ip
+		cmp	r11, r12
 		blo	1b
 #else
 
@@ -234,7 +305,7 @@ not_angel:
 		cmphs	r3, r1			@ _end < entry
 		addlo	r1, r1, r0		@ table. This fixes up the
 		str	r1, [r11], #4		@ C references.
-		cmp	r11, ip
+		cmp	r11, r12
 		blo	1b
 #endif
 
@@ -246,76 +317,24 @@ not_relocated: mov r0, #0
 		cmp	r2, r3
 		blo	1b
 
-/*
- * The C runtime environment should now be setup
- * sufficiently.  Turn the cache on, set up some
- * pointers, and start decompressing.
- */
-		bl	cache_on
-
-		mov	r1, sp			@ malloc space above stack
-		add	r2, sp, #0x10000	@ 64k max
-
 /*
- * Check to see if we will overwrite ourselves.
- *   r4 = final kernel address
- *   r5 = start of this image
- *   r6 = size of decompressed image
- *   r2 = end of malloc space (and therefore this image)
- * We basically want:
- *   r4 >= r2 -> OK
- *   r4 + image length <= r5 -> OK
+ * The C runtime environment should now be setup sufficiently.
+ * Set up some pointers, and start decompressing.
+ *   r4 = kernel execution address
+ *   r7 = architecture ID
+ *   r8 = atags pointer
  */
-		cmp	r4, r2
-		bhs	wont_overwrite
-		add	r0, r4, r6
-		cmp	r0, r5
-		bls	wont_overwrite
-
-		mov	r5, r2			@ decompress after malloc space
-		mov	r0, r5
+		mov	r0, r4
+		mov	r1, sp			@ malloc space above stack
+		add	r2, sp, #0x10000	@ 64k max
 		mov	r3, r7
 		bl	decompress_kernel
-
-		add	r0, r0, #127 + 128	@ alignment + stack
-		bic	r0, r0, #127		@ align the kernel length
-/*
- * r0     = decompressed kernel length
- * r1-r3  = unused
- * r4     = kernel execution address
- * r5     = decompressed kernel start
- * r7     = architecture ID
- * r8     = atags pointer
- * r9-r12,r14 = corrupted
- */
-		add	r1, r5, r0		@ end of decompressed kernel
-		adr	r2, reloc_start
-		ldr	r3, LC1
-		add	r3, r2, r3
-1:		ldmia	r2!, {r9 - r12, r14}	@ copy relocation code
-		stmia	r1!, {r9 - r12, r14}
-		ldmia	r2!, {r9 - r12, r14}
-		stmia	r1!, {r9 - r12, r14}
-		cmp	r2, r3
-		blo	1b
-		mov	sp, r1
-		add	sp, sp, #128		@ relocate the stack
-
 		bl	cache_clean_flush
-ARM(		add	pc, r5, r0		) @ call relocation code
-THUMB(		add	r12, r5, r0		)
-THUMB(		mov	pc, r12			) @ call relocation code
-
-/*
- * We're not in danger of overwriting ourselves.  Do this the simple way.
- *
- * r4     = kernel execution address
- * r7     = architecture ID
- */
-wont_overwrite:	mov	r0, r4
-		mov	r3, r7
-		bl	decompress_kernel
-		b	call_kernel
+		bl	cache_off
+		mov	r0, #0			@ must be zero
+		mov	r1, r7			@ restore architecture number
+		mov	r2, r8			@ restore atags pointer
+		mov	pc, r4			@ call kernel
 
 		.align	2
 		.type	LC0, #object
@@ -323,11 +342,11 @@ LC0: .word LC0 @ r1
 		.word	__bss_start		@ r2
 		.word	_end			@ r3
 		.word	_start			@ r5
-		.word	_image_size		@ r6
+		.word	_edata			@ r6
+		.word	_image_size		@ r9
 		.word	_got_start		@ r11
 		.word	_got_end		@ ip
 		.word	user_stack_end		@ sp
-LC1:		.word	reloc_end - reloc_start
 		.size	LC0, . - LC0
 
 #ifdef CONFIG_ARCH_RPC
@@ -353,7 +372,7 @@ params: ldr r0, =0x10000100 @ params_phys for RPC
  * On exit,
  *  r0, r1, r2, r3, r9, r10, r12 corrupted
  * This routine must preserve:
- *  r4, r5, r6, r7, r8
+ *  r4, r7, r8
  */
 		.align	5
 cache_on:	mov	r3, #8			@ cache_on function
@@ -551,43 +570,6 @@ __common_mmu_cache_on:
 #endif
 
 /*
- * All code following this line is relocatable.  It is relocated by
- * the above code to the end of the decompressed kernel image and
- * executed there.  During this time, we have no stacks.
- *
- * r0     = decompressed kernel length
- * r1-r3  = unused
- * r4     = kernel execution address
- * r5     = decompressed kernel start
- * r7     = architecture ID
- * r8     = atags pointer
- * r9-r12,r14 = corrupted
- */
-		.align	5
-reloc_start:	add	r9, r5, r0
-		sub	r9, r9, #128		@ do not copy the stack
-		debug_reloc_start
-		mov	r1, r4
-1:
-		.rept	4
-		ldmia	r5!, {r0, r2, r3, r10 - r12, r14}	@ relocate kernel
-		stmia	r1!, {r0, r2, r3, r10 - r12, r14}
-		.endr
-
-		cmp	r5, r9
-		blo	1b
-		mov	sp, r1
-		add	sp, sp, #128		@ relocate the stack
-		debug_reloc_end
-
-call_kernel:	bl	cache_clean_flush
-		bl	cache_off
-		mov	r0, #0			@ must be zero
-		mov	r1, r7			@ restore architecture number
-		mov	r2, r8			@ restore atags pointer
-		mov	pc, r4			@ call kernel
-
-/*
  * Here follow the relocatable cache support functions for the
  * various processors.  This is a generic hook for locating an
  * entry and jumping to an instruction at the specified offset
@@ -791,7 +773,7 @@ proc_types:
  * On exit,
  *  r0, r1, r2, r3, r9, r12 corrupted
  * This routine must preserve:
- *  r4, r6, r7
+ *  r4, r7, r8
  */
 		.align	5
 cache_off:	mov	r3, #12			@ cache_off function
@@ -866,7 +848,7 @@ __armv3_mmu_cache_off:
  * On exit,
  *  r1, r2, r3, r9, r10, r11, r12 corrupted
  * This routine must preserve:
- *  r0, r4, r5, r6, r7
+ *  r4, r6, r7, r8
  */
 		.align	5
 cache_clean_flush:
@@ -1088,7 +1070,6 @@ memdump: mov r12, r0
 #endif
 
 		.ltorg
-reloc_end:
 
 		.align
 		.section ".stack", "aw", %nobits
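
The overwrite check and backward copy introduced above are easier to follow in C. A rough sketch of the same logic (illustrative only; the authoritative version is the assembly in this patch):

#include <stdbool.h>
#include <stdint.h>

/*
 * Relocation is needed only when the decompressed kernel
 * [dst, dst + kernel_size) would intersect the running zImage plus
 * its bss/stack/malloc space [image_start, image_end).
 */
static bool must_relocate(uintptr_t dst, uintptr_t kernel_size,
			  uintptr_t image_start, uintptr_t image_end)
{
	if (dst >= image_end)			/* "r4 >= r10 -> OK" */
		return false;
	if (dst + kernel_size <= image_start)	/* "r4 + len <= r5 -> OK" */
		return false;
	return true;				/* overlap: move ourselves */
}

Because the copy destination lies ahead of the source, the ldmdb/stmdb loop runs from the end backwards, for the same reason memmove copies backwards when ascending overlapping ranges are involved.
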
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index 366a924019ac..5309909d7282 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -43,9 +43,6 @@ SECTIONS
 
   _etext = .;
 
-  /* Assume size of decompressed image is 4x the compressed image */
-  _image_size = (_etext - _text) * 4;
-
   _got_start = .;
   .got			: { *(.got) }
   _got_end = .;
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 224377211151..cb6b041c39d2 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -44,6 +44,19 @@ struct gic_chip_data {
 	void __iomem *cpu_base;
 };
 
+/*
+ * Supported arch specific GIC irq extension.
+ * Default make them NULL.
+ */
+struct irq_chip gic_arch_extn = {
+	.irq_ack	= NULL,
+	.irq_mask	= NULL,
+	.irq_unmask	= NULL,
+	.irq_retrigger	= NULL,
+	.irq_set_type	= NULL,
+	.irq_set_wake	= NULL,
+};
+
 #ifndef MAX_GIC_NR
 #define MAX_GIC_NR	1
 #endif
@@ -74,6 +87,8 @@ static inline unsigned int gic_irq(struct irq_data *d)
 static void gic_ack_irq(struct irq_data *d)
 {
 	spin_lock(&irq_controller_lock);
+	if (gic_arch_extn.irq_ack)
+		gic_arch_extn.irq_ack(d);
 	writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
 	spin_unlock(&irq_controller_lock);
 }
@@ -84,6 +99,8 @@ static void gic_mask_irq(struct irq_data *d)
 
 	spin_lock(&irq_controller_lock);
 	writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
+	if (gic_arch_extn.irq_mask)
+		gic_arch_extn.irq_mask(d);
 	spin_unlock(&irq_controller_lock);
 }
89 106
@@ -92,6 +109,8 @@ static void gic_unmask_irq(struct irq_data *d)
92 u32 mask = 1 << (d->irq % 32); 109 u32 mask = 1 << (d->irq % 32);
93 110
94 spin_lock(&irq_controller_lock); 111 spin_lock(&irq_controller_lock);
112 if (gic_arch_extn.irq_unmask)
113 gic_arch_extn.irq_unmask(d);
95 writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); 114 writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
96 spin_unlock(&irq_controller_lock); 115 spin_unlock(&irq_controller_lock);
97} 116}
@@ -116,6 +135,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 
 	spin_lock(&irq_controller_lock);
 
+	if (gic_arch_extn.irq_set_type)
+		gic_arch_extn.irq_set_type(d, type);
+
 	val = readl(base + GIC_DIST_CONFIG + confoff);
 	if (type == IRQ_TYPE_LEVEL_HIGH)
 		val &= ~confmask;
@@ -141,32 +163,54 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 	return 0;
 }
 
+static int gic_retrigger(struct irq_data *d)
+{
+	if (gic_arch_extn.irq_retrigger)
+		return gic_arch_extn.irq_retrigger(d);
+
+	return -ENXIO;
+}
+
 #ifdef CONFIG_SMP
-static int
-gic_set_cpu(struct irq_data *d, const struct cpumask *mask_val, bool force)
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
 {
 	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
 	unsigned int shift = (d->irq % 4) * 8;
 	unsigned int cpu = cpumask_first(mask_val);
-	u32 val;
-	struct irq_desc *desc;
+	u32 val, mask, bit;
 
-	spin_lock(&irq_controller_lock);
-	desc = irq_to_desc(d->irq);
-	if (desc == NULL) {
-		spin_unlock(&irq_controller_lock);
+	if (cpu >= 8)
 		return -EINVAL;
-	}
+
+	mask = 0xff << shift;
+	bit = 1 << (cpu + shift);
+
+	spin_lock(&irq_controller_lock);
 	d->node = cpu;
-	val = readl(reg) & ~(0xff << shift);
-	val |= 1 << (cpu + shift);
-	writel(val, reg);
+	val = readl(reg) & ~mask;
+	writel(val | bit, reg);
 	spin_unlock(&irq_controller_lock);
 
 	return 0;
 }
 #endif
 
+#ifdef CONFIG_PM
+static int gic_set_wake(struct irq_data *d, unsigned int on)
+{
+	int ret = -ENXIO;
+
+	if (gic_arch_extn.irq_set_wake)
+		ret = gic_arch_extn.irq_set_wake(d, on);
+
+	return ret;
+}
+
+#else
+#define gic_set_wake	NULL
+#endif
+
 static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct gic_chip_data *chip_data = get_irq_data(irq);
@@ -202,9 +246,11 @@ static struct irq_chip gic_chip = {
 	.irq_mask		= gic_mask_irq,
 	.irq_unmask		= gic_unmask_irq,
 	.irq_set_type		= gic_set_type,
+	.irq_retrigger		= gic_retrigger,
 #ifdef CONFIG_SMP
-	.irq_set_affinity	= gic_set_cpu,
+	.irq_set_affinity	= gic_set_affinity,
 #endif
+	.irq_set_wake		= gic_set_wake,
 };
 
 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
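
A sketch of how a platform might use the new gic_arch_extn hook (the my_soc_* names are hypothetical; the GIC register bases are assumed to have been ioremapped elsewhere):

#include <linux/init.h>
#include <linux/irq.h>
#include <asm/hardware/gic.h>

static void __iomem *dist_base, *cpu_base;	/* assumed mapped already */

/* Hypothetical SoC-specific wake-up programming. */
static int my_soc_irq_set_wake(struct irq_data *d, unsigned int on)
{
	/* program the SoC wake-up controller for d->irq here */
	return 0;
}

void __init my_soc_init_irq(void)
{
	/* Install the extension before the GIC is initialised. */
	gic_arch_extn.irq_set_wake = my_soc_irq_set_wake;
	gic_init(0, 29, dist_base, cpu_base);
}

Unset hooks stay NULL, so platforms that need no extension pay only a pointer test under the existing irq_controller_lock.
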
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
index 93d04acaa31f..92f10cb5c70c 100644
--- a/arch/arm/include/asm/a.out-core.h
+++ b/arch/arm/include/asm/a.out-core.h
@@ -32,11 +32,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	dump->u_ssize = 0;
 
-	dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
-	dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
-	dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
-	dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
-	dump->u_debugreg[4] = tsk->thread.debug.nsaved;
+	memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
 
 	if (dump->start_stack < 0x04000000)
 		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 20ae96cc0020..ed5bc9e05a4e 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -23,6 +23,8 @@
 #define CPUID_EXT_ISAR4	"c2, 4"
 #define CPUID_EXT_ISAR5	"c2, 5"
 
+extern unsigned int processor_id;
+
 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg)							\
 	({								\
@@ -43,7 +45,6 @@
 		__val;							\
 	})
 #else
-extern unsigned int processor_id;
 #define read_cpuid(reg) (processor_id)
 #define read_cpuid_ext(reg) 0
 #endif
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 84557d321001..0691f9dcc500 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -34,6 +34,7 @@
 
 #ifndef __ASSEMBLY__
 extern void __iomem *gic_cpu_base_addr;
+extern struct irq_chip gic_arch_extn;
 
 void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
 void gic_secondary_init(unsigned int);
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7080e2c8fa62..a4edd19dd3d6 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -19,11 +19,36 @@
 
 extern pte_t *pkmap_page_table;
 
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+/*
+ * The reason for kmap_high_get() is to ensure that the currently kmap'd
+ * page usage count does not decrease to zero while we're using its
+ * existing virtual mapping in an atomic context.  With a VIVT cache this
+ * is essential to do, but with a VIPT cache this is only an optimization
+ * so not to pay the price of establishing a second mapping if an existing
+ * one can be used.  However, on platforms without hardware TLB maintenance
+ * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
+ * the locking involved must also disable IRQs which is incompatible with
+ * the IPI mechanism used by global TLB operations.
+ */
 #define ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
+#error "The sum of features in your kernel config cannot be supported together"
+#endif
+#endif
 
-extern void *kmap_high(struct page *page);
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
 extern void *kmap_high_get(struct page *page);
-extern void kunmap_high(struct page *page);
+#else
+static inline void *kmap_high_get(struct page *page)
+{
+	return NULL;
+}
+#endif
 
 /*
  * The following functions are already defined by <linux/highmem.h>
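
The calling pattern kmap_high_get() supports looks roughly like this (a sketch modelled on the ARM cache-flushing code; the function name is hypothetical):

#include <linux/highmem.h>

/* Hypothetical: operate on a highmem page's existing kernel mapping. */
static void touch_existing_kmap(struct page *page)
{
	void *vaddr = kmap_high_get(page);	/* NULL if page has no kmap */

	if (vaddr) {
		/* mapping is pinned; safe to use even in atomic context */
		/* ... operate on vaddr ... */
		kunmap_high(page);		/* drop the extra reference */
	}
}

With the new inline stub, configurations that must not use ARCH_NEEDS_KMAP_HIGH_GET compile the same caller and simply always take the NULL path.
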
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 22ac140edd9e..febe495d0c6e 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -34,4 +34,35 @@ do { \
 	raw_spin_unlock(&desc->lock);				\
 } while(0)
 
+#ifndef __ASSEMBLY__
+/*
+ * Entry/exit functions for chained handlers where the primary IRQ chip
+ * may implement either fasteoi or level-trigger flow control.
+ */
+static inline void chained_irq_enter(struct irq_chip *chip,
+				     struct irq_desc *desc)
+{
+	/* FastEOI controllers require no action on entry. */
+	if (chip->irq_eoi)
+		return;
+
+	if (chip->irq_mask_ack) {
+		chip->irq_mask_ack(&desc->irq_data);
+	} else {
+		chip->irq_mask(&desc->irq_data);
+		if (chip->irq_ack)
+			chip->irq_ack(&desc->irq_data);
+	}
+}
+
+static inline void chained_irq_exit(struct irq_chip *chip,
+				    struct irq_desc *desc)
+{
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+	else
+		chip->irq_unmask(&desc->irq_data);
+}
+#endif
+
 #endif
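
A sketch of a cascaded-IRQ demux handler written against these helpers (the handler name and pending-register read are hypothetical, and the chip accessor assumes this kernel generation's get_irq_desc_chip()):

#include <linux/irq.h>
#include <asm/mach/irq.h>

static unsigned int my_hw_read_pending(void);	/* hypothetical */

static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = get_irq_desc_chip(desc);
	unsigned int cascade_irq;

	chained_irq_enter(chip, desc);	/* mask+ack, or nothing for fasteoi */

	cascade_irq = my_hw_read_pending();
	if (cascade_irq)
		generic_handle_irq(cascade_irq);

	chained_irq_exit(chip, desc);	/* eoi, or unmask */
}
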
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index fc1900925275..348d513afa92 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -31,6 +31,7 @@ struct outer_cache_fns {
 #ifdef CONFIG_OUTER_CACHE_SYNC
 	void (*sync)(void);
 #endif
+	void (*set_debug)(unsigned long);
 };
 
 #ifdef CONFIG_OUTER_CACHE
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 7a1f03c10f1b..b2d9df5667af 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -29,19 +29,7 @@
 #define STACK_TOP_MAX	TASK_SIZE
 #endif
 
-union debug_insn {
-	u32	arm;
-	u16	thumb;
-};
-
-struct debug_entry {
-	u32			address;
-	union debug_insn	insn;
-};
-
 struct debug_info {
-	int			nsaved;
-	struct debug_entry	bp[2];
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
 #endif
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 783d50f32618..a8ff22b2a391 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -130,8 +130,6 @@ struct pt_regs {
 
 #ifdef __KERNEL__
 
-#define arch_has_single_step()	(1)
-
 #define user_mode(regs)	\
 	(((regs)->ARM_cpsr & 0xf) == 0)
 
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 1b960d5ef6a5..f90756dc16dc 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -45,6 +45,7 @@ static inline int in_exception_text(unsigned long ptr)
 
 extern void __init early_trap_init(void);
 extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
+extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
 
 extern void *vectors_page;
 
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index 05ac4b06876a..35917b3a97f9 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -71,7 +71,7 @@ struct user{
 				/* the registers. */
   unsigned long magic;		/* To uniquely identify a core file */
   char u_comm[32];		/* User command that was responsible */
-  int u_debugreg[8];
+  int u_debugreg[8];		/* No longer used */
   struct user_fp u_fp;		/* FP state */
   struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
 				/* the FP registers. */
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index c6273a3bfc25..d86fcd44b220 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -583,6 +583,11 @@ void __init pci_common_init(struct hw_pci *hw)
 		 * Assign resources.
 		 */
 		pci_bus_assign_resources(bus);
+
+		/*
+		 * Enable bridges
+		 */
+		pci_enable_bridges(bus);
 	}
 
 	/*
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 8f57515bbdb0..c84b57d27d07 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -25,83 +25,6 @@
  * machine ID for example).
  */
 	__HEAD
-__error_a:
-#ifdef CONFIG_DEBUG_LL
-		mov	r4, r1				@ preserve machine ID
-		adr	r0, str_a1
-		bl	printascii
-		mov	r0, r4
-		bl	printhex8
-		adr	r0, str_a2
-		bl	printascii
-		adr	r3, __lookup_machine_type_data
-		ldmia	r3, {r4, r5, r6}		@ get machine desc list
-		sub	r4, r3, r4			@ get offset between virt&phys
-		add	r5, r5, r4			@ convert virt addresses to
-		add	r6, r6, r4			@ physical address space
-1:		ldr	r0, [r5, #MACHINFO_TYPE]	@ get machine type
-		bl	printhex8
-		mov	r0, #'\t'
-		bl	printch
-		ldr	r0, [r5, #MACHINFO_NAME]	@ get machine name
-		add	r0, r0, r4
-		bl	printascii
-		mov	r0, #'\n'
-		bl	printch
-		add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
-		cmp	r5, r6
-		blo	1b
-		adr	r0, str_a3
-		bl	printascii
-		b	__error
-ENDPROC(__error_a)
-
-str_a1:		.asciz	"\nError: unrecognized/unsupported machine ID (r1 = 0x"
-str_a2:		.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
-str_a3:		.asciz	"\nPlease check your kernel config and/or bootloader.\n"
-		.align
-#else
-		b	__error
-#endif
-
-/*
- * Lookup machine architecture in the linker-build list of architectures.
- * Note that we can't use the absolute addresses for the __arch_info
- * lists since we aren't running with the MMU on (and therefore, we are
- * not in the correct address space).  We have to calculate the offset.
- *
- *	r1 = machine architecture number
- * Returns:
- *	r3, r4, r6 corrupted
- *	r5 = mach_info pointer in physical address space
- */
-__lookup_machine_type:
-		adr	r3, __lookup_machine_type_data
-		ldmia	r3, {r4, r5, r6}
-		sub	r3, r3, r4			@ get offset between virt&phys
-		add	r5, r5, r3			@ convert virt addresses to
-		add	r6, r6, r3			@ physical address space
-1:		ldr	r3, [r5, #MACHINFO_TYPE]	@ get machine type
-		teq	r3, r1				@ matches loader number?
-		beq	2f				@ found
-		add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
-		cmp	r5, r6
-		blo	1b
-		mov	r5, #0				@ unknown machine
-2:		mov	pc, lr
-ENDPROC(__lookup_machine_type)
-
-/*
- * Look in arch/arm/kernel/arch.[ch] for information about the
- * __arch_info structures.
- */
-		.align	2
-		.type	__lookup_machine_type_data, %object
-__lookup_machine_type_data:
-		.long	.
-		.long	__arch_info_begin
-		.long	__arch_info_end
-		.size	__lookup_machine_type_data, . - __lookup_machine_type_data
 
 /* Determine validity of the r2 atags pointer.  The heuristic requires
  * that the pointer be aligned, in the first 16k of physical RAM and
@@ -109,8 +32,6 @@ __lookup_machine_type_data:
  * of this function may be more lenient with the physical address and
  * may also be able to move the ATAGS block if necessary.
  *
- * r8  = machinfo
- *
  * Returns:
  *  r2 either valid atags pointer, or zero
  *  r5, r6 corrupted
@@ -185,17 +106,6 @@ __mmap_switched_data:
 	.size	__mmap_switched_data, . - __mmap_switched_data
 
 /*
- * This provides a C-API version of __lookup_machine_type
- */
-ENTRY(lookup_machine_type)
-		stmfd	sp!, {r4 - r6, lr}
-		mov	r1, r0
-		bl	__lookup_machine_type
-		mov	r0, r5
-		ldmfd	sp!, {r4 - r6, pc}
-ENDPROC(lookup_machine_type)
-
-/*
  * This provides a C-API version of __lookup_processor_type
  */
 ENTRY(lookup_processor_type)
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 814ce1a73270..6b1e0ad9ec3b 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -44,9 +44,6 @@ ENTRY(stext)
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
 	movs	r10, r5				@ invalid processor (r5=0)?
 	beq	__error_p			@ yes, error 'p'
-	bl	__lookup_machine_type		@ r5=machinfo
-	movs	r8, r5				@ invalid machine (r5=0)?
-	beq	__error_a			@ yes, error 'a'
 
 	adr	lr, BSYM(__after_proc_init)	@ return (PIC) address
  ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index f06ff9feb0db..60fe2795f4a3 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -87,14 +87,10 @@ ENTRY(stext)
 	movs	r10, r5				@ invalid processor (r5=0)?
  THUMB( it	eq )		@ force fixup-able long branch encoding
 	beq	__error_p			@ yes, error 'p'
-	bl	__lookup_machine_type		@ r5=machinfo
-	movs	r8, r5				@ invalid machine (r5=0)?
- THUMB( it	eq )		@ force fixup-able long branch encoding
-	beq	__error_a			@ yes, error 'a'
 
 	/*
 	 * r1 = machine no, r2 = atags,
-	 * r8 = machinfo, r9 = cpuid, r10 = procinfo
+	 * r9 = cpuid, r10 = procinfo
 	 */
 	bl	__vet_atags
 #ifdef CONFIG_SMP_ON_UP
@@ -105,7 +101,7 @@ ENTRY(stext)
 	/*
 	 * The following calls CPU specific code in a position independent
 	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
-	 * xxx_proc_info structure selected by __lookup_machine_type
+	 * xxx_proc_info structure selected by __lookup_processor_type
 	 * above.  On return, the CPU will be ready for the MMU to be
 	 * turned on, and r0 will hold the CPU control register value.
 	 */
@@ -124,7 +120,6 @@ ENDPROC(stext)
  * amount which are required to get the kernel running, which
  * generally means mapping in the kernel code.
  *
- * r8  = machinfo
  * r9  = cpuid
  * r10 = procinfo
  *
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 28536e352deb..3535d3793e65 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -179,14 +179,21 @@ int __init arch_probe_nr_irqs(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+static bool migrate_one_irq(struct irq_data *d)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->irq_data.node, cpu);
+	unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+	bool ret = false;
 
-	raw_spin_lock_irq(&desc->lock);
-	desc->irq_data.chip->irq_set_affinity(&desc->irq_data,
-					      cpumask_of(cpu), false);
-	raw_spin_unlock_irq(&desc->lock);
+	if (cpu >= nr_cpu_ids) {
+		cpu = cpumask_any(cpu_online_mask);
+		ret = true;
+	}
+
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
+
+	d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+
+	return ret;
 }
 
 /*
@@ -198,25 +205,30 @@ void migrate_irqs(void)
 {
 	unsigned int i, cpu = smp_processor_id();
 	struct irq_desc *desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	for_each_irq_desc(i, desc) {
 		struct irq_data *d = &desc->irq_data;
+		bool affinity_broken = false;
 
-		if (d->node == cpu) {
-			unsigned int newcpu = cpumask_any_and(d->affinity,
-							      cpu_online_mask);
-			if (newcpu >= nr_cpu_ids) {
-				if (printk_ratelimit())
-					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
-					       i, cpu);
+		raw_spin_lock(&desc->lock);
+		do {
+			if (desc->action == NULL)
+				break;
 
-				cpumask_setall(d->affinity);
-				newcpu = cpumask_any_and(d->affinity,
-							 cpu_online_mask);
-			}
+			if (d->node != cpu)
+				break;
 
-			route_irq(desc, i, newcpu);
-		}
+			affinity_broken = migrate_one_irq(d);
+		} while (0);
+		raw_spin_unlock(&desc->lock);
+
+		if (affinity_broken && printk_ratelimit())
+			pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
 	}
+
+	local_irq_restore(flags);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 6d4105e6872f..6fcf22cf385c 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -76,6 +76,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 	for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
 		unsigned long loc;
 		Elf32_Sym *sym;
+		const char *symname;
 		s32 offset;
 #ifdef CONFIG_THUMB2_KERNEL
 		u32 upper, lower, sign, j1, j2;
@@ -83,18 +84,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 
 		offset = ELF32_R_SYM(rel->r_info);
 		if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
-			printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n",
+			pr_err("%s: section %u reloc %u: bad relocation sym offset\n",
 			       module->name, relindex, i);
 			return -ENOEXEC;
 		}
 
 		sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
+		symname = strtab + sym->st_name;
 
 		if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) {
-			printk(KERN_ERR "%s: out of bounds relocation, "
-				"section %d reloc %d offset %d size %d\n",
-				module->name, relindex, i, rel->r_offset,
-				dstsec->sh_size);
+			pr_err("%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
+			       module->name, relindex, i, symname,
+			       rel->r_offset, dstsec->sh_size);
 			return -ENOEXEC;
 		}
 
@@ -120,10 +121,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			if (offset & 3 ||
 			    offset <= (s32)0xfe000000 ||
 			    offset >= (s32)0x02000000) {
-				printk(KERN_ERR
-				       "%s: relocation out of range, section "
-				       "%d reloc %d sym '%s'\n", module->name,
-				       relindex, i, strtab + sym->st_name);
+				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+				       module->name, relindex, i, symname,
+				       ELF32_R_TYPE(rel->r_info), loc,
+				       sym->st_value);
 				return -ENOEXEC;
 			}
 
@@ -196,10 +197,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			if (!(offset & 1) ||
 			    offset <= (s32)0xff000000 ||
 			    offset >= (s32)0x01000000) {
-				printk(KERN_ERR
-				       "%s: relocation out of range, section "
-				       "%d reloc %d sym '%s'\n", module->name,
-				       relindex, i, strtab + sym->st_name);
+				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+				       module->name, relindex, i, symname,
+				       ELF32_R_TYPE(rel->r_info), loc,
+				       sym->st_value);
 				return -ENOEXEC;
 			}
 
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index b13e70f63d71..2bf27f364d09 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -26,8 +26,6 @@
 #include <asm/system.h>
 #include <asm/traps.h>
 
-#include "ptrace.h"
-
 #define REG_PC	15
 #define REG_PSR	16
 /*
@@ -184,389 +182,12 @@ put_user_reg(struct task_struct *task, int offset, long data)
184 return ret; 182 return ret;
185} 183}
186 184
187static inline int
188read_u32(struct task_struct *task, unsigned long addr, u32 *res)
189{
190 int ret;
191
192 ret = access_process_vm(task, addr, res, sizeof(*res), 0);
193
194 return ret == sizeof(*res) ? 0 : -EIO;
195}
196
197static inline int
198read_instr(struct task_struct *task, unsigned long addr, u32 *res)
199{
200 int ret;
201
202 if (addr & 1) {
203 u16 val;
204 ret = access_process_vm(task, addr & ~1, &val, sizeof(val), 0);
205 ret = ret == sizeof(val) ? 0 : -EIO;
206 *res = val;
207 } else {
208 u32 val;
209 ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0);
210 ret = ret == sizeof(val) ? 0 : -EIO;
211 *res = val;
212 }
213 return ret;
214}
215
216/*
217 * Get value of register `rn' (in the instruction)
218 */
219static unsigned long
220ptrace_getrn(struct task_struct *child, unsigned long insn)
221{
222 unsigned int reg = (insn >> 16) & 15;
223 unsigned long val;
224
225 val = get_user_reg(child, reg);
226 if (reg == 15)
227 val += 8;
228
229 return val;
230}
231
232/*
233 * Get value of operand 2 (in an ALU instruction)
234 */
235static unsigned long
236ptrace_getaluop2(struct task_struct *child, unsigned long insn)
237{
238 unsigned long val;
239 int shift;
240 int type;
241
242 if (insn & 1 << 25) {
243 val = insn & 255;
244 shift = (insn >> 8) & 15;
245 type = 3;
246 } else {
247 val = get_user_reg (child, insn & 15);
248
249 if (insn & (1 << 4))
250 shift = (int)get_user_reg (child, (insn >> 8) & 15);
251 else
252 shift = (insn >> 7) & 31;
253
254 type = (insn >> 5) & 3;
255 }
256
257 switch (type) {
258 case 0: val <<= shift; break;
259 case 1: val >>= shift; break;
260 case 2:
261 val = (((signed long)val) >> shift);
262 break;
263 case 3:
264 val = (val >> shift) | (val << (32 - shift));
265 break;
266 }
267 return val;
268}
269
270/*
271 * Get value of operand 2 (in a LDR instruction)
272 */
273static unsigned long
274ptrace_getldrop2(struct task_struct *child, unsigned long insn)
275{
276 unsigned long val;
277 int shift;
278 int type;
279
280 val = get_user_reg(child, insn & 15);
281 shift = (insn >> 7) & 31;
282 type = (insn >> 5) & 3;
283
284 switch (type) {
285 case 0: val <<= shift; break;
286 case 1: val >>= shift; break;
287 case 2:
288 val = (((signed long)val) >> shift);
289 break;
290 case 3:
291 val = (val >> shift) | (val << (32 - shift));
292 break;
293 }
294 return val;
295}
296
297#define OP_MASK 0x01e00000
298#define OP_AND 0x00000000
299#define OP_EOR 0x00200000
300#define OP_SUB 0x00400000
301#define OP_RSB 0x00600000
302#define OP_ADD 0x00800000
303#define OP_ADC 0x00a00000
304#define OP_SBC 0x00c00000
305#define OP_RSC 0x00e00000
306#define OP_ORR 0x01800000
307#define OP_MOV 0x01a00000
308#define OP_BIC 0x01c00000
309#define OP_MVN 0x01e00000
310
311static unsigned long
312get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn)
313{
314 u32 alt = 0;
315
316 switch (insn & 0x0e000000) {
317 case 0x00000000:
318 case 0x02000000: {
319 /*
320 * data processing
321 */
322 long aluop1, aluop2, ccbit;
323
324 if ((insn & 0x0fffffd0) == 0x012fff10) {
325 /*
326 * bx or blx
327 */
328 alt = get_user_reg(child, insn & 15);
329 break;
330 }
331
332
333 if ((insn & 0xf000) != 0xf000)
334 break;
335
336 aluop1 = ptrace_getrn(child, insn);
337 aluop2 = ptrace_getaluop2(child, insn);
338 ccbit = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 1 : 0;
339
340 switch (insn & OP_MASK) {
341 case OP_AND: alt = aluop1 & aluop2; break;
342 case OP_EOR: alt = aluop1 ^ aluop2; break;
343 case OP_SUB: alt = aluop1 - aluop2; break;
344 case OP_RSB: alt = aluop2 - aluop1; break;
345 case OP_ADD: alt = aluop1 + aluop2; break;
346 case OP_ADC: alt = aluop1 + aluop2 + ccbit; break;
347 case OP_SBC: alt = aluop1 - aluop2 + ccbit; break;
348 case OP_RSC: alt = aluop2 - aluop1 + ccbit; break;
349 case OP_ORR: alt = aluop1 | aluop2; break;
350 case OP_MOV: alt = aluop2; break;
351 case OP_BIC: alt = aluop1 & ~aluop2; break;
352 case OP_MVN: alt = ~aluop2; break;
353 }
354 break;
355 }
356
357 case 0x04000000:
358 case 0x06000000:
359 /*
360 * ldr
361 */
362 if ((insn & 0x0010f000) == 0x0010f000) {
363 unsigned long base;
364
365 base = ptrace_getrn(child, insn);
366 if (insn & 1 << 24) {
367 long aluop2;
368
369 if (insn & 0x02000000)
370 aluop2 = ptrace_getldrop2(child, insn);
371 else
372 aluop2 = insn & 0xfff;
373
374 if (insn & 1 << 23)
375 base += aluop2;
376 else
377 base -= aluop2;
378 }
379 read_u32(child, base, &alt);
380 }
381 break;
382
383 case 0x08000000:
384 /*
385 * ldm
386 */
387 if ((insn & 0x00108000) == 0x00108000) {
388 unsigned long base;
389 unsigned int nr_regs;
390
391 if (insn & (1 << 23)) {
392 nr_regs = hweight16(insn & 65535) << 2;
393
394 if (!(insn & (1 << 24)))
395 nr_regs -= 4;
396 } else {
397 if (insn & (1 << 24))
398 nr_regs = -4;
399 else
400 nr_regs = 0;
401 }
402
403 base = ptrace_getrn(child, insn);
404
405 read_u32(child, base + nr_regs, &alt);
406 break;
407 }
408 break;
409
410 case 0x0a000000: {
411 /*
412 * bl or b
413 */
414 signed long displ;
415 /* It's a branch/branch link: instead of trying to
416 * figure out whether the branch will be taken or not,
417 * we'll put a breakpoint at both locations. This is
418 * simpler, more reliable, and probably not a whole lot
419 * slower than the alternative approach of emulating the
420 * branch.
421 */
422 displ = (insn & 0x00ffffff) << 8;
423 displ = (displ >> 6) + 8;
424 if (displ != 0 && displ != 4)
425 alt = pc + displ;
426 }
427 break;
428 }
429
430 return alt;
431}
432
433static int
434swap_insn(struct task_struct *task, unsigned long addr,
435 void *old_insn, void *new_insn, int size)
436{
437 int ret;
438
439 ret = access_process_vm(task, addr, old_insn, size, 0);
440 if (ret == size)
441 ret = access_process_vm(task, addr, new_insn, size, 1);
442 return ret;
443}
444
445static void
446add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr)
447{
448 int nr = dbg->nsaved;
449
450 if (nr < 2) {
451 u32 new_insn = BREAKINST_ARM;
452 int res;
453
454 res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4);
455
456 if (res == 4) {
457 dbg->bp[nr].address = addr;
458 dbg->nsaved += 1;
459 }
460 } else
461 printk(KERN_ERR "ptrace: too many breakpoints\n");
462}
463
464/*
465 * Clear one breakpoint in the user program. We copy what the hardware
466 * does and use bit 0 of the address to indicate whether this is a Thumb
467 * breakpoint or an ARM breakpoint.
468 */
469static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
470{
471 unsigned long addr = bp->address;
472 union debug_insn old_insn;
473 int ret;
474
475 if (addr & 1) {
476 ret = swap_insn(task, addr & ~1, &old_insn.thumb,
477 &bp->insn.thumb, 2);
478
479 if (ret != 2 || old_insn.thumb != BREAKINST_THUMB)
480 printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at "
481 "0x%08lx (0x%04x)\n", task->comm,
482 task_pid_nr(task), addr, old_insn.thumb);
483 } else {
484 ret = swap_insn(task, addr & ~3, &old_insn.arm,
485 &bp->insn.arm, 4);
486
487 if (ret != 4 || old_insn.arm != BREAKINST_ARM)
488 printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
489 "0x%08lx (0x%08x)\n", task->comm,
490 task_pid_nr(task), addr, old_insn.arm);
491 }
492}
493
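The comment above describes the tagging scheme: bit 0 of the saved address distinguishes a 16-bit Thumb breakpoint from a 32-bit ARM one. A small sketch of that convention with illustrative helpers (not from the kernel):

static inline unsigned long bp_tag(unsigned long addr, int thumb)
{
	return thumb ? (addr | 1) : addr;	/* mirror the hardware convention */
}

static inline unsigned long bp_untag(unsigned long tagged)
{
	/* Thumb slots are halfword aligned, ARM slots word aligned. */
	return (tagged & 1) ? (tagged & ~1UL) : (tagged & ~3UL);
}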
494void ptrace_set_bpt(struct task_struct *child)
495{
496 struct pt_regs *regs;
497 unsigned long pc;
498 u32 insn;
499 int res;
500
501 regs = task_pt_regs(child);
502 pc = instruction_pointer(regs);
503
504 if (thumb_mode(regs)) {
505 printk(KERN_WARNING "ptrace: can't handle thumb mode\n");
506 return;
507 }
508
509 res = read_instr(child, pc, &insn);
510 if (!res) {
511 struct debug_info *dbg = &child->thread.debug;
512 unsigned long alt;
513
514 dbg->nsaved = 0;
515
516 alt = get_branch_address(child, pc, insn);
517 if (alt)
518 add_breakpoint(child, dbg, alt);
519
520 /*
521 * Note that we ignore the result of setting the above
522 * breakpoint since it may fail. When it does, this is
523 * not so much an error, but a forewarning that we may
524 * be receiving a prefetch abort shortly.
525 *
526 * If we don't set this breakpoint here, then we can
527 * lose control of the thread during single stepping.
528 */
529 if (!alt || predicate(insn) != PREDICATE_ALWAYS)
530 add_breakpoint(child, dbg, pc + 4);
531 }
532}
533
534/*
535 * Ensure no single-step breakpoint is pending. Returns non-zero
536 * value if child was being single-stepped.
537 */
538void ptrace_cancel_bpt(struct task_struct *child)
539{
540 int i, nsaved = child->thread.debug.nsaved;
541
542 child->thread.debug.nsaved = 0;
543
544 if (nsaved > 2) {
545 printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
546 nsaved = 2;
547 }
548
549 for (i = 0; i < nsaved; i++)
550 clear_breakpoint(child, &child->thread.debug.bp[i]);
551}
552
553void user_disable_single_step(struct task_struct *task)
554{
555 task->ptrace &= ~PT_SINGLESTEP;
556 ptrace_cancel_bpt(task);
557}
558
559void user_enable_single_step(struct task_struct *task)
560{
561 task->ptrace |= PT_SINGLESTEP;
562}
563
564/* 185/*
565 * Called by kernel/ptrace.c when detaching.. 186 * Called by kernel/ptrace.c when detaching..
566 */ 187 */
567void ptrace_disable(struct task_struct *child) 188void ptrace_disable(struct task_struct *child)
568{ 189{
569 user_disable_single_step(child); 190 /* Nothing to do. */
570} 191}
571 192
572/* 193/*
@@ -576,8 +197,6 @@ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
576{ 197{
577 siginfo_t info; 198 siginfo_t info;
578 199
579 ptrace_cancel_bpt(tsk);
580
581 info.si_signo = SIGTRAP; 200 info.si_signo = SIGTRAP;
582 info.si_errno = 0; 201 info.si_errno = 0;
583 info.si_code = TRAP_BRKPT; 202 info.si_code = TRAP_BRKPT;
diff --git a/arch/arm/kernel/ptrace.h b/arch/arm/kernel/ptrace.h
deleted file mode 100644
index 3926605b82ea..000000000000
--- a/arch/arm/kernel/ptrace.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * linux/arch/arm/kernel/ptrace.h
3 *
4 * Copyright (C) 2000-2003 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/ptrace.h>
11
12extern void ptrace_cancel_bpt(struct task_struct *);
13extern void ptrace_set_bpt(struct task_struct *);
14extern void ptrace_break(struct task_struct *, struct pt_regs *);
15
16/*
17 * Send SIGTRAP if we're single-stepping
18 */
19static inline void single_step_trap(struct task_struct *task)
20{
21 if (task->ptrace & PT_SINGLESTEP) {
22 ptrace_cancel_bpt(task);
23 send_sig(SIGTRAP, task, 1);
24 }
25}
26
27static inline void single_step_clear(struct task_struct *task)
28{
29 if (task->ptrace & PT_SINGLESTEP)
30 ptrace_cancel_bpt(task);
31}
32
33static inline void single_step_set(struct task_struct *task)
34{
35 if (task->ptrace & PT_SINGLESTEP)
36 ptrace_set_bpt(task);
37}
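Taken together, the helpers deleted above implemented software single-step roughly as follows (a reconstruction from the removed code, not authoritative):

/*
 * PTRACE_SINGLESTEP request
 *   -> user_enable_single_step()  sets PT_SINGLESTEP
 * tracee resumes
 *   -> single_step_set()          ptrace_set_bpt(): plant up to two
 *                                 breakpoints (branch target, pc + 4)
 * tracee executes BREAKINST_ARM
 *   -> ptrace_break()             ptrace_cancel_bpt(): restore the
 *                                 original instructions, raise SIGTRAP
 */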
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index df246da4ceca..0b13a72f855d 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -9,6 +9,7 @@
9 * the Free Software Foundation. 9 * the Free Software Foundation.
10 */ 10 */
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/ftrace.h>
12 13
13#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) 14#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
14#include <linux/sched.h> 15#include <linux/sched.h>
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 5ea4fb718b97..db2382853450 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -308,7 +308,44 @@ static void __init cacheid_init(void)
308 * already provide the required functionality. 308 * already provide the required functionality.
309 */ 309 */
310extern struct proc_info_list *lookup_processor_type(unsigned int); 310extern struct proc_info_list *lookup_processor_type(unsigned int);
311extern struct machine_desc *lookup_machine_type(unsigned int); 311
312static void __init early_print(const char *str, ...)
313{
314 extern void printascii(const char *);
315 char buf[256];
316 va_list ap;
317
318 va_start(ap, str);
319 vsnprintf(buf, sizeof(buf), str, ap);
320 va_end(ap);
321
322#ifdef CONFIG_DEBUG_LL
323 printascii(buf);
324#endif
325 printk("%s", buf);
326}
327
328static struct machine_desc * __init lookup_machine_type(unsigned int type)
329{
330 extern struct machine_desc __arch_info_begin[], __arch_info_end[];
331 struct machine_desc *p;
332
333 for (p = __arch_info_begin; p < __arch_info_end; p++)
334 if (type == p->nr)
335 return p;
336
337 early_print("\n"
338 "Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
339 "Available machine support:\n\nID (hex)\tNAME\n", type);
340
341 for (p = __arch_info_begin; p < __arch_info_end; p++)
342 early_print("%08x\t%s\n", p->nr, p->name);
343
344 early_print("\nPlease check your kernel config and/or bootloader.\n");
345
346 while (true)
347 /* can't use cpu_relax() here as it may require MMU setup */;
348}
312 349
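The new lookup_machine_type() walks the machine_desc records that board files place in the kernel image between __arch_info_begin and __arch_info_end. For reference, a descriptor is normally contributed with the MACHINE_START/MACHINE_END helpers, roughly like this (the machine name, callbacks, and addresses are illustrative, not from this patch):

MACHINE_START(MYBOARD, "Example Board")
	.boot_params	= 0x80000100,
	.map_io		= myboard_map_io,
	.init_irq	= myboard_init_irq,
	.timer		= &myboard_timer,
	.init_machine	= myboard_init,
MACHINE_END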
313static void __init feat_v6_fixup(void) 350static void __init feat_v6_fixup(void)
314{ 351{
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index abaf8445ce25..cb8398317644 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -20,7 +20,6 @@
20#include <asm/unistd.h> 20#include <asm/unistd.h>
21#include <asm/vfp.h> 21#include <asm/vfp.h>
22 22
23#include "ptrace.h"
24#include "signal.h" 23#include "signal.h"
25 24
26#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 25#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -348,8 +347,6 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
348 if (restore_sigframe(regs, frame)) 347 if (restore_sigframe(regs, frame))
349 goto badframe; 348 goto badframe;
350 349
351 single_step_trap(current);
352
353 return regs->ARM_r0; 350 return regs->ARM_r0;
354 351
355badframe: 352badframe:
@@ -383,8 +380,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
383 if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT) 380 if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
384 goto badframe; 381 goto badframe;
385 382
386 single_step_trap(current);
387
388 return regs->ARM_r0; 383 return regs->ARM_r0;
389 384
390badframe: 385badframe:
@@ -706,8 +701,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
706 if (try_to_freeze()) 701 if (try_to_freeze())
707 goto no_signal; 702 goto no_signal;
708 703
709 single_step_clear(current);
710
711 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 704 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
712 if (signr > 0) { 705 if (signr > 0) {
713 sigset_t *oldset; 706 sigset_t *oldset;
@@ -726,7 +719,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
726 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 719 if (test_thread_flag(TIF_RESTORE_SIGMASK))
727 clear_thread_flag(TIF_RESTORE_SIGMASK); 720 clear_thread_flag(TIF_RESTORE_SIGMASK);
728 } 721 }
729 single_step_set(current);
730 return; 722 return;
731 } 723 }
732 724
@@ -772,7 +764,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
772 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 764 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
773 } 765 }
774 } 766 }
775 single_step_set(current);
776} 767}
777 768
778asmlinkage void 769asmlinkage void
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index ee57640ba2bb..21ac43f1c2d0 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -23,6 +23,7 @@
23#include <linux/kexec.h> 23#include <linux/kexec.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/sched.h>
26 27
27#include <asm/atomic.h> 28#include <asm/atomic.h>
28#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
@@ -32,7 +33,6 @@
32#include <asm/unwind.h> 33#include <asm/unwind.h>
33#include <asm/tls.h> 34#include <asm/tls.h>
34 35
35#include "ptrace.h"
36#include "signal.h" 36#include "signal.h"
37 37
38static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; 38static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
@@ -256,7 +256,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
256 return ret; 256 return ret;
257} 257}
258 258
259DEFINE_SPINLOCK(die_lock); 259static DEFINE_SPINLOCK(die_lock);
260 260
261/* 261/*
262 * This function is protected against re-entrancy. 262 * This function is protected against re-entrancy.
diff --git a/arch/arm/mach-footbridge/include/mach/hardware.h b/arch/arm/mach-footbridge/include/mach/hardware.h
index 51dd902043ad..b6fdf23ecf6c 100644
--- a/arch/arm/mach-footbridge/include/mach/hardware.h
+++ b/arch/arm/mach-footbridge/include/mach/hardware.h
@@ -23,26 +23,33 @@
23 * 0xf9000000 0x50000000 1MB Cache flush 23 * 0xf9000000 0x50000000 1MB Cache flush
24 * 0xf0000000 0x80000000 16MB ISA memory 24 * 0xf0000000 0x80000000 16MB ISA memory
25 */ 25 */
26
27#ifdef CONFIG_MMU
28#define MMU_IO(a, b) (a)
29#else
30#define MMU_IO(a, b) (b)
31#endif
32
26#define XBUS_SIZE 0x00100000 33#define XBUS_SIZE 0x00100000
27#define XBUS_BASE 0xff800000 34#define XBUS_BASE MMU_IO(0xff800000, 0x40000000)
28 35
29#define ARMCSR_SIZE 0x00100000 36#define ARMCSR_SIZE 0x00100000
30#define ARMCSR_BASE 0xfe000000 37#define ARMCSR_BASE MMU_IO(0xfe000000, 0x42000000)
31 38
32#define WFLUSH_SIZE 0x00100000 39#define WFLUSH_SIZE 0x00100000
33#define WFLUSH_BASE 0xfd000000 40#define WFLUSH_BASE MMU_IO(0xfd000000, 0x78000000)
34 41
35#define PCIIACK_SIZE 0x00100000 42#define PCIIACK_SIZE 0x00100000
36#define PCIIACK_BASE 0xfc000000 43#define PCIIACK_BASE MMU_IO(0xfc000000, 0x79000000)
37 44
38#define PCICFG1_SIZE 0x01000000 45#define PCICFG1_SIZE 0x01000000
39#define PCICFG1_BASE 0xfb000000 46#define PCICFG1_BASE MMU_IO(0xfb000000, 0x7a000000)
40 47
41#define PCICFG0_SIZE 0x01000000 48#define PCICFG0_SIZE 0x01000000
42#define PCICFG0_BASE 0xfa000000 49#define PCICFG0_BASE MMU_IO(0xfa000000, 0x7b000000)
43 50
44#define PCIMEM_SIZE 0x01000000 51#define PCIMEM_SIZE 0x01000000
45#define PCIMEM_BASE 0xf0000000 52#define PCIMEM_BASE MMU_IO(0xf0000000, 0x80000000)
46 53
47#define XBUS_LEDS ((volatile unsigned char *)(XBUS_BASE + 0x12000)) 54#define XBUS_LEDS ((volatile unsigned char *)(XBUS_BASE + 0x12000))
48#define XBUS_LED_AMBER (1 << 0) 55#define XBUS_LED_AMBER (1 << 0)
diff --git a/arch/arm/mach-footbridge/include/mach/io.h b/arch/arm/mach-footbridge/include/mach/io.h
index 101a4fe90bde..32e4cc397c28 100644
--- a/arch/arm/mach-footbridge/include/mach/io.h
+++ b/arch/arm/mach-footbridge/include/mach/io.h
@@ -14,8 +14,14 @@
14#ifndef __ASM_ARM_ARCH_IO_H 14#ifndef __ASM_ARM_ARCH_IO_H
15#define __ASM_ARM_ARCH_IO_H 15#define __ASM_ARM_ARCH_IO_H
16 16
17#define PCIO_SIZE 0x00100000 17#ifdef CONFIG_MMU
18#define PCIO_BASE 0xff000000 18#define MMU_IO(a, b) (a)
19#else
20#define MMU_IO(a, b) (b)
21#endif
22
23#define PCIO_SIZE 0x00100000
24#define PCIO_BASE MMU_IO(0xff000000, 0x7c000000)
19 25
20#define IO_SPACE_LIMIT 0xffff 26#define IO_SPACE_LIMIT 0xffff
21 27
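Both footbridge headers gain the same MMU_IO() helper, so a single set of register definitions serves MMU and MMU-less builds. A compile-and-run sketch of the expansion (CONFIG_MMU modeled here as a plain macro):

#include <stdio.h>

#define CONFIG_MMU 1	/* set to 0 to model a !CONFIG_MMU build */

#if CONFIG_MMU
#define MMU_IO(a, b) (a)	/* virtual address via the static mapping */
#else
#define MMU_IO(a, b) (b)	/* raw bus/physical address */
#endif

#define PCIO_BASE MMU_IO(0xff000000UL, 0x7c000000UL)

int main(void)
{
	printf("PCIO_BASE = 0x%08lx\n", PCIO_BASE);
	return 0;
}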
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 1a2cf6226a55..b69fa0a0299e 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -45,6 +45,7 @@ config ARCH_OMAP4
45 select CPU_V7 45 select CPU_V7
46 select ARM_GIC 46 select ARM_GIC
47 select PL310_ERRATA_588369 47 select PL310_ERRATA_588369
48 select PL310_ERRATA_727915
48 select ARM_ERRATA_720789 49 select ARM_ERRATA_720789
49 select ARCH_HAS_OPP 50 select ARCH_HAS_OPP
50 select PM_OPP if PM 51 select PM_OPP if PM
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 19268647ce36..9ef8c29dd817 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -52,6 +52,12 @@ static void omap4_l2x0_disable(void)
52 omap_smc1(0x102, 0x0); 52 omap_smc1(0x102, 0x0);
53} 53}
54 54
55static void omap4_l2x0_set_debug(unsigned long val)
56{
57 /* Program PL310 L2 Cache controller debug register */
58 omap_smc1(0x100, val);
59}
60
55static int __init omap_l2_cache_init(void) 61static int __init omap_l2_cache_init(void)
56{ 62{
57 u32 aux_ctrl = 0; 63 u32 aux_ctrl = 0;
@@ -99,6 +105,7 @@ static int __init omap_l2_cache_init(void)
99 * specific one 105 * specific one
100 */ 106 */
101 outer_cache.disable = omap4_l2x0_disable; 107 outer_cache.disable = omap4_l2x0_disable;
108 outer_cache.set_debug = omap4_l2x0_set_debug;
102 109
103 return 0; 110 return 0;
104} 111}
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e4509bae8fc4..05b26a03c209 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -845,6 +845,11 @@ config CACHE_XSC3L2
845 help 845 help
846 This option enables the L2 cache on XScale3. 846 This option enables the L2 cache on XScale3.
847 847
848config ARM_L1_CACHE_SHIFT_6
849 bool
850 help
851 Setting ARM L1 cache line size to 64 Bytes.
852
848config ARM_L1_CACHE_SHIFT 853config ARM_L1_CACHE_SHIFT
849 int 854 int
850 default 6 if ARM_L1_CACHE_SHIFT_6 855 default 6 if ARM_L1_CACHE_SHIFT_6
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index f2ce38e085d2..ef59099a5463 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -73,18 +73,24 @@ static inline void l2x0_inv_line(unsigned long addr)
73 writel_relaxed(addr, base + L2X0_INV_LINE_PA); 73 writel_relaxed(addr, base + L2X0_INV_LINE_PA);
74} 74}
75 75
76#ifdef CONFIG_PL310_ERRATA_588369 76#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
77static void debug_writel(unsigned long val)
78{
79 extern void omap_smc1(u32 fn, u32 arg);
80 77
81 /* 78#define debug_writel(val) outer_cache.set_debug(val)
82 * Texas Instrument secure monitor api to modify the 79
83 * PL310 Debug Control Register. 80static void l2x0_set_debug(unsigned long val)
84 */ 81{
85 omap_smc1(0x100, val); 82 writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
86} 83}
84#else
85/* Optimised out for non-errata case */
86static inline void debug_writel(unsigned long val)
87{
88}
89
90#define l2x0_set_debug NULL
91#endif
87 92
93#ifdef CONFIG_PL310_ERRATA_588369
88static inline void l2x0_flush_line(unsigned long addr) 94static inline void l2x0_flush_line(unsigned long addr)
89{ 95{
90 void __iomem *base = l2x0_base; 96 void __iomem *base = l2x0_base;
@@ -97,11 +103,6 @@ static inline void l2x0_flush_line(unsigned long addr)
97} 103}
98#else 104#else
99 105
100/* Optimised out for non-errata case */
101static inline void debug_writel(unsigned long val)
102{
103}
104
105static inline void l2x0_flush_line(unsigned long addr) 106static inline void l2x0_flush_line(unsigned long addr)
106{ 107{
107 void __iomem *base = l2x0_base; 108 void __iomem *base = l2x0_base;
@@ -125,9 +126,11 @@ static void l2x0_flush_all(void)
125 126
126 /* clean all ways */ 127 /* clean all ways */
127 spin_lock_irqsave(&l2x0_lock, flags); 128 spin_lock_irqsave(&l2x0_lock, flags);
129 debug_writel(0x03);
128 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); 130 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
129 cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); 131 cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
130 cache_sync(); 132 cache_sync();
133 debug_writel(0x00);
131 spin_unlock_irqrestore(&l2x0_lock, flags); 134 spin_unlock_irqrestore(&l2x0_lock, flags);
132} 135}
133 136
@@ -335,6 +338,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
335 outer_cache.flush_all = l2x0_flush_all; 338 outer_cache.flush_all = l2x0_flush_all;
336 outer_cache.inv_all = l2x0_inv_all; 339 outer_cache.inv_all = l2x0_inv_all;
337 outer_cache.disable = l2x0_disable; 340 outer_cache.disable = l2x0_disable;
341 outer_cache.set_debug = l2x0_set_debug;
338 342
339 printk(KERN_INFO "%s cache controller enabled\n", type); 343 printk(KERN_INFO "%s cache controller enabled\n", type);
340 printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", 344 printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
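The net effect of these hunks, together with the OMAP4 hook earlier in this patch, is that the erratum code no longer hard-codes the TI secure-monitor call: debug_writel() now dispatches through the new outer_cache.set_debug() method, which platforms whose PL310 debug register is secure-only can override. Sketched call path:

/*
 *   debug_writel(val)
 *     -> outer_cache.set_debug(val)
 *          l2x0_set_debug()        generic: MMIO write to L2X0_DEBUG_CTRL
 *          omap4_l2x0_set_debug()  OMAP4:   omap_smc1(0x100, val), as the
 *                                           register is secure-only there
 */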
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 3c67e92f7d59..ff7b43b5885a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -827,16 +827,6 @@ static void __init sanity_check_meminfo(void)
827 * rather difficult. 827 * rather difficult.
828 */ 828 */
829 reason = "with VIPT aliasing cache"; 829 reason = "with VIPT aliasing cache";
830 } else if (is_smp() && tlb_ops_need_broadcast()) {
831 /*
832 * kmap_high needs to occasionally flush TLB entries,
833 * however, if the TLB entries need to be broadcast
834 * we may deadlock:
835 * kmap_high(irqs off)->flush_all_zero_pkmaps->
836 * flush_tlb_kernel_range->smp_call_function_many
837 * (must not be called with irqs off)
838 */
839 reason = "without hardware TLB ops broadcasting";
840 } 830 }
841 if (reason) { 831 if (reason) {
842 printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", 832 printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 935993e1b1ef..036fdbfdd62f 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -38,7 +38,7 @@ struct arm_vmregion *
38arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, 38arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
39 size_t size, gfp_t gfp) 39 size_t size, gfp_t gfp)
40{ 40{
41 unsigned long addr = head->vm_start, end = head->vm_end - size; 41 unsigned long start = head->vm_start, addr = head->vm_end;
42 unsigned long flags; 42 unsigned long flags;
43 struct arm_vmregion *c, *new; 43 struct arm_vmregion *c, *new;
44 44
@@ -54,21 +54,20 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
54 54
55 spin_lock_irqsave(&head->vm_lock, flags); 55 spin_lock_irqsave(&head->vm_lock, flags);
56 56
57 list_for_each_entry(c, &head->vm_list, vm_list) { 57 addr = rounddown(addr - size, align);
58 if ((addr + size) < addr) 58 list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
59 goto nospc; 59 if (addr >= c->vm_end)
60 if ((addr + size) <= c->vm_start)
61 goto found; 60 goto found;
62 addr = ALIGN(c->vm_end, align); 61 addr = rounddown(c->vm_start - size, align);
63 if (addr > end) 62 if (addr < start)
64 goto nospc; 63 goto nospc;
65 } 64 }
66 65
67 found: 66 found:
68 /* 67 /*
69 * Insert this entry _before_ the one we found. 68 * Insert this entry after the one we found.
70 */ 69 */
71 list_add_tail(&new->vm_list, &c->vm_list); 70 list_add(&new->vm_list, &c->vm_list);
72 new->vm_start = addr; 71 new->vm_start = addr;
73 new->vm_end = addr + size; 72 new->vm_end = addr + size;
74 new->vm_active = 1; 73 new->vm_active = 1;
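The rewritten allocator searches top-down: it starts from the highest aligned address that fits and walks the region list in reverse, dropping below each occupied region until a hole is found or it falls off the bottom. A userspace miniature of the same arithmetic (illustrative sizes; rounddown() is modeled for power-of-two alignment only):

#include <stdio.h>

#define ROUNDDOWN(x, a)	((x) & ~((a) - 1))	/* power-of-two align only */

struct region { unsigned long start, end; };

int main(void)
{
	/* existing allocations, sorted ascending (illustrative values) */
	struct region used[] = { { 0xf0000, 0xf2000 }, { 0xf8000, 0xfa000 } };
	unsigned long vm_start = 0xe0000, vm_end = 0x100000;
	unsigned long size = 0x3000, align = 0x1000;
	unsigned long addr = ROUNDDOWN(vm_end - size, align);
	int i;

	for (i = 1; i >= 0; i--) {	/* reverse walk, as in the patch */
		if (addr >= used[i].end)
			break;		/* the hole above used[i] fits */
		addr = ROUNDDOWN(used[i].start - size, align);
		if (addr < vm_start) {
			puts("no space");
			return 1;
		}
	}
	printf("allocated [0x%lx, 0x%lx)\n", addr, addr + size);
	return 0;
}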
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 0797cb528b46..bbf3da012afd 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -153,7 +153,7 @@ static struct notifier_block vfp_notifier_block = {
153 * Raise a SIGFPE for the current process. 153 * Raise a SIGFPE for the current process.
154 * sicode describes the signal being raised. 154 * sicode describes the signal being raised.
155 */ 155 */
156void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) 156static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
157{ 157{
158 siginfo_t info; 158 siginfo_t info;
159 159
@@ -489,8 +489,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
489 489
490/* 490/*
491 * VFP hardware can lose all context when a CPU goes offline. 491 * VFP hardware can lose all context when a CPU goes offline.
492 * Safely clear our held state when a CPU has been killed, and 492 * As we will be running in SMP mode with CPU hotplug, we will save the
493 * re-enable access to VFP when the CPU comes back online. 493 * hardware state at every thread switch. We clear our held state when
494 * a CPU has been killed, indicating that the VFP hardware doesn't contain
495 * a thread's VFP state. When a CPU starts up, we re-enable access to the
496 * VFP hardware.
494 * 497 *
495 * Both CPU_DYING and CPU_STARTING are called on the CPU which 498 * Both CPU_DYING and CPU_STARTING are called on the CPU which
496 * is being offlined/onlined. 499 * is being offlined/onlined.