author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-14 20:48:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-14 20:48:14 -0400
commit		2ca7d674d7ab2220707b2ada0b690c0e7c95e7ac (patch)
tree		9c0927ed1d540e5fd704c1f82689870786514655 /arch/arm/kernel
parent		2195d2818c37bdf263865f1e9effccdd9fc5f9d4 (diff)
parent		87d721ad7a37b7650dd710c88dd5c6a5bf9fe996 (diff)
Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm: (257 commits)
  [ARM] Update mach-types
  ARM: 5636/1: Move vendor enum to AMBA include
  ARM: Fix pfn_valid() for sparse memory
  [ARM] orion5x: Add LaCie NAS 2Big Network support
  [ARM] pxa/sharpsl_pm: zaurus c3000 aka spitz: fix resume
  ARM: 5686/1: at91: Correct AC97 reset line in at91sam9263ek board
  ARM: 5640/1: This patch modifies the support of AC97 on the at91sam9263 ek board
  ARM: 5689/1: Update default config of HP Jornada 700-series machines
  ARM: 5691/1: fix cache aliasing issues between kmap() and kmap_atomic() with highmem
  ARM: 5688/1: ks8695_serial: disable_irq() lockup
  ARM: 5687/1: fix an oops with highmem
  ARM: 5684/1: Add nuc960 platform to w90x900
  ARM: 5683/1: Add nuc950 platform to w90x900
  ARM: 5682/1: Add cpu.c and dev.c and modify some files of w90p910 platform
  ARM: 5626/1: add suspend/resume functions to amba-pl011 serial driver
  ARM: 5625/1: fix hard coded 4K resource size in amba bus detection
  MMC: MMCI: convert realview MMC to use gpiolib
  ARM: 5685/1: Make MMCI driver compile without gpiolib
  ARM: implement highpte
  ARM: Show FIQ in /proc/interrupts on CONFIG_FIQ
  ...

Fix up trivial conflict in arch/arm/kernel/signal.c. It was due to the
TIF_NOTIFY_RESUME addition in commit d0420c83f ("KEYS: Extend
TIF_NOTIFY_RESUME to (almost) all architectures") and follow-ups.
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile          |   4
-rw-r--r--  arch/arm/kernel/armksyms.c        |   1
-rw-r--r--  arch/arm/kernel/calls.S           |  10
-rw-r--r--  arch/arm/kernel/crunch.c          |  13
-rw-r--r--  arch/arm/kernel/entry-armv.S      | 179
-rw-r--r--  arch/arm/kernel/entry-common.S    |  57
-rw-r--r--  arch/arm/kernel/entry-header.S    |  92
-rw-r--r--  arch/arm/kernel/head-common.S     |  15
-rw-r--r--  arch/arm/kernel/head-nommu.S      |  16
-rw-r--r--  arch/arm/kernel/head.S            |  28
-rw-r--r--  arch/arm/kernel/irq.c             |   2
-rw-r--r--  arch/arm/kernel/module.c          |  53
-rw-r--r--  arch/arm/kernel/process.c         |   2
-rw-r--r--  arch/arm/kernel/ptrace.c          |   8
-rw-r--r--  arch/arm/kernel/return_address.c  |  71
-rw-r--r--  arch/arm/kernel/setup.c           |  28
-rw-r--r--  arch/arm/kernel/signal.c          |  86
-rw-r--r--  arch/arm/kernel/stacktrace.c      |   4
-rw-r--r--  arch/arm/kernel/unwind.c          |   4
19 files changed, 473 insertions, 200 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index ff89d0b3abc5..3213c9382b17 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -8,10 +8,12 @@ ifdef CONFIG_DYNAMIC_FTRACE
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
+CFLAGS_REMOVE_return_address.o = -pg
+
 # Object file lists.
 
 obj-y		:= compat.o elf.o entry-armv.o entry-common.o irq.o \
-		   process.o ptrace.o setup.o signal.o \
+		   process.o ptrace.o return_address.o setup.o signal.o \
 		   sys_arm.o stacktrace.o time.o traps.o
 
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
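
CFLAGS_REMOVE_<object>.o is the standard Kbuild idiom for stripping a flag from a
single object. Here it keeps -pg off return_address.o, since profiling the very
function the profiler calls to resolve return addresses would recurse. A minimal
sketch of the pattern in any kernel Makefile (names as above):

	# Kbuild filters these flags out of KBUILD_CFLAGS for this object only
	CFLAGS_REMOVE_return_address.o = -pg
	obj-y += return_address.o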
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 531e1860e546..0e627705f746 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -186,4 +186,5 @@ EXPORT_SYMBOL(_find_next_bit_be);
 
 #ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(mcount);
+EXPORT_SYMBOL(__gnu_mcount_nc);
 #endif
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index f776e72a4cb8..ecfa98954d1d 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -81,7 +81,7 @@
 		CALL(sys_ni_syscall)		/* was sys_ssetmask */
 /* 70 */	CALL(sys_setreuid16)
 		CALL(sys_setregid16)
-		CALL(sys_sigsuspend_wrapper)
+		CALL(sys_sigsuspend)
 		CALL(sys_sigpending)
 		CALL(sys_sethostname)
 /* 75 */	CALL(sys_setrlimit)
@@ -188,7 +188,7 @@
 		CALL(sys_rt_sigpending)
 		CALL(sys_rt_sigtimedwait)
 		CALL(sys_rt_sigqueueinfo)
-		CALL(sys_rt_sigsuspend_wrapper)
+		CALL(sys_rt_sigsuspend)
 /* 180 */	CALL(ABI(sys_pread64, sys_oabi_pread64))
 		CALL(ABI(sys_pwrite64, sys_oabi_pwrite64))
 		CALL(sys_chown16)
@@ -344,8 +344,8 @@
 		CALL(sys_readlinkat)
 		CALL(sys_fchmodat)
 		CALL(sys_faccessat)
-/* 335 */	CALL(sys_ni_syscall)		/* eventually pselect6 */
-		CALL(sys_ni_syscall)		/* eventually ppoll */
+/* 335 */	CALL(sys_pselect6)
+		CALL(sys_ppoll)
 		CALL(sys_unshare)
 		CALL(sys_set_robust_list)
 		CALL(sys_get_robust_list)
@@ -355,7 +355,7 @@
 		CALL(sys_vmsplice)
 		CALL(sys_move_pages)
 /* 345 */	CALL(sys_getcpu)
-		CALL(sys_ni_syscall)		/* eventually epoll_pwait */
+		CALL(sys_epoll_pwait)
 		CALL(sys_kexec_load)
 		CALL(sys_utimensat)
 		CALL(sys_signalfd)
diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c
index 99995c2b2312..769abe15cf91 100644
--- a/arch/arm/kernel/crunch.c
+++ b/arch/arm/kernel/crunch.c
@@ -31,7 +31,7 @@ void crunch_task_release(struct thread_info *thread)
 
 static int crunch_enabled(u32 devcfg)
 {
-	return !!(devcfg & EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE);
+	return !!(devcfg & EP93XX_SYSCON_DEVCFG_CPENA);
 }
 
 static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
@@ -56,11 +56,16 @@ static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
 		break;
 
 	case THREAD_NOTIFY_SWITCH:
-		devcfg = __raw_readl(EP93XX_SYSCON_DEVICE_CONFIG);
+		devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG);
 		if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
-			devcfg ^= EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE;
+			/*
+			 * We don't use ep93xx_syscon_swlocked_write() here
+			 * because we are on the context switch path and
+			 * preemption is already disabled.
+			 */
+			devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA;
 			__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
-			__raw_writel(devcfg, EP93XX_SYSCON_DEVICE_CONFIG);
+			__raw_writel(devcfg, EP93XX_SYSCON_DEVCFG);
 		}
 		break;
 	}
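
The pair of __raw_writel() calls kept above is the ep93xx software-lock idiom:
writing 0xaa to the SWLOCK register arms the syscon block for exactly one
following write. A hedged sketch of the open-coded sequence (the real
ep93xx_syscon_swlocked_write() helper also takes a spinlock, which is what the
context-switch path must avoid):

	/* Sketch only; assumes the EP93XX_SYSCON_* definitions used above. */
	static void syscon_swlocked_write(unsigned int val, void __iomem *reg)
	{
		__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);	/* arm next write */
		__raw_writel(val, reg);				/* locked register */
	}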
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index fc8af43c5000..3d727a8a23bc 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -34,7 +34,7 @@
 	@
 	@ routine called with r0 = irq number, r1 = struct pt_regs *
 	@
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	asm_do_IRQ
 
 #ifdef CONFIG_SMP
@@ -46,13 +46,13 @@
 	 */
 	test_for_ipi r0, r6, r5, lr
 	movne	r0, sp
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	do_IPI
 
 #ifdef CONFIG_LOCAL_TIMERS
 	test_for_ltirq r0, r6, r5, lr
 	movne	r0, sp
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	do_local_timer
 #endif
 #endif
@@ -70,7 +70,10 @@
 	 */
 	.macro	inv_entry, reason
 	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - lr}
+ ARM(	stmib	sp, {r1 - lr}		)
+ THUMB(	stmia	sp, {r0 - r12}		)
+ THUMB(	str	sp, [sp, #S_SP]		)
+ THUMB(	str	lr, [sp, #S_LR]		)
 	mov	r1, #\reason
 	.endm
 
@@ -126,17 +129,24 @@ ENDPROC(__und_invalid)
 	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole)
+	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+#ifdef CONFIG_THUMB2_KERNEL
+ SPFIX(	str	r0, [sp]	)	@ temporarily saved
+ SPFIX(	mov	r0, sp		)
+ SPFIX(	tst	r0, #4		)	@ test original stack alignment
+ SPFIX(	ldr	r0, [sp]	)	@ restored
+#else
 SPFIX(	tst	sp, #4		)
-SPFIX(	bicne	sp, sp, #4	)
-	stmib	sp, {r1 - r12}
+#endif
+ SPFIX(	subeq	sp, sp, #4	)
+	stmia	sp, {r1 - r12}
 
 	ldmia	r0, {r1 - r3}
-	add	r5, sp, #S_SP		@ here for interlock avoidance
+	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r4, #-1			@  ""  ""      ""       ""
-	add	r0, sp, #(S_FRAME_SIZE + \stack_hole)
-SPFIX(	addne	r0, r0, #4	)
-	str	r1, [sp]		@ save the "real" r0 copied
+	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ SPFIX(	addeq	r0, r0, #4	)
+	str	r1, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
 
 	mov	r1, lr
@@ -151,6 +161,8 @@ ENDPROC(__und_invalid)
 	@ r4 - orig_r0 (see pt_regs definition in ptrace.h)
 	@
 	stmia	r5, {r0 - r4}
+
+	asm_trace_hardirqs_off
 	.endm
 
 	.align	5
@@ -196,9 +208,8 @@ __dabt_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r0, [sp, #S_PSR]
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	ldr	r2, [sp, #S_PSR]
+	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__dabt_svc)
 
@@ -206,9 +217,6 @@ ENDPROC(__dabt_svc)
 __irq_svc:
 	svc_entry
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
-#endif
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
@@ -225,13 +233,12 @@ __irq_svc:
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
 #endif
-	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
-	msr	spsr_cxsf, r0
+	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
 #ifdef CONFIG_TRACE_IRQFLAGS
-	tst	r0, #PSR_I_BIT
+	tst	r4, #PSR_I_BIT
 	bleq	trace_hardirqs_on
 #endif
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	svc_exit r4				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__irq_svc)
 
@@ -266,7 +273,7 @@ __und_svc:
 	@  r0 - instruction
 	@
 	ldr	r0, [r2, #-4]
-	adr	r9, 1f
+	adr	r9, BSYM(1f)
 	bl	call_fpe
 
 	mov	r0, sp				@ struct pt_regs *regs
@@ -280,9 +287,8 @@ __und_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
-	msr	spsr_cxsf, lr
-	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
+	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
+	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__und_svc)
 
@@ -323,9 +329,8 @@ __pabt_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r0, [sp, #S_PSR]
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	ldr	r2, [sp, #S_PSR]
+	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__pabt_svc)
 
@@ -353,7 +358,8 @@ ENDPROC(__pabt_svc)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - r12}
+ ARM(	stmib	sp, {r1 - r12}	)
+ THUMB(	stmia	sp, {r0 - r12}	)
 
 	ldmia	r0, {r1 - r3}
 	add	r0, sp, #S_PC		@ here for interlock avoidance
@@ -372,7 +378,8 @@ ENDPROC(__pabt_svc)
 	@ Also, separately save sp_usr and lr_usr
 	@
 	stmia	r0, {r2 - r4}
-	stmdb	r0, {sp, lr}^
+ ARM(	stmdb	r0, {sp, lr}^			)
+ THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 
 	@
 	@ Enable the alignment trap while in kernel mode
@@ -383,6 +390,8 @@ ENDPROC(__pabt_svc)
 	@ Clear FP to mark the first stack frame
 	@
 	zero_fp
+
+	asm_trace_hardirqs_off
 	.endm
 
 	.macro	kuser_cmpxchg_check
@@ -427,7 +436,7 @@ __dabt_usr:
 	@
 	enable_irq
 	mov	r2, sp
-	adr	lr, ret_from_exception
+	adr	lr, BSYM(ret_from_exception)
 	b	do_DataAbort
 UNWIND(.fnend		)
 ENDPROC(__dabt_usr)
@@ -437,9 +446,6 @@ __irq_usr:
 	usr_entry
 	kuser_cmpxchg_check
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
-#endif
 	get_thread_info tsk
 #ifdef CONFIG_PREEMPT
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
@@ -452,7 +458,9 @@ __irq_usr:
 	ldr	r0, [tsk, #TI_PREEMPT]
 	str	r8, [tsk, #TI_PREEMPT]
 	teq	r0, r7
-	strne	r0, [r0, -r0]
+ ARM(	strne	r0, [r0, -r0]	)
+ THUMB(	movne	r0, #0		)
+ THUMB(	strne	r0, [r0]	)
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
@@ -476,9 +484,10 @@ __und_usr:
 	@
 	@  r0 - instruction
 	@
-	adr	r9, ret_from_exception
-	adr	lr, __und_usr_unknown
+	adr	r9, BSYM(ret_from_exception)
+	adr	lr, BSYM(__und_usr_unknown)
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
+	itet	eq				@ explicit IT needed for the 1f label
 	subeq	r4, r2, #4			@ ARM instr at LR - 4
 	subne	r4, r2, #2			@ Thumb instr at LR - 2
 1:	ldreqt	r0, [r4]
@@ -488,7 +497,10 @@ __und_usr:
 	beq	call_fpe
 	@ Thumb instruction
 #if __LINUX_ARM_ARCH__ >= 7
-2:	ldrht	r5, [r4], #2
+2:
+ ARM(	ldrht	r5, [r4], #2	)
+ THUMB(	ldrht	r5, [r4]	)
+ THUMB(	add	r4, r4, #2	)
 	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
 	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
 	blo	__und_usr_unknown
@@ -577,9 +589,11 @@ call_fpe:
 	moveq	pc, lr
 	get_thread_info r10			@ get current thread
 	and	r8, r0, #0x00000f00		@ mask out CP number
+ THUMB(	lsr	r8, r8, #8		)
 	mov	r7, #1
 	add	r6, r10, #TI_USED_CP
-	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
+ ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
+ THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
 #ifdef CONFIG_IWMMXT
 	@ Test if we need to give access to iWMMXt coprocessors
 	ldr	r5, [r10, #TI_FLAGS]
@@ -587,36 +601,38 @@ call_fpe:
 	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 	bcs	iwmmxt_task_enable
 #endif
-	add	pc, pc, r8, lsr #6
-	mov	r0, r0
-
-	mov	pc, lr				@ CP#0
-	b	do_fpe				@ CP#1 (FPE)
-	b	do_fpe				@ CP#2 (FPE)
-	mov	pc, lr				@ CP#3
+ ARM(	add	pc, pc, r8, lsr #6	)
+ THUMB(	lsl	r8, r8, #2		)
+ THUMB(	add	pc, r8			)
+	nop
+
+	W(mov)	pc, lr				@ CP#0
+	W(b)	do_fpe				@ CP#1 (FPE)
+	W(b)	do_fpe				@ CP#2 (FPE)
+	W(mov)	pc, lr				@ CP#3
 #ifdef CONFIG_CRUNCH
 	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
 	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
 	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
 #else
-	mov	pc, lr				@ CP#4
-	mov	pc, lr				@ CP#5
-	mov	pc, lr				@ CP#6
+	W(mov)	pc, lr				@ CP#4
+	W(mov)	pc, lr				@ CP#5
+	W(mov)	pc, lr				@ CP#6
#endif
-	mov	pc, lr				@ CP#7
-	mov	pc, lr				@ CP#8
-	mov	pc, lr				@ CP#9
+	W(mov)	pc, lr				@ CP#7
+	W(mov)	pc, lr				@ CP#8
+	W(mov)	pc, lr				@ CP#9
 #ifdef CONFIG_VFP
-	b	do_vfp				@ CP#10 (VFP)
-	b	do_vfp				@ CP#11 (VFP)
+	W(b)	do_vfp				@ CP#10 (VFP)
+	W(b)	do_vfp				@ CP#11 (VFP)
 #else
-	mov	pc, lr				@ CP#10 (VFP)
-	mov	pc, lr				@ CP#11 (VFP)
+	W(mov)	pc, lr				@ CP#10 (VFP)
+	W(mov)	pc, lr				@ CP#11 (VFP)
 #endif
-	mov	pc, lr				@ CP#12
-	mov	pc, lr				@ CP#13
-	mov	pc, lr				@ CP#14 (Debug)
-	mov	pc, lr				@ CP#15 (Control)
+	W(mov)	pc, lr				@ CP#12
+	W(mov)	pc, lr				@ CP#13
+	W(mov)	pc, lr				@ CP#14 (Debug)
+	W(mov)	pc, lr				@ CP#15 (Control)
 
 #ifdef CONFIG_NEON
 	.align	6
@@ -667,7 +683,7 @@ no_fp:	mov	pc, lr
 __und_usr_unknown:
 	enable_irq
 	mov	r0, sp
-	adr	lr, ret_from_exception
+	adr	lr, BSYM(ret_from_exception)
 	b	do_undefinstr
 ENDPROC(__und_usr_unknown)
 
@@ -711,7 +727,10 @@ ENTRY(__switch_to)
 UNWIND(.cantunwind	)
 	add	ip, r1, #TI_CPU_SAVE
 	ldr	r3, [r2, #TI_TP_VALUE]
-	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
+ ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
+ THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
+ THUMB(	str	sp, [ip], #4		   )
+ THUMB(	str	lr, [ip], #4		   )
 #ifdef CONFIG_MMU
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
@@ -736,8 +755,12 @@ ENTRY(__switch_to)
 	ldr	r0, =thread_notify_head
 	mov	r1, #THREAD_NOTIFY_SWITCH
 	bl	atomic_notifier_call_chain
+ THUMB(	mov	ip, r4			   )
 	mov	r0, r5
-	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
+ ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
+ THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
+ THUMB(	ldr	sp, [ip], #4		   )
+ THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
 ENDPROC(__switch_to)
 
@@ -772,6 +795,7 @@ ENDPROC(__switch_to)
  * if your compiled code is not going to use the new instructions for other
  * purpose.
  */
+ THUMB(	.arm	)
 
 	.macro	usr_ret, reg
 #ifdef CONFIG_ARM_THUMB
@@ -1020,6 +1044,7 @@ __kuser_helper_version:			@ 0xffff0ffc
 	.globl	__kuser_helper_end
 __kuser_helper_end:
 
+ THUMB(	.thumb	)
 
 /*
  * Vector stubs.
@@ -1054,17 +1079,23 @@ vector_\name:
 	@ Prepare for SVC32 mode.  IRQs remain disabled.
 	@
 	mrs	r0, cpsr
-	eor	r0, r0, #(\mode ^ SVC_MODE)
+	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
 	msr	spsr_cxsf, r0
 
 	@
 	@ the branch table must immediately follow this code
 	@
 	and	lr, lr, #0x0f
+ THUMB(	adr	r0, 1f			)
+ THUMB(	ldr	lr, [r0, lr, lsl #2]	)
 	mov	r0, sp
-	ldr	lr, [pc, lr, lsl #2]
+ ARM(	ldr	lr, [pc, lr, lsl #2]	)
 	movs	pc, lr			@ branch to handler in SVC mode
 ENDPROC(vector_\name)
+
+	.align	2
+	@ handler addresses follow this label
+1:
 	.endm
 
 	.globl	__stubs_start
@@ -1202,14 +1233,16 @@ __stubs_end:
 
 	.globl	__vectors_start
 __vectors_start:
-	swi	SYS_ERROR0
-	b	vector_und + stubs_offset
-	ldr	pc, .LCvswi + stubs_offset
-	b	vector_pabt + stubs_offset
-	b	vector_dabt + stubs_offset
-	b	vector_addrexcptn + stubs_offset
-	b	vector_irq + stubs_offset
-	b	vector_fiq + stubs_offset
+ ARM(	swi	SYS_ERROR0	)
+ THUMB(	svc	#0		)
+ THUMB(	nop			)
+	W(b)	vector_und + stubs_offset
+	W(ldr)	pc, .LCvswi + stubs_offset
+	W(b)	vector_pabt + stubs_offset
+	W(b)	vector_dabt + stubs_offset
+	W(b)	vector_addrexcptn + stubs_offset
+	W(b)	vector_irq + stubs_offset
+	W(b)	vector_fiq + stubs_offset
 
 	.globl	__vectors_end
 __vectors_end:
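
The ARM()/THUMB()/W()/BSYM() annotations used throughout this file come from
asm/unified.h. Roughly (a sketch, not the verbatim header): ARM(x) assembles x
only for an ARM-state kernel, THUMB(x) only under CONFIG_THUMB2_KERNEL, W(instr)
forces the 32-bit wide Thumb-2 encoding so branch-table entries stay one word
apiece, and BSYM(sym) sets bit 0 on a symbol reference so loading it into pc
keeps the CPU in Thumb state:

	#ifdef CONFIG_THUMB2_KERNEL
	#define ARM(x...)
	#define THUMB(x...)	x
	#define W(instr)	instr.w
	#define BSYM(sym)	sym + 1
	#else
	#define ARM(x...)	x
	#define THUMB(x...)
	#define W(instr)	instr
	#define BSYM(sym)	sym
	#endif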
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7813ab782fda..807cfebb0f44 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -33,14 +33,7 @@ ret_fast_syscall:
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
 
-	@ fast_restore_user_regs
-	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
+	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
 
 /*
@@ -73,14 +66,7 @@ no_work_pending:
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
 
-	@ slow_restore_user_regs
-	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
-	ldr	lr, [sp, #S_PC]!		@ get pc
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
+	restore_user_regs fast = 0, offset = 0
 ENDPROC(ret_to_user)
 
 /*
@@ -132,6 +118,25 @@ ftrace_call:
 
 #else
 
+ENTRY(__gnu_mcount_nc)
+	stmdb sp!, {r0-r3, lr}
+	ldr r0, =ftrace_trace_function
+	ldr r2, [r0]
+	adr r0, ftrace_stub
+	cmp r0, r2
+	bne gnu_trace
+	ldmia sp!, {r0-r3, ip, lr}
+	bx ip
+
+gnu_trace:
+	ldr r1, [sp, #20]			@ lr of instrumented routine
+	mov r0, lr
+	sub r0, r0, #MCOUNT_INSN_SIZE
+	mov lr, pc
+	mov pc, r2
+	ldmia sp!, {r0-r3, ip, lr}
+	bx ip
+
 ENTRY(mcount)
 	stmdb sp!, {r0-r3, lr}
 	ldr r0, =ftrace_trace_function
@@ -182,8 +187,10 @@ ftrace_stub:
 ENTRY(vector_swi)
 	sub	sp, sp, #S_FRAME_SIZE
 	stmia	sp, {r0 - r12}			@ Calling r0 - r12
-	add	r8, sp, #S_PC
-	stmdb	r8, {sp, lr}^			@ Calling sp, lr
+ ARM(	add	r8, sp, #S_PC		)
+ ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
+ THUMB(	mov	r8, sp			)
+ THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
 	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
 	str	lr, [sp, #S_PC]			@ Save calling PC
 	str	r8, [sp, #S_PSR]		@ Save CPSR
@@ -272,7 +279,7 @@ ENTRY(vector_swi)
 	bne	__sys_trace
 
 	cmp	scno, #NR_syscalls		@ check upper syscall limit
-	adr	lr, ret_fast_syscall		@ return address
+	adr	lr, BSYM(ret_fast_syscall)	@ return address
 	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
 
 	add	r1, sp, #S_OFF
@@ -293,7 +300,7 @@ __sys_trace:
 	mov	r0, #0				@ trace entry [IP = 0]
 	bl	syscall_trace
 
-	adr	lr, __sys_trace_return		@ return address
+	adr	lr, BSYM(__sys_trace_return)	@ return address
 	mov	scno, r0			@ syscall number (possibly new)
 	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
 	cmp	scno, #NR_syscalls		@ check upper syscall limit
@@ -373,16 +380,6 @@ sys_clone_wrapper:
 		b	sys_clone
 ENDPROC(sys_clone_wrapper)
 
-sys_sigsuspend_wrapper:
-		add	r3, sp, #S_OFF
-		b	sys_sigsuspend
-ENDPROC(sys_sigsuspend_wrapper)
-
-sys_rt_sigsuspend_wrapper:
-		add	r2, sp, #S_OFF
-		b	sys_rt_sigsuspend
-ENDPROC(sys_rt_sigsuspend_wrapper)
-
 sys_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
 		b	sys_sigreturn
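
__gnu_mcount_nc exists because newer EABI toolchains emit a different -pg
sequence than classic mcount: the compiler pushes lr itself and expects the
helper to pop it, which is why the exit paths above pop an extra word into ip
and return with bx ip. A sketch of the assumed codegen at every instrumented
function entry:

	func:
		push	{lr}			@ compiler-emitted: save caller's lr
		bl	__gnu_mcount_nc		@ helper pops it again on exit
		...				@ sp balanced, lr intact from here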
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 87ab4e157997..a4eaf4f920c5 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -36,11 +36,6 @@
 #endif
 	.endm
 
-	.macro	get_thread_info, rd
-	mov	\rd, sp, lsr #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
 	.macro	alignment_trap, rtemp
 #ifdef CONFIG_ALIGNMENT_TRAP
 	ldr	\rtemp, .LCcralign
@@ -49,6 +44,93 @@
 #endif
 	.endm
 
+	@
+	@ Store/load the USER SP and LR registers by switching to the SYS
+	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
+	@ available. Should only be called from SVC mode
+	@
+	.macro	store_user_sp_lr, rd, rtemp, offset = 0
+	mrs	\rtemp, cpsr
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch to the SYS mode
+
+	str	sp, [\rd, #\offset]		@ save sp_usr
+	str	lr, [\rd, #\offset + 4]		@ save lr_usr
+
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
+	.endm
+
+	.macro	load_user_sp_lr, rd, rtemp, offset = 0
+	mrs	\rtemp, cpsr
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch to the SYS mode
+
+	ldr	sp, [\rd, #\offset]		@ load sp_usr
+	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr
+
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
+	.endm
+
+#ifndef CONFIG_THUMB2_KERNEL
+	.macro	svc_exit, rpsr
+	msr	spsr_cxsf, \rpsr
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	.endm
+
+	.macro	restore_user_regs, fast = 0, offset = 0
+	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+	.if	\fast
+	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	.else
+	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	.endif
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+
+	.macro	get_thread_info, rd
+	mov	\rd, sp, lsr #13
+	mov	\rd, \rd, lsl #13
+	.endm
+#else	/* CONFIG_THUMB2_KERNEL */
+	.macro	svc_exit, rpsr
+	ldr	r0, [sp, #S_SP]			@ top of the stack
+	ldr	r1, [sp, #S_PC]			@ return address
+	tst	r0, #4				@ orig stack 8-byte aligned?
+	stmdb	r0, {r1, \rpsr}			@ rfe context
+	ldmia	sp, {r0 - r12}
+	ldr	lr, [sp, #S_LR]
+	addeq	sp, sp, #S_FRAME_SIZE - 8	@ aligned
+	addne	sp, sp, #S_FRAME_SIZE - 4	@ not aligned
+	rfeia	sp!
+	.endm
+
+	.macro	restore_user_regs, fast = 0, offset = 0
+	mov	r2, sp
+	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
+	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #\offset + S_PC]	@ get pc
+	add	sp, sp, #\offset + S_SP
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+	.if	\fast
+	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
+	.else
+	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
+	.endif
+	add	sp, sp, #S_FRAME_SIZE - S_SP
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+
+	.macro	get_thread_info, rd
+	mov	\rd, sp
+	lsr	\rd, \rd, #13
+	mov	\rd, \rd, lsl #13
+	.endm
+#endif	/* !CONFIG_THUMB2_KERNEL */
 
 /*
  * These are the registers used in the syscall handler, and allow us to
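
Both get_thread_info variants depend on kernel stacks being THREAD_SIZE (8KB,
hence the #13 shifts) aligned, so rounding sp down to that boundary lands on
the struct thread_info at the stack base. The C equivalent would be roughly:

	/* Sketch: mirrors the lsr/lsl pair above (THREAD_SIZE == 8192). */
	static inline struct thread_info *thread_info_of(unsigned long sp)
	{
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}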
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 991952c644d1..93ad576b2d74 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -14,6 +14,7 @@
 #define ATAG_CORE 0x54410001
 #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
 
+	.align	2
 	.type	__switch_data, %object
 __switch_data:
 	.long	__mmap_switched
@@ -51,7 +52,9 @@ __mmap_switched:
 	strcc	fp, [r6],#4
 	bcc	1b
 
-	ldmia	r3, {r4, r5, r6, r7, sp}
+ ARM(	ldmia	r3, {r4, r5, r6, r7, sp})
+ THUMB(	ldmia	r3, {r4, r5, r6, r7}	)
+ THUMB(	ldr	sp, [r3, #16]		)
 	str	r9, [r4]			@ Save processor ID
 	str	r1, [r5]			@ Save machine type
 	str	r2, [r6]			@ Save atags pointer
@@ -155,7 +158,8 @@ ENDPROC(__error)
  */
 __lookup_processor_type:
 	adr	r3, 3f
-	ldmda	r3, {r5 - r7}
+	ldmia	r3, {r5 - r7}
+	add	r3, r3, #8
 	sub	r3, r3, r7			@ get offset between virt&phys
 	add	r5, r5, r3			@ convert virt addresses to
 	add	r6, r6, r3			@ physical address space
@@ -185,9 +189,10 @@ ENDPROC(lookup_processor_type)
  * Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for
  * more information about the __proc_info and __arch_info structures.
  */
-	.long	__proc_info_begin
+	.align	2
+3:	.long	__proc_info_begin
 	.long	__proc_info_end
-3:	.long	.
+4:	.long	.
 	.long	__arch_info_begin
 	.long	__arch_info_end
 
@@ -203,7 +208,7 @@ ENDPROC(lookup_processor_type)
  * r5 = mach_info pointer in physical address space
  */
 __lookup_machine_type:
-	adr	r3, 3b
+	adr	r3, 4b
 	ldmia	r3, {r4, r5, r6}
 	sub	r3, r3, r4			@ get offset between virt&phys
 	add	r5, r5, r3			@ convert virt addresses to
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index cc87e1765ed2..e5dfc2895e24 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -34,7 +34,7 @@
  */
 	.section ".text.head", "ax"
 ENTRY(stext)
-	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
 #ifndef CONFIG_CPU_CP15
 	ldr	r9, =CONFIG_PROCESSOR_ID
@@ -50,8 +50,10 @@ ENTRY(stext)
 
 	ldr	r13, __switch_data		@ address to jump to after
 						@ the initialization is done
-	adr	lr, __after_proc_init		@ return (PIC) address
-	add	pc, r10, #PROCINFO_INITFUNC
+	adr	lr, BSYM(__after_proc_init)	@ return (PIC) address
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
 ENDPROC(stext)
 
 /*
@@ -59,7 +61,10 @@ ENDPROC(stext)
  */
 __after_proc_init:
 #ifdef CONFIG_CPU_CP15
-	mrc	p15, 0, r0, c1, c0, 0		@ read control reg
+	/*
+	 * CP15 system control register value returned in r0 from
+	 * the CPU init function.
+	 */
 #ifdef CONFIG_ALIGNMENT_TRAP
 	orr	r0, r0, #CR_A
 #else
@@ -82,7 +87,8 @@ __after_proc_init:
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 #endif /* CONFIG_CPU_CP15 */
 
-	mov	pc, r13				@ clear the BSS and jump
+	mov	r3, r13
+	mov	pc, r3				@ clear the BSS and jump
 						@ to start_kernel
 ENDPROC(__after_proc_init)
 	.ltorg
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 21e17dc94cb5..38ccbe1d3b2c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -76,7 +76,7 @@
  */
 	.section ".text.head", "ax"
 ENTRY(stext)
-	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
@@ -97,8 +97,10 @@ ENTRY(stext)
 	 */
 	ldr	r13, __switch_data		@ address to jump to after
 						@ mmu has been enabled
-	adr	lr, __enable_mmu		@ return (PIC) address
-	add	pc, r10, #PROCINFO_INITFUNC
+	adr	lr, BSYM(__enable_mmu)		@ return (PIC) address
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
 ENDPROC(stext)
 
 #if defined(CONFIG_SMP)
@@ -110,7 +112,7 @@ ENTRY(secondary_startup)
 	 * the processor type - there is no need to check the machine type
 	 * as it has already been validated by the primary processor.
 	 */
-	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type
 	movs	r10, r5				@ invalid processor?
@@ -121,12 +123,15 @@ ENTRY(secondary_startup)
 	 * Use the page tables supplied from  __cpu_up.
 	 */
 	adr	r4, __secondary_data
-	ldmia	r4, {r5, r7, r13}	@ address to jump to after
+	ldmia	r4, {r5, r7, r12}	@ address to jump to after
 	sub	r4, r4, r5		@ mmu has been enabled
 	ldr	r4, [r7, r4]		@ get secondary_data.pgdir
-	adr	lr, __enable_mmu	@ return address
-	add	pc, r10, #PROCINFO_INITFUNC @ initialise processor
-					@ (return control reg)
+	adr	lr, BSYM(__enable_mmu)	@ return address
+	mov	r13, r12		@ __secondary_switched address
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
+						  @ (return control reg)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
 ENDPROC(secondary_startup)
 
 	/*
@@ -193,8 +198,8 @@ __turn_mmu_on:
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
 	mov	r3, r3
-	mov	r3, r3
-	mov	pc, r13
+	mov	r3, r13
+	mov	pc, r3
 ENDPROC(__turn_mmu_on)
 
 
@@ -235,7 +240,8 @@ __create_page_tables:
 	 * will be removed by paging_init().  We use our current program
 	 * counter to determine corresponding section base address.
 	 */
-	mov	r6, pc, lsr #20		@ start of kernel section
+	mov	r6, pc
+	mov	r6, r6, lsr #20		@ start of kernel section
 	orr	r3, r7, r6, lsl #20	@ flags + kernel base
 	str	r3, [r4, r6, lsl #2]	@ identity mapping
 
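setmode replaces the bare msr because Thumb-2 has no msr-with-immediate
encoding, so the mode value has to be staged through a scratch register (r9
here, which is about to be overwritten anyway). A sketch of the macro this diff
assumes from asm/assembler.h:

	#ifdef CONFIG_THUMB2_KERNEL
		.macro	setmode, mode, reg
		mov	\reg, #\mode
		msr	cpsr_c, \reg
		.endm
	#else
		.macro	setmode, mode, reg
		msr	cpsr_c, #\mode
		.endm
	#endif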
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index b7c3490eaa24..c9a8619f3856 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -86,7 +86,7 @@ int show_interrupts(struct seq_file *p, void *v)
 unlock:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
-#ifdef CONFIG_ARCH_ACORN
+#ifdef CONFIG_FIQ
 		show_fiq_list(p, v);
 #endif
 #ifdef CONFIG_SMP
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index bac03c81489d..f28c5e9c51ea 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -102,6 +102,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 	unsigned long loc;
 	Elf32_Sym *sym;
 	s32 offset;
+	u32 upper, lower, sign, j1, j2;
 
 	offset = ELF32_R_SYM(rel->r_info);
 	if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
@@ -184,6 +185,58 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			       (offset & 0x0fff);
 			break;
 
+		case R_ARM_THM_CALL:
+		case R_ARM_THM_JUMP24:
+			upper = *(u16 *)loc;
+			lower = *(u16 *)(loc + 2);
+
+			/*
+			 * 25 bit signed address range (Thumb-2 BL and B.W
+			 * instructions):
+			 *   S:I1:I2:imm10:imm11:0
+			 * where:
+			 *   S     = upper[10]   = offset[24]
+			 *   I1    = ~(J1 ^ S)   = offset[23]
+			 *   I2    = ~(J2 ^ S)   = offset[22]
+			 *   imm10 = upper[9:0]  = offset[21:12]
+			 *   imm11 = lower[10:0] = offset[11:1]
+			 *   J1    = lower[13]
+			 *   J2    = lower[11]
+			 */
+			sign = (upper >> 10) & 1;
+			j1 = (lower >> 13) & 1;
+			j2 = (lower >> 11) & 1;
+			offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
+				((~(j2 ^ sign) & 1) << 22) |
+				((upper & 0x03ff) << 12) |
+				((lower & 0x07ff) << 1);
+			if (offset & 0x01000000)
+				offset -= 0x02000000;
+			offset += sym->st_value - loc;
+
+			/* only Thumb addresses allowed (no interworking) */
+			if (!(offset & 1) ||
+			    offset <= (s32)0xff000000 ||
+			    offset >= (s32)0x01000000) {
+				printk(KERN_ERR
+				       "%s: relocation out of range, section "
+				       "%d reloc %d sym '%s'\n", module->name,
+				       relindex, i, strtab + sym->st_name);
+				return -ENOEXEC;
+			}
+
+			sign = (offset >> 24) & 1;
+			j1 = sign ^ (~(offset >> 23) & 1);
+			j2 = sign ^ (~(offset >> 22) & 1);
+			*(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
+					    ((offset >> 12) & 0x03ff));
+			*(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
+						  (j1 << 13) | (j2 << 11) |
+						  ((offset >> 1) & 0x07ff));
+			upper = *(u16 *)loc;
+			lower = *(u16 *)(loc + 2);
+			break;
+
 		default:
 			printk(KERN_ERR "%s: unknown relocation: %u\n",
 			       module->name, ELF32_R_TYPE(rel->r_info));
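
The bit manipulation in the new R_ARM_THM_CALL case follows the Thumb-2 BL/B.W
immediate format, where the sign bit S and the inverted J1/J2 bits extend
imm10:imm11 to a 25-bit signed offset. A standalone decode of the same fields,
handy for sanity-checking the math (the 0xf000/0xf800 halfword pair carries an
all-zero immediate and should decode to 0):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the decode half of the relocation handler above. */
	static int32_t thm2_branch_offset(uint16_t upper, uint16_t lower)
	{
		uint32_t sign = (upper >> 10) & 1;
		uint32_t j1 = (lower >> 13) & 1;
		uint32_t j2 = (lower >> 11) & 1;
		int32_t offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
				 ((~(j2 ^ sign) & 1) << 22) |
				 ((upper & 0x03ff) << 12) |
				 ((lower & 0x07ff) << 1);

		if (offset & 0x01000000)	/* sign-extend the 25-bit value */
			offset -= 0x02000000;
		return offset;
	}

	int main(void)
	{
		printf("%d\n", thm2_branch_offset(0xf000, 0xf800));	/* 0 */
		return 0;
	}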
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 39196dff478c..790fbee92ec5 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -388,7 +388,7 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 	regs.ARM_r2 = (unsigned long)fn;
 	regs.ARM_r3 = (unsigned long)kernel_thread_exit;
 	regs.ARM_pc = (unsigned long)kernel_thread_helper;
-	regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
+	regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
 
 	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 89882a1d0187..a2ea3854cb3c 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -521,7 +521,13 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
 		return -EIO;
 
 	tmp = 0;
-	if (off < sizeof(struct pt_regs))
+	if (off == PT_TEXT_ADDR)
+		tmp = tsk->mm->start_code;
+	else if (off == PT_DATA_ADDR)
+		tmp = tsk->mm->start_data;
+	else if (off == PT_TEXT_END_ADDR)
+		tmp = tsk->mm->end_code;
+	else if (off < sizeof(struct pt_regs))
 		tmp = get_user_reg(tsk, off >> 2);
 
 	return put_user(tmp, ret);
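
PT_TEXT_ADDR, PT_DATA_ADDR and PT_TEXT_END_ADDR are magic PTRACE_PEEKUSER
offsets from asm/ptrace.h that no-MMU debuggers (notably uClinux gdbserver)
use to discover where a relocated executable actually landed. A hedged
userspace sketch of a tracer reading them (assumes an attached, stopped
tracee):

	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <asm/ptrace.h>		/* PT_TEXT_ADDR and friends on ARM */

	static void dump_layout(pid_t pid)
	{
		long text  = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_TEXT_ADDR, 0);
		long etext = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_TEXT_END_ADDR, 0);
		long data  = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_DATA_ADDR, 0);

		printf("text %#lx..%#lx, data %#lx\n", text, etext, data);
	}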
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
new file mode 100644
index 000000000000..df246da4ceca
--- /dev/null
+++ b/arch/arm/kernel/return_address.c
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/kernel/return_address.c
+ *
+ * Copyright (C) 2009 Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ * for Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/module.h>
+
+#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
+#include <linux/sched.h>
+
+#include <asm/stacktrace.h>
+
+struct return_address_data {
+	unsigned int level;
+	void *addr;
+};
+
+static int save_return_addr(struct stackframe *frame, void *d)
+{
+	struct return_address_data *data = d;
+
+	if (!data->level) {
+		data->addr = (void *)frame->lr;
+
+		return 1;
+	} else {
+		--data->level;
+		return 0;
+	}
+}
+
+void *return_address(unsigned int level)
+{
+	struct return_address_data data;
+	struct stackframe frame;
+	register unsigned long current_sp asm ("sp");
+
+	data.level = level + 1;
+
+	frame.fp = (unsigned long)__builtin_frame_address(0);
+	frame.sp = current_sp;
+	frame.lr = (unsigned long)__builtin_return_address(0);
+	frame.pc = (unsigned long)return_address;
+
+	walk_stackframe(&frame, save_return_addr, &data);
+
+	if (!data.level)
+		return data.addr;
+	else
+		return NULL;
+}
+
+#else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
+
+#if defined(CONFIG_ARM_UNWIND)
+#warning "TODO: return_address should use unwind tables"
+#endif
+
+void *return_address(unsigned int level)
+{
+	return NULL;
+}
+
+#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */
+
+EXPORT_SYMBOL_GPL(return_address);
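
return_address(level) is the out-of-line counterpart of
__builtin_return_address() for ARM, walking saved frame pointers instead of
relying on the builtin; the Makefile hunk earlier strips -pg from it so ftrace
can call it without recursing. A usage sketch (hypothetical caller; the exact
frame counting for level is an assumption here):

	/* Roughly: who called my caller? */
	void *caller_of_caller(void)
	{
		return return_address(1);
	}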
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index bc5e4128f9f3..d4d4f77c91b2 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -25,6 +25,7 @@
 #include <linux/smp.h>
 #include <linux/fs.h>
 
+#include <asm/unified.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
@@ -327,25 +328,38 @@ void cpu_init(void)
 	}
 
 	/*
+	 * Define the placement constraint for the inline asm directive below.
+	 * In Thumb-2, msr with an immediate value is not allowed.
+	 */
+#ifdef CONFIG_THUMB2_KERNEL
+#define PLC	"r"
+#else
+#define PLC	"I"
+#endif
+
+	/*
 	 * setup stacks for re-entrant exception handlers
 	 */
 	__asm__ (
 	"msr	cpsr_c, %1\n\t"
-	"add	sp, %0, %2\n\t"
+	"add	r14, %0, %2\n\t"
+	"mov	sp, r14\n\t"
 	"msr	cpsr_c, %3\n\t"
-	"add	sp, %0, %4\n\t"
+	"add	r14, %0, %4\n\t"
+	"mov	sp, r14\n\t"
 	"msr	cpsr_c, %5\n\t"
-	"add	sp, %0, %6\n\t"
+	"add	r14, %0, %6\n\t"
+	"mov	sp, r14\n\t"
 	"msr	cpsr_c, %7"
 	    :
 	    : "r" (stk),
-	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
 	      "I" (offsetof(struct stack, irq[0])),
-	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
 	      "I" (offsetof(struct stack, abt[0])),
-	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
 	      "I" (offsetof(struct stack, und[0])),
-	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
 	    : "r14");
 }
 
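The PLC swap matters because gcc's "I" constraint demands a compile-time
immediate the instruction can encode, and Thumb-2 simply has no immediate form
of msr, so the mode word must be materialized in a register. A compact
illustration of the two constraint choices:

	/* ARM state: "I" lets gcc emit "msr cpsr_c, #imm" directly. */
	asm volatile("msr	cpsr_c, %0" : : "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE));

	/* Thumb-2: "r" stages the same constant through a register first. */
	asm volatile("msr	cpsr_c, %0" : : "r" (PSR_F_BIT | PSR_I_BIT | SVC_MODE));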
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index b76fe06d92e7..1423a3419789 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -48,57 +48,22 @@ const unsigned long sigreturn_codes[7] = {
 	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
 };
 
-static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
-
 /*
  * atomically swap in the new signal mask, and wait for a signal.
  */
-asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
+asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
 {
-	sigset_t saveset;
-
 	mask &= _BLOCKABLE;
 	spin_lock_irq(&current->sighand->siglock);
-	saveset = current->blocked;
+	current->saved_sigmask = current->blocked;
 	siginitset(&current->blocked, mask);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
-	regs->ARM_r0 = -EINTR;
-
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (do_signal(&saveset, regs, 0))
-			return regs->ARM_r0;
-	}
-}
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
-{
-	sigset_t saveset, newset;
-
-	/* XXX: Don't preclude handling different sized sigset_t's. */
-	if (sigsetsize != sizeof(sigset_t))
-		return -EINVAL;
-
-	if (copy_from_user(&newset, unewset, sizeof(newset)))
-		return -EFAULT;
-	sigdelsetmask(&newset, ~_BLOCKABLE);
-
-	spin_lock_irq(&current->sighand->siglock);
-	saveset = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-	regs->ARM_r0 = -EINTR;
 
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (do_signal(&saveset, regs, 0))
-			return regs->ARM_r0;
-	}
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	set_restore_sigmask();
+	return -ERESTARTNOHAND;
 }
 
 asmlinkage int
@@ -546,7 +511,7 @@ static inline void setup_syscall_restart(struct pt_regs *regs)
 /*
  * OK, we're invoking a handler
  */
-static void
+static int
 handle_signal(unsigned long sig, struct k_sigaction *ka,
 	      siginfo_t *info, sigset_t *oldset,
 	      struct pt_regs * regs, int syscall)
@@ -597,7 +562,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 
 	if (ret != 0) {
 		force_sigsegv(sig, tsk);
-		return;
+		return ret;
 	}
 
 	/*
@@ -611,6 +576,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 	recalc_sigpending();
 	spin_unlock_irq(&tsk->sighand->siglock);
 
+	return 0;
 }
 
 /*
@@ -622,7 +588,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
  * the kernel can handle, and then we build all the user-level signal handling
  * stack-frames in one go after that.
  */
-static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
+static void do_signal(struct pt_regs *regs, int syscall)
 {
 	struct k_sigaction ka;
 	siginfo_t info;
@@ -635,7 +601,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 	 * if so.
 	 */
 	if (!user_mode(regs))
-		return 0;
+		return;
 
 	if (try_to_freeze())
 		goto no_signal;
@@ -644,9 +610,24 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
-		handle_signal(signr, &ka, &info, oldset, regs, syscall);
+		sigset_t *oldset;
+
+		if (test_thread_flag(TIF_RESTORE_SIGMASK))
+			oldset = &current->saved_sigmask;
+		else
+			oldset = &current->blocked;
+		if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) {
+			/*
+			 * A signal was successfully delivered; the saved
+			 * sigmask will have been stored in the signal frame,
+			 * and will be restored by sigreturn, so we can simply
+			 * clear the TIF_RESTORE_SIGMASK flag.
+			 */
+			if (test_thread_flag(TIF_RESTORE_SIGMASK))
+				clear_thread_flag(TIF_RESTORE_SIGMASK);
+		}
 		single_step_set(current);
-		return 1;
+		return;
 	}
 
  no_signal:
@@ -698,16 +679,23 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 		    regs->ARM_r0 == -ERESTARTNOINTR) {
 			setup_syscall_restart(regs);
 		}
+
+		/* If there's no signal to deliver, we just put the saved sigmask
+		 * back.
+		 */
+		if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+			clear_thread_flag(TIF_RESTORE_SIGMASK);
+			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+		}
 	}
 	single_step_set(current);
-	return 0;
 }
 
 asmlinkage void
 do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
 	if (thread_flags & _TIF_SIGPENDING)
-		do_signal(&current->blocked, regs, syscall);
+		do_signal(regs, syscall);
 
 	if (thread_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
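
The rewrite moves sys_sigsuspend onto the generic saved-sigmask scheme: park
the old mask in current->saved_sigmask, sleep, return -ERESTARTNOHAND with
TIF_RESTORE_SIGMASK set, and let do_signal() either hand the saved mask to the
signal frame or put it straight back. The userspace contract this preserves is
the usual sigsuspend(2) one:

	#include <signal.h>
	#include <stdio.h>

	static void handler(int sig) { (void)sig; }

	int main(void)
	{
		sigset_t block, none;

		signal(SIGUSR1, handler);
		sigemptyset(&block);
		sigaddset(&block, SIGUSR1);
		sigprocmask(SIG_BLOCK, &block, NULL);	/* SIGUSR1 now blocked */

		sigemptyset(&none);
		sigsuspend(&none);	/* atomically unblock all and sleep */

		/* Back here the pre-call mask (SIGUSR1 blocked) is in force
		 * again; that restore is what TIF_RESTORE_SIGMASK implements. */
		puts("interrupted by a signal");
		return 0;
	}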
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 9f444e5cc165..20b7411e47fd 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -21,7 +21,7 @@
  * Note that with framepointer enabled, even the leaf functions have the same
  * prologue and epilogue, therefore we can ignore the LR value in this case.
  */
-int unwind_frame(struct stackframe *frame)
+int notrace unwind_frame(struct stackframe *frame)
 {
 	unsigned long high, low;
 	unsigned long fp = frame->fp;
@@ -43,7 +43,7 @@ int unwind_frame(struct stackframe *frame)
 }
 #endif
 
-void walk_stackframe(struct stackframe *frame,
+void notrace walk_stackframe(struct stackframe *frame,
 		     int (*fn)(struct stackframe *, void *), void *data)
 {
 	while (1) {
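
Marking these notrace is what lets ftrace use them safely: in the kernel,
notrace expands (via include/linux/compiler.h) to the attribute that
suppresses -pg call-site instrumentation, so the unwinder cannot re-enter the
tracer that is already running it:

	/* Sketch of the definition being relied on: */
	#define notrace	__attribute__((no_instrument_function))

	int notrace unwind_frame(struct stackframe *frame);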
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index dd56e11f339a..39baf1128bfa 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -62,7 +62,11 @@ struct unwind_ctrl_block {
 };
 
 enum regs {
+#ifdef CONFIG_THUMB2_KERNEL
+	FP = 7,
+#else
 	FP = 11,
+#endif
 	SP = 13,
 	LR = 14,
 	PC = 15