Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile        |   5
-rw-r--r--  arch/arm/kernel/asm-offsets.c   |  20
-rw-r--r--  arch/arm/kernel/entry-armv.S    | 276
-rw-r--r--  arch/arm/kernel/entry-common.S  |  65
-rw-r--r--  arch/arm/kernel/entry-header.S  | 143
-rw-r--r--  arch/arm/kernel/head.S          |   3
-rw-r--r--  arch/arm/kernel/process.c       |  25
-rw-r--r--  arch/arm/kernel/ptrace.c        |   5
-rw-r--r--  arch/arm/kernel/sys_arm.c       |  14
-rw-r--r--  arch/arm/kernel/traps.c         |  69
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S   |   3
11 files changed, 404 insertions, 224 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 07a56ff61494..4a2af55e134b 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -31,8 +31,3 @@ head-y := head.o
 obj-$(CONFIG_DEBUG_LL)	+= debug.o
 
 extra-y := $(head-y) init_task.o vmlinux.lds
-
-# Spell out some dependencies that aren't automatically figured out
-$(obj)/entry-armv.o:	$(obj)/entry-header.S include/asm-arm/constants.h
-$(obj)/entry-common.o:	$(obj)/entry-header.S include/asm-arm/constants.h \
-			$(obj)/calls.S
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 99d43259ff89..c1ff4d1f1bfd 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -64,6 +64,26 @@ int main(void)
 	DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
 	DEFINE(TI_IWMMXT_STATE,		(offsetof(struct thread_info, fpstate)+4)&~7);
 	BLANK();
+	DEFINE(S_R0,			offsetof(struct pt_regs, ARM_r0));
+	DEFINE(S_R1,			offsetof(struct pt_regs, ARM_r1));
+	DEFINE(S_R2,			offsetof(struct pt_regs, ARM_r2));
+	DEFINE(S_R3,			offsetof(struct pt_regs, ARM_r3));
+	DEFINE(S_R4,			offsetof(struct pt_regs, ARM_r4));
+	DEFINE(S_R5,			offsetof(struct pt_regs, ARM_r5));
+	DEFINE(S_R6,			offsetof(struct pt_regs, ARM_r6));
+	DEFINE(S_R7,			offsetof(struct pt_regs, ARM_r7));
+	DEFINE(S_R8,			offsetof(struct pt_regs, ARM_r8));
+	DEFINE(S_R9,			offsetof(struct pt_regs, ARM_r9));
+	DEFINE(S_R10,			offsetof(struct pt_regs, ARM_r10));
+	DEFINE(S_FP,			offsetof(struct pt_regs, ARM_fp));
+	DEFINE(S_IP,			offsetof(struct pt_regs, ARM_ip));
+	DEFINE(S_SP,			offsetof(struct pt_regs, ARM_sp));
+	DEFINE(S_LR,			offsetof(struct pt_regs, ARM_lr));
+	DEFINE(S_PC,			offsetof(struct pt_regs, ARM_pc));
+	DEFINE(S_PSR,			offsetof(struct pt_regs, ARM_cpsr));
+	DEFINE(S_OLD_R0,		offsetof(struct pt_regs, ARM_ORIG_r0));
+	DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
+	BLANK();
 #if __LINUX_ARM_ARCH__ >= 6
 	DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
 	BLANK();
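
The S_* DEFINEs added above make asm-offsets.c generate the pt_regs stack-frame offsets at build time, replacing the hand-maintained #define table this patch deletes from entry-header.S further down. A minimal C sketch of how the asm-offsets trick works (the exact macros live in the kernel's build machinery; the marker format here is an assumption):

	/* asm-offsets sketch: each DEFINE() embeds a "->SYM value" marker in
	 * the compiler's assembly output; a sed pass over that output turns
	 * the markers into "#define SYM value" lines in a generated header. */
	#include <stddef.h>

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))
	#define BLANK() asm volatile("\n->" : : )

	struct pt_regs { unsigned long uregs[18]; };
	#define ARM_r0 uregs[0]

	int main(void)
	{
		DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0));
		DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
		BLANK();
		return 0;
	}
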
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bb27c317d94b..4eb36155dc93 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -14,12 +14,12 @@
  * it to save wrong values... Be aware!
  */
 #include <linux/config.h>
-#include <linux/init.h>
 
-#include <asm/thread_info.h>
 #include <asm/glue.h>
-#include <asm/ptrace.h>
 #include <asm/vfpmacros.h>
+#include <asm/hardware.h>		/* should be moved into entry-macro.S */
+#include <asm/arch/irqs.h>		/* should be moved into entry-macro.S */
+#include <asm/arch/entry-macro.S>
 
 #include "entry-header.S"
 
@@ -118,7 +118,7 @@ __dabt_svc:
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
-	disable_irq r0
+	disable_irq
 
 	@
 	@ restore SPSR and restart the instruction
@@ -198,7 +198,7 @@ __und_svc:
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
-1:	disable_irq r0
+1:	disable_irq
 
 	@
 	@ restore SPSR and restart the instruction
@@ -232,7 +232,7 @@ __pabt_svc:
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
-	disable_irq r0
+	disable_irq
 
 	@
 	@ restore SPSR and restart the instruction
@@ -269,6 +269,12 @@ __pabt_svc:
 	add	r5, sp, #S_PC
 	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
 
+#if __LINUX_ARM_ARCH__ < 6
+	@ make sure our user space atomic helper is aborted
+	cmp	r2, #VIRT_OFFSET
+	bichs	r3, r3, #PSR_Z_BIT
+#endif
+
 	@
 	@ We are now ready to fill in the remaining blanks on the stack:
 	@
@@ -316,7 +322,7 @@ __dabt_usr:
 	@
 	@ IRQs on, then call the main handler
 	@
-	enable_irq r2
+	enable_irq
 	mov	r2, sp
 	adr	lr, ret_from_exception
 	b	do_DataAbort
@@ -418,7 +424,7 @@ call_fpe:
 	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 	bcs	iwmmxt_task_enable
 #endif
-	enable_irq r7
+	enable_irq
 	add	pc, pc, r8, lsr #6
 	mov	r0, r0
 
@@ -472,7 +478,7 @@ fpundefinstr:
 __pabt_usr:
 	usr_entry abt
 
-	enable_irq r0				@ Enable interrupts
+	enable_irq				@ Enable interrupts
 	mov	r0, r2				@ address (pc)
 	mov	r1, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler
@@ -499,8 +505,12 @@ ENTRY(__switch_to)
 	mra	r4, r5, acc0
 	stmia	ip, {r4, r5}
 #endif
+#if defined(CONFIG_HAS_TLS_REG)
+	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
+#elif !defined(CONFIG_TLS_REG_EMUL)
 	mov	r4, #0xffff0fff
-	str	r3, [r4, #-3]			@ Set TLS ptr
+	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
+#endif
 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 #ifdef CONFIG_VFP
 	@ Always disable VFP so we can lazily save/restore the old
@@ -519,11 +529,209 @@ ENTRY(__switch_to)
 	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
 
 	__INIT
+
+/*
+ * User helpers.
+ *
+ * These are segments of kernel-provided user code reachable from user space
+ * at a fixed address in kernel memory.  This is used to provide user space
+ * with some operations which require kernel help because of unimplemented
+ * native features and/or instructions in many ARM CPUs.  The idea is for
+ * this code to be executed directly in user mode for best efficiency but
+ * which is too intimate with the kernel counterpart to be left to user
+ * libraries.  In fact this code might even differ from one CPU to another
+ * depending on the available instruction set and restrictions like on
+ * SMP systems.  In other words, the kernel reserves the right to change
+ * this code as needed without warning.  Only the entry points and their
+ * results are guaranteed to be stable.
+ *
+ * Each segment is 32-byte aligned and will be moved to the top of the high
+ * vector page.  New segments (if ever needed) must be added in front of
+ * existing ones.  This mechanism should be used only for things that are
+ * really small and justified, and not be abused freely.
+ *
+ * User space is expected to implement those things inline when optimizing
+ * for a processor that has the necessary native support, but only if such
+ * resulting binaries are already going to be incompatible with earlier ARM
+ * processors due to the use of unsupported instructions other than what
+ * is provided here.  In other words don't make binaries unable to run on
+ * earlier processors just for the sake of not using these kernel helpers
+ * if your compiled code is not going to use the new instructions for other
+ * purposes.
+ */
+
+	.align	5
+	.globl	__kuser_helper_start
+__kuser_helper_start:
+
+/*
+ * Reference prototype:
+ *
+ *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
+ *
+ * Input:
+ *
+ *	r0 = oldval
+ *	r1 = newval
+ *	r2 = ptr
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	r0 = returned value (zero or non-zero)
+ *	C flag = set if r0 == 0, clear if r0 != 0
+ *
+ * Clobbered:
+ *
+ *	r3, ip, flags
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
+ *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
+ * Return zero if *ptr was changed or non-zero if no exchange happened.
+ * The C flag is also set if *ptr was changed to allow for assembly
+ * optimization in the calling code.
+ *
+ * For example, a user space atomic_add implementation could look like this:
+ *
+ * #define atomic_add(ptr, val) \
+ *	({ register unsigned int *__ptr asm("r2") = (ptr); \
+ *	   register unsigned int __result asm("r1"); \
+ *	   asm volatile ( \
+ *	       "1: @ atomic_add\n\t" \
+ *	       "ldr	r0, [r2]\n\t" \
+ *	       "mov	r3, #0xffff0fff\n\t" \
+ *	       "add	lr, pc, #4\n\t" \
+ *	       "add	r1, r0, %2\n\t" \
+ *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
+ *	       "bcc	1b" \
+ *	       : "=&r" (__result) \
+ *	       : "r" (__ptr), "rIL" (val) \
+ *	       : "r0","r3","ip","lr","cc","memory" ); \
+ *	   __result; })
+ */
+
+__kuser_cmpxchg:				@ 0xffff0fc0
+
+#if __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_SMP  /* sanity check */
+#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
+#endif
+
+	/*
+	 * Theory of operation:
+	 *
+	 * We set the Z flag before loading oldval.  If ever an exception
+	 * occurs we can not be sure the loaded value will still be the same
+	 * when the exception returns, therefore the user exception handler
+	 * will clear the Z flag whenever the interrupted user code was
+	 * actually from the kernel address space (see the usr_entry macro).
+	 *
+	 * The post-increment on the str is used to prevent a race with an
+	 * exception happening just after the str instruction which would
+	 * clear the Z flag although the exchange was done.
+	 */
+	teq	ip, ip			@ set Z flag
+	ldr	ip, [r2]		@ load current val
+	add	r3, r2, #1		@ prepare store ptr
+	teqeq	ip, r0			@ compare with oldval if still allowed
+	streq	r1, [r3, #-1]!		@ store newval if still allowed
+	subs	r0, r2, r3		@ if r2 == r3 the str occurred
+	mov	pc, lr
+
+#else
+
+	ldrex	r3, [r2]
+	subs	r3, r3, r0
+	strexeq	r3, r1, [r2]
+	rsbs	r0, r3, #0
+	mov	pc, lr
+
+#endif
+
+	.align	5
+
+/*
+ * Reference prototype:
+ *
+ *	int __kernel_get_tls(void)
+ *
+ * Input:
+ *
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	r0 = TLS value
+ *
+ * Clobbered:
+ *
+ *	the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef int (__kernel_get_tls_t)(void);
+ *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
+ *
+ * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_get_tls() \
+ *	({ register unsigned int __val asm("r0"); \
+ *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
+ *		: "=r" (__val) : : "lr","cc" ); \
+ *	   __val; })
+ */
+
+__kuser_get_tls:				@ 0xffff0fe0
+
+#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
+
+	ldr	r0, [pc, #(16 - 8)]	@ TLS stored at 0xffff0ff0
+	mov	pc, lr
+
+#else
+
+	mrc	p15, 0, r0, c13, c0, 3	@ read TLS register
+	mov	pc, lr
+
+#endif
+
+	.rep	5
+	.word	0			@ pad up to __kuser_helper_version
+	.endr
+
+/*
+ * Reference declaration:
+ *
+ *	extern unsigned int __kernel_helper_version;
+ *
+ * Definition and user space usage example:
+ *
+ *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
+ *
+ * User space may read this to determine the current number of helpers
+ * available.
+ */
+
+__kuser_helper_version:				@ 0xffff0ffc
+	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
+
+	.globl	__kuser_helper_end
+__kuser_helper_end:
+
+
 /*
  * Vector stubs.
  *
- * This code is copied to 0x200 or 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's.
+ * This code is copied to 0xffff0200 so we can use branches in the
+ * vectors, rather than ldr's.  Note that this code must not
+ * exceed 0x300 bytes.
  *
  * Common stub entry macro:
  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -544,7 +752,7 @@ vector_\name:
 	@
 	mrs	r13, cpsr
 	bic	r13, r13, #MODE_MASK
-	orr	r13, r13, #MODE_SVC
+	orr	r13, r13, #SVC_MODE
 	msr	spsr_cxsf, r13			@ switch to SVC_32 mode
 
 	and	lr, lr, #15
@@ -552,6 +760,7 @@ vector_\name:
 	movs	pc, lr				@ Changes mode and branches
 	.endm
 
+	.globl	__stubs_start
 __stubs_start:
 /*
  * Interrupt dispatcher
@@ -686,37 +895,24 @@ vector_addrexcptn:
 .LCsabt:
 	.word	__temp_abt
 
+	.globl	__stubs_end
 __stubs_end:
 
-	.equ	__real_stubs_start, .LCvectors + 0x200
+	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
 
-.LCvectors:
+	.globl	__vectors_start
+__vectors_start:
 	swi	SYS_ERROR0
-	b	__real_stubs_start + (vector_und - __stubs_start)
-	ldr	pc, __real_stubs_start + (.LCvswi - __stubs_start)
-	b	__real_stubs_start + (vector_pabt - __stubs_start)
-	b	__real_stubs_start + (vector_dabt - __stubs_start)
-	b	__real_stubs_start + (vector_addrexcptn - __stubs_start)
-	b	__real_stubs_start + (vector_irq - __stubs_start)
-	b	__real_stubs_start + (vector_fiq - __stubs_start)
+	b	vector_und + stubs_offset
+	ldr	pc, .LCvswi + stubs_offset
+	b	vector_pabt + stubs_offset
+	b	vector_dabt + stubs_offset
+	b	vector_addrexcptn + stubs_offset
+	b	vector_irq + stubs_offset
+	b	vector_fiq + stubs_offset
 
-ENTRY(__trap_init)
-	stmfd	sp!, {r4 - r6, lr}
-
-	mov	r0, #0xff000000
-	orr	r0, r0, #0x00ff0000		@ high vectors position
-	adr	r1, .LCvectors			@ set up the vectors
-	ldmia	r1, {r1, r2, r3, r4, r5, r6, ip, lr}
-	stmia	r0, {r1, r2, r3, r4, r5, r6, ip, lr}
-
-	add	r2, r0, #0x200
-	adr	r0, __stubs_start		@ copy stubs to 0x200
-	adr	r1, __stubs_end
-1:	ldr	r3, [r0], #4
-	str	r3, [r2], #4
-	cmp	r0, r1
-	blt	1b
-	LOADREGS(fd, sp!, {r4 - r6, pc})
+	.globl	__vectors_end
+__vectors_end:
 
 	.data
 
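
For reference, the __kernel_cmpxchg helper documented in the block above can be driven from plain C as well as assembly. A user-space sketch that relies only on the address and calling convention the patch guarantees (the function and variable names are illustrative):

	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

	/* Atomically increment *counter; retry if another thread (or, on
	 * pre-ARMv6, an exception) invalidated our snapshot of the value. */
	static void atomic_inc(int *counter)
	{
		int old;
		do {
			old = *counter;
		} while (__kernel_cmpxchg(old, old + 1, counter) != 0);
	}
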
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 53a7e0dea44d..3f8d0e3aefab 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -9,19 +9,10 @@
  */
 #include <linux/config.h>
 
-#include <asm/thread_info.h>
-#include <asm/ptrace.h>
 #include <asm/unistd.h>
 
 #include "entry-header.S"
 
-/*
- * We rely on the fact that R0 is at the bottom of the stack (due to
- * slow/fast restore user regs).
- */
-#if S_R0 != 0
-#error "Please fix"
-#endif
 
 	.align	5
 /*
@@ -30,11 +21,19 @@
  * stack.
  */
 ret_fast_syscall:
-	disable_irq r1				@ disable interrupts
+	disable_irq				@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
 	bne	fast_work_pending
-	fast_restore_user_regs
+
+	@ fast_restore_user_regs
+	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	mov	r0, r0
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+	movs	pc, lr				@ return & move spsr_svc into cpsr
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
@@ -49,7 +48,7 @@ work_pending:
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
 	bl	do_notify_resume
-	disable_irq r1				@ disable interrupts
+	disable_irq				@ disable interrupts
 	b	no_work_pending
 
 work_resched:
@@ -59,12 +58,19 @@ work_resched:
  */
 ENTRY(ret_to_user)
 ret_slow_syscall:
-	disable_irq r1				@ disable interrupts
+	disable_irq				@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
 	bne	work_pending
 no_work_pending:
-	slow_restore_user_regs
+	@ slow_restore_user_regs
+	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
+	ldr	lr, [sp, #S_PC]!		@ get pc
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	mov	r0, r0
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+	movs	pc, lr				@ return & move spsr_svc into cpsr
 
 /*
  * This is how we return from a fork.
@@ -116,9 +122,26 @@ ENTRY(ret_from_fork)
 
 	.align	5
 ENTRY(vector_swi)
-	save_user_regs
+	sub	sp, sp, #S_FRAME_SIZE
+	stmia	sp, {r0 - r12}			@ Calling r0 - r12
+	add	r8, sp, #S_PC
+	stmdb	r8, {sp, lr}^			@ Calling sp, lr
+	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
+	str	lr, [sp, #S_PC]			@ Save calling PC
+	str	r8, [sp, #S_PSR]		@ Save CPSR
+	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
 	zero_fp
-	get_scno
+
+	/*
+	 * Get the system call number.
+	 */
+#ifdef CONFIG_ARM_THUMB
+	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
+	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
+	ldreq	scno, [lr, #-4]
+#else
+	ldr	scno, [lr, #-4]			@ get SWI instruction
+#endif
 	arm710_bug_check scno, ip
 
 #ifdef CONFIG_ALIGNMENT_TRAP
@@ -126,14 +149,14 @@ ENTRY(vector_swi)
 	ldr	ip, [ip]
 	mcr	p15, 0, ip, c1, c0		@ update control register
 #endif
-	enable_irq ip
+	enable_irq
 
 	str	r4, [sp, #-S_OFF]!		@ push fifth arg
 
 	get_thread_info tsk
 	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	bic	scno, scno, #0xff000000		@ mask off SWI op-code
-	eor	scno, scno, #OS_NUMBER << 20	@ check OS number
+	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
 	adr	tbl, sys_call_table		@ load syscall table pointer
 	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
 	bne	__sys_trace
@@ -144,8 +167,8 @@ ENTRY(vector_swi)
 
 	add	r1, sp, #S_OFF
 2:	mov	why, #0				@ no longer a real syscall
-	cmp	scno, #ARMSWI_OFFSET
-	eor	r0, scno, #OS_NUMBER << 20	@ put OS number back
+	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
 	bcs	arm_syscall
 	b	sys_ni_syscall			@ not private func
 
@@ -190,7 +213,7 @@ ENTRY(sys_call_table)
 @ r5 = syscall table
 		.type	sys_syscall, #function
 sys_syscall:
-		eor	scno, r0, #OS_NUMBER << 20
+		eor	scno, r0, #__NR_SYSCALL_BASE
 		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
 		cmpne	scno, #NR_syscalls	@ check range
 		stmloia	sp, {r5, r6}		@ shuffle args
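
The bic/eor pairs above work because Linux syscalls on this ABI are encoded as `swi #(__NR_SYSCALL_BASE + n)`, and __NR_SYSCALL_BASE equals the old `OS_NUMBER << 20` expression (0x900000, OS number 9). What the `ldr scno, [lr, #-4]` sequence computes, as a C sketch (the 0x900000 constant is assumed from the era's asm/unistd.h):

	#include <stdint.h>

	#define __NR_SYSCALL_BASE	0x900000	/* OS_NUMBER (9) << 20 */

	static uint32_t swi_to_scno(uint32_t swi_insn)
	{
		uint32_t scno = swi_insn & ~0xff000000;	/* mask off SWI op-code */
		return scno ^ __NR_SYSCALL_BASE;	/* small if a Linux call */
	}
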
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 4039d8c120b5..a3d40a0e2b04 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -1,24 +1,11 @@
-#include <linux/config.h> /* for CONFIG_ARCH_xxxx */
+#include <linux/config.h>
+#include <linux/init.h>
 #include <linux/linkage.h>
 
 #include <asm/assembler.h>
 #include <asm/constants.h>
 #include <asm/errno.h>
-#include <asm/hardware.h>
-#include <asm/arch/irqs.h>
-#include <asm/arch/entry-macro.S>
-
-#ifndef MODE_SVC
-#define MODE_SVC 0x13
-#endif
-
-	.macro	zero_fp
-#ifdef CONFIG_FRAME_POINTER
-	mov	fp, #0
-#endif
-	.endm
-
-	.text
+#include <asm/thread_info.h>
 
 @ Bad Abort numbers
 @ -----------------
@@ -29,113 +16,44 @@
 #define BAD_IRQ		3
 #define BAD_UNDEFINSTR	4
 
-#define PT_TRACESYS	0x00000002
-
-@ OS version number used in SWIs
-@  RISC OS is 0
-@  RISC iX is 8
 @
-#define OS_NUMBER	9
-#define ARMSWI_OFFSET	0x000f0000
-
+@ Most of the stack format comes from struct pt_regs, but with
+@ the addition of 8 bytes for storing syscall args 5 and 6.
 @
-@ Stack format (ensured by USER_* and SVC_*)
-@
-#define S_FRAME_SIZE	72
-#define S_OLD_R0	68
-#define S_PSR		64
-
-#define S_PC		60
-#define S_LR		56
-#define S_SP		52
-#define S_IP		48
-#define S_FP		44
-#define S_R10		40
-#define S_R9		36
-#define S_R8		32
-#define S_R7		28
-#define S_R6		24
-#define S_R5		20
-#define S_R4		16
-#define S_R3		12
-#define S_R2		8
-#define S_R1		4
-#define S_R0		0
 #define S_OFF		8
 
-	.macro	set_cpsr_c, reg, mode
-	msr	cpsr_c, \mode
+/*
+ * The SWI code relies on the fact that R0 is at the bottom of the stack
+ * (due to slow/fast restore user regs).
+ */
+#if S_R0 != 0
+#error "Please fix"
+#endif
+
+	.macro	zero_fp
+#ifdef CONFIG_FRAME_POINTER
+	mov	fp, #0
+#endif
 	.endm
 
 #if __LINUX_ARM_ARCH__ >= 6
-	.macro	disable_irq, temp
+	.macro	disable_irq
 	cpsid	i
 	.endm
 
-	.macro	enable_irq, temp
+	.macro	enable_irq
 	cpsie	i
 	.endm
 #else
-	.macro	disable_irq, temp
-	set_cpsr_c \temp, #PSR_I_BIT | MODE_SVC
+	.macro	disable_irq
+	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
 	.endm
 
-	.macro	enable_irq, temp
-	set_cpsr_c \temp, #MODE_SVC
+	.macro	enable_irq
+	msr	cpsr_c, #SVC_MODE
 	.endm
 #endif
 
-	.macro	save_user_regs
-	sub	sp, sp, #S_FRAME_SIZE
-	stmia	sp, {r0 - r12}		@ Calling r0 - r12
-	add	r8, sp, #S_PC
-	stmdb	r8, {sp, lr}^		@ Calling sp, lr
-	mrs	r8, spsr		@ called from non-FIQ mode, so ok.
-	str	lr, [sp, #S_PC]		@ Save calling PC
-	str	r8, [sp, #S_PSR]	@ Save CPSR
-	str	r0, [sp, #S_OLD_R0]	@ Save OLD_R0
-	.endm
-
-	.macro	restore_user_regs
-	ldr	r1, [sp, #S_PSR]	@ Get calling cpsr
-	disable_irq ip			@ disable IRQs
-	ldr	lr, [sp, #S_PC]!	@ Get PC
-	msr	spsr_cxsf, r1		@ save in spsr_svc
-	ldmdb	sp, {r0 - lr}^		@ Get calling r0 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr			@ return & move spsr_svc into cpsr
-	.endm
-
-/*
- * Must be called with IRQs already disabled.
- */
-	.macro	fast_restore_user_regs
-	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-/*
- * Must be called with IRQs already disabled.
- */
-	.macro	slow_restore_user_regs
-	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
-	ldr	lr, [sp, #S_PC]!		@ get pc
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-	ldmdb	sp, {r0 - lr}^			@ get calling r1 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-	.macro	mask_pc, rd, rm
-	.endm
-
 	.macro	get_thread_info, rd
 	mov	\rd, sp, lsr #13
 	mov	\rd, \rd, lsl #13
@@ -165,18 +83,3 @@ scno .req r7 @ syscall number
 tbl	.req	r8		@ syscall table pointer
 why	.req	r8		@ Linux syscall (!= 0)
 tsk	.req	r9		@ current thread_info
-
-/*
- * Get the system call number.
- */
-	.macro	get_scno
-#ifdef CONFIG_ARM_THUMB
-	tst	r8, #PSR_T_BIT		@ this is SPSR from save_user_regs
-	addne	scno, r7, #OS_NUMBER << 20	@ put OS number in
-	ldreq	scno, [lr, #-4]
-
-#else
-	mask_pc	lr, lr
-	ldr	scno, [lr, #-4]		@ get SWI instruction
-#endif
-	.endm
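
The get_thread_info macro kept above relies on the kernel stack being a single aligned 8K block with thread_info at its base, so it simply rounds sp down. A C equivalent of the two-instruction sequence (assuming THREAD_SIZE is 8192, which is what lsr #13 / lsl #13 implies):

	struct thread_info;

	/* Equivalent of: mov rd, sp, lsr #13 ; mov rd, rd, lsl #13 */
	static inline struct thread_info *get_thread_info(unsigned long sp)
	{
		return (struct thread_info *)(sp & ~(8192UL - 1));
	}
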
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 171b3e811c71..4733877296d4 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -19,6 +19,7 @@
 #include <asm/procinfo.h>
 #include <asm/ptrace.h>
 #include <asm/constants.h>
+#include <asm/thread_info.h>
 #include <asm/system.h>
 
 #define PROCINFO_MMUFLAGS	8
@@ -131,7 +132,7 @@ __switch_data:
 	.long	processor_id			@ r4
 	.long	__machine_arch_type		@ r5
 	.long	cr_alignment			@ r6
-	.long	init_thread_union+8192		@ sp
+	.long	init_thread_union + THREAD_START_SP @ sp
 
 /*
  * The following fragment of code is executed with the MMU on, and uses
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 26eacd3e5def..8f146a4b4752 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -256,8 +256,6 @@ static unsigned long *thread_info_head;
 static unsigned int nr_thread_info;
 
 #define EXTRA_TASK_STRUCT	4
-#define ll_alloc_task_struct() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
-#define ll_free_task_struct(p) free_pages((unsigned long)(p),1)
 
 struct thread_info *alloc_thread_info(struct task_struct *task)
 {
@@ -274,17 +272,16 @@ struct thread_info *alloc_thread_info(struct task_struct *task)
 	}
 
 	if (!thread)
-		thread = ll_alloc_task_struct();
+		thread = (struct thread_info *)
+			 __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
 
-#ifdef CONFIG_MAGIC_SYSRQ
+#ifdef CONFIG_DEBUG_STACK_USAGE
 	/*
 	 * The stack must be cleared if you want SYSRQ-T to
 	 * give sensible stack usage information
 	 */
-	if (thread) {
-		char *p = (char *)thread;
-		memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
-	}
+	if (thread)
+		memzero(thread, THREAD_SIZE);
 #endif
 	return thread;
 }
@@ -297,7 +294,7 @@ void free_thread_info(struct thread_info *thread)
 		thread_info_head = p;
 		nr_thread_info += 1;
 	} else
-		ll_free_task_struct(thread);
+		free_pages((unsigned long)thread, THREAD_SIZE_ORDER);
 }
 
 /*
@@ -350,7 +347,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
 	struct thread_info *thread = p->thread_info;
 	struct pt_regs *childregs;
 
-	childregs = ((struct pt_regs *)((unsigned long)thread + THREAD_SIZE - 8)) - 1;
+	childregs = ((struct pt_regs *)((unsigned long)thread + THREAD_START_SP)) - 1;
 	*childregs = *regs;
 	childregs->ARM_r0 = 0;
 	childregs->ARM_sp = stack_start;
@@ -447,15 +444,17 @@ EXPORT_SYMBOL(kernel_thread);
 unsigned long get_wchan(struct task_struct *p)
 {
 	unsigned long fp, lr;
-	unsigned long stack_page;
+	unsigned long stack_start, stack_end;
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 
-	stack_page = 4096 + (unsigned long)p->thread_info;
+	stack_start = (unsigned long)(p->thread_info + 1);
+	stack_end = ((unsigned long)p->thread_info) + THREAD_SIZE;
+
 	fp = thread_saved_fp(p);
 	do {
-		if (fp < stack_page || fp > 4092+stack_page)
+		if (fp < stack_start || fp > stack_end)
 			return 0;
 		lr = pc_pointer (((unsigned long *)fp)[-1]);
 		if (!in_sched_functions(lr))
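
The THREAD_START_SP substitutions in copy_thread() and get_wchan() above (and in execve() and head.S elsewhere in this patch) all describe one stack layout. A sketch of the relationships, assuming THREAD_START_SP = THREAD_SIZE - 8 as the replaced `THREAD_SIZE - 8` expressions imply:

	#define THREAD_SIZE	8192		/* assumed for this sketch */
	#define THREAD_START_SP	(THREAD_SIZE - 8)

	struct pt_regs { unsigned long uregs[18]; };

	/* thread_info sits at the bottom of the stack block; the initial
	 * kernel sp is 8 bytes below the top; a user thread's saved
	 * pt_regs sits immediately below that initial sp. */
	static struct pt_regs *task_pt_regs(unsigned long ti_base)
	{
		return ((struct pt_regs *)(ti_base + THREAD_START_SP)) - 1;
	}
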
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index efd7a341614b..cd99b83f14c2 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -19,6 +19,7 @@
 #include <linux/user.h>
 #include <linux/security.h>
 #include <linux/init.h>
+#include <linux/signal.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -693,7 +694,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
 	case PTRACE_SYSCALL:
 	case PTRACE_CONT:
 		ret = -EIO;
-		if ((unsigned long) data > _NSIG)
+		if (!valid_signal(data))
 			break;
 		if (request == PTRACE_SYSCALL)
 			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
@@ -728,7 +729,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
 	 */
 	case PTRACE_SINGLESTEP:
 		ret = -EIO;
-		if ((unsigned long) data > _NSIG)
+		if (!valid_signal(data))
 			break;
 		child->ptrace |= PT_SINGLESTEP;
 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 7ba6342cf93d..f897ce2ccf0d 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -227,18 +227,6 @@ asmlinkage int sys_ipc(uint call, int first, int second, int third,
 	}
 }
 
-asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg,
-			  unsigned long __user *addr)
-{
-	unsigned long ret;
-	long err;
-
-	err = do_shmat(shmid, shmaddr, shmflg, &ret);
-	if (err == 0)
-		err = put_user(ret, addr);
-	return err;
-}
-
 /* Fork a new task - this creates a new program thread.
  * This is called indirectly via a small wrapper
  */
@@ -314,7 +302,7 @@ long execve(const char *filename, char **argv, char **envp)
314 "b ret_to_user" 302 "b ret_to_user"
315 : 303 :
316 : "r" (current_thread_info()), 304 : "r" (current_thread_info()),
317 "Ir" (THREAD_SIZE - 8 - sizeof(regs)), 305 "Ir" (THREAD_START_SP - sizeof(regs)),
318 "r" (&regs), 306 "r" (&regs),
319 "Ir" (sizeof(regs)) 307 "Ir" (sizeof(regs))
320 : "r0", "r1", "r2", "r3", "ip", "memory"); 308 : "r0", "r1", "r2", "r3", "ip", "memory");
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6e31718f6008..14df16b983f4 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -218,7 +218,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
 		tsk->comm, tsk->pid, tsk->thread_info + 1);
 
 	if (!user_mode(regs) || in_interrupt()) {
-		dump_mem("Stack: ", regs->ARM_sp, 8192+(unsigned long)tsk->thread_info);
+		dump_mem("Stack: ", regs->ARM_sp,
+			 THREAD_SIZE + (unsigned long)tsk->thread_info);
 		dump_backtrace(regs, tsk);
 		dump_instr(regs);
 	}
@@ -450,13 +451,17 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 
 	case NR(set_tls):
 		thread->tp_value = regs->ARM_r0;
+#if defined(CONFIG_HAS_TLS_REG)
+		asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) );
+#elif !defined(CONFIG_TLS_REG_EMUL)
 		/*
-		 * Our user accessible TLS ptr is located at 0xffff0ffc.
-		 * On SMP read access to this address must raise a fault
-		 * and be emulated from the data abort handler.
-		 * m
+		 * User space must never try to access this directly.
+		 * Expect your app to break eventually if you do so.
+		 * The user helper at 0xffff0fe0 must be used instead.
+		 * (see entry-armv.S for details)
 		 */
-		*((unsigned long *)0xffff0ffc) = thread->tp_value;
+		*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
+#endif
 		return 0;
 
 	default:
@@ -493,6 +498,44 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 	return 0;
 }
 
+#ifdef CONFIG_TLS_REG_EMUL
+
+/*
+ * We might be running on an ARMv6+ processor which should have the TLS
+ * register but for some reason we can't use it, or maybe an SMP system
+ * using a pre-ARMv6 processor (there are apparently a few prototypes like
+ * that in existence) and therefore access to that register must be
+ * emulated.
+ */
+
+static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
+{
+	int reg = (instr >> 12) & 15;
+	if (reg == 15)
+		return 1;
+	regs->uregs[reg] = current_thread_info()->tp_value;
+	regs->ARM_pc += 4;
+	return 0;
+}
+
+static struct undef_hook arm_mrc_hook = {
+	.instr_mask	= 0x0fff0fff,
+	.instr_val	= 0x0e1d0f70,
+	.cpsr_mask	= PSR_T_BIT,
+	.cpsr_val	= 0,
+	.fn		= get_tp_trap,
+};
+
+static int __init arm_mrc_hook_init(void)
+{
+	register_undef_hook(&arm_mrc_hook);
+	return 0;
+}
+
+late_initcall(arm_mrc_hook_init);
+
+#endif
+
 void __bad_xchg(volatile void *ptr, int size)
 {
 	printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
@@ -578,9 +621,19 @@ EXPORT_SYMBOL(abort);
 
 void __init trap_init(void)
 {
-	extern void __trap_init(void);
+	extern char __stubs_start[], __stubs_end[];
+	extern char __vectors_start[], __vectors_end[];
+	extern char __kuser_helper_start[], __kuser_helper_end[];
+	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
 
-	__trap_init();
+	/*
+	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
+	 * into the vector page, mapped at 0xffff0000, and ensure these
+	 * are visible to the instruction stream.
+	 */
+	memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start);
+	memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start);
+	memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz);
 	flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
 	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 }
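
After trap_init() runs, the vector page at 0xffff0000 holds the vectors, the stubs at +0x200, and the kuser helpers packed up against 0xffff1000, with the version word at 0xffff0ffc. User space is expected to check that word before calling a helper; a hedged sketch (the version threshold is inferred from the two helpers this patch installs):

	#define __kernel_helper_version	(*(unsigned int *)0xffff0ffc)

	/* cmpxchg at 0xffff0fc0 is the second 32-byte helper slot counting
	 * down from 0xffff1000, so it is present once the version is >= 2. */
	static int have_kernel_cmpxchg(void)
	{
		return __kernel_helper_version >= 2;
	}
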
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index a39c6a42d68a..ad2d66c93a5c 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -5,6 +5,7 @@
 
 #include <asm-generic/vmlinux.lds.h>
 #include <linux/config.h>
+#include <asm/thread_info.h>
 
 OUTPUT_ARCH(arm)
 ENTRY(stext)
@@ -103,7 +104,7 @@ SECTIONS
 	__data_loc = ALIGN(4);		/* location in binary */
 	. = DATAADDR;
 #else
-	. = ALIGN(8192);
+	. = ALIGN(THREAD_SIZE);
 	__data_loc = .;
 #endif
 