path: root/arch/arm/kernel
author    Ingo Molnar <mingo@elte.hu>    2010-08-19 06:48:09 -0400
committer Ingo Molnar <mingo@elte.hu>    2010-08-19 06:48:09 -0400
commit    c8710ad38900153af7a3e6762e99c062cfa46443 (patch)
tree      a0c0632274c4eb72f51e99a5861f71cffe65ea60 /arch/arm/kernel
parent    6016ee13db518ab1cd0cbf43fc2ad5712021e338 (diff)
parent    86397dc3ccfc0e17b7550d05eaf15fe91f6498dd (diff)
Merge branch 'tip/perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile              5
-rw-r--r--  arch/arm/kernel/asm-offsets.c         3
-rw-r--r--  arch/arm/kernel/compat.c              7
-rw-r--r--  arch/arm/kernel/compat.h              2
-rw-r--r--  arch/arm/kernel/crash_dump.c         60
-rw-r--r--  arch/arm/kernel/entry-armv.S         29
-rw-r--r--  arch/arm/kernel/entry-common.S      124
-rw-r--r--  arch/arm/kernel/etm.c                 4
-rw-r--r--  arch/arm/kernel/irq.c                41
-rw-r--r--  arch/arm/kernel/kgdb.c              124
-rw-r--r--  arch/arm/kernel/machine_kexec.c      14
-rw-r--r--  arch/arm/kernel/module.c             34
-rw-r--r--  arch/arm/kernel/process.c            42
-rw-r--r--  arch/arm/kernel/ptrace.c             96
-rw-r--r--  arch/arm/kernel/relocate_kernel.S     6
-rw-r--r--  arch/arm/kernel/setup.c             111
-rw-r--r--  arch/arm/kernel/smp.c                17
-rw-r--r--  arch/arm/kernel/smp_twd.c             3
-rw-r--r--  arch/arm/kernel/sys_arm.c             4
-rw-r--r--  arch/arm/kernel/sys_oabi-compat.c     6
-rw-r--r--  arch/arm/kernel/tcm.c               118
-rw-r--r--  arch/arm/kernel/traps.c              41
22 files changed, 685 insertions(+), 206 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 26d302c28e13..980b78e31328 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -13,10 +13,12 @@ CFLAGS_REMOVE_return_address.o = -pg
13 13
14# Object file lists. 14# Object file lists.
15 15
16obj-y := compat.o elf.o entry-armv.o entry-common.o irq.o \ 16obj-y := elf.o entry-armv.o entry-common.o irq.o \
17 process.o ptrace.o return_address.o setup.o signal.o \ 17 process.o ptrace.o return_address.o setup.o signal.o \
18 sys_arm.o stacktrace.o time.o traps.o 18 sys_arm.o stacktrace.o time.o traps.o
19 19
20obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o
21
20obj-$(CONFIG_LEDS) += leds.o 22obj-$(CONFIG_LEDS) += leds.o
21obj-$(CONFIG_OC_ETM) += etm.o 23obj-$(CONFIG_OC_ETM) += etm.o
22 24
@@ -39,6 +41,7 @@ obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
39obj-$(CONFIG_KGDB) += kgdb.o 41obj-$(CONFIG_KGDB) += kgdb.o
40obj-$(CONFIG_ARM_UNWIND) += unwind.o 42obj-$(CONFIG_ARM_UNWIND) += unwind.o
41obj-$(CONFIG_HAVE_TCM) += tcm.o 43obj-$(CONFIG_HAVE_TCM) += tcm.o
44obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
42 45
43obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o 46obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
44AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312 47AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 883511522fca..85f2a019f77b 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -40,6 +40,9 @@
40int main(void) 40int main(void)
41{ 41{
42 DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); 42 DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
43#ifdef CONFIG_CC_STACKPROTECTOR
44 DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
45#endif
43 BLANK(); 46 BLANK();
44 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 47 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
45 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 48 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
diff --git a/arch/arm/kernel/compat.c b/arch/arm/kernel/compat.c
index 0a1385442f43..925652318b8b 100644
--- a/arch/arm/kernel/compat.c
+++ b/arch/arm/kernel/compat.c
@@ -217,10 +217,3 @@ void __init convert_to_tag_list(struct tag *tags)
217 struct param_struct *params = (struct param_struct *)tags; 217 struct param_struct *params = (struct param_struct *)tags;
218 build_tag_list(params, &params->u2); 218 build_tag_list(params, &params->u2);
219} 219}
220
221void __init squash_mem_tags(struct tag *tag)
222{
223 for (; tag->hdr.size; tag = tag_next(tag))
224 if (tag->hdr.tag == ATAG_MEM)
225 tag->hdr.tag = ATAG_NONE;
226}
diff --git a/arch/arm/kernel/compat.h b/arch/arm/kernel/compat.h
index 27e61a68bd1c..39264ab1b9c6 100644
--- a/arch/arm/kernel/compat.h
+++ b/arch/arm/kernel/compat.h
@@ -9,5 +9,3 @@
9*/ 9*/
10 10
11extern void convert_to_tag_list(struct tag *tags); 11extern void convert_to_tag_list(struct tag *tags);
12
13extern void squash_mem_tags(struct tag *tag);
diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
new file mode 100644
index 000000000000..cd3b853a8a6d
--- /dev/null
+++ b/arch/arm/kernel/crash_dump.c
@@ -0,0 +1,60 @@
1/*
2 * arch/arm/kernel/crash_dump.c
3 *
4 * Copyright (C) 2010 Nokia Corporation.
5 * Author: Mika Westerberg
6 *
7 * This code is taken from arch/x86/kernel/crash_dump_64.c
8 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
9 * Copyright (C) IBM Corporation, 2004. All rights reserved
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/errno.h>
17#include <linux/crash_dump.h>
18#include <linux/uaccess.h>
19#include <linux/io.h>
20
21/* stores the physical address of elf header of crash image */
22unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
23
24/**
25 * copy_oldmem_page() - copy one page from old kernel memory
26 * @pfn: page frame number to be copied
27 * @buf: buffer where the copied page is placed
28 * @csize: number of bytes to copy
29 * @offset: offset in bytes into the page
30 * @userbuf: if set, @buf is in the user address space
31 *
32 * This function copies one page from old kernel memory into buffer pointed by
33 * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
34 * copied or negative error in case of failure.
35 */
36ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
37 size_t csize, unsigned long offset,
38 int userbuf)
39{
40 void *vaddr;
41
42 if (!csize)
43 return 0;
44
45 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
46 if (!vaddr)
47 return -ENOMEM;
48
49 if (userbuf) {
50 if (copy_to_user(buf, vaddr + offset, csize)) {
51 iounmap(vaddr);
52 return -EFAULT;
53 }
54 } else {
55 memcpy(buf, vaddr + offset, csize);
56 }
57
58 iounmap(vaddr);
59 return csize;
60}
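
For context (not part of this diff): copy_oldmem_page() above is the architecture hook the generic kdump code calls when userspace reads /proc/vmcore in the capture kernel. A minimal sketch of such a caller, loosely modelled on the generic vmcore read path — the function name and loop are illustrative only:

static ssize_t read_oldmem_range(char *buf, size_t count, u64 paddr, int userbuf)
{
        unsigned long pfn = (unsigned long)(paddr >> PAGE_SHIFT);
        unsigned long offset = (unsigned long)(paddr & (PAGE_SIZE - 1));
        ssize_t read = 0, tmp;

        while (count) {
                size_t nr_bytes = min(count, (size_t)(PAGE_SIZE - offset));

                /* maps one page of the crashed kernel and copies out of it */
                tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
                if (tmp < 0)
                        return tmp;

                buf += nr_bytes;
                read += nr_bytes;
                count -= nr_bytes;
                offset = 0;
                pfn++;
        }
        return read;
}
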
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3fd7861de4d1..bb8e93a76407 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -22,6 +22,7 @@
22#include <asm/thread_notify.h> 22#include <asm/thread_notify.h>
23#include <asm/unwind.h> 23#include <asm/unwind.h>
24#include <asm/unistd.h> 24#include <asm/unistd.h>
25#include <asm/tls.h>
25 26
26#include "entry-header.S" 27#include "entry-header.S"
27 28
@@ -735,11 +736,11 @@ ENTRY(__switch_to)
735#ifdef CONFIG_MMU 736#ifdef CONFIG_MMU
736 ldr r6, [r2, #TI_CPU_DOMAIN] 737 ldr r6, [r2, #TI_CPU_DOMAIN]
737#endif 738#endif
738#if defined(CONFIG_HAS_TLS_REG) 739 set_tls r3, r4, r5
739 mcr p15, 0, r3, c13, c0, 3 @ set TLS register 740#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
740#elif !defined(CONFIG_TLS_REG_EMUL) 741 ldr r7, [r2, #TI_TASK]
741 mov r4, #0xffff0fff 742 ldr r8, =__stack_chk_guard
742 str r3, [r4, #-15] @ TLS val at 0xffff0ff0 743 ldr r7, [r7, #TSK_STACK_CANARY]
743#endif 744#endif
744#ifdef CONFIG_MMU 745#ifdef CONFIG_MMU
745 mcr p15, 0, r6, c3, c0, 0 @ Set domain register 746 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
@@ -749,6 +750,9 @@ ENTRY(__switch_to)
749 ldr r0, =thread_notify_head 750 ldr r0, =thread_notify_head
750 mov r1, #THREAD_NOTIFY_SWITCH 751 mov r1, #THREAD_NOTIFY_SWITCH
751 bl atomic_notifier_call_chain 752 bl atomic_notifier_call_chain
753#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
754 str r7, [r8]
755#endif
752 THUMB( mov ip, r4 ) 756 THUMB( mov ip, r4 )
753 mov r0, r5 757 mov r0, r5
754 ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously 758 ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
@@ -1005,17 +1009,12 @@ kuser_cmpxchg_fixup:
1005 */ 1009 */
1006 1010
1007__kuser_get_tls: @ 0xffff0fe0 1011__kuser_get_tls: @ 0xffff0fe0
1008 1012 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
1009#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
1010 ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0
1011#else
1012 mrc p15, 0, r0, c13, c0, 3 @ read TLS register
1013#endif
1014 usr_ret lr 1013 usr_ret lr
1015 1014 mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
1016 .rep 5 1015 .rep 4
1017 .word 0 @ pad up to __kuser_helper_version 1016 .word 0 @ 0xffff0ff0 software TLS value, then
1018 .endr 1017 .endr @ pad up to __kuser_helper_version
1019 1018
1020/* 1019/*
1021 * Reference declaration: 1020 * Reference declaration:
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 2c1db77d7848..f05a35a59694 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -92,75 +92,111 @@ ENDPROC(ret_from_fork)
92#define CALL(x) .long x 92#define CALL(x) .long x
93 93
94#ifdef CONFIG_FUNCTION_TRACER 94#ifdef CONFIG_FUNCTION_TRACER
95/*
96 * When compiling with -pg, gcc inserts a call to the mcount routine at the
97 * start of every function. In mcount, apart from the function's address (in
98 * lr), we need to get hold of the function's caller's address.
99 *
100 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
101 *
102 * bl mcount
103 *
104 * These versions have the limitation that in order for the mcount routine to
105 * be able to determine the function's caller's address, an APCS-style frame
106 * pointer (which is set up with something like the code below) is required.
107 *
108 * mov ip, sp
109 * push {fp, ip, lr, pc}
110 * sub fp, ip, #4
111 *
112 * With EABI, these frame pointers are not available unless -mapcs-frame is
113 * specified, and if building as Thumb-2, not even then.
114 *
115 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
116 * with call sites like:
117 *
118 * push {lr}
119 * bl __gnu_mcount_nc
120 *
121 * With these compilers, frame pointers are not necessary.
122 *
123 * mcount can be thought of as a function called in the middle of a subroutine
124 * call. As such, it needs to be transparent for both the caller and the
125 * callee: the original lr needs to be restored when leaving mcount, and no
126 * registers should be clobbered. (In the __gnu_mcount_nc implementation, we
127 * clobber the ip register. This is OK because the ARM calling convention
128 * allows it to be clobbered in subroutines and doesn't use it to hold
129 * parameters.)
130 */
95#ifdef CONFIG_DYNAMIC_FTRACE 131#ifdef CONFIG_DYNAMIC_FTRACE
96ENTRY(mcount) 132ENTRY(mcount)
97 stmdb sp!, {r0-r3, lr} 133 stmdb sp!, {r0-r3, lr}
98 mov r0, lr 134 mov r0, lr
99 sub r0, r0, #MCOUNT_INSN_SIZE 135 sub r0, r0, #MCOUNT_INSN_SIZE
100 136
101 .globl mcount_call 137 .globl mcount_call
102mcount_call: 138mcount_call:
103 bl ftrace_stub 139 bl ftrace_stub
104 ldr lr, [fp, #-4] @ restore lr 140 ldr lr, [fp, #-4] @ restore lr
105 ldmia sp!, {r0-r3, pc} 141 ldmia sp!, {r0-r3, pc}
106 142
107ENTRY(ftrace_caller) 143ENTRY(ftrace_caller)
108 stmdb sp!, {r0-r3, lr} 144 stmdb sp!, {r0-r3, lr}
109 ldr r1, [fp, #-4] 145 ldr r1, [fp, #-4]
110 mov r0, lr 146 mov r0, lr
111 sub r0, r0, #MCOUNT_INSN_SIZE 147 sub r0, r0, #MCOUNT_INSN_SIZE
112 148
113 .globl ftrace_call 149 .globl ftrace_call
114ftrace_call: 150ftrace_call:
115 bl ftrace_stub 151 bl ftrace_stub
116 ldr lr, [fp, #-4] @ restore lr 152 ldr lr, [fp, #-4] @ restore lr
117 ldmia sp!, {r0-r3, pc} 153 ldmia sp!, {r0-r3, pc}
118 154
119#else 155#else
120 156
121ENTRY(__gnu_mcount_nc) 157ENTRY(__gnu_mcount_nc)
122 stmdb sp!, {r0-r3, lr} 158 stmdb sp!, {r0-r3, lr}
123 ldr r0, =ftrace_trace_function 159 ldr r0, =ftrace_trace_function
124 ldr r2, [r0] 160 ldr r2, [r0]
125 adr r0, ftrace_stub 161 adr r0, ftrace_stub
126 cmp r0, r2 162 cmp r0, r2
127 bne gnu_trace 163 bne gnu_trace
128 ldmia sp!, {r0-r3, ip, lr} 164 ldmia sp!, {r0-r3, ip, lr}
129 mov pc, ip 165 mov pc, ip
130 166
131gnu_trace: 167gnu_trace:
132 ldr r1, [sp, #20] @ lr of instrumented routine 168 ldr r1, [sp, #20] @ lr of instrumented routine
133 mov r0, lr 169 mov r0, lr
134 sub r0, r0, #MCOUNT_INSN_SIZE 170 sub r0, r0, #MCOUNT_INSN_SIZE
135 mov lr, pc 171 mov lr, pc
136 mov pc, r2 172 mov pc, r2
137 ldmia sp!, {r0-r3, ip, lr} 173 ldmia sp!, {r0-r3, ip, lr}
138 mov pc, ip 174 mov pc, ip
139 175
140ENTRY(mcount) 176ENTRY(mcount)
141 stmdb sp!, {r0-r3, lr} 177 stmdb sp!, {r0-r3, lr}
142 ldr r0, =ftrace_trace_function 178 ldr r0, =ftrace_trace_function
143 ldr r2, [r0] 179 ldr r2, [r0]
144 adr r0, ftrace_stub 180 adr r0, ftrace_stub
145 cmp r0, r2 181 cmp r0, r2
146 bne trace 182 bne trace
147 ldr lr, [fp, #-4] @ restore lr 183 ldr lr, [fp, #-4] @ restore lr
148 ldmia sp!, {r0-r3, pc} 184 ldmia sp!, {r0-r3, pc}
149 185
150trace: 186trace:
151 ldr r1, [fp, #-4] @ lr of instrumented routine 187 ldr r1, [fp, #-4] @ lr of instrumented routine
152 mov r0, lr 188 mov r0, lr
153 sub r0, r0, #MCOUNT_INSN_SIZE 189 sub r0, r0, #MCOUNT_INSN_SIZE
154 mov lr, pc 190 mov lr, pc
155 mov pc, r2 191 mov pc, r2
156 ldr lr, [fp, #-4] @ restore lr 192 ldr lr, [fp, #-4] @ restore lr
157 ldmia sp!, {r0-r3, pc} 193 ldmia sp!, {r0-r3, pc}
158 194
159#endif /* CONFIG_DYNAMIC_FTRACE */ 195#endif /* CONFIG_DYNAMIC_FTRACE */
160 196
161 .globl ftrace_stub 197 .globl ftrace_stub
162ftrace_stub: 198ftrace_stub:
163 mov pc, lr 199 mov pc, lr
164 200
165#endif /* CONFIG_FUNCTION_TRACER */ 201#endif /* CONFIG_FUNCTION_TRACER */
166 202
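
For context (not part of this diff), the comment block added above describes the two call-site shapes gcc emits for profiling. A sketch of what they look like around an ordinary C function built with -pg — the exact instructions depend on compiler version and flags:

/*
 * int foo(int x) built with -pg:
 *
 * Older gcc (pre-4.4), APCS frame pointer required:
 *
 *         mov     ip, sp
 *         push    {fp, ip, lr, pc}
 *         sub     fp, ip, #4
 *         bl      mcount
 *         ... body of foo ...
 *
 * gcc 4.4 and later (EABI/Thumb-2 friendly, no frame pointer needed):
 *
 *         push    {lr}
 *         bl      __gnu_mcount_nc
 *         ... body of foo ...
 *
 * Either way the mcount routine must be transparent: lr is restored and no
 * argument registers are clobbered, exactly as the comment above requires.
 */
int foo(int x)
{
        return x * 2;
}
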
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 827753966301..56418f98cd01 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -543,7 +543,9 @@ static int __init etm_probe(struct amba_device *dev, struct amba_id *id)
543 t->etm_portsz = 1; 543 t->etm_portsz = 1;
544 544
545 etm_unlock(t); 545 etm_unlock(t);
546 ret = etm_readl(t, CSCR_PRSR); 546 (void)etm_readl(t, ETMMR_PDSR);
547 /* dummy first read */
548 (void)etm_readl(&tracer, ETMMR_OSSRR);
547 549
548 t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf; 550 t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
549 etm_writel(t, 0x440, ETMR_CTRL); 551 etm_writel(t, 0x440, ETMR_CTRL);
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 3b3d2c80509c..c0d5c3b3a760 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -47,12 +47,14 @@
47#define irq_finish(irq) do { } while (0) 47#define irq_finish(irq) do { } while (0)
48#endif 48#endif
49 49
50unsigned int arch_nr_irqs;
50void (*init_arch_irq)(void) __initdata = NULL; 51void (*init_arch_irq)(void) __initdata = NULL;
51unsigned long irq_err_count; 52unsigned long irq_err_count;
52 53
53int show_interrupts(struct seq_file *p, void *v) 54int show_interrupts(struct seq_file *p, void *v)
54{ 55{
55 int i = *(loff_t *) v, cpu; 56 int i = *(loff_t *) v, cpu;
57 struct irq_desc *desc;
56 struct irqaction * action; 58 struct irqaction * action;
57 unsigned long flags; 59 unsigned long flags;
58 60
@@ -67,24 +69,25 @@ int show_interrupts(struct seq_file *p, void *v)
67 seq_putc(p, '\n'); 69 seq_putc(p, '\n');
68 } 70 }
69 71
70 if (i < NR_IRQS) { 72 if (i < nr_irqs) {
71 raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 73 desc = irq_to_desc(i);
72 action = irq_desc[i].action; 74 raw_spin_lock_irqsave(&desc->lock, flags);
75 action = desc->action;
73 if (!action) 76 if (!action)
74 goto unlock; 77 goto unlock;
75 78
76 seq_printf(p, "%3d: ", i); 79 seq_printf(p, "%3d: ", i);
77 for_each_present_cpu(cpu) 80 for_each_present_cpu(cpu)
78 seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); 81 seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
79 seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-"); 82 seq_printf(p, " %10s", desc->chip->name ? : "-");
80 seq_printf(p, " %s", action->name); 83 seq_printf(p, " %s", action->name);
81 for (action = action->next; action; action = action->next) 84 for (action = action->next; action; action = action->next)
82 seq_printf(p, ", %s", action->name); 85 seq_printf(p, ", %s", action->name);
83 86
84 seq_putc(p, '\n'); 87 seq_putc(p, '\n');
85unlock: 88unlock:
86 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 89 raw_spin_unlock_irqrestore(&desc->lock, flags);
87 } else if (i == NR_IRQS) { 90 } else if (i == nr_irqs) {
88#ifdef CONFIG_FIQ 91#ifdef CONFIG_FIQ
89 show_fiq_list(p, v); 92 show_fiq_list(p, v);
90#endif 93#endif
@@ -112,7 +115,7 @@ asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
112 * Some hardware gives randomly wrong interrupts. Rather 115 * Some hardware gives randomly wrong interrupts. Rather
113 * than crashing, do something sensible. 116 * than crashing, do something sensible.
114 */ 117 */
115 if (unlikely(irq >= NR_IRQS)) { 118 if (unlikely(irq >= nr_irqs)) {
116 if (printk_ratelimit()) 119 if (printk_ratelimit())
117 printk(KERN_WARNING "Bad IRQ%u\n", irq); 120 printk(KERN_WARNING "Bad IRQ%u\n", irq);
118 ack_bad_irq(irq); 121 ack_bad_irq(irq);
@@ -132,12 +135,12 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
132 struct irq_desc *desc; 135 struct irq_desc *desc;
133 unsigned long flags; 136 unsigned long flags;
134 137
135 if (irq >= NR_IRQS) { 138 if (irq >= nr_irqs) {
136 printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); 139 printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
137 return; 140 return;
138 } 141 }
139 142
140 desc = irq_desc + irq; 143 desc = irq_to_desc(irq);
141 raw_spin_lock_irqsave(&desc->lock, flags); 144 raw_spin_lock_irqsave(&desc->lock, flags);
142 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; 145 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
143 if (iflags & IRQF_VALID) 146 if (iflags & IRQF_VALID)
@@ -151,14 +154,25 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
151 154
152void __init init_IRQ(void) 155void __init init_IRQ(void)
153{ 156{
157 struct irq_desc *desc;
154 int irq; 158 int irq;
155 159
156 for (irq = 0; irq < NR_IRQS; irq++) 160 for (irq = 0; irq < nr_irqs; irq++) {
157 irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE; 161 desc = irq_to_desc_alloc_node(irq, 0);
162 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
163 }
158 164
159 init_arch_irq(); 165 init_arch_irq();
160} 166}
161 167
168#ifdef CONFIG_SPARSE_IRQ
169int __init arch_probe_nr_irqs(void)
170{
171 nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
172 return 0;
173}
174#endif
175
162#ifdef CONFIG_HOTPLUG_CPU 176#ifdef CONFIG_HOTPLUG_CPU
163 177
164static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) 178static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
@@ -178,10 +192,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
178void migrate_irqs(void) 192void migrate_irqs(void)
179{ 193{
180 unsigned int i, cpu = smp_processor_id(); 194 unsigned int i, cpu = smp_processor_id();
195 struct irq_desc *desc;
181 196
182 for (i = 0; i < NR_IRQS; i++) { 197 for_each_irq_desc(i, desc) {
183 struct irq_desc *desc = irq_desc + i;
184
185 if (desc->node == cpu) { 198 if (desc->node == cpu) {
186 unsigned int newcpu = cpumask_any_and(desc->affinity, 199 unsigned int newcpu = cpumask_any_and(desc->affinity,
187 cpu_online_mask); 200 cpu_online_mask);
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index c868a8864117..778c2f7024ff 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -10,57 +10,62 @@
10 * Deepak Saxena <dsaxena@plexity.net> 10 * Deepak Saxena <dsaxena@plexity.net>
11 */ 11 */
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/kdebug.h>
13#include <linux/kgdb.h> 14#include <linux/kgdb.h>
14#include <asm/traps.h> 15#include <asm/traps.h>
15 16
16/* Make a local copy of the registers passed into the handler (bletch) */ 17struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
17void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
18{ 18{
19 int regno; 19 { "r0", 4, offsetof(struct pt_regs, ARM_r0)},
20 20 { "r1", 4, offsetof(struct pt_regs, ARM_r1)},
21 /* Initialize all to zero. */ 21 { "r2", 4, offsetof(struct pt_regs, ARM_r2)},
22 for (regno = 0; regno < GDB_MAX_REGS; regno++) 22 { "r3", 4, offsetof(struct pt_regs, ARM_r3)},
23 gdb_regs[regno] = 0; 23 { "r4", 4, offsetof(struct pt_regs, ARM_r4)},
24 { "r5", 4, offsetof(struct pt_regs, ARM_r5)},
25 { "r6", 4, offsetof(struct pt_regs, ARM_r6)},
26 { "r7", 4, offsetof(struct pt_regs, ARM_r7)},
27 { "r8", 4, offsetof(struct pt_regs, ARM_r8)},
28 { "r9", 4, offsetof(struct pt_regs, ARM_r9)},
29 { "r10", 4, offsetof(struct pt_regs, ARM_r10)},
30 { "fp", 4, offsetof(struct pt_regs, ARM_fp)},
31 { "ip", 4, offsetof(struct pt_regs, ARM_ip)},
32 { "sp", 4, offsetof(struct pt_regs, ARM_sp)},
33 { "lr", 4, offsetof(struct pt_regs, ARM_lr)},
34 { "pc", 4, offsetof(struct pt_regs, ARM_pc)},
35 { "f0", 12, -1 },
36 { "f1", 12, -1 },
37 { "f2", 12, -1 },
38 { "f3", 12, -1 },
39 { "f4", 12, -1 },
40 { "f5", 12, -1 },
41 { "f6", 12, -1 },
42 { "f7", 12, -1 },
43 { "fps", 4, -1 },
44 { "cpsr", 4, offsetof(struct pt_regs, ARM_cpsr)},
45};
24 46
25 gdb_regs[_R0] = kernel_regs->ARM_r0; 47char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
26 gdb_regs[_R1] = kernel_regs->ARM_r1; 48{
27 gdb_regs[_R2] = kernel_regs->ARM_r2; 49 if (regno >= DBG_MAX_REG_NUM || regno < 0)
28 gdb_regs[_R3] = kernel_regs->ARM_r3; 50 return NULL;
29 gdb_regs[_R4] = kernel_regs->ARM_r4; 51
30 gdb_regs[_R5] = kernel_regs->ARM_r5; 52 if (dbg_reg_def[regno].offset != -1)
31 gdb_regs[_R6] = kernel_regs->ARM_r6; 53 memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
32 gdb_regs[_R7] = kernel_regs->ARM_r7; 54 dbg_reg_def[regno].size);
33 gdb_regs[_R8] = kernel_regs->ARM_r8; 55 else
34 gdb_regs[_R9] = kernel_regs->ARM_r9; 56 memset(mem, 0, dbg_reg_def[regno].size);
35 gdb_regs[_R10] = kernel_regs->ARM_r10; 57 return dbg_reg_def[regno].name;
36 gdb_regs[_FP] = kernel_regs->ARM_fp;
37 gdb_regs[_IP] = kernel_regs->ARM_ip;
38 gdb_regs[_SPT] = kernel_regs->ARM_sp;
39 gdb_regs[_LR] = kernel_regs->ARM_lr;
40 gdb_regs[_PC] = kernel_regs->ARM_pc;
41 gdb_regs[_CPSR] = kernel_regs->ARM_cpsr;
42} 58}
43 59
44/* Copy local gdb registers back to kgdb regs, for later copy to kernel */ 60int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
45void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
46{ 61{
47 kernel_regs->ARM_r0 = gdb_regs[_R0]; 62 if (regno >= DBG_MAX_REG_NUM || regno < 0)
48 kernel_regs->ARM_r1 = gdb_regs[_R1]; 63 return -EINVAL;
49 kernel_regs->ARM_r2 = gdb_regs[_R2]; 64
50 kernel_regs->ARM_r3 = gdb_regs[_R3]; 65 if (dbg_reg_def[regno].offset != -1)
51 kernel_regs->ARM_r4 = gdb_regs[_R4]; 66 memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
52 kernel_regs->ARM_r5 = gdb_regs[_R5]; 67 dbg_reg_def[regno].size);
53 kernel_regs->ARM_r6 = gdb_regs[_R6]; 68 return 0;
54 kernel_regs->ARM_r7 = gdb_regs[_R7];
55 kernel_regs->ARM_r8 = gdb_regs[_R8];
56 kernel_regs->ARM_r9 = gdb_regs[_R9];
57 kernel_regs->ARM_r10 = gdb_regs[_R10];
58 kernel_regs->ARM_fp = gdb_regs[_FP];
59 kernel_regs->ARM_ip = gdb_regs[_IP];
60 kernel_regs->ARM_sp = gdb_regs[_SPT];
61 kernel_regs->ARM_lr = gdb_regs[_LR];
62 kernel_regs->ARM_pc = gdb_regs[_PC];
63 kernel_regs->ARM_cpsr = gdb_regs[_CPSR];
64} 69}
65 70
66void 71void
@@ -176,6 +181,33 @@ void kgdb_roundup_cpus(unsigned long flags)
176 local_irq_disable(); 181 local_irq_disable();
177} 182}
178 183
184static int __kgdb_notify(struct die_args *args, unsigned long cmd)
185{
186 struct pt_regs *regs = args->regs;
187
188 if (kgdb_handle_exception(1, args->signr, cmd, regs))
189 return NOTIFY_DONE;
190 return NOTIFY_STOP;
191}
192static int
193kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
194{
195 unsigned long flags;
196 int ret;
197
198 local_irq_save(flags);
199 ret = __kgdb_notify(ptr, cmd);
200 local_irq_restore(flags);
201
202 return ret;
203}
204
205static struct notifier_block kgdb_notifier = {
206 .notifier_call = kgdb_notify,
207 .priority = -INT_MAX,
208};
209
210
179/** 211/**
180 * kgdb_arch_init - Perform any architecture specific initalization. 212 * kgdb_arch_init - Perform any architecture specific initalization.
181 * 213 *
@@ -184,6 +216,11 @@ void kgdb_roundup_cpus(unsigned long flags)
184 */ 216 */
185int kgdb_arch_init(void) 217int kgdb_arch_init(void)
186{ 218{
219 int ret = register_die_notifier(&kgdb_notifier);
220
221 if (ret != 0)
222 return ret;
223
187 register_undef_hook(&kgdb_brkpt_hook); 224 register_undef_hook(&kgdb_brkpt_hook);
188 register_undef_hook(&kgdb_compiled_brkpt_hook); 225 register_undef_hook(&kgdb_compiled_brkpt_hook);
189 226
@@ -200,6 +237,7 @@ void kgdb_arch_exit(void)
200{ 237{
201 unregister_undef_hook(&kgdb_brkpt_hook); 238 unregister_undef_hook(&kgdb_brkpt_hook);
202 unregister_undef_hook(&kgdb_compiled_brkpt_hook); 239 unregister_undef_hook(&kgdb_compiled_brkpt_hook);
240 unregister_die_notifier(&kgdb_notifier);
203} 241}
204 242
205/* 243/*
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 598ca61e7bca..1fc74cbd1a19 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -37,12 +37,12 @@ void machine_kexec_cleanup(struct kimage *image)
37{ 37{
38} 38}
39 39
40void machine_shutdown(void)
41{
42}
43
44void machine_crash_shutdown(struct pt_regs *regs) 40void machine_crash_shutdown(struct pt_regs *regs)
45{ 41{
42 local_irq_disable();
43 crash_save_cpu(regs, smp_processor_id());
44
45 printk(KERN_INFO "Loading crashdump kernel...\n");
46} 46}
47 47
48void machine_kexec(struct kimage *image) 48void machine_kexec(struct kimage *image)
@@ -74,7 +74,11 @@ void machine_kexec(struct kimage *image)
74 (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); 74 (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
75 printk(KERN_INFO "Bye!\n"); 75 printk(KERN_INFO "Bye!\n");
76 76
77 cpu_proc_fin(); 77 local_irq_disable();
78 local_fiq_disable();
78 setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/ 79 setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
80 flush_cache_all();
81 cpu_proc_fin();
82 flush_cache_all();
79 cpu_reset(reboot_code_buffer_phys); 83 cpu_reset(reboot_code_buffer_phys);
80} 84}
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index c628bdf6c430..6b4605893f1e 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -102,7 +102,9 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
102 unsigned long loc; 102 unsigned long loc;
103 Elf32_Sym *sym; 103 Elf32_Sym *sym;
104 s32 offset; 104 s32 offset;
105#ifdef CONFIG_THUMB2_KERNEL
105 u32 upper, lower, sign, j1, j2; 106 u32 upper, lower, sign, j1, j2;
107#endif
106 108
107 offset = ELF32_R_SYM(rel->r_info); 109 offset = ELF32_R_SYM(rel->r_info);
108 if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) { 110 if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
@@ -185,6 +187,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
185 (offset & 0x0fff); 187 (offset & 0x0fff);
186 break; 188 break;
187 189
190#ifdef CONFIG_THUMB2_KERNEL
188 case R_ARM_THM_CALL: 191 case R_ARM_THM_CALL:
189 case R_ARM_THM_JUMP24: 192 case R_ARM_THM_JUMP24:
190 upper = *(u16 *)loc; 193 upper = *(u16 *)loc;
@@ -233,9 +236,40 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
233 *(u16 *)(loc + 2) = (u16)((lower & 0xd000) | 236 *(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
234 (j1 << 13) | (j2 << 11) | 237 (j1 << 13) | (j2 << 11) |
235 ((offset >> 1) & 0x07ff)); 238 ((offset >> 1) & 0x07ff));
239 break;
240
241 case R_ARM_THM_MOVW_ABS_NC:
242 case R_ARM_THM_MOVT_ABS:
236 upper = *(u16 *)loc; 243 upper = *(u16 *)loc;
237 lower = *(u16 *)(loc + 2); 244 lower = *(u16 *)(loc + 2);
245
246 /*
247 * MOVT/MOVW instructions encoding in Thumb-2:
248 *
249 * i = upper[10]
250 * imm4 = upper[3:0]
251 * imm3 = lower[14:12]
252 * imm8 = lower[7:0]
253 *
254 * imm16 = imm4:i:imm3:imm8
255 */
256 offset = ((upper & 0x000f) << 12) |
257 ((upper & 0x0400) << 1) |
258 ((lower & 0x7000) >> 4) | (lower & 0x00ff);
259 offset = (offset ^ 0x8000) - 0x8000;
260 offset += sym->st_value;
261
262 if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
263 offset >>= 16;
264
265 *(u16 *)loc = (u16)((upper & 0xfbf0) |
266 ((offset & 0xf000) >> 12) |
267 ((offset & 0x0800) >> 1));
268 *(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
269 ((offset & 0x0700) << 4) |
270 (offset & 0x00ff));
238 break; 271 break;
272#endif
239 273
240 default: 274 default:
241 printk(KERN_ERR "%s: unknown relocation: %u\n", 275 printk(KERN_ERR "%s: unknown relocation: %u\n",
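
For context (not part of this diff), the bit shuffling done in place by the new R_ARM_THM_MOVW_ABS_NC/R_ARM_THM_MOVT_ABS cases can be written as a pair of stand-alone helpers; this sketch simply mirrors the masks used above:

#include <stdint.h>

/* imm16 = imm4:i:imm3:imm8, split across the two Thumb-2 halfwords */
static uint16_t thm_mov_extract_imm16(uint16_t upper, uint16_t lower)
{
        return ((upper & 0x000f) << 12) |       /* imm4 -> imm16[15:12] */
               ((upper & 0x0400) << 1)  |       /* i    -> imm16[11]    */
               ((lower & 0x7000) >> 4)  |       /* imm3 -> imm16[10:8]  */
                (lower & 0x00ff);               /* imm8 -> imm16[7:0]   */
}

static void thm_mov_insert_imm16(uint16_t *upper, uint16_t *lower, uint16_t imm16)
{
        *upper = (*upper & 0xfbf0) |
                 ((imm16 & 0xf000) >> 12) |     /* imm4 */
                 ((imm16 & 0x0800) >> 1);       /* i    */
        *lower = (*lower & 0x8f00) |
                 ((imm16 & 0x0700) << 4) |      /* imm3 */
                  (imm16 & 0x00ff);             /* imm8 */
}
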
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index a4a9cc88bec7..401e38be1f78 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -28,7 +28,9 @@
28#include <linux/tick.h> 28#include <linux/tick.h>
29#include <linux/utsname.h> 29#include <linux/utsname.h>
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31#include <linux/random.h>
31 32
33#include <asm/cacheflush.h>
32#include <asm/leds.h> 34#include <asm/leds.h>
33#include <asm/processor.h> 35#include <asm/processor.h>
34#include <asm/system.h> 36#include <asm/system.h>
@@ -36,6 +38,12 @@
36#include <asm/stacktrace.h> 38#include <asm/stacktrace.h>
37#include <asm/mach/time.h> 39#include <asm/mach/time.h>
38 40
41#ifdef CONFIG_CC_STACKPROTECTOR
42#include <linux/stackprotector.h>
43unsigned long __stack_chk_guard __read_mostly;
44EXPORT_SYMBOL(__stack_chk_guard);
45#endif
46
39static const char *processor_modes[] = { 47static const char *processor_modes[] = {
40 "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , 48 "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
41 "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", 49 "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
@@ -84,10 +92,9 @@ __setup("hlt", hlt_setup);
84 92
85void arm_machine_restart(char mode, const char *cmd) 93void arm_machine_restart(char mode, const char *cmd)
86{ 94{
87 /* 95 /* Disable interrupts first */
88 * Clean and disable cache, and turn off interrupts 96 local_irq_disable();
89 */ 97 local_fiq_disable();
90 cpu_proc_fin();
91 98
92 /* 99 /*
93 * Tell the mm system that we are going to reboot - 100 * Tell the mm system that we are going to reboot -
@@ -96,6 +103,15 @@ void arm_machine_restart(char mode, const char *cmd)
96 */ 103 */
97 setup_mm_for_reboot(mode); 104 setup_mm_for_reboot(mode);
98 105
106 /* Clean and invalidate caches */
107 flush_cache_all();
108
109 /* Turn off caching */
110 cpu_proc_fin();
111
112 /* Push out any further dirty data, and ensure cache is empty */
113 flush_cache_all();
114
99 /* 115 /*
100 * Now call the architecture specific reboot code. 116 * Now call the architecture specific reboot code.
101 */ 117 */
@@ -189,19 +205,29 @@ int __init reboot_setup(char *str)
189 205
190__setup("reboot=", reboot_setup); 206__setup("reboot=", reboot_setup);
191 207
192void machine_halt(void) 208void machine_shutdown(void)
193{ 209{
210#ifdef CONFIG_SMP
211 smp_send_stop();
212#endif
194} 213}
195 214
215void machine_halt(void)
216{
217 machine_shutdown();
218 while (1);
219}
196 220
197void machine_power_off(void) 221void machine_power_off(void)
198{ 222{
223 machine_shutdown();
199 if (pm_power_off) 224 if (pm_power_off)
200 pm_power_off(); 225 pm_power_off();
201} 226}
202 227
203void machine_restart(char *cmd) 228void machine_restart(char *cmd)
204{ 229{
230 machine_shutdown();
205 arm_pm_restart(reboot_mode, cmd); 231 arm_pm_restart(reboot_mode, cmd);
206} 232}
207 233
@@ -426,3 +452,9 @@ unsigned long get_wchan(struct task_struct *p)
426 } while (count ++ < 16); 452 } while (count ++ < 16);
427 return 0; 453 return 0;
428} 454}
455
456unsigned long arch_randomize_brk(struct mm_struct *mm)
457{
458 unsigned long range_end = mm->brk + 0x02000000;
459 return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
460}
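
For context (not part of this diff): __stack_chk_guard defined above is the value gcc's -fstack-protector instrumentation compares against. Conceptually a protected function behaves as if it were written like this — a sketch only, the real checks are emitted by the compiler, not written by hand:

#include <linux/string.h>

extern unsigned long __stack_chk_guard;
extern void __stack_chk_fail(void);

static void copy_name(const char *src)
{
        unsigned long canary = __stack_chk_guard;       /* stored next to the return address */
        char buf[32];

        strlcpy(buf, src, sizeof(buf));
        /* ... use buf ... */

        if (canary != __stack_chk_guard)                /* smashed by an overflow? */
                __stack_chk_fail();                     /* never returns */
}

On !SMP kernels the per-task canary is switched into the single global __stack_chk_guard at context-switch time, which is what the TSK_STACK_CANARY load and the store added to __switch_to in entry-armv.S are for.
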
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 3f562a7c0a99..f99d489822d5 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -52,6 +52,102 @@
52#define BREAKINST_THUMB 0xde01 52#define BREAKINST_THUMB 0xde01
53#endif 53#endif
54 54
55struct pt_regs_offset {
56 const char *name;
57 int offset;
58};
59
60#define REG_OFFSET_NAME(r) \
61 {.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
62#define REG_OFFSET_END {.name = NULL, .offset = 0}
63
64static const struct pt_regs_offset regoffset_table[] = {
65 REG_OFFSET_NAME(r0),
66 REG_OFFSET_NAME(r1),
67 REG_OFFSET_NAME(r2),
68 REG_OFFSET_NAME(r3),
69 REG_OFFSET_NAME(r4),
70 REG_OFFSET_NAME(r5),
71 REG_OFFSET_NAME(r6),
72 REG_OFFSET_NAME(r7),
73 REG_OFFSET_NAME(r8),
74 REG_OFFSET_NAME(r9),
75 REG_OFFSET_NAME(r10),
76 REG_OFFSET_NAME(fp),
77 REG_OFFSET_NAME(ip),
78 REG_OFFSET_NAME(sp),
79 REG_OFFSET_NAME(lr),
80 REG_OFFSET_NAME(pc),
81 REG_OFFSET_NAME(cpsr),
82 REG_OFFSET_NAME(ORIG_r0),
83 REG_OFFSET_END,
84};
85
86/**
87 * regs_query_register_offset() - query register offset from its name
88 * @name: the name of a register
89 *
90 * regs_query_register_offset() returns the offset of a register in struct
91 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
92 */
93int regs_query_register_offset(const char *name)
94{
95 const struct pt_regs_offset *roff;
96 for (roff = regoffset_table; roff->name != NULL; roff++)
97 if (!strcmp(roff->name, name))
98 return roff->offset;
99 return -EINVAL;
100}
101
102/**
103 * regs_query_register_name() - query register name from its offset
104 * @offset: the offset of a register in struct pt_regs.
105 *
106 * regs_query_register_name() returns the name of a register from its
107 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
108 */
109const char *regs_query_register_name(unsigned int offset)
110{
111 const struct pt_regs_offset *roff;
112 for (roff = regoffset_table; roff->name != NULL; roff++)
113 if (roff->offset == offset)
114 return roff->name;
115 return NULL;
116}
117
118/**
119 * regs_within_kernel_stack() - check the address in the stack
120 * @regs: pt_regs which contains kernel stack pointer.
121 * @addr: address which is checked.
122 *
123 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
124 * If @addr is within the kernel stack, it returns true. If not, returns false.
125 */
126bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
127{
128 return ((addr & ~(THREAD_SIZE - 1)) ==
129 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
130}
131
132/**
133 * regs_get_kernel_stack_nth() - get Nth entry of the stack
134 * @regs: pt_regs which contains kernel stack pointer.
135 * @n: stack entry number.
136 *
137 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
138 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
139 * this returns 0.
140 */
141unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
142{
143 unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
144 addr += n;
145 if (regs_within_kernel_stack(regs, (unsigned long)addr))
146 return *addr;
147 else
148 return 0;
149}
150
55/* 151/*
56 * this routine will get a word off of the processes privileged stack. 152 * this routine will get a word off of the processes privileged stack.
57 * the offset is how far from the base addr as stored in the THREAD. 153 * the offset is how far from the base addr as stored in the THREAD.
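
For context (not part of this diff), a sketch of how a consumer such as the kprobe-based event tracer — the sort of user this register/stack accessor API targets — might combine the new helpers; the function itself is hypothetical:

static unsigned long fetch_probe_args(struct pt_regs *regs)
{
        unsigned long r0 = 0, stacked;
        int off;

        /* look a register up by name, then read it straight out of pt_regs */
        off = regs_query_register_offset("r0");
        if (off >= 0)
                r0 = *(unsigned long *)((void *)regs + off);

        /* peek at the third word on the kernel stack at the time of the trap */
        stacked = regs_get_kernel_stack_nth(regs, 2);

        return r0 + stacked;
}
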
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 61930eb09029..fd26f8d65151 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -10,6 +10,12 @@ relocate_new_kernel:
10 ldr r0,kexec_indirection_page 10 ldr r0,kexec_indirection_page
11 ldr r1,kexec_start_address 11 ldr r1,kexec_start_address
12 12
13 /*
14 * If there is no indirection page (we are doing crashdumps)
15 * skip any relocation.
16 */
17 cmp r0, #0
18 beq 2f
13 19
140: /* top, read another word for the indirection page */ 200: /* top, read another word for the indirection page */
15 ldr r3, [r0],#4 21 ldr r3, [r0],#4
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 122d999bdc7c..d5231ae7355a 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -19,12 +19,15 @@
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20#include <linux/screen_info.h> 20#include <linux/screen_info.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kexec.h>
23#include <linux/crash_dump.h>
22#include <linux/root_dev.h> 24#include <linux/root_dev.h>
23#include <linux/cpu.h> 25#include <linux/cpu.h>
24#include <linux/interrupt.h> 26#include <linux/interrupt.h>
25#include <linux/smp.h> 27#include <linux/smp.h>
26#include <linux/fs.h> 28#include <linux/fs.h>
27#include <linux/proc_fs.h> 29#include <linux/proc_fs.h>
30#include <linux/memblock.h>
28 31
29#include <asm/unified.h> 32#include <asm/unified.h>
30#include <asm/cpu.h> 33#include <asm/cpu.h>
@@ -44,7 +47,9 @@
44#include <asm/traps.h> 47#include <asm/traps.h>
45#include <asm/unwind.h> 48#include <asm/unwind.h>
46 49
50#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
47#include "compat.h" 51#include "compat.h"
52#endif
48#include "atags.h" 53#include "atags.h"
49#include "tcm.h" 54#include "tcm.h"
50 55
@@ -269,6 +274,21 @@ static void __init cacheid_init(void)
269extern struct proc_info_list *lookup_processor_type(unsigned int); 274extern struct proc_info_list *lookup_processor_type(unsigned int);
270extern struct machine_desc *lookup_machine_type(unsigned int); 275extern struct machine_desc *lookup_machine_type(unsigned int);
271 276
277static void __init feat_v6_fixup(void)
278{
279 int id = read_cpuid_id();
280
281 if ((id & 0xff0f0000) != 0x41070000)
282 return;
283
284 /*
285 * HWCAP_TLS is available only on 1136 r1p0 and later,
286 * see also kuser_get_tls_init.
287 */
288 if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
289 elf_hwcap &= ~HWCAP_TLS;
290}
291
272static void __init setup_processor(void) 292static void __init setup_processor(void)
273{ 293{
274 struct proc_info_list *list; 294 struct proc_info_list *list;
@@ -311,6 +331,8 @@ static void __init setup_processor(void)
311 elf_hwcap &= ~HWCAP_THUMB; 331 elf_hwcap &= ~HWCAP_THUMB;
312#endif 332#endif
313 333
334 feat_v6_fixup();
335
314 cacheid_init(); 336 cacheid_init();
315 cpu_proc_init(); 337 cpu_proc_init();
316} 338}
@@ -402,13 +424,12 @@ static int __init arm_add_memory(unsigned long start, unsigned long size)
402 size -= start & ~PAGE_MASK; 424 size -= start & ~PAGE_MASK;
403 bank->start = PAGE_ALIGN(start); 425 bank->start = PAGE_ALIGN(start);
404 bank->size = size & PAGE_MASK; 426 bank->size = size & PAGE_MASK;
405 bank->node = PHYS_TO_NID(start);
406 427
407 /* 428 /*
408 * Check whether this memory region has non-zero size or 429 * Check whether this memory region has non-zero size or
409 * invalid node number. 430 * invalid node number.
410 */ 431 */
411 if (bank->size == 0 || bank->node >= MAX_NUMNODES) 432 if (bank->size == 0)
412 return -EINVAL; 433 return -EINVAL;
413 434
414 meminfo.nr_banks++; 435 meminfo.nr_banks++;
@@ -663,6 +684,86 @@ static int __init customize_machine(void)
663} 684}
664arch_initcall(customize_machine); 685arch_initcall(customize_machine);
665 686
687#ifdef CONFIG_KEXEC
688static inline unsigned long long get_total_mem(void)
689{
690 unsigned long total;
691
692 total = max_low_pfn - min_low_pfn;
693 return total << PAGE_SHIFT;
694}
695
696/**
697 * reserve_crashkernel() - reserves memory area for crash kernel
698 *
699 * This function reserves the memory area given in the "crashkernel=" kernel
700 * command line parameter. The memory reserved is used by a dump capture
701 * kernel when the primary kernel crashes.
702 */
703static void __init reserve_crashkernel(void)
704{
705 unsigned long long crash_size, crash_base;
706 unsigned long long total_mem;
707 int ret;
708
709 total_mem = get_total_mem();
710 ret = parse_crashkernel(boot_command_line, total_mem,
711 &crash_size, &crash_base);
712 if (ret)
713 return;
714
715 ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
716 if (ret < 0) {
717 printk(KERN_WARNING "crashkernel reservation failed - "
718 "memory is in use (0x%lx)\n", (unsigned long)crash_base);
719 return;
720 }
721
722 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
723 "for crashkernel (System RAM: %ldMB)\n",
724 (unsigned long)(crash_size >> 20),
725 (unsigned long)(crash_base >> 20),
726 (unsigned long)(total_mem >> 20));
727
728 crashk_res.start = crash_base;
729 crashk_res.end = crash_base + crash_size - 1;
730 insert_resource(&iomem_resource, &crashk_res);
731}
732#else
733static inline void reserve_crashkernel(void) {}
734#endif /* CONFIG_KEXEC */
735
736/*
737 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
738 * is_kdump_kernel() to determine if we are booting after a panic. Hence
739 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
740 */
741
742#ifdef CONFIG_CRASH_DUMP
743/*
744 * elfcorehdr= specifies the location of elf core header stored by the crashed
745 * kernel. This option will be passed by kexec loader to the capture kernel.
746 */
747static int __init setup_elfcorehdr(char *arg)
748{
749 char *end;
750
751 if (!arg)
752 return -EINVAL;
753
754 elfcorehdr_addr = memparse(arg, &end);
755 return end > arg ? 0 : -EINVAL;
756}
757early_param("elfcorehdr", setup_elfcorehdr);
758#endif /* CONFIG_CRASH_DUMP */
759
760static void __init squash_mem_tags(struct tag *tag)
761{
762 for (; tag->hdr.size; tag = tag_next(tag))
763 if (tag->hdr.tag == ATAG_MEM)
764 tag->hdr.tag = ATAG_NONE;
765}
766
666void __init setup_arch(char **cmdline_p) 767void __init setup_arch(char **cmdline_p)
667{ 768{
668 struct tag *tags = (struct tag *)&init_tags; 769 struct tag *tags = (struct tag *)&init_tags;
@@ -683,12 +784,14 @@ void __init setup_arch(char **cmdline_p)
683 else if (mdesc->boot_params) 784 else if (mdesc->boot_params)
684 tags = phys_to_virt(mdesc->boot_params); 785 tags = phys_to_virt(mdesc->boot_params);
685 786
787#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
686 /* 788 /*
687 * If we have the old style parameters, convert them to 789 * If we have the old style parameters, convert them to
688 * a tag list. 790 * a tag list.
689 */ 791 */
690 if (tags->hdr.tag != ATAG_CORE) 792 if (tags->hdr.tag != ATAG_CORE)
691 convert_to_tag_list(tags); 793 convert_to_tag_list(tags);
794#endif
692 if (tags->hdr.tag != ATAG_CORE) 795 if (tags->hdr.tag != ATAG_CORE)
693 tags = (struct tag *)&init_tags; 796 tags = (struct tag *)&init_tags;
694 797
@@ -716,12 +819,15 @@ void __init setup_arch(char **cmdline_p)
716 819
717 parse_early_param(); 820 parse_early_param();
718 821
822 arm_memblock_init(&meminfo, mdesc);
823
719 paging_init(mdesc); 824 paging_init(mdesc);
720 request_standard_resources(&meminfo, mdesc); 825 request_standard_resources(&meminfo, mdesc);
721 826
722#ifdef CONFIG_SMP 827#ifdef CONFIG_SMP
723 smp_init_cpus(); 828 smp_init_cpus();
724#endif 829#endif
830 reserve_crashkernel();
725 831
726 cpu_init(); 832 cpu_init();
727 tcm_init(); 833 tcm_init();
@@ -729,6 +835,7 @@ void __init setup_arch(char **cmdline_p)
729 /* 835 /*
730 * Set up various architecture-specific pointers 836 * Set up various architecture-specific pointers
731 */ 837 */
838 arch_nr_irqs = mdesc->nr_irqs;
732 init_arch_irq = mdesc->init_irq; 839 init_arch_irq = mdesc->init_irq;
733 system_timer = mdesc->timer; 840 system_timer = mdesc->timer;
734 init_machine = mdesc->init_machine; 841 init_machine = mdesc->init_machine;
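
For context (not part of this diff), the two early parameters handled above fit together in a typical kdump setup roughly like this — the values are purely illustrative and platform specific:

  First (production) kernel command line, so that reserve_crashkernel() sets
  memory aside at boot:

        console=ttyAMA0 root=/dev/mmcblk0p2 crashkernel=32M@64M

  Capture kernel command line, as assembled by the kexec loader after a crash;
  the loader appends the physical address of the ELF core header it stored in
  the reserved region:

        console=ttyAMA0 root=/dev/mmcblk0p2 maxcpus=1 elfcorehdr=<address from kexec>
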
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b8c3d0f689d9..40dc74f2b27f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -429,7 +429,11 @@ static void smp_timer_broadcast(const struct cpumask *mask)
429{ 429{
430 send_ipi_message(mask, IPI_TIMER); 430 send_ipi_message(mask, IPI_TIMER);
431} 431}
432#else
433#define smp_timer_broadcast NULL
434#endif
432 435
436#ifndef CONFIG_LOCAL_TIMERS
433static void broadcast_timer_set_mode(enum clock_event_mode mode, 437static void broadcast_timer_set_mode(enum clock_event_mode mode,
434 struct clock_event_device *evt) 438 struct clock_event_device *evt)
435{ 439{
@@ -444,7 +448,6 @@ static void local_timer_setup(struct clock_event_device *evt)
444 evt->rating = 400; 448 evt->rating = 400;
445 evt->mult = 1; 449 evt->mult = 1;
446 evt->set_mode = broadcast_timer_set_mode; 450 evt->set_mode = broadcast_timer_set_mode;
447 evt->broadcast = smp_timer_broadcast;
448 451
449 clockevents_register_device(evt); 452 clockevents_register_device(evt);
450} 453}
@@ -456,6 +459,7 @@ void __cpuinit percpu_timer_setup(void)
456 struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); 459 struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
457 460
458 evt->cpumask = cpumask_of(cpu); 461 evt->cpumask = cpumask_of(cpu);
462 evt->broadcast = smp_timer_broadcast;
459 463
460 local_timer_setup(evt); 464 local_timer_setup(evt);
461} 465}
@@ -467,10 +471,13 @@ static DEFINE_SPINLOCK(stop_lock);
467 */ 471 */
468static void ipi_cpu_stop(unsigned int cpu) 472static void ipi_cpu_stop(unsigned int cpu)
469{ 473{
470 spin_lock(&stop_lock); 474 if (system_state == SYSTEM_BOOTING ||
471 printk(KERN_CRIT "CPU%u: stopping\n", cpu); 475 system_state == SYSTEM_RUNNING) {
472 dump_stack(); 476 spin_lock(&stop_lock);
473 spin_unlock(&stop_lock); 477 printk(KERN_CRIT "CPU%u: stopping\n", cpu);
478 dump_stack();
479 spin_unlock(&stop_lock);
480 }
474 481
475 set_cpu_online(cpu, false); 482 set_cpu_online(cpu, false);
476 483
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 7c5f0c024db7..35882fbf37f9 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -132,7 +132,8 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
132 twd_calibrate_rate(); 132 twd_calibrate_rate();
133 133
134 clk->name = "local_timer"; 134 clk->name = "local_timer";
135 clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; 135 clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
136 CLOCK_EVT_FEAT_C3STOP;
136 clk->rating = 350; 137 clk->rating = 350;
137 clk->set_mode = twd_set_mode; 138 clk->set_mode = twd_set_mode;
138 clk->set_next_event = twd_set_next_event; 139 clk->set_next_event = twd_set_next_event;
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index c23501842b98..5b7c541a4c63 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -62,7 +62,7 @@ asmlinkage int sys_vfork(struct pt_regs *regs)
62/* sys_execve() executes a new program. 62/* sys_execve() executes a new program.
63 * This is called indirectly via a small wrapper 63 * This is called indirectly via a small wrapper
64 */ 64 */
65asmlinkage int sys_execve(char __user *filenamei, char __user * __user *argv, 65asmlinkage int sys_execve(const char __user *filenamei, char __user * __user *argv,
66 char __user * __user *envp, struct pt_regs *regs) 66 char __user * __user *envp, struct pt_regs *regs)
67{ 67{
68 int error; 68 int error;
@@ -84,7 +84,7 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[])
84 int ret; 84 int ret;
85 85
86 memset(&regs, 0, sizeof(struct pt_regs)); 86 memset(&regs, 0, sizeof(struct pt_regs));
87 ret = do_execve((char *)filename, (char __user * __user *)argv, 87 ret = do_execve(filename, (char __user * __user *)argv,
88 (char __user * __user *)envp, &regs); 88 (char __user * __user *)envp, &regs);
89 if (ret < 0) 89 if (ret < 0)
90 goto out; 90 goto out;
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 33ff678e32f2..4ad8da15ef2b 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -141,7 +141,7 @@ static long cp_oldabi_stat64(struct kstat *stat,
141 return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; 141 return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
142} 142}
143 143
144asmlinkage long sys_oabi_stat64(char __user * filename, 144asmlinkage long sys_oabi_stat64(const char __user * filename,
145 struct oldabi_stat64 __user * statbuf) 145 struct oldabi_stat64 __user * statbuf)
146{ 146{
147 struct kstat stat; 147 struct kstat stat;
@@ -151,7 +151,7 @@ asmlinkage long sys_oabi_stat64(char __user * filename,
151 return error; 151 return error;
152} 152}
153 153
154asmlinkage long sys_oabi_lstat64(char __user * filename, 154asmlinkage long sys_oabi_lstat64(const char __user * filename,
155 struct oldabi_stat64 __user * statbuf) 155 struct oldabi_stat64 __user * statbuf)
156{ 156{
157 struct kstat stat; 157 struct kstat stat;
@@ -172,7 +172,7 @@ asmlinkage long sys_oabi_fstat64(unsigned long fd,
172} 172}
173 173
174asmlinkage long sys_oabi_fstatat64(int dfd, 174asmlinkage long sys_oabi_fstatat64(int dfd,
175 char __user *filename, 175 const char __user *filename,
176 struct oldabi_stat64 __user *statbuf, 176 struct oldabi_stat64 __user *statbuf,
177 int flag) 177 int flag)
178{ 178{
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index e50303868f1b..26685c2f7a49 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -13,38 +13,35 @@
13#include <linux/ioport.h> 13#include <linux/ioport.h>
14#include <linux/genalloc.h> 14#include <linux/genalloc.h>
15#include <linux/string.h> /* memcpy */ 15#include <linux/string.h> /* memcpy */
16#include <asm/page.h> /* PAGE_SHIFT */
17#include <asm/cputype.h> 16#include <asm/cputype.h>
18#include <asm/mach/map.h> 17#include <asm/mach/map.h>
19#include <mach/memory.h> 18#include <mach/memory.h>
20#include "tcm.h" 19#include "tcm.h"
21 20
22/* Scream and warn about misuse */
23#if !defined(ITCM_OFFSET) || !defined(ITCM_END) || \
24 !defined(DTCM_OFFSET) || !defined(DTCM_END)
25#error "TCM support selected but offsets not defined!"
26#endif
27
28static struct gen_pool *tcm_pool; 21static struct gen_pool *tcm_pool;
29 22
30/* TCM section definitions from the linker */ 23/* TCM section definitions from the linker */
31extern char __itcm_start, __sitcm_text, __eitcm_text; 24extern char __itcm_start, __sitcm_text, __eitcm_text;
32extern char __dtcm_start, __sdtcm_data, __edtcm_data; 25extern char __dtcm_start, __sdtcm_data, __edtcm_data;
33 26
27/* These will be increased as we run */
28u32 dtcm_end = DTCM_OFFSET;
29u32 itcm_end = ITCM_OFFSET;
30
34/* 31/*
35 * TCM memory resources 32 * TCM memory resources
36 */ 33 */
37static struct resource dtcm_res = { 34static struct resource dtcm_res = {
38 .name = "DTCM RAM", 35 .name = "DTCM RAM",
39 .start = DTCM_OFFSET, 36 .start = DTCM_OFFSET,
40 .end = DTCM_END, 37 .end = DTCM_OFFSET,
41 .flags = IORESOURCE_MEM 38 .flags = IORESOURCE_MEM
42}; 39};
43 40
44static struct resource itcm_res = { 41static struct resource itcm_res = {
45 .name = "ITCM RAM", 42 .name = "ITCM RAM",
46 .start = ITCM_OFFSET, 43 .start = ITCM_OFFSET,
47 .end = ITCM_END, 44 .end = ITCM_OFFSET,
48 .flags = IORESOURCE_MEM 45 .flags = IORESOURCE_MEM
49}; 46};
50 47
@@ -52,8 +49,8 @@ static struct map_desc dtcm_iomap[] __initdata = {
52 { 49 {
53 .virtual = DTCM_OFFSET, 50 .virtual = DTCM_OFFSET,
54 .pfn = __phys_to_pfn(DTCM_OFFSET), 51 .pfn = __phys_to_pfn(DTCM_OFFSET),
55 .length = (DTCM_END - DTCM_OFFSET + 1), 52 .length = 0,
56 .type = MT_UNCACHED 53 .type = MT_MEMORY_DTCM
57 } 54 }
58}; 55};
59 56
@@ -61,8 +58,8 @@ static struct map_desc itcm_iomap[] __initdata = {
61 { 58 {
62 .virtual = ITCM_OFFSET, 59 .virtual = ITCM_OFFSET,
63 .pfn = __phys_to_pfn(ITCM_OFFSET), 60 .pfn = __phys_to_pfn(ITCM_OFFSET),
64 .length = (ITCM_END - ITCM_OFFSET + 1), 61 .length = 0,
65 .type = MT_UNCACHED 62 .type = MT_MEMORY_ITCM
66 } 63 }
67}; 64};
68 65
@@ -93,14 +90,24 @@ void tcm_free(void *addr, size_t len)
93} 90}
94EXPORT_SYMBOL(tcm_free); 91EXPORT_SYMBOL(tcm_free);
95 92
96 93static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
97static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size) 94 u32 *offset)
98{ 95{
99 const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128, 96 const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128,
100 256, 512, 1024, -1, -1, -1, -1 }; 97 256, 512, 1024, -1, -1, -1, -1 };
101 u32 tcm_region; 98 u32 tcm_region;
102 int tcm_size; 99 int tcm_size;
103 100
101 /*
102 * If there is more than one TCM bank of this type,
103 * select the TCM bank to operate on in the TCM selection
104 * register.
105 */
106 if (banks > 1)
107 asm("mcr p15, 0, %0, c9, c2, 0"
108 : /* No output operands */
109 : "r" (bank));
110
104 /* Read the special TCM region register c9, 0 */ 111 /* Read the special TCM region register c9, 0 */
105 if (!type) 112 if (!type)
106 asm("mrc p15, 0, %0, c9, c1, 0" 113 asm("mrc p15, 0, %0, c9, c1, 0"
@@ -111,26 +118,24 @@ static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size)
111 118
112 tcm_size = tcm_sizes[(tcm_region >> 2) & 0x0f]; 119 tcm_size = tcm_sizes[(tcm_region >> 2) & 0x0f];
113 if (tcm_size < 0) { 120 if (tcm_size < 0) {
114 pr_err("CPU: %sTCM of unknown size!\n", 121 pr_err("CPU: %sTCM%d of unknown size\n",
115 type ? "I" : "D"); 122 type ? "I" : "D", bank);
123 return -EINVAL;
124 } else if (tcm_size > 32) {
125 pr_err("CPU: %sTCM%d larger than 32k found\n",
126 type ? "I" : "D", bank);
127 return -EINVAL;
116 } else { 128 } else {
117 pr_info("CPU: found %sTCM %dk @ %08x, %senabled\n", 129 pr_info("CPU: found %sTCM%d %dk @ %08x, %senabled\n",
118 type ? "I" : "D", 130 type ? "I" : "D",
131 bank,
119 tcm_size, 132 tcm_size,
120 (tcm_region & 0xfffff000U), 133 (tcm_region & 0xfffff000U),
121 (tcm_region & 1) ? "" : "not "); 134 (tcm_region & 1) ? "" : "not ");
122 } 135 }
123 136
124 if (tcm_size != expected_size) {
125 pr_crit("CPU: %sTCM was detected %dk but expected %dk!\n",
126 type ? "I" : "D",
127 tcm_size,
128 expected_size);
129 /* Adjust to the expected size? what can we do... */
130 }
131
132 /* Force move the TCM bank to where we want it, enable */ 137 /* Force move the TCM bank to where we want it, enable */
133 tcm_region = offset | (tcm_region & 0x00000ffeU) | 1; 138 tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1;
134 139
135 if (!type) 140 if (!type)
136 asm("mcr p15, 0, %0, c9, c1, 0" 141 asm("mcr p15, 0, %0, c9, c1, 0"
@@ -141,10 +146,15 @@ static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size)
141 : /* No output operands */ 146 : /* No output operands */
142 : "r" (tcm_region)); 147 : "r" (tcm_region));
143 148
144 pr_debug("CPU: moved %sTCM %dk to %08x, enabled\n", 149 /* Increase offset */
145 type ? "I" : "D", 150 *offset += (tcm_size << 10);
146 tcm_size, 151
147 (tcm_region & 0xfffff000U)); 152 pr_info("CPU: moved %sTCM%d %dk to %08x, enabled\n",
153 type ? "I" : "D",
154 bank,
155 tcm_size,
156 (tcm_region & 0xfffff000U));
157 return 0;
148} 158}
149 159
150/* 160/*
@@ -153,34 +163,52 @@ static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size)
153void __init tcm_init(void) 163void __init tcm_init(void)
154{ 164{
155 u32 tcm_status = read_cpuid_tcmstatus(); 165 u32 tcm_status = read_cpuid_tcmstatus();
166 u8 dtcm_banks = (tcm_status >> 16) & 0x03;
167 u8 itcm_banks = (tcm_status & 0x03);
156 char *start; 168 char *start;
157 char *end; 169 char *end;
158 char *ram; 170 char *ram;
171 int ret;
172 int i;
159 173
160 /* Setup DTCM if present */ 174 /* Setup DTCM if present */
161 if (tcm_status & (1 << 16)) { 175 if (dtcm_banks > 0) {
162 setup_tcm_bank(0, DTCM_OFFSET, 176 for (i = 0; i < dtcm_banks; i++) {
163 (DTCM_END - DTCM_OFFSET + 1) >> 10); 177 ret = setup_tcm_bank(0, i, dtcm_banks, &dtcm_end);
178 if (ret)
179 return;
180 }
181 dtcm_res.end = dtcm_end - 1;
164 request_resource(&iomem_resource, &dtcm_res); 182 request_resource(&iomem_resource, &dtcm_res);
183 dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET;
165 iotable_init(dtcm_iomap, 1); 184 iotable_init(dtcm_iomap, 1);
166 /* Copy data from RAM to DTCM */ 185 /* Copy data from RAM to DTCM */
167 start = &__sdtcm_data; 186 start = &__sdtcm_data;
168 end = &__edtcm_data; 187 end = &__edtcm_data;
169 ram = &__dtcm_start; 188 ram = &__dtcm_start;
189 /* This means you compiled more code than fits into DTCM */
190 BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET));
170 memcpy(start, ram, (end-start)); 191 memcpy(start, ram, (end-start));
171 pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); 192 pr_debug("CPU DTCM: copied data from %p - %p\n", start, end);
172 } 193 }
173 194
174 /* Setup ITCM if present */ 195 /* Setup ITCM if present */
175 if (tcm_status & 1) { 196 if (itcm_banks > 0) {
176 setup_tcm_bank(1, ITCM_OFFSET, 197 for (i = 0; i < itcm_banks; i++) {
177 (ITCM_END - ITCM_OFFSET + 1) >> 10); 198 ret = setup_tcm_bank(1, i, itcm_banks, &itcm_end);
199 if (ret)
200 return;
201 }
202 itcm_res.end = itcm_end - 1;
178 request_resource(&iomem_resource, &itcm_res); 203 request_resource(&iomem_resource, &itcm_res);
204 itcm_iomap[0].length = itcm_end - ITCM_OFFSET;
179 iotable_init(itcm_iomap, 1); 205 iotable_init(itcm_iomap, 1);
180 /* Copy code from RAM to ITCM */ 206 /* Copy code from RAM to ITCM */
181 start = &__sitcm_text; 207 start = &__sitcm_text;
182 end = &__eitcm_text; 208 end = &__eitcm_text;
183 ram = &__itcm_start; 209 ram = &__itcm_start;
210 /* This means you compiled more code than fits into ITCM */
211 BUG_ON((end - start) > (itcm_end - ITCM_OFFSET));
184 memcpy(start, ram, (end-start)); 212 memcpy(start, ram, (end-start));
185 pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); 213 pr_debug("CPU ITCM: copied code from %p - %p\n", start, end);
186 } 214 }
@@ -208,10 +236,10 @@ static int __init setup_tcm_pool(void)
208 pr_debug("Setting up TCM memory pool\n"); 236 pr_debug("Setting up TCM memory pool\n");
209 237
210 /* Add the rest of DTCM to the TCM pool */ 238 /* Add the rest of DTCM to the TCM pool */
211 if (tcm_status & (1 << 16)) { 239 if (tcm_status & (0x03 << 16)) {
212 if (dtcm_pool_start < DTCM_END) { 240 if (dtcm_pool_start < dtcm_end) {
213 ret = gen_pool_add(tcm_pool, dtcm_pool_start, 241 ret = gen_pool_add(tcm_pool, dtcm_pool_start,
214 DTCM_END - dtcm_pool_start + 1, -1); 242 dtcm_end - dtcm_pool_start, -1);
215 if (ret) { 243 if (ret) {
216 pr_err("CPU DTCM: could not add DTCM " \ 244 pr_err("CPU DTCM: could not add DTCM " \
217 "remainder to pool!\n"); 245 "remainder to pool!\n");
@@ -219,16 +247,16 @@ static int __init setup_tcm_pool(void)
219 } 247 }
220 pr_debug("CPU DTCM: Added %08x bytes @ %08x to " \ 248 pr_debug("CPU DTCM: Added %08x bytes @ %08x to " \
221 "the TCM memory pool\n", 249 "the TCM memory pool\n",
222 DTCM_END - dtcm_pool_start + 1, 250 dtcm_end - dtcm_pool_start,
223 dtcm_pool_start); 251 dtcm_pool_start);
224 } 252 }
225 } 253 }
226 254
227 /* Add the rest of ITCM to the TCM pool */ 255 /* Add the rest of ITCM to the TCM pool */
228 if (tcm_status & 1) { 256 if (tcm_status & 0x03) {
229 if (itcm_pool_start < ITCM_END) { 257 if (itcm_pool_start < itcm_end) {
230 ret = gen_pool_add(tcm_pool, itcm_pool_start, 258 ret = gen_pool_add(tcm_pool, itcm_pool_start,
231 ITCM_END - itcm_pool_start + 1, -1); 259 itcm_end - itcm_pool_start, -1);
232 if (ret) { 260 if (ret) {
233 pr_err("CPU ITCM: could not add ITCM " \ 261 pr_err("CPU ITCM: could not add ITCM " \
234 "remainder to pool!\n"); 262 "remainder to pool!\n");
@@ -236,7 +264,7 @@ static int __init setup_tcm_pool(void)
236 } 264 }
237 pr_debug("CPU ITCM: Added %08x bytes @ %08x to " \ 265 pr_debug("CPU ITCM: Added %08x bytes @ %08x to " \
238 "the TCM memory pool\n", 266 "the TCM memory pool\n",
239 ITCM_END - itcm_pool_start + 1, 267 itcm_end - itcm_pool_start,
240 itcm_pool_start); 268 itcm_pool_start);
241 } 269 }
242 } 270 }
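
For context (not part of this diff), the driver-side view of the pool set up above. tcm_alloc() is assumed here to be the allocation counterpart of the tcm_free() visible in this file (both coming from asm/tcm.h); the driver and the size are hypothetical:

#include <asm/tcm.h>

static u8 *fast_buf;

static int __init mydrv_claim_tcm(void)
{
        /* carve 64 bytes out of whatever DTCM/ITCM remainder went into the pool */
        fast_buf = tcm_alloc(64);       /* assumed counterpart of tcm_free() */
        if (!fast_buf)
                return -ENOMEM;
        return 0;
}

static void __exit mydrv_release_tcm(void)
{
        tcm_free(fast_buf, 64);
}
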
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 1621e5327b2a..cda78d59aa31 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -30,6 +30,7 @@
30#include <asm/unistd.h> 30#include <asm/unistd.h>
31#include <asm/traps.h> 31#include <asm/traps.h>
32#include <asm/unwind.h> 32#include <asm/unwind.h>
33#include <asm/tls.h>
33 34
34#include "ptrace.h" 35#include "ptrace.h"
35#include "signal.h" 36#include "signal.h"
@@ -518,17 +519,20 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
518 519
519 case NR(set_tls): 520 case NR(set_tls):
520 thread->tp_value = regs->ARM_r0; 521 thread->tp_value = regs->ARM_r0;
521#if defined(CONFIG_HAS_TLS_REG) 522 if (tls_emu)
522 asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) ); 523 return 0;
523#elif !defined(CONFIG_TLS_REG_EMUL) 524 if (has_tls_reg) {
524 /* 525 asm ("mcr p15, 0, %0, c13, c0, 3"
525 * User space must never try to access this directly. 526 : : "r" (regs->ARM_r0));
526 * Expect your app to break eventually if you do so. 527 } else {
527 * The user helper at 0xffff0fe0 must be used instead. 528 /*
528 * (see entry-armv.S for details) 529 * User space must never try to access this directly.
529 */ 530 * Expect your app to break eventually if you do so.
530 *((unsigned int *)0xffff0ff0) = regs->ARM_r0; 531 * The user helper at 0xffff0fe0 must be used instead.
531#endif 532 * (see entry-armv.S for details)
533 */
534 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
535 }
532 return 0; 536 return 0;
533 537
534#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG 538#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
@@ -743,6 +747,16 @@ void __init trap_init(void)
743 return; 747 return;
744} 748}
745 749
750static void __init kuser_get_tls_init(unsigned long vectors)
751{
752 /*
753 * vectors + 0xfe0 = __kuser_get_tls
754 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
755 */
756 if (tls_emu || has_tls_reg)
757 memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
758}
759
746void __init early_trap_init(void) 760void __init early_trap_init(void)
747{ 761{
748 unsigned long vectors = CONFIG_VECTORS_BASE; 762 unsigned long vectors = CONFIG_VECTORS_BASE;
@@ -761,6 +775,11 @@ void __init early_trap_init(void)
761 memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); 775 memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
762 776
763 /* 777 /*
778 * Do processor specific fixups for the kuser helpers
779 */
780 kuser_get_tls_init(vectors);
781
782 /*
764 * Copy signal return handlers into the vector page, and 783 * Copy signal return handlers into the vector page, and
765 * set sigreturn to be a pointer to these. 784 * set sigreturn to be a pointer to these.
766 */ 785 */
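
For context (not part of this diff): with the fixup above, the helper at 0xffff0fe0 works on every CPU variant — it is either patched to read the hardware TLS register or left reading the word at 0xffff0ff0 — so user space keeps calling the same entry point. A sketch of the usual calling pattern from user code:

typedef void * (kuser_get_tls_fn)(void);
#define __kuser_get_tls (*(kuser_get_tls_fn *)0xffff0fe0)

void *read_tls(void)
{
        return __kuser_get_tls();       /* TLS pointer comes back in r0 */
}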