Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile        |    4
-rw-r--r--  arch/arm/kernel/entry-armv.S    |   37
-rw-r--r--  arch/arm/kernel/entry-common.S  |   11
-rw-r--r--  arch/arm/kernel/entry-header.S  |   16
-rw-r--r--  arch/arm/kernel/head-common.S   |    4
-rw-r--r--  arch/arm/kernel/init_task.c     |    5
-rw-r--r--  arch/arm/kernel/kprobes.c       |   19
-rw-r--r--  arch/arm/kernel/setup.c         |    2
-rw-r--r--  arch/arm/kernel/smp.c           |   23
-rw-r--r--  arch/arm/kernel/smp_twd.c       |    4
-rw-r--r--  arch/arm/kernel/sys_arm.c       |    1
-rw-r--r--  arch/arm/kernel/tcm.c           |  246
-rw-r--r--  arch/arm/kernel/tcm.h           |   17
-rw-r--r--  arch/arm/kernel/time.c          |    1
-rw-r--r--  arch/arm/kernel/traps.c         |   81
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S   |   57
16 files changed, 440 insertions(+), 88 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 3213c9382b17..79087dd6d869 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -2,7 +2,8 @@
 # Makefile for the linux kernel.
 #
 
-AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
+AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 
 ifdef CONFIG_DYNAMIC_FTRACE
 CFLAGS_REMOVE_ftrace.o = -pg
@@ -34,6 +35,7 @@ obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
 obj-$(CONFIG_ARM_THUMBEE)	+= thumbee.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_ARM_UNWIND)	+= unwind.o
+obj-$(CONFIG_HAVE_TCM)		+= tcm.o
 
 obj-$(CONFIG_CRUNCH)		+= crunch.o crunch-bits.o
 AFLAGS_crunch-bits.o		:= -Wa,-mcpu=ep9312
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3d727a8a23bc..322410be573c 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -272,7 +272,15 @@ __und_svc:
 	@
 	@  r0 - instruction
 	@
+#ifndef CONFIG_THUMB2_KERNEL
 	ldr	r0, [r2, #-4]
+#else
+	ldrh	r0, [r2, #-2]			@ Thumb instruction at LR - 2
+	and	r9, r0, #0xf800
+	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
+	ldrhhs	r9, [r2]			@ bottom 16 bits
+	orrhs	r0, r9, r0, lsl #16
+#endif
 	adr	r9, BSYM(1f)
 	bl	call_fpe
 
@@ -303,22 +311,16 @@ __pabt_svc:
 	tst	r3, #PSR_I_BIT
 	biceq	r9, r9, #PSR_I_BIT
 
-	@
-	@ set args, then call main handler
-	@
-	@  r0 - address of faulting instruction
-	@  r1 - pointer to registers on stack
-	@
-#ifdef MULTI_PABORT
 	mov	r0, r2			@ pass address of aborted instruction.
+#ifdef MULTI_PABORT
 	ldr	r4, .LCprocfns
 	mov	lr, pc
 	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
 #else
-	CPU_PABORT_HANDLER(r0, r2)
+	bl	CPU_PABORT_HANDLER
 #endif
 	msr	cpsr_c, r9			@ Maybe enable interrupts
-	mov	r1, sp				@ regs
+	mov	r2, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler
 
 	@
@@ -678,7 +680,9 @@ ENTRY(fp_enter)
 	.word	no_fp
 	.previous
 
-no_fp:	mov	pc, lr
+ENTRY(no_fp)
+	mov	pc, lr
+ENDPROC(no_fp)
 
 __und_usr_unknown:
 	enable_irq
@@ -691,16 +695,16 @@ ENDPROC(__und_usr_unknown)
 __pabt_usr:
 	usr_entry
 
-#ifdef MULTI_PABORT
 	mov	r0, r2			@ pass address of aborted instruction.
+#ifdef MULTI_PABORT
 	ldr	r4, .LCprocfns
 	mov	lr, pc
 	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
 #else
-	CPU_PABORT_HANDLER(r0, r2)
+	bl	CPU_PABORT_HANDLER
 #endif
 	enable_irq				@ Enable interrupts
-	mov	r1, sp				@ regs
+	mov	r2, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler
 	UNWIND(.fnend		)
 	/* fall through */
@@ -734,13 +738,6 @@ ENTRY(__switch_to)
 #ifdef CONFIG_MMU
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
-#if __LINUX_ARM_ARCH__ >= 6
-#ifdef CONFIG_CPU_32v6K
-	clrex
-#else
-	strex	r5, r4, [ip]			@ Clear exclusive monitor
-#endif
-#endif
 #if defined(CONFIG_HAS_TLS_REG)
 	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
 #elif !defined(CONFIG_TLS_REG_EMUL)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 807cfebb0f44..f0fe95b7085d 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -126,7 +126,7 @@ ENTRY(__gnu_mcount_nc)
 	cmp	r0, r2
 	bne	gnu_trace
 	ldmia	sp!, {r0-r3, ip, lr}
-	bx	ip
+	mov	pc, ip
 
 gnu_trace:
 	ldr	r1, [sp, #20]			@ lr of instrumented routine
@@ -135,7 +135,7 @@ gnu_trace:
 	mov	lr, pc
 	mov	pc, r2
 	ldmia	sp!, {r0-r3, ip, lr}
-	bx	ip
+	mov	pc, ip
 
 ENTRY(mcount)
 	stmdb	sp!, {r0-r3, lr}
@@ -425,13 +425,6 @@ sys_mmap2:
 #endif
 ENDPROC(sys_mmap2)
 
-ENTRY(pabort_ifar)
-	mrc	p15, 0, r0, cr6, cr0, 2
-ENTRY(pabort_noifar)
-	mov	pc, lr
-ENDPROC(pabort_ifar)
-ENDPROC(pabort_noifar)
-
 #ifdef CONFIG_OABI_COMPAT
 
 /*
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index a4eaf4f920c5..ac34c0d9384b 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -76,13 +76,27 @@
 #ifndef CONFIG_THUMB2_KERNEL
 	.macro	svc_exit, rpsr
 	msr	spsr_cxsf, \rpsr
+#if defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#elif defined (CONFIG_CPU_V6)
+	ldr	r0, [sp]
+	strex	r1, r2, [sp]			@ clear the exclusive monitor
+	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
+#else
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#endif
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
+#if defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
+#elif defined (CONFIG_CPU_V6)
+	strex	r1, r2, [sp]			@ clear the exclusive monitor
+#endif
 	.if	\fast
 	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
 	.else
@@ -98,6 +112,7 @@
 	.endm
 #else	/* CONFIG_THUMB2_KERNEL */
 	.macro	svc_exit, rpsr
+	clrex					@ clear the exclusive monitor
 	ldr	r0, [sp, #S_SP]			@ top of the stack
 	ldr	r1, [sp, #S_PC]			@ return address
 	tst	r0, #4				@ orig stack 8-byte aligned?
@@ -110,6 +125,7 @@
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
+	clrex					@ clear the exclusive monitor
 	mov	r2, sp
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 93ad576b2d74..885a7214418d 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -13,6 +13,7 @@
 
 #define ATAG_CORE 0x54410001
 #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
+#define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2)
 
 	.align	2
 	.type	__switch_data, %object
@@ -251,7 +252,8 @@ __vet_atags:
 	bne	1f
 
 	ldr	r5, [r2, #0]			@ is first tag ATAG_CORE?
-	subs	r5, r5, #ATAG_CORE_SIZE
+	cmp	r5, #ATAG_CORE_SIZE
+	cmpne	r5, #ATAG_CORE_SIZE_EMPTY
 	bne	1f
 	ldr	r5, [r2, #4]
 	ldr	r6, =ATAG_CORE
diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c
index 3f470866bb89..e7cbb50dc356 100644
--- a/arch/arm/kernel/init_task.c
+++ b/arch/arm/kernel/init_task.c
@@ -24,9 +24,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  *
  * The things we do for performance..
  */
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-		{ INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+	{ INIT_THREAD_INFO(init_task) };
 
 /*
  * Initial task structure.
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index f692efddd449..60c62c377fa9 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -22,6 +22,7 @@
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
 #include <linux/module.h>
+#include <linux/stop_machine.h>
 #include <linux/stringify.h>
 #include <asm/traps.h>
 #include <asm/cacheflush.h>
@@ -83,10 +84,24 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 	flush_insns(p->addr, 1);
 }
 
+/*
+ * The actual disarming is done here on each CPU and synchronized using
+ * stop_machine. This synchronization is necessary on SMP to avoid removing
+ * a probe between the moment the 'Undefined Instruction' exception is raised
+ * and the moment the exception handler reads the faulting instruction from
+ * memory.
+ */
+int __kprobes __arch_disarm_kprobe(void *p)
+{
+	struct kprobe *kp = p;
+	*kp->addr = kp->opcode;
+	flush_insns(kp->addr, 1);
+	return 0;
+}
+
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	*p->addr = p->opcode;
-	flush_insns(p->addr, 1);
+	stop_machine(__arch_disarm_kprobe, p, &cpu_online_map);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d4d4f77c91b2..c6c57b640b6b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -45,6 +45,7 @@
 
 #include "compat.h"
 #include "atags.h"
+#include "tcm.h"
 
 #ifndef MEM_SIZE
 #define MEM_SIZE	(16*1024*1024)
@@ -749,6 +750,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 	cpu_init();
+	tcm_init();
 
 	/*
 	 * Set up various architecture-specific pointers
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index de885fd256c5..57162af53dc9 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -36,6 +36,7 @@
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
+#include <asm/smp_plat.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -153,7 +154,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
-int __cpuexit __cpu_disable(void)
+int __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct task_struct *p;
@@ -189,7 +190,7 @@ int __cpuexit __cpu_disable(void)
 	read_lock(&tasklist_lock);
 	for_each_process(p) {
 		if (p->mm)
-			cpu_clear(cpu, p->mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
 	}
 	read_unlock(&tasklist_lock);
 
@@ -200,7 +201,7 @@ int __cpuexit __cpu_disable(void)
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
-void __cpuexit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
 	if (!platform_cpu_kill(cpu))
 		printk("CPU%u: unable to kill\n", cpu);
@@ -214,7 +215,7 @@ void __cpuexit __cpu_die(unsigned int cpu)
  * of the other hotplug-cpu capable cores, so presumably coming
  * out of idle fixes this.
  */
-void __cpuexit cpu_die(void)
+void __ref cpu_die(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -257,7 +258,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	atomic_inc(&mm->mm_users);
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
-	cpu_set(cpu, mm->cpu_vm_mask);
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	cpu_switch_mm(mm->pgd, mm);
 	enter_lazy_tlb(mm, current);
 	local_flush_tlb_all();
@@ -586,12 +587,6 @@ struct tlb_args {
 	unsigned long ta_end;
 };
 
-/* all SMP configurations have the extended CPUID registers */
-static inline int tlb_ops_need_broadcast(void)
-{
-	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
-}
-
 static inline void ipi_flush_tlb_all(void *ignored)
 {
 	local_flush_tlb_all();
@@ -643,7 +638,7 @@ void flush_tlb_all(void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	if (tlb_ops_need_broadcast())
-		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
 	else
 		local_flush_tlb_mm(mm);
 }
@@ -654,7 +649,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 		struct tlb_args ta;
 		ta.ta_vma = vma;
 		ta.ta_start = uaddr;
-		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
 	} else
 		local_flush_tlb_page(vma, uaddr);
 }
@@ -677,7 +672,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 		ta.ta_vma = vma;
 		ta.ta_start = start;
 		ta.ta_end = end;
-		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
 	} else
 		local_flush_tlb_range(vma, start, end);
 }
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index d8c88c633c6f..a73a34dccf2a 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -166,10 +166,12 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	clockevents_register_device(clk);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 /*
  * take a local timer down
  */
-void __cpuexit twd_timer_stop(void)
+void twd_timer_stop(void)
 {
 	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
 }
+#endif
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index b3ec641b5cf8..78ecaac65206 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -25,7 +25,6 @@
 #include <linux/mman.h>
 #include <linux/fs.h>
 #include <linux/file.h>
-#include <linux/utsname.h>
 #include <linux/ipc.h>
 #include <linux/uaccess.h>
 
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
new file mode 100644
index 000000000000..e50303868f1b
--- /dev/null
+++ b/arch/arm/kernel/tcm.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * TCM memory handling for ARM systems
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/ioport.h>
+#include <linux/genalloc.h>
+#include <linux/string.h> /* memcpy */
+#include <asm/page.h> /* PAGE_SHIFT */
+#include <asm/cputype.h>
+#include <asm/mach/map.h>
+#include <mach/memory.h>
+#include "tcm.h"
+
+/* Scream and warn about misuse */
+#if !defined(ITCM_OFFSET) || !defined(ITCM_END) || \
+    !defined(DTCM_OFFSET) || !defined(DTCM_END)
+#error "TCM support selected but offsets not defined!"
+#endif
+
+static struct gen_pool *tcm_pool;
+
+/* TCM section definitions from the linker */
+extern char __itcm_start, __sitcm_text, __eitcm_text;
+extern char __dtcm_start, __sdtcm_data, __edtcm_data;
+
+/*
+ * TCM memory resources
+ */
+static struct resource dtcm_res = {
+	.name = "DTCM RAM",
+	.start = DTCM_OFFSET,
+	.end = DTCM_END,
+	.flags = IORESOURCE_MEM
+};
+
+static struct resource itcm_res = {
+	.name = "ITCM RAM",
+	.start = ITCM_OFFSET,
+	.end = ITCM_END,
+	.flags = IORESOURCE_MEM
+};
+
+static struct map_desc dtcm_iomap[] __initdata = {
+	{
+		.virtual	= DTCM_OFFSET,
+		.pfn		= __phys_to_pfn(DTCM_OFFSET),
+		.length		= (DTCM_END - DTCM_OFFSET + 1),
+		.type		= MT_UNCACHED
+	}
+};
+
+static struct map_desc itcm_iomap[] __initdata = {
+	{
+		.virtual	= ITCM_OFFSET,
+		.pfn		= __phys_to_pfn(ITCM_OFFSET),
+		.length		= (ITCM_END - ITCM_OFFSET + 1),
+		.type		= MT_UNCACHED
+	}
+};
+
+/*
+ * Allocate a chunk of TCM memory
+ */
+void *tcm_alloc(size_t len)
+{
+	unsigned long vaddr;
+
+	if (!tcm_pool)
+		return NULL;
+
+	vaddr = gen_pool_alloc(tcm_pool, len);
+	if (!vaddr)
+		return NULL;
+
+	return (void *) vaddr;
+}
+EXPORT_SYMBOL(tcm_alloc);
+
+/*
+ * Free a chunk of TCM memory
+ */
+void tcm_free(void *addr, size_t len)
+{
+	gen_pool_free(tcm_pool, (unsigned long) addr, len);
+}
+EXPORT_SYMBOL(tcm_free);
+
+
+static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size)
+{
+	const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128,
+				    256, 512, 1024, -1, -1, -1, -1 };
+	u32 tcm_region;
+	int tcm_size;
+
+	/* Read the special TCM region register c9, 0 */
+	if (!type)
+		asm("mrc	p15, 0, %0, c9, c1, 0"
+		    : "=r" (tcm_region));
+	else
+		asm("mrc	p15, 0, %0, c9, c1, 1"
+		    : "=r" (tcm_region));
+
+	tcm_size = tcm_sizes[(tcm_region >> 2) & 0x0f];
+	if (tcm_size < 0) {
+		pr_err("CPU: %sTCM of unknown size!\n",
+			type ? "I" : "D");
+	} else {
+		pr_info("CPU: found %sTCM %dk @ %08x, %senabled\n",
+			type ? "I" : "D",
+			tcm_size,
+			(tcm_region & 0xfffff000U),
+			(tcm_region & 1) ? "" : "not ");
+	}
+
+	if (tcm_size != expected_size) {
+		pr_crit("CPU: %sTCM was detected %dk but expected %dk!\n",
+			type ? "I" : "D",
+			tcm_size,
+			expected_size);
+		/* Adjust to the expected size? what can we do... */
+	}
+
+	/* Force move the TCM bank to where we want it, enable */
+	tcm_region = offset | (tcm_region & 0x00000ffeU) | 1;
+
+	if (!type)
+		asm("mcr	p15, 0, %0, c9, c1, 0"
+		    : /* No output operands */
+		    : "r" (tcm_region));
+	else
+		asm("mcr	p15, 0, %0, c9, c1, 1"
+		    : /* No output operands */
+		    : "r" (tcm_region));
+
+	pr_debug("CPU: moved %sTCM %dk to %08x, enabled\n",
+		 type ? "I" : "D",
+		 tcm_size,
+		 (tcm_region & 0xfffff000U));
+}
+
+/*
+ * This initializes the TCM memory
+ */
+void __init tcm_init(void)
+{
+	u32 tcm_status = read_cpuid_tcmstatus();
+	char *start;
+	char *end;
+	char *ram;
+
+	/* Setup DTCM if present */
+	if (tcm_status & (1 << 16)) {
+		setup_tcm_bank(0, DTCM_OFFSET,
+			       (DTCM_END - DTCM_OFFSET + 1) >> 10);
+		request_resource(&iomem_resource, &dtcm_res);
+		iotable_init(dtcm_iomap, 1);
+		/* Copy data from RAM to DTCM */
+		start = &__sdtcm_data;
+		end   = &__edtcm_data;
+		ram   = &__dtcm_start;
+		memcpy(start, ram, (end-start));
+		pr_debug("CPU DTCM: copied data from %p - %p\n", start, end);
+	}
+
+	/* Setup ITCM if present */
+	if (tcm_status & 1) {
+		setup_tcm_bank(1, ITCM_OFFSET,
+			       (ITCM_END - ITCM_OFFSET + 1) >> 10);
+		request_resource(&iomem_resource, &itcm_res);
+		iotable_init(itcm_iomap, 1);
+		/* Copy code from RAM to ITCM */
+		start = &__sitcm_text;
+		end   = &__eitcm_text;
+		ram   = &__itcm_start;
+		memcpy(start, ram, (end-start));
+		pr_debug("CPU ITCM: copied code from %p - %p\n", start, end);
+	}
+}
+
+/*
+ * This creates the TCM memory pool and has to be done later,
+ * during the core_initicalls, since the allocator is not yet
+ * up and running when the first initialization runs.
+ */
+static int __init setup_tcm_pool(void)
+{
+	u32 tcm_status = read_cpuid_tcmstatus();
+	u32 dtcm_pool_start = (u32) &__edtcm_data;
+	u32 itcm_pool_start = (u32) &__eitcm_text;
+	int ret;
+
+	/*
+	 * Set up malloc pool, 2^2 = 4 bytes granularity since
+	 * the TCM is sometimes just 4 KiB. NB: pages and cache
+	 * line alignments does not matter in TCM!
+	 */
+	tcm_pool = gen_pool_create(2, -1);
+
+	pr_debug("Setting up TCM memory pool\n");
+
+	/* Add the rest of DTCM to the TCM pool */
+	if (tcm_status & (1 << 16)) {
+		if (dtcm_pool_start < DTCM_END) {
+			ret = gen_pool_add(tcm_pool, dtcm_pool_start,
+					   DTCM_END - dtcm_pool_start + 1, -1);
+			if (ret) {
+				pr_err("CPU DTCM: could not add DTCM " \
+				       "remainder to pool!\n");
+				return ret;
+			}
+			pr_debug("CPU DTCM: Added %08x bytes @ %08x to " \
+				 "the TCM memory pool\n",
+				 DTCM_END - dtcm_pool_start + 1,
+				 dtcm_pool_start);
+		}
+	}
+
+	/* Add the rest of ITCM to the TCM pool */
+	if (tcm_status & 1) {
+		if (itcm_pool_start < ITCM_END) {
+			ret = gen_pool_add(tcm_pool, itcm_pool_start,
+					   ITCM_END - itcm_pool_start + 1, -1);
+			if (ret) {
+				pr_err("CPU ITCM: could not add ITCM " \
+				       "remainder to pool!\n");
+				return ret;
+			}
+			pr_debug("CPU ITCM: Added %08x bytes @ %08x to " \
+				 "the TCM memory pool\n",
+				 ITCM_END - itcm_pool_start + 1,
+				 itcm_pool_start);
+		}
+	}
+	return 0;
+}
+
+core_initcall(setup_tcm_pool);
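
[Editor's note, not part of the patch: tcm_alloc() and tcm_free() above are the only public entry points into the new pool. A minimal usage sketch follows; the <asm/tcm.h> include path and the 1 KiB size are assumptions, only the two prototypes come from this file.]

	/* Illustrative sketch only - not part of this diff. Assumes the
	 * companion <asm/tcm.h> header from the same series declares the
	 * prototypes exported by tcm.c above. */
	#include <linux/errno.h>
	#include <asm/tcm.h>

	static void *fast_buf;

	static int example_grab_tcm(void)
	{
		/* Returns NULL if no TCM pool was set up or it is exhausted. */
		fast_buf = tcm_alloc(1024);
		return fast_buf ? 0 : -ENOMEM;
	}

	static void example_release_tcm(void)
	{
		tcm_free(fast_buf, 1024);	/* length must match the allocation */
	}
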
diff --git a/arch/arm/kernel/tcm.h b/arch/arm/kernel/tcm.h
new file mode 100644
index 000000000000..8015ad434a40
--- /dev/null
+++ b/arch/arm/kernel/tcm.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * TCM memory handling for ARM systems
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#ifdef CONFIG_HAVE_TCM
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+inline void tcm_init(void)
+{
+}
+#endif
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 4cdc4a0bd02d..d38cdf2c8276 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/time.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/timex.h>
 #include <linux/errno.h>
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 57eb0f6f6005..f838f36eb702 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -45,21 +45,21 @@ static int __init user_debug_setup(char *str)
 __setup("user_debug=", user_debug_setup);
 #endif
 
-static void dump_mem(const char *str, unsigned long bottom, unsigned long top);
+static void dump_mem(const char *, const char *, unsigned long, unsigned long);
 
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
 #ifdef CONFIG_KALLSYMS
-	printk("[<%08lx>] ", where);
-	print_symbol("(%s) ", where);
-	printk("from [<%08lx>] ", from);
-	print_symbol("(%s)\n", from);
+	char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
+	sprint_symbol(sym1, where);
+	sprint_symbol(sym2, from);
+	printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
 #else
 	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
 
 	if (in_exception_text(where))
-		dump_mem("Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
+		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
 }
 
 #ifndef CONFIG_ARM_UNWIND
@@ -81,9 +81,10 @@ static int verify_stack(unsigned long sp)
 /*
  * Dump out the contents of some memory nicely...
  */
-static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+		     unsigned long top)
 {
-	unsigned long p = bottom & ~31;
+	unsigned long first;
 	mm_segment_t fs;
 	int i;
 
@@ -95,33 +96,37 @@ static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
 	fs = get_fs();
 	set_fs(KERNEL_DS);
 
-	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+	printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);
 
-	for (p = bottom & ~31; p < top;) {
-		printk("%04lx: ", p & 0xffff);
+	for (first = bottom & ~31; first < top; first += 32) {
+		unsigned long p;
+		char str[sizeof(" 12345678") * 8 + 1];
 
-		for (i = 0; i < 8; i++, p += 4) {
-			unsigned int val;
+		memset(str, ' ', sizeof(str));
+		str[sizeof(str) - 1] = '\0';
 
-			if (p < bottom || p >= top)
-				printk("         ");
-			else {
-				__get_user(val, (unsigned long *)p);
-				printk("%08x ", val);
+		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+			if (p >= bottom && p < top) {
+				unsigned long val;
+				if (__get_user(val, (unsigned long *)p) == 0)
+					sprintf(str + i * 9, " %08lx", val);
+				else
+					sprintf(str + i * 9, " ????????");
 			}
 		}
-		printk ("\n");
+		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
 	}
 
 	set_fs(fs);
 }
 
-static void dump_instr(struct pt_regs *regs)
+static void dump_instr(const char *lvl, struct pt_regs *regs)
 {
 	unsigned long addr = instruction_pointer(regs);
 	const int thumb = thumb_mode(regs);
 	const int width = thumb ? 4 : 8;
 	mm_segment_t fs;
+	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
 	int i;
 
 	/*
@@ -132,7 +137,6 @@ static void dump_instr(struct pt_regs *regs)
 	fs = get_fs();
 	set_fs(KERNEL_DS);
 
-	printk("Code: ");
 	for (i = -4; i < 1; i++) {
 		unsigned int val, bad;
 
@@ -142,13 +146,14 @@ static void dump_instr(struct pt_regs *regs)
 		bad = __get_user(val, &((u32 *)addr)[i]);
 
 		if (!bad)
-			printk(i == 0 ? "(%0*x) " : "%0*x ", width, val);
+			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
+				     width, val);
 		else {
-			printk("bad PC value.");
+			p += sprintf(p, "bad PC value");
 			break;
 		}
 	}
-	printk("\n");
+	printk("%sCode: %s\n", lvl, str);
 
 	set_fs(fs);
 }
@@ -224,18 +229,19 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
 	struct task_struct *tsk = thread->task;
 	static int die_counter;
 
-	printk("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
+	printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
 	       str, err, ++die_counter);
+	sysfs_printk_last_file();
 	print_modules();
 	__show_regs(regs);
-	printk("Process %s (pid: %d, stack limit = 0x%p)\n",
-		tsk->comm, task_pid_nr(tsk), thread + 1);
+	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
+		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
 
 	if (!user_mode(regs) || in_interrupt()) {
-		dump_mem("Stack: ", regs->ARM_sp,
+		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
 			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
 		dump_backtrace(regs, tsk);
-		dump_instr(regs);
+		dump_instr(KERN_EMERG, regs);
 	}
 }
 
@@ -250,13 +256,14 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
 
 	oops_enter();
 
-	console_verbose();
 	spin_lock_irq(&die_lock);
+	console_verbose();
 	bust_spinlocks(1);
 	__die(str, err, thread, regs);
 	bust_spinlocks(0);
 	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
+	oops_exit();
 
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
@@ -264,7 +271,6 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
 	if (panic_on_oops)
 		panic("Fatal exception");
 
-	oops_exit();
 	do_exit(SIGSEGV);
 }
 
@@ -349,7 +355,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
 			current->comm, task_pid_nr(current), pc);
-		dump_instr(regs);
+		dump_instr(KERN_INFO, regs);
 	}
 #endif
 
@@ -400,7 +406,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
 	if (user_debug & UDBG_SYSCALL) {
 		printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
 			task_pid_nr(current), current->comm, n);
-		dump_instr(regs);
+		dump_instr(KERN_ERR, regs);
 	}
 #endif
 
@@ -418,12 +424,14 @@ static int bad_syscall(int n, struct pt_regs *regs)
 static inline void
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
+	struct mm_struct *mm = current->active_mm;
 	struct vm_area_struct *vma;
 
 	if (end < start || flags)
 		return;
 
-	vma = find_vma(current->active_mm, start);
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, start);
 	if (vma && vma->vm_start < end) {
 		if (start < vma->vm_start)
 			start = vma->vm_start;
@@ -432,6 +440,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 
 		flush_cache_user_range(vma, start, end);
 	}
+	up_read(&mm->mmap_sem);
 }
 
 /*
@@ -576,7 +585,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 	if (user_debug & UDBG_SYSCALL) {
 		printk("[%d] %s: arm syscall %d\n",
 		       task_pid_nr(current), current->comm, no);
-		dump_instr(regs);
+		dump_instr("", regs);
 		if (user_mode(regs)) {
 			__show_regs(regs);
 			c_backtrace(regs->ARM_fp, processor_mode(regs));
@@ -653,7 +662,7 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
 	if (user_debug & UDBG_BADABORT) {
 		printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
 		       task_pid_nr(current), current->comm, code, instr);
-		dump_instr(KERN_ERR, regs);
+		dump_instr(KERN_ERR, regs);
 		show_pte(current->mm, addr);
 	}
 #endif
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 5cc4812c9763..aecf87dfbaec 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -199,6 +199,63 @@ SECTIONS
 	}
 	_edata_loc = __data_loc + SIZEOF(.data);
 
+#ifdef CONFIG_HAVE_TCM
+	/*
+	 * We align everything to a page boundary so we can
+	 * free it after init has commenced and TCM contents have
+	 * been copied to its destination.
+	 */
+	.tcm_start : {
+		. = ALIGN(PAGE_SIZE);
+		__tcm_start = .;
+		__itcm_start = .;
+	}
+
+	/*
+	 * Link these to the ITCM RAM
+	 * Put VMA to the TCM address and LMA to the common RAM
+	 * and we'll upload the contents from RAM to TCM and free
+	 * the used RAM after that.
+	 */
+	.text_itcm ITCM_OFFSET : AT(__itcm_start)
+	{
+		__sitcm_text = .;
+		*(.tcm.text)
+		*(.tcm.rodata)
+		. = ALIGN(4);
+		__eitcm_text = .;
+	}
+
+	/*
+	 * Reset the dot pointer, this is needed to create the
+	 * relative __dtcm_start below (to be used as extern in code).
+	 */
+	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
+
+	.dtcm_start : {
+		__dtcm_start = .;
+	}
+
+	/* TODO: add remainder of ITCM as well, that can be used for data! */
+	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
+	{
+		. = ALIGN(4);
+		__sdtcm_data = .;
+		*(.tcm.data)
+		. = ALIGN(4);
+		__edtcm_data = .;
+	}
+
+	/* Reset the dot pointer or the linker gets confused */
+	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
+
+	/* End marker for freeing TCM copy in linked object */
+	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
+		. = ALIGN(PAGE_SIZE);
+		__tcm_end = .;
+	}
+#endif
+
 	.bss : {
 		__bss_start = .;	/* BSS */
 		*(.bss)
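
[Editor's note, not part of the patch: the .text_itcm and .data_dtcm output sections above collect input sections named .tcm.text, .tcm.rodata and .tcm.data, linked at the TCM addresses (VMA) but loaded from RAM (LMA); tcm_init() copies them over at boot. A hedged sketch of how code and data land in those input sections, using raw GCC section attributes because the convenience macros live in a header outside this diff:]

	/* Illustrative sketch only - not part of this diff. The section names
	 * match the linker script above; long_call is an assumption, since
	 * ITCM is usually placed far from the normal kernel text. */
	static int counter __attribute__((section(".tcm.data")));

	static int __attribute__((long_call, section(".tcm.text"))) hot_increment(void)
	{
		return ++counter;
	}
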