author    Dan Williams <dan.j.williams@intel.com>    2009-09-08 20:55:21 -0400
committer Dan Williams <dan.j.williams@intel.com>    2009-09-08 20:55:21 -0400
commit    bbb20089a3275a19e475dbc21320c3742e3ca423 (patch)
tree      216fdc1cbef450ca688135c5b8969169482d9a48 /arch/arm/kernel
parent    3e48e656903e9fd8bc805c6a2c4264d7808d315b (diff)
parent    657a77fa7284d8ae28dfa48f1dc5d919bf5b2843 (diff)
Merge branch 'dmaengine' into async-tx-next
Conflicts:
	crypto/async_tx/async_xor.c
	drivers/dma/ioat/dma_v2.h
	drivers/dma/ioat/pci.c
	drivers/md/raid5.c
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile        |   2
-rw-r--r--  arch/arm/kernel/calls.S         |   2
-rw-r--r--  arch/arm/kernel/entry-armv.S    |   3
-rw-r--r--  arch/arm/kernel/entry-common.S  |   3
-rw-r--r--  arch/arm/kernel/init_task.c     |   4
-rw-r--r--  arch/arm/kernel/irq.c           |   6
-rw-r--r--  arch/arm/kernel/process.c       |  79
-rw-r--r--  arch/arm/kernel/signal.c        |  14
-rw-r--r--  arch/arm/kernel/smp.c           | 131
-rw-r--r--  arch/arm/kernel/smp_scu.c       |  48
-rw-r--r--  arch/arm/kernel/smp_twd.c       | 175
-rw-r--r--  arch/arm/kernel/unwind.c        |  19
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S   |  10
13 files changed, 409 insertions(+), 87 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 11a5197a221f..ff89d0b3abc5 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -22,6 +22,8 @@ obj-$(CONFIG_ARTHUR) += arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
 obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
+obj-$(CONFIG_HAVE_ARM_TWD)	+= smp_twd.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o kprobes-decode.o
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 1680e9e9c831..f776e72a4cb8 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -372,6 +372,8 @@
 /* 360 */	CALL(sys_inotify_init1)
 		CALL(sys_preadv)
 		CALL(sys_pwritev)
+		CALL(sys_rt_tgsigqueueinfo)
+		CALL(sys_perf_counter_open)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
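Note: the .equ above pads the syscall table out to a multiple of four entries; (NR_syscalls + 3) & ~3 is the usual round-up-to-alignment idiom. A standalone C sketch of the arithmetic (the entry count is an illustrative value, not taken from this tree):

    #include <stdio.h>

    /* Round n up to the next multiple of 4, as the .equ above does. */
    static unsigned int round_up4(unsigned int n)
    {
        return (n + 3) & ~3u;
    }

    int main(void)
    {
        unsigned int nr_syscalls = 363;   /* illustrative count only */
        printf("%u entries -> %u padding slots\n",
               nr_syscalls, round_up4(nr_syscalls) - nr_syscalls);
        return 0;
    }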
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 83b1da6b7baa..fc8af43c5000 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -482,6 +482,9 @@ __und_usr:
 	subeq	r4, r2, #4			@ ARM instr at LR - 4
 	subne	r4, r2, #2			@ Thumb instr at LR - 2
 1:	ldreqt	r0, [r4]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	reveq	r0, r0				@ little endian instruction
+#endif
 	beq	call_fpe
 	@ Thumb instruction
 #if __LINUX_ARM_ARCH__ >= 7
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b55cb0331809..366e5097a41a 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -210,6 +210,9 @@ ENTRY(vector_swi)
  A710(	teq	ip, #0x0f000000 )
  A710(	bne	.Larm710bug )
 #endif
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	rev	r10, r10			@ little endian instruction
+#endif
 
 #elif defined(CONFIG_AEABI)
 
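Note: on BE8, instruction memory is little-endian while data accesses are big-endian, so an instruction word fetched with a data load must be byte-reversed before decoding; that is what rev (and the conditional reveq above) does. A rough C model of the swap:

    #include <stdio.h>
    #include <stdint.h>

    /* Model of the ARM 'rev' instruction: reverse the four bytes of a word. */
    static uint32_t rev32(uint32_t x)
    {
        return __builtin_bswap32(x);
    }

    int main(void)
    {
        uint32_t fetched = 0xef000000;   /* illustrative SWI encoding */
        printf("raw 0x%08x -> decoded 0x%08x\n", fetched, rev32(fetched));
        return 0;
    }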
diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c
index e859af349467..3f470866bb89 100644
--- a/arch/arm/kernel/init_task.c
+++ b/arch/arm/kernel/init_task.c
@@ -14,10 +14,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 6874c7dca75a..096f600dc8d8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -167,7 +167,7 @@ void __init init_IRQ(void)
 
 #ifdef CONFIG_SMP
 	cpumask_setall(bad_irq_desc.affinity);
-	bad_irq_desc.cpu = smp_processor_id();
+	bad_irq_desc.node = smp_processor_id();
 #endif
 	init_arch_irq();
 }
@@ -176,7 +176,7 @@ void __init init_IRQ(void)
 
 static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
 
 	spin_lock_irq(&desc->lock);
 	desc->chip->set_affinity(irq, cpumask_of(cpu));
@@ -195,7 +195,7 @@ void migrate_irqs(void)
 	for (i = 0; i < NR_IRQS; i++) {
 		struct irq_desc *desc = irq_desc + i;
 
-		if (desc->cpu == cpu) {
+		if (desc->node == cpu) {
 			unsigned int newcpu = cpumask_any_and(desc->affinity,
 							      cpu_online_mask);
 			if (newcpu >= nr_cpu_ids) {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c3265a2e7cd4..39196dff478c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -114,9 +114,6 @@ void arm_machine_restart(char mode, const char *cmd)
 /*
  * Function pointers to optional machine specific functions
  */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -130,20 +127,19 @@ EXPORT_SYMBOL_GPL(arm_pm_restart);
  */
 static void default_idle(void)
 {
-	if (hlt_counter)
-		cpu_relax();
-	else {
-		local_irq_disable();
-		if (!need_resched())
-			arch_idle();
-		local_irq_enable();
-	}
+	if (!need_resched())
+		arch_idle();
+	local_irq_enable();
 }
 
+void (*pm_idle)(void) = default_idle;
+EXPORT_SYMBOL(pm_idle);
+
 /*
- * The idle thread. We try to conserve power, while trying to keep
- * overall latency low. The architecture specific idle is passed
- * a value to indicate the level of "idleness" of the system.
+ * The idle thread, has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way.  The only difference
+ * is that we always respect 'hlt_counter' to prevent low power idle.
  */
 void cpu_idle(void)
 {
@@ -151,21 +147,31 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
-
+		tick_nohz_stop_sched_tick(1);
+		leds_event(led_idle_start);
+		while (!need_resched()) {
 #ifdef CONFIG_HOTPLUG_CPU
-		if (cpu_is_offline(smp_processor_id())) {
-			leds_event(led_idle_start);
-			cpu_die();
-		}
+			if (cpu_is_offline(smp_processor_id()))
+				cpu_die();
 #endif
 
-		if (!idle)
-			idle = default_idle;
-		leds_event(led_idle_start);
-		tick_nohz_stop_sched_tick(1);
-		while (!need_resched())
-			idle();
+			local_irq_disable();
+			if (hlt_counter) {
+				local_irq_enable();
+				cpu_relax();
+			} else {
+				stop_critical_timings();
+				pm_idle();
+				start_critical_timings();
+				/*
+				 * This will eventually be removed - pm_idle
+				 * functions should always return with IRQs
+				 * enabled.
+				 */
+				WARN_ON(irqs_disabled());
+				local_irq_enable();
+			}
+		}
 		leds_event(led_idle_end);
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
@@ -352,6 +358,23 @@ asm( ".section .text\n"
352" .size kernel_thread_helper, . - kernel_thread_helper\n" 358" .size kernel_thread_helper, . - kernel_thread_helper\n"
353" .previous"); 359" .previous");
354 360
361#ifdef CONFIG_ARM_UNWIND
362extern void kernel_thread_exit(long code);
363asm( ".section .text\n"
364" .align\n"
365" .type kernel_thread_exit, #function\n"
366"kernel_thread_exit:\n"
367" .fnstart\n"
368" .cantunwind\n"
369" bl do_exit\n"
370" nop\n"
371" .fnend\n"
372" .size kernel_thread_exit, . - kernel_thread_exit\n"
373" .previous");
374#else
375#define kernel_thread_exit do_exit
376#endif
377
355/* 378/*
356 * Create a kernel thread. 379 * Create a kernel thread.
357 */ 380 */
@@ -363,9 +386,9 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 
 	regs.ARM_r1 = (unsigned long)arg;
 	regs.ARM_r2 = (unsigned long)fn;
-	regs.ARM_r3 = (unsigned long)do_exit;
+	regs.ARM_r3 = (unsigned long)kernel_thread_exit;
 	regs.ARM_pc = (unsigned long)kernel_thread_helper;
-	regs.ARM_cpsr = SVC_MODE;
+	regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
 
 	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
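Note: the idle rework turns pm_idle into a hook that is preset to default_idle and invoked with interrupts disabled, after hlt_counter has been checked. A loose userspace model of the hook-with-default pattern (not kernel code; names mirror the diff for readability):

    #include <stdio.h>

    static int hlt_counter;            /* non-zero suppresses low-power idle */

    static void default_idle(void)
    {
        puts("arch_idle(): wait for interrupt");
    }

    /* Platforms or cpuidle may repoint this; it defaults to default_idle. */
    static void (*pm_idle)(void) = default_idle;

    static void idle_once(void)
    {
        if (hlt_counter)
            ;                          /* spin: the cpu_relax() path */
        else
            pm_idle();                 /* possibly a platform override */
    }

    int main(void)
    {
        idle_once();                   /* takes the default path */
        hlt_counter = 1;
        idle_once();                   /* low-power idle suppressed */
        return 0;
    }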
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 80b8b5c7e07a..93bb4247b7ed 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -426,9 +426,13 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	 */
 	thumb = handler & 1;
 
-	if (thumb)
+	if (thumb) {
 		cpsr |= PSR_T_BIT;
-	else
+#if __LINUX_ARM_ARCH__ >= 7
+		/* clear the If-Then Thumb-2 execution state */
+		cpsr &= ~PSR_IT_MASK;
+#endif
+	} else
 		cpsr &= ~PSR_T_BIT;
 	}
 #endif
@@ -532,7 +536,7 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 	return err;
 }
 
-static inline void restart_syscall(struct pt_regs *regs)
+static inline void setup_syscall_restart(struct pt_regs *regs)
 {
 	regs->ARM_r0 = regs->ARM_ORIG_r0;
 	regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
@@ -567,7 +571,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 		}
 		/* fallthrough */
 	case -ERESTARTNOINTR:
-		restart_syscall(regs);
+		setup_syscall_restart(regs);
 	}
 }
 
@@ -691,7 +695,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 		if (regs->ARM_r0 == -ERESTARTNOHAND ||
 		    regs->ARM_r0 == -ERESTARTSYS ||
 		    regs->ARM_r0 == -ERESTARTNOINTR) {
-			restart_syscall(regs);
+			setup_syscall_restart(regs);
 		}
 	}
 	single_step_set(current);
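Note: setup_syscall_restart() re-arms an interrupted syscall by restoring the saved r0 and stepping the PC back over the trap instruction, 2 bytes in Thumb state and 4 in ARM state. A hypothetical userspace model of that arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical register snapshot; only the fields the sketch needs. */
    struct regs_model {
        uint32_t r0, orig_r0, pc;
        int thumb;                        /* 1 if the task ran in Thumb state */
    };

    static void setup_syscall_restart(struct regs_model *regs)
    {
        regs->r0 = regs->orig_r0;         /* undo the -ERESTART* return value */
        regs->pc -= regs->thumb ? 2 : 4;  /* back up over the trap */
    }

    int main(void)
    {
        struct regs_model regs = { .r0 = (uint32_t)-514, .orig_r0 = 42,
                                   .pc = 0x8054, .thumb = 0 };
        setup_syscall_restart(&regs);
        printf("pc=0x%x r0=%u\n", regs.pc, regs.r0);  /* pc=0x8050 r0=42 */
        return 0;
    }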
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 6014dfd22af4..de885fd256c5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -22,16 +22,20 @@
 #include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/cputype.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
+#include <asm/localtimer.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -163,7 +167,7 @@ int __cpuexit __cpu_disable(void)
 	 * Take this CPU offline.  Once we clear this, we can't return,
 	 * and we must not schedule until we're ready to give up the cpu.
 	 */
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
 	/*
 	 * OK - migrate IRQs away from this CPU
@@ -274,9 +278,9 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	local_fiq_enable();
 
 	/*
-	 * Setup local timer for this CPU.
+	 * Setup the percpu timer for this CPU.
 	 */
-	local_timer_setup();
+	percpu_timer_setup();
 
 	calibrate_delay();
 
@@ -285,7 +289,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * OK, now it's safe to let the boot CPU continue
 	 */
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 
 	/*
 	 * OK, it's off to the idle thread for us
@@ -383,10 +387,16 @@ void show_local_irqs(struct seq_file *p)
 	seq_putc(p, '\n');
 }
 
+/*
+ * Timer (local or broadcast) support
+ */
+static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
+
 static void ipi_timer(void)
 {
+	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
 	irq_enter();
-	local_timer_interrupt();
+	evt->event_handler(evt);
 	irq_exit();
 }
 
@@ -405,6 +415,42 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
405} 415}
406#endif 416#endif
407 417
418#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
419static void smp_timer_broadcast(const struct cpumask *mask)
420{
421 send_ipi_message(mask, IPI_TIMER);
422}
423
424static void broadcast_timer_set_mode(enum clock_event_mode mode,
425 struct clock_event_device *evt)
426{
427}
428
429static void local_timer_setup(struct clock_event_device *evt)
430{
431 evt->name = "dummy_timer";
432 evt->features = CLOCK_EVT_FEAT_ONESHOT |
433 CLOCK_EVT_FEAT_PERIODIC |
434 CLOCK_EVT_FEAT_DUMMY;
435 evt->rating = 400;
436 evt->mult = 1;
437 evt->set_mode = broadcast_timer_set_mode;
438 evt->broadcast = smp_timer_broadcast;
439
440 clockevents_register_device(evt);
441}
442#endif
443
444void __cpuinit percpu_timer_setup(void)
445{
446 unsigned int cpu = smp_processor_id();
447 struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
448
449 evt->cpumask = cpumask_of(cpu);
450
451 local_timer_setup(evt);
452}
453
408static DEFINE_SPINLOCK(stop_lock); 454static DEFINE_SPINLOCK(stop_lock);
409 455
410/* 456/*
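Note: percpu_timer_setup() gives each CPU its own clock_event_device; where no hardware local timer exists, the "dummy_timer" device above simply relays ticks through the IPI broadcast. A much-reduced userspace model of the per-CPU handler table that ipi_timer() dispatches through (the struct and names are simplified stand-ins, not kernel API):

    #include <stdio.h>

    #define NR_CPUS 4

    /* Reduced stand-in for struct clock_event_device. */
    struct event_dev {
        const char *name;
        void (*event_handler)(struct event_dev *evt);
    };

    static struct event_dev percpu_clockevent[NR_CPUS];

    static void tick_handler(struct event_dev *evt)
    {
        printf("tick on %s\n", evt->name);
    }

    /* What ipi_timer() does: run this CPU's registered event handler. */
    static void ipi_timer(int cpu)
    {
        struct event_dev *evt = &percpu_clockevent[cpu];
        evt->event_handler(evt);
    }

    int main(void)
    {
        percpu_clockevent[1] =
            (struct event_dev){ "cpu1_local_timer", tick_handler };
        ipi_timer(1);
        return 0;
    }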
@@ -417,7 +463,7 @@ static void ipi_cpu_stop(unsigned int cpu)
 	dump_stack();
 	spin_unlock(&stop_lock);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
 	local_fiq_disable();
 	local_irq_disable();
@@ -501,11 +547,6 @@ void smp_send_reschedule(int cpu)
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-void smp_timer_broadcast(const struct cpumask *mask)
-{
-	send_ipi_message(mask, IPI_TIMER);
-}
-
 void smp_send_stop(void)
 {
 	cpumask_t mask = cpu_online_map;
@@ -545,6 +586,12 @@ struct tlb_args {
 	unsigned long ta_end;
 };
 
+/* all SMP configurations have the extended CPUID registers */
+static inline int tlb_ops_need_broadcast(void)
+{
+	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
+}
+
 static inline void ipi_flush_tlb_all(void *ignored)
 {
 	local_flush_tlb_all();
@@ -587,51 +634,61 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	else
+		local_flush_tlb_all();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+	else
+		local_flush_tlb_mm(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-	struct tlb_args ta;
-
-	ta.ta_vma = vma;
-	ta.ta_start = uaddr;
-
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = uaddr;
+		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_page(vma, uaddr);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
 {
-	struct tlb_args ta;
-
-	ta.ta_start = kaddr;
-
-	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = kaddr;
+		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	} else
+		local_flush_tlb_kernel_page(kaddr);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
                      unsigned long start, unsigned long end)
 {
-	struct tlb_args ta;
-
-	ta.ta_vma = vma;
-	ta.ta_start = start;
-	ta.ta_end = end;
-
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_range(vma, start, end);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	struct tlb_args ta;
-
-	ta.ta_start = start;
-	ta.ta_end = end;
-
-	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	} else
+		local_flush_tlb_kernel_range(start, end);
 }
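Note: tlb_ops_need_broadcast() keys off ID_MMFR3 bits [15:12], the maintenance-broadcast field; a value of 2 or more means TLB maintenance operations are broadcast in hardware, so the flush_tlb_* paths above fall back to IPIs only below that. A small C sketch of the field test (the sample register value is invented):

    #include <stdio.h>
    #include <stdint.h>

    /* ID_MMFR3[15:12]: maintenance broadcast support. */
    static int tlb_ops_need_broadcast(uint32_t mmfr3)
    {
        return ((mmfr3 >> 12) & 0xf) < 2;
    }

    int main(void)
    {
        uint32_t mmfr3 = 0x00002000;   /* invented: field == 2 */
        printf("software broadcast needed: %d\n",
               tlb_ops_need_broadcast(mmfr3));
        return 0;
    }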
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
new file mode 100644
index 000000000000..d3831f616ee9
--- /dev/null
+++ b/arch/arm/kernel/smp_scu.c
@@ -0,0 +1,48 @@
+/*
+ * linux/arch/arm/kernel/smp_scu.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <asm/smp_scu.h>
+#include <asm/cacheflush.h>
+
+#define SCU_CTRL		0x00
+#define SCU_CONFIG		0x04
+#define SCU_CPU_STATUS		0x08
+#define SCU_INVALIDATE		0x0c
+#define SCU_FPGA_REVISION	0x10
+
+/*
+ * Get the number of CPU cores from the SCU configuration
+ */
+unsigned int __init scu_get_core_count(void __iomem *scu_base)
+{
+	unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
+	return (ncores & 0x03) + 1;
+}
+
+/*
+ * Enable the SCU
+ */
+void __init scu_enable(void __iomem *scu_base)
+{
+	u32 scu_ctrl;
+
+	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
+	scu_ctrl |= 1;
+	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+
+	/*
+	 * Ensure that the data accessed by CPU0 before the SCU was
+	 * initialised is visible to the other CPUs.
+	 */
+	flush_cache_all();
+}
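Note: scu_get_core_count() decodes SCU_CONFIG[1:0], which holds the number of cores minus one. A trivial C sketch of the decode (the register value is invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* SCU_CONFIG[1:0] encodes the core count minus one. */
    static unsigned int scu_core_count(uint32_t scu_config)
    {
        return (scu_config & 0x03) + 1;
    }

    int main(void)
    {
        uint32_t cfg = 0x00000003;     /* invented value: four cores */
        printf("%u cores\n", scu_core_count(cfg));
        return 0;
    }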
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
new file mode 100644
index 000000000000..d8c88c633c6f
--- /dev/null
+++ b/arch/arm/kernel/smp_twd.c
@@ -0,0 +1,175 @@
+/*
+ * linux/arch/arm/kernel/smp_twd.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/jiffies.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/smp_twd.h>
+#include <asm/hardware/gic.h>
+
+#define TWD_TIMER_LOAD			0x00
+#define TWD_TIMER_COUNTER		0x04
+#define TWD_TIMER_CONTROL		0x08
+#define TWD_TIMER_INTSTAT		0x0C
+
+#define TWD_WDOG_LOAD			0x20
+#define TWD_WDOG_COUNTER		0x24
+#define TWD_WDOG_CONTROL		0x28
+#define TWD_WDOG_INTSTAT		0x2C
+#define TWD_WDOG_RESETSTAT		0x30
+#define TWD_WDOG_DISABLE		0x34
+
+#define TWD_TIMER_CONTROL_ENABLE	(1 << 0)
+#define TWD_TIMER_CONTROL_ONESHOT	(0 << 1)
+#define TWD_TIMER_CONTROL_PERIODIC	(1 << 1)
+#define TWD_TIMER_CONTROL_IT_ENABLE	(1 << 2)
+
+/* set up by the platform code */
+void __iomem *twd_base;
+
+static unsigned long twd_timer_rate;
+
+static void twd_set_mode(enum clock_event_mode mode,
+			struct clock_event_device *clk)
+{
+	unsigned long ctrl;
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		/* timer load already set up */
+		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
+			| TWD_TIMER_CONTROL_PERIODIC;
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* period set, and timer enabled in 'next_event' hook */
+		ctrl = TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT;
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	default:
+		ctrl = 0;
+	}
+
+	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+}
+
+static int twd_set_next_event(unsigned long evt,
+			struct clock_event_device *unused)
+{
+	unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+
+	ctrl |= TWD_TIMER_CONTROL_ENABLE;
+
+	__raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
+	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+
+	return 0;
+}
+
+/*
+ * local_timer_ack: checks for a local timer interrupt.
+ *
+ * If a local timer interrupt has occurred, acknowledge and return 1.
+ * Otherwise, return 0.
+ */
+int twd_timer_ack(void)
+{
+	if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
+		__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void __cpuinit twd_calibrate_rate(void)
+{
+	unsigned long load, count;
+	u64 waitjiffies;
+
+	/*
+	 * If this is the first time round, we need to work out how fast
+	 * the timer ticks
+	 */
+	if (twd_timer_rate == 0) {
+		printk(KERN_INFO "Calibrating local timer... ");
+
+		/* Wait for a tick to start */
+		waitjiffies = get_jiffies_64() + 1;
+
+		while (get_jiffies_64() < waitjiffies)
+			udelay(10);
+
+		/* OK, now the tick has started, let's get the timer going */
+		waitjiffies += 5;
+
+		/* enable, no interrupt or reload */
+		__raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
+
+		/* maximum value */
+		__raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
+
+		while (get_jiffies_64() < waitjiffies)
+			udelay(10);
+
+		count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
+
+		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
+
+		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
+			(twd_timer_rate / 100000) % 100);
+	}
+
+	load = twd_timer_rate / HZ;
+
+	__raw_writel(load, twd_base + TWD_TIMER_LOAD);
+}
+
+/*
+ * Setup the local clock events for a CPU.
+ */
+void __cpuinit twd_timer_setup(struct clock_event_device *clk)
+{
+	unsigned long flags;
+
+	twd_calibrate_rate();
+
+	clk->name = "local_timer";
+	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+	clk->rating = 350;
+	clk->set_mode = twd_set_mode;
+	clk->set_next_event = twd_set_next_event;
+	clk->shift = 20;
+	clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
+	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
+	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
+
+	/* Make sure our local interrupt controller has this enabled */
+	local_irq_save(flags);
+	get_irq_chip(clk->irq)->unmask(clk->irq);
+	local_irq_restore(flags);
+
+	clockevents_register_device(clk);
+}
+
+/*
+ * take a local timer down
+ */
+void __cpuexit twd_timer_stop(void)
+{
+	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+}
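Note: twd_calibrate_rate() lets the counter run down from 0xFFFFFFFF for five jiffies and scales the consumed ticks up to a per-second rate; twd_timer_setup() then derives the clockevent mult so that ticks = ns * mult >> shift. A standalone C sketch of both computations (HZ and the sampled count are example values):

    #include <stdio.h>
    #include <stdint.h>

    #define HZ           100                      /* example tick rate */
    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        /* Counter started at 0xFFFFFFFF and ran for 5 jiffies. */
        uint32_t count = 0xFFFFFFFFu - 5000000u;  /* example: 5M ticks used */
        unsigned long rate = (0xFFFFFFFFu - count) * (HZ / 5);

        /* div_sc()-style fixed point: mult = (rate << shift) / NSEC_PER_SEC */
        unsigned int shift = 20;
        uint64_t mult = ((uint64_t)rate << shift) / NSEC_PER_SEC;

        printf("rate = %lu Hz, mult = %llu (shift = %u)\n",
               rate, (unsigned long long)mult, shift);
        return 0;
    }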
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 1dedc2c7ff49..dd56e11f339a 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -212,7 +212,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 		ctrl->vrs[14] = *vsp++;
 		ctrl->vrs[SP] = (unsigned long)vsp;
 	} else if (insn == 0xb0) {
-		ctrl->vrs[PC] = ctrl->vrs[LR];
+		if (ctrl->vrs[PC] == 0)
+			ctrl->vrs[PC] = ctrl->vrs[LR];
 		/* no further processing */
 		ctrl->entries = 0;
 	} else if (insn == 0xb1) {
@@ -309,18 +310,20 @@ int unwind_frame(struct stackframe *frame)
 	}
 
 	while (ctrl.entries > 0) {
-		int urc;
-
-		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
-			return -URC_FAILURE;
-		urc = unwind_exec_insn(&ctrl);
+		int urc = unwind_exec_insn(&ctrl);
 		if (urc < 0)
 			return urc;
+		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
+			return -URC_FAILURE;
 	}
 
 	if (ctrl.vrs[PC] == 0)
 		ctrl.vrs[PC] = ctrl.vrs[LR];
 
+	/* check for infinite loop */
+	if (frame->pc == ctrl.vrs[PC])
+		return -URC_FAILURE;
+
 	frame->fp = ctrl.vrs[FP];
 	frame->sp = ctrl.vrs[SP];
 	frame->lr = ctrl.vrs[LR];
@@ -332,7 +335,6 @@ int unwind_frame(struct stackframe *frame)
 void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
 	struct stackframe frame;
-	unsigned long high, low;
 	register unsigned long current_sp asm ("sp");
 
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -362,9 +364,6 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		frame.pc = thread_saved_pc(tsk);
 	}
 
-	low = frame.sp & ~(THREAD_SIZE - 1);
-	high = low + THREAD_SIZE;
-
 	while (1) {
 		int urc;
 		unsigned long where = frame.pc;
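Note: the reordered checks validate SP only after an unwind instruction has run, and the added PC comparison aborts when an unwind entry makes no progress. A toy C model of that loop-termination guard (the table and addresses are invented):

    #include <stdio.h>

    /* Toy unwind table: pc -> caller pc; a self-referential entry is corrupt. */
    static unsigned long caller_of(unsigned long pc)
    {
        switch (pc) {
        case 0x8100: return 0x8050;
        case 0x8050: return 0x8050;    /* corrupt: points at itself */
        default:     return 0;
        }
    }

    int main(void)
    {
        unsigned long pc = 0x8100;

        while (pc) {
            unsigned long next = caller_of(pc);
            printf("pc = 0x%lx\n", pc);
            if (next == pc) {          /* check for infinite loop */
                puts("unwind stuck; aborting (-URC_FAILURE)");
                break;
            }
            pc = next;
        }
        return 0;
    }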
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index c90f27250ead..4340bf3d2c84 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -84,6 +84,14 @@ SECTIONS
 		*(.exitcall.exit)
 		*(.ARM.exidx.exit.text)
 		*(.ARM.extab.exit.text)
+#ifndef CONFIG_HOTPLUG_CPU
+		*(.ARM.exidx.cpuexit.text)
+		*(.ARM.extab.cpuexit.text)
+#endif
+#ifndef CONFIG_HOTPLUG
+		*(.ARM.exidx.devexit.text)
+		*(.ARM.extab.devexit.text)
+#endif
 #ifndef CONFIG_MMU
 		*(.fixup)
 		*(__ex_table)
@@ -141,6 +149,7 @@ SECTIONS
 
 	.data : AT(__data_loc) {
 		_data = .;		/* address in memory */
+		_sdata = .;
 
 		/*
 		 * first, the init task union, aligned
@@ -192,6 +201,7 @@ SECTIONS
 		__bss_start = .;	/* BSS */
 		*(.bss)
 		*(COMMON)
+		__bss_stop = .;
 		_end = .;
 	}
 	/* Stabs debugging sections. */