path: root/arch/arm/kernel
author	Russell King <rmk+kernel@arm.linux.org.uk>	2013-02-04 09:51:49 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2013-02-04 09:51:49 -0500
commit	2fb20e2e35cd1455d022ab7a553d7f1663ffbdeb (patch)
tree	4b3bb0f76c15264fe7d4cced91127f3d96ff31f5 /arch/arm/kernel
parent	9cb543124a2d31af42ce61a4c30765ecc8e5f1fa (diff)
parent	5b91ab0abc957145c3ff6be03eb9a3901797019f (diff)
Merge branch 'for-rmk/broadcast' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/debug.S     |  2
-rw-r--r--  arch/arm/kernel/head.S      |  5
-rw-r--r--  arch/arm/kernel/hyp-stub.S  | 18
-rw-r--r--  arch/arm/kernel/smp.c       | 13
4 files changed, 16 insertions, 22 deletions
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 6809200c31fb..14f7c3b14632 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -100,12 +100,14 @@ ENTRY(printch)
 		b	1b
 ENDPROC(printch)
 
+#ifdef CONFIG_MMU
 ENTRY(debug_ll_addr)
 		addruart r2, r3, ip
 		str	r2, [r0]
 		str	r3, [r1]
 		mov	pc, lr
 ENDPROC(debug_ll_addr)
+#endif
 
 #else
 
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4eee351f4668..486a15ae9011 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -246,6 +246,7 @@ __create_page_tables:
 
 	/*
 	 * Then map boot params address in r2 if specified.
+	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
 	movs	r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@ __create_page_tables:
 	addne	r3, r3, #PAGE_OFFSET
 	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
 	orrne	r6, r7, r0
+	strne	r6, [r3], #1 << PMD_ORDER
+	addne	r6, r6, #1 << SECTION_SHIFT
 	strne	r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
@@ -331,7 +334,7 @@ ENTRY(secondary_startup)
 	 * as it has already been validated by the primary processor.
 	 */
 #ifdef CONFIG_ARM_VIRT_EXT
-	bl	__hyp_stub_install
+	bl	__hyp_stub_install_secondary
 #endif
 	safe_svcmode_maskall r9
 
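Note on the boot params hunks above: with the classic (non-LPAE) ARM short-descriptor page tables a section covers 1 MiB (SECTION_SHIFT = 20), so an ATAGs/DTB blob that starts near the end of one section can spill into the next; the added strne/addne pair writes a second, consecutive section entry to cover that case. A minimal sketch of the boundary condition, using a hypothetical helper that is not part of the patch:

#include <stdbool.h>
#include <stdint.h>

#define SECTION_SHIFT 20                      /* 1 MiB sections on non-LPAE ARM */

/* Does a blob of 'size' bytes starting at 'phys' cross a 1 MiB section boundary? */
static bool crosses_section(uint32_t phys, uint32_t size)
{
	return (phys >> SECTION_SHIFT) != ((phys + size - 1) >> SECTION_SHIFT);
}

For example, a 32 KiB DTB loaded at 0x200fc000 ends at 0x20103fff and straddles the 0x20100000 boundary; since the blob size is not known at this point in head.S, the code simply maps two sections unconditionally.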
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 65b2417aebce..1315c4ccfa56 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
 	 * immediately.
 	 */
 	compare_cpu_mode_with_primary	r4, r5, r6, r7
-	bxne	lr
+	movne	pc, lr
 
 	/*
 	 * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
 	 */
 
 	cmp	r4, #HYP_MODE
-	bxne	lr			@ give up if the CPU is not in HYP mode
+	movne	pc, lr			@ give up if the CPU is not in HYP mode
 
 /*
  * Configure HSCTLR to set correct exception endianness/instruction set
@@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary)
 	 * Eventually, CPU-specific code might be needed -- assume not for now
 	 *
 	 * This code relies on the "eret" instruction to synchronize the
-	 * various coprocessor accesses.
+	 * various coprocessor accesses. This is done when we switch to SVC
+	 * (see safe_svcmode_maskall).
 	 */
 	@ Now install the hypervisor stub:
 	adr	r7, __hyp_stub_vectors
@@ -155,14 +156,7 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
 1:
 #endif
 
-	bic	r7, r4, #MODE_MASK
-	orr	r7, r7, #SVC_MODE
-THUMB(	orr	r7, r7, #PSR_T_BIT	)
-	msr	spsr_cxsf, r7		@ This is SPSR_hyp.
-
-	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr
-	__ERET				@ return, switching to SVC mode
-	@ The boot CPU mode is left in r4.
+	bx	lr			@ The boot CPU mode is left in r4.
 ENDPROC(__hyp_stub_install_secondary)
 
 __hyp_stub_do_trap:
@@ -200,7 +194,7 @@ ENDPROC(__hyp_get_vectors)
 	@ fall through
 ENTRY(__hyp_set_vectors)
 	__HVC(0)
-	bx	lr
+	mov	pc, lr
 ENDPROC(__hyp_set_vectors)
 
 #ifndef ZIMAGE
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf652e5..b7e3b506219b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -475,14 +475,8 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
  */
 static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 
-static void ipi_timer(void)
-{
-	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-	evt->event_handler(evt);
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static void smp_timer_broadcast(const struct cpumask *mask)
+void tick_broadcast(const struct cpumask *mask)
 {
 	smp_cross_call(mask, IPI_TIMER);
 }
@@ -530,7 +524,6 @@ static void __cpuinit percpu_timer_setup(void)
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
 	evt->cpumask = cpumask_of(cpu);
-	evt->broadcast = smp_timer_broadcast;
 
 	if (!lt_ops || lt_ops->setup(evt))
 		broadcast_timer_setup(evt);
@@ -596,11 +589,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	case IPI_WAKEUP:
 		break;
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	case IPI_TIMER:
 		irq_enter();
-		ipi_timer();
+		tick_receive_broadcast();
 		irq_exit();
 		break;
+#endif
 
 	case IPI_RESCHEDULE:
 		scheduler_ipi();
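For context on the smp.c hunks: the ARM-private ipi_timer()/smp_timer_broadcast() pair is dropped in favour of the generic clockevents broadcast hooks, so the core tick code calls the arch's tick_broadcast() to raise IPI_TIMER and the IPI handler feeds the event back through tick_receive_broadcast(). The following is a small userspace model of that flow, not kernel code: the cross-CPU IPI is simulated by a direct call, the IPI number is illustrative, and it assumes (as the tick-broadcast code of that era did) that the receive path simply runs the per-CPU clockevent's handler.

#include <stdio.h>

enum ipi_msg_type { IPI_TIMER = 1 };          /* value illustrative only */

struct clock_event_device {
	void (*event_handler)(struct clock_event_device *);
};

static struct clock_event_device percpu_clockevent;

static void dummy_tick(struct clock_event_device *evt)
{
	(void)evt;
	printf("local tick delivered via broadcast\n");
}

/* Generic-side stand-in: hand the broadcast tick to the per-CPU clockevent,
 * as tick_receive_broadcast() does in kernel/time/tick-broadcast.c. */
static int tick_receive_broadcast(void)
{
	struct clock_event_device *evt = &percpu_clockevent;

	if (!evt->event_handler)
		return -1;
	evt->event_handler(evt);
	return 0;
}

/* Arch-side stand-in for handle_IPI(): IPI_TIMER now feeds the generic layer. */
static void handle_ipi(int ipinr)
{
	if (ipinr == IPI_TIMER)
		tick_receive_broadcast();
}

/* Arch-side stand-in for tick_broadcast(): "send" IPI_TIMER to the target CPUs
 * (smp_cross_call(mask, IPI_TIMER) in the real code). */
static void tick_broadcast_model(void)
{
	handle_ipi(IPI_TIMER);
}

int main(void)
{
	percpu_clockevent.event_handler = dummy_tick;
	tick_broadcast_model();
	return 0;
}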