From a4a12e008e292a81d312659529b71be2026ab355 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Fri, 30 Nov 2012 11:56:05 +0000
Subject: ARM: virt: Avoid bx instruction for compatibility with <=ARMv4

Non-T variants of ARMv4 do not support the bx instruction.

However, __hyp_stub_install is always called from the same instruction
set used to build the bulk of the kernel, so bx should not be
necessary.

This patch uses the traditional "mov pc" instead of bx.

Cc:
Signed-off-by: Dave Martin
[will: fixed up remaining bx instruction]
Signed-off-by: Will Deacon
---
 arch/arm/kernel/hyp-stub.S | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 65b2417aebce..3c60256d3927 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
 	 * immediately.
 	 */
 	compare_cpu_mode_with_primary	r4, r5, r6, r7
-	bxne	lr
+	movne	pc, lr
 
 	/*
 	 * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
 	 */
 
 	cmp	r4, #HYP_MODE
-	bxne	lr			@ give up if the CPU is not in HYP mode
+	movne	pc, lr			@ give up if the CPU is not in HYP mode
 
 	/*
 	 * Configure HSCTLR to set correct exception endianness/instruction set
@@ -200,7 +200,7 @@ ENDPROC(__hyp_get_vectors)
 	@ fall through
 ENTRY(__hyp_set_vectors)
 	__HVC(0)
-	bx	lr
+	mov	pc, lr
 ENDPROC(__hyp_set_vectors)
 
 #ifndef ZIMAGE
--
cgit v1.2.2

From 6e484be1ccca3ea495db45900fd42aac8d49d754 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 4 Jan 2013 17:44:14 +0000
Subject: ARM: virt: boot secondary CPUs through the right entry point

Secondary CPUs should use the __hyp_stub_install_secondary entry point,
so boot mode inconsistencies can be detected.

Cc:
Acked-by: Dave Martin
Reported-by: Ian Molton
Signed-off-by: Marc Zyngier
Signed-off-by: Will Deacon
---
 arch/arm/kernel/head.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4eee351f4668..16abc8322f79 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -331,7 +331,7 @@ ENTRY(secondary_startup)
 	 * as it has already been validated by the primary processor.
 	 */
 #ifdef CONFIG_ARM_VIRT_EXT
-	bl	__hyp_stub_install
+	bl	__hyp_stub_install_secondary
 #endif
 
 	safe_svcmode_maskall r9
--
cgit v1.2.2

From d01723479e6a6c70c83295f7847477a016d5e14a Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 4 Jan 2013 17:44:15 +0000
Subject: ARM: virt: simplify __hyp_stub_install epilog

__hyp_stub_install duplicates quite a bit of safe_svcmode_maskall by
forcing the CPU back to SVC. This is unnecessary, as
safe_svcmode_maskall is called just after.

Furthermore, the way we build SPSR_hyp is buggy as we fail to mask the
interrupts, leading to interesting behaviours on TC2 + UEFI.

The fix is to simply remove this code and rely on safe_svcmode_maskall
to do the right thing.

Cc:
Reviewed-by: Dave Martin
Reported-by: Harry Liebel
Signed-off-by: Marc Zyngier
Signed-off-by: Will Deacon
---
 arch/arm/kernel/hyp-stub.S | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 3c60256d3927..1315c4ccfa56 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary)
 	 * Eventually, CPU-specific code might be needed -- assume not for now
 	 *
 	 * This code relies on the "eret" instruction to synchronize the
-	 * various coprocessor accesses.
+	 * various coprocessor accesses. This is done when we switch to SVC
+	 * (see safe_svcmode_maskall).
 	 */
 	@ Now install the hypervisor stub:
 	adr	r7, __hyp_stub_vectors
@@ -155,14 +156,7 @@ THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE
 1:
 #endif
 
-	bic	r7, r4, #MODE_MASK
-	orr	r7, r7, #SVC_MODE
-THUMB(	orr	r7, r7, #PSR_T_BIT	)
-	msr	spsr_cxsf, r7		@ This is SPSR_hyp.
-
-	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr
-	__ERET				@ return, switching to SVC mode
-	@ The boot CPU mode is left in r4.
+	bx	lr			@ The boot CPU mode is left in r4.
 ENDPROC(__hyp_stub_install_secondary)
 
 __hyp_stub_do_trap:
--
cgit v1.2.2
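To make the SPSR_hyp problem above concrete, here is a minimal sketch in plain C (illustration only, not kernel code). It assumes, as the hyp-stub entry code implies, that r4 holds only the mode field of the CPSR at that point, and it uses the architectural CPSR constant values; the point is that the removed epilog produced an SPSR with the I/F mask bits clear, whereas the value safe_svcmode_maskall builds keeps interrupts masked across the switch to SVC.

/* Illustration only -- not kernel code.  Constant values are the
 * architectural CPSR encodings (cf. arch/arm/include/uapi/asm/ptrace.h). */
#include <stdio.h>

#define MODE_MASK	0x1f
#define SVC_MODE	0x13
#define HYP_MODE	0x1a
#define PSR_F_BIT	0x40
#define PSR_I_BIT	0x80

int main(void)
{
	unsigned int r4 = HYP_MODE;	/* assumed: boot mode only, other CPSR bits stripped */

	/* What the removed bic/orr/msr sequence loaded into SPSR_hyp: */
	unsigned int buggy = (r4 & ~MODE_MASK) | SVC_MODE;

	/* What safe_svcmode_maskall ORs together before switching mode: */
	unsigned int safe = (r4 & ~MODE_MASK) | PSR_I_BIT | PSR_F_BIT | SVC_MODE;

	printf("removed epilog:       %#x -> IRQ/FIQ unmasked after eret\n", buggy);
	printf("safe_svcmode_maskall: %#x -> IRQ/FIQ stay masked\n", safe);
	return 0;
}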
From 6f16f4998f98e42e3f2dedf663cfb691ff0324af Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 15 Jan 2013 18:51:32 +0100
Subject: ARM: 7628/1: head.S: map one extra section for the ATAG/DTB area

We currently use a temporary 1MB section aligned to a 1MB boundary for
mapping the provided device tree until the final page table is created.

However, if the device tree happens to cross that 1MB boundary, the end
of it remains unmapped and the kernel crashes when it attempts to
access it. Given no restriction on the location of that DTB, it could
end up with only a few bytes mapped at the end of a section.

Solve this issue by mapping two consecutive sections.

Signed-off-by: Nicolas Pitre
Tested-by: Sascha Hauer
Tested-by: Tomasz Figa
Cc: stable@vger.kernel.org
Signed-off-by: Russell King
---
 arch/arm/kernel/head.S | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4eee351f4668..61fcb18c7e5b 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -246,6 +246,7 @@ __create_page_tables:
 
 	/*
 	 * Then map boot params address in r2 if specified.
+	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
 	movs	r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@ __create_page_tables:
 	addne	r3, r3, #PAGE_OFFSET
 	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
 	orrne	r6, r7, r0
+	strne	r6, [r3], #1 << PMD_ORDER
+	addne	r6, r6, #1 << SECTION_SHIFT
 	strne	r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
--
cgit v1.2.2
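A few lines of arithmetic show the failure mode this patch addresses. The sketch below is plain C with made-up addresses (only SECTION_SHIFT matches the real 1MB ARM section size): a DTB that starts a few bytes below a 1MB boundary has almost all of its body outside the single section the old code mapped, while mapping two consecutive sections covers any DTB smaller than a section regardless of where it starts.

/* Illustration only: why a single 1MB section mapping can miss most of the DTB.
 * Addresses and sizes are hypothetical. */
#include <stdio.h>

#define SECTION_SHIFT	20
#define SECTION_SIZE	(1UL << SECTION_SHIFT)

int main(void)
{
	unsigned long dtb_start = 0x8fffff80;	/* hypothetical: 128 bytes below a 1MB boundary */
	unsigned long dtb_size  = 0x4000;	/* hypothetical 16KB device tree */

	unsigned long section = dtb_start & ~(SECTION_SIZE - 1);
	unsigned long mapped  = section + SECTION_SIZE - dtb_start;	/* old behaviour */

	if (mapped > dtb_size)
		mapped = dtb_size;

	printf("one section mapped:  %lu of %lu DTB bytes reachable\n", mapped, dtb_size);
	printf("two sections mapped: coverage ends at %#lx, DTB ends at %#lx\n",
	       section + 2 * SECTION_SIZE, dtb_start + dtb_size);
	return 0;
}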
From a73b59c51ab288d81b515b504790267f594884b8 Mon Sep 17 00:00:00 2001
From: Uwe Kleine-König
Date: Wed, 16 Jan 2013 15:32:06 +0100
Subject: ARM: compile fix for DEBUG_LL=y && MMU=n
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

debug_ll_addr is only used on machines with an MMU so it can be
#ifdef'ed out safely.

This fixes:

	arch/arm/kernel/debug.S: Assembler messages:
	arch/arm/kernel/debug.S:104: Error: too many positional arguments

The problem was introduced in e5c5f2a ("ARM: implement debug_ll_io_init()").

Signed-off-by: Uwe Kleine-König
Reviewed-by: Stephen Warren
Acked-by: Rob Herring
Signed-off-by: Olof Johansson
---
 arch/arm/kernel/debug.S | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 6809200c31fb..14f7c3b14632 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -100,12 +100,14 @@ ENTRY(printch)
 		b	1b
 ENDPROC(printch)
 
+#ifdef CONFIG_MMU
 ENTRY(debug_ll_addr)
 		addruart r2, r3, ip
 		str	r2, [r0]
 		str	r3, [r1]
 		mov	pc, lr
 ENDPROC(debug_ll_addr)
+#endif
 
 #else
--
cgit v1.2.2

From e2c501190c7d4bf9d7febb9e1f1094cbde59ed89 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 30 Oct 2012 11:17:01 +0000
Subject: arm: Use generic timer broadcast receiver

Currently, the ARM backend must maintain a redundant list of timers for
the purpose of centralising timer broadcast functionality. This
prevents sharing timer drivers across architectures.

This patch moves the pain of dealing with timer broadcasts to the core
clockevents tick broadcast code, which already maintains its own list
of timers.

Signed-off-by: Mark Rutland
Reviewed-by: Santosh Shilimkar
Tested-by: Santosh Shilimkar
Reviewed-by: Stephen Boyd
---
 arch/arm/kernel/smp.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf652e5..930851912b37 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -475,12 +475,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
  */
 static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 
-static void ipi_timer(void)
-{
-	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-	evt->event_handler(evt);
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 static void smp_timer_broadcast(const struct cpumask *mask)
 {
@@ -596,11 +590,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	case IPI_WAKEUP:
 		break;
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	case IPI_TIMER:
 		irq_enter();
-		ipi_timer();
+		tick_receive_broadcast();
 		irq_exit();
 		break;
+#endif
 
 	case IPI_RESCHEDULE:
 		scheduler_ipi();
--
cgit v1.2.2

From 3d06770eef43eaad606e77246bfcc7e82b1d9fb4 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 30 Oct 2012 12:13:42 +0000
Subject: arm: Add generic timer broadcast support

Implement timer_broadcast for the arm architecture, allowing for the
use of clock_event_device_drivers decoupled from the timer tick
broadcast mechanism.

Signed-off-by: Mark Rutland
Reviewed-by: Santosh Shilimkar
Tested-by: Santosh Shilimkar
Reviewed-by: Stephen Boyd
---
 arch/arm/kernel/smp.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 930851912b37..b7e3b506219b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -476,7 +476,7 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static void smp_timer_broadcast(const struct cpumask *mask)
+void tick_broadcast(const struct cpumask *mask)
 {
 	smp_cross_call(mask, IPI_TIMER);
 }
@@ -524,7 +524,6 @@ static void __cpuinit percpu_timer_setup(void)
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
 	evt->cpumask = cpumask_of(cpu);
-	evt->broadcast = smp_timer_broadcast;
 
 	if (!lt_ops || lt_ops->setup(evt))
 		broadcast_timer_setup(evt);
--
cgit v1.2.2
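Taken together, the last two patches drop the ARM-private broadcast plumbing and plug into the hooks the generic tick-broadcast layer expects: the architecture only provides the IPI transport, and the core clockevents code drives the per-CPU clock_event_device. A condensed sketch of the resulting flow (simplified from the diffs above, not a verbatim copy of arch/arm/kernel/smp.c):

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/* Sender side: the core tick-broadcast code calls this arch hook with
 * the mask of CPUs whose local timer cannot fire (e.g. in a deep idle
 * state). */
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	switch (ipinr) {
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		/* Receiver side: the generic code finds this CPU's
		 * clock_event_device and runs its event handler, so the
		 * arch no longer keeps its own timer list (ipi_timer()
		 * and evt->broadcast are gone). */
		tick_receive_broadcast();
		irq_exit();
		break;
#endif
	/* ... other IPIs unchanged ... */
	}
}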