From 505d7b193181be029f4f9aea59e6bdbfdd1e9e76 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 28 Jul 2005 20:32:47 +0100
Subject: [ARM SMP] Ensure secondary CPUs have a clean TLB

Since ARMv6 CPUs will not flush the TLB on context switches, it is
possible that we may end up with some global TLB entries remaining
present, eventually upsetting userspace.  Explicitly flush the entire
TLB on secondary CPUs as they start up, after we have switched to the
init_mm page tables.

Signed-off-by: Russell King
---
 arch/arm/kernel/smp.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 295e0a8379cf..b2085735a2ba 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -176,6 +176,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	cpu_set(cpu, mm->cpu_vm_mask);
 	cpu_switch_mm(mm->pgd, mm);
 	enter_lazy_tlb(mm, current);
+	local_flush_tlb_all();
 
 	cpu_init();
--
cgit v1.2.2

From e7ec02938dbe8ca35b750f29eaa4b12de0b52754 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 28 Jul 2005 20:36:26 +0100
Subject: [ARM SMP] Fix another ARMv6 bitop problem

We sometimes forgot to check whether the exclusive store succeeded.
Ensure that we always check.  Also ensure that we always use the
out-of-line versions, since the inline versions are not SMP safe.

Signed-off-by: Russell King
---
 arch/arm/lib/bitops.h    | 2 +-
 include/asm-arm/bitops.h | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 5382a3023602..2036ff15bda9 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -7,7 +7,7 @@
 1:	ldrexb	r2, [r1]
 	\instr	r2, r2, r3
 	strexb	r0, r2, [r1]
-	cmpne	r0, #0
+	cmp	r0, #0
 	bne	1b
 	mov	pc, lr
 	.endm
diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h
index c1adc6b3e86d..aad7aad026b3 100644
--- a/include/asm-arm/bitops.h
+++ b/include/asm-arm/bitops.h
@@ -229,6 +229,7 @@ extern int _find_next_zero_bit_be(const void * p, int size, int offset);
 extern int _find_first_bit_be(const unsigned long *p, unsigned size);
 extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 
+#ifndef CONFIG_SMP
 /*
  * The __* form of bitops are non-atomic and may be reordered.
  */
@@ -241,6 +242,10 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 	(__builtin_constant_p(nr) ?	\
	 ____atomic_##name(nr, p) :	\
	 _##name##_be(nr,p))
+#else
+#define ATOMIC_BITOP_LE(name,nr,p)	_##name##_le(nr,p)
+#define ATOMIC_BITOP_BE(name,nr,p)	_##name##_be(nr,p)
+#endif
 
 #define NONATOMIC_BITOP(name,nr,p)	\
	(____nonatomic_##name(nr, p))
--
cgit v1.2.2
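The bitop fix above hinges on how ARMv6 exclusive accesses work: strexb writes 0
to its status register only when the exclusive store actually took effect, so
that status must be compared unconditionally and the whole ldrexb/strexb
sequence retried on failure.  The old cmpne tested whatever flags the preceding
\instr happened to leave, so a failed store could slip through unretried.  As a
rough illustration only (not the kernel's implementation, which is the
out-of-line assembly macro patched above), here is a minimal C sketch of the
same retry loop using GCC-style inline assembly; the function name
example_atomic_set_bit is invented for this example and an ARMv6 target is
assumed.

/* Illustrative only: byte-granular atomic set_bit using ldrexb/strexb. */
static inline void example_atomic_set_bit(unsigned int nr, volatile unsigned char *p)
{
	unsigned int mask = 1u << (nr & 7);	/* bit within the byte */
	unsigned int old, status;

	p += nr >> 3;				/* byte containing the bit */
	do {
		__asm__ __volatile__(
		"	ldrexb	%0, [%2]\n"	/* load byte, mark it exclusive */
		"	orr	%0, %0, %3\n"	/* set the requested bit        */
		"	strexb	%1, %0, [%2]\n"	/* status = 0 only on success   */
			: "=&r" (old), "=&r" (status)
			: "r" (p), "r" (mask)
			: "memory");
	} while (status != 0);			/* always check; retry if the store failed */
}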
From 7ac5ae4b122f9415948c642b945a26938aa8f347 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 29 Jul 2005 16:36:48 +0100
Subject: [ARM SMP] Ensure secondary CPUs see their pen release

Since the secondary CPUs will not be operating in symmetric mode while
they are held in the pen, we need to ensure that the write to
pen_release is visible to them, by flushing the cache.

Signed-off-by: Russell King
---
 arch/arm/mach-integrator/platsmp.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/arm/mach-integrator/platsmp.c b/arch/arm/mach-integrator/platsmp.c
index aecf47ba033a..ea10bd8c972c 100644
--- a/arch/arm/mach-integrator/platsmp.c
+++ b/arch/arm/mach-integrator/platsmp.c
@@ -15,6 +15,7 @@
 #include
 #include
+#include <asm/cacheflush.h>
 #include
 #include
 #include
@@ -80,6 +81,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * "cpu" is Linux's internal ID.
	 */
	pen_release = cpu;
+	flush_cache_all();
 
	/*
	 * XXX
--
cgit v1.2.2
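The pen-release fix rests on the fact that, until it is brought fully into the
coherent domain, the waiting secondary CPU reads pen_release straight from
memory, so the booting CPU's write must be cleaned out of its cache before the
secondary can ever observe it.  Below is a minimal, hypothetical sketch of such
a holding-pen handshake; the names example_pen_release, example_release_secondary
and example_pen_wait are invented for illustration and this is not the
Integrator platform code.

#include <linux/compiler.h>
#include <asm/cacheflush.h>

/* Illustrative only: value the boot CPU writes and the secondary polls. */
static volatile int example_pen_release = -1;

/* Boot CPU side: let CPU "cpu" out of the pen. */
static void example_release_secondary(unsigned int cpu)
{
	example_pen_release = cpu;	/* the value the secondary is waiting for */
	flush_cache_all();		/* clean the write out to memory for the
					 * not-yet-coherent reader */
}

/* Secondary CPU side: spin until the boot CPU writes our number. */
static void example_pen_wait(unsigned int cpu)
{
	while (example_pen_release != (int)cpu)
		barrier();		/* force a fresh read each iteration */
}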