author    Catalin Marinas <catalin.marinas@arm.com>    2010-01-26 13:09:42 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>   2010-02-15 16:39:51 -0500
commit    11805bcfa411c816b7c76fc40724be6733c74ffc (patch)
tree      09c5c6c91deffb60339f027779521bafa42921ce /arch/arm/include
parent    48ab7e09e0a7c00a217f87e4b57dfbee9c603e79 (diff)
ARM: 5905/1: ARM: Global ASID allocation on SMP
The current ASID allocation algorithm doesn't ensure the notification of the other CPUs when the ASID rolls over. This may lead to two processes using the same ASID (but different generation) or multiple threads of the same process using different ASIDs.

This patch adds the broadcasting of the ASID rollover event to the other CPUs. To avoid a race on multiple CPUs modifying "cpu_last_asid" during the handling of the broadcast, the ASID numbering now starts at "smp_processor_id() + 1". At rollover, the cpu_last_asid will be set to NR_CPUS.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
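For illustration only, here is a minimal user-space sketch of the generation check this scheme relies on. ASID_BITS and ASID_FIRST_VERSION follow the header change below; NR_CPUS, the rollover arithmetic and the helper names are assumptions made for the sketch, not the kernel implementation:

/* Toy model of ASID versioning; not kernel code. */
#include <stdio.h>

#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1u << ASID_BITS)
#define NR_CPUS			4

static unsigned int cpu_last_asid = ASID_FIRST_VERSION;

/*
 * A context is stale when its generation bits (above ASID_BITS)
 * differ from the generation recorded in cpu_last_asid.
 */
static int context_is_stale(unsigned int context_id)
{
	return (context_id ^ cpu_last_asid) >> ASID_BITS;
}

int main(void)
{
	unsigned int ctx = ++cpu_last_asid;	/* allocate one ASID */

	printf("stale before rollover: %d\n", !!context_is_stale(ctx));

	/*
	 * Simulate a rollover: advance the generation and restart the low
	 * bits at NR_CPUS, so that values 1..NR_CPUS stay reserved for the
	 * per-CPU reset values ("smp_processor_id() + 1") described above.
	 */
	cpu_last_asid = (((cpu_last_asid >> ASID_BITS) + 1) << ASID_BITS) + NR_CPUS;

	printf("stale after rollover:  %d\n", !!context_is_stale(ctx));
	return 0;
}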
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/mmu.h          |  1
-rw-r--r--  arch/arm/include/asm/mmu_context.h  | 15
2 files changed, 16 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b561584d04a1..68870c776671 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,6 +6,7 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	unsigned int id;
+	spinlock_t id_lock;
 #endif
 	unsigned int kvm_seq;
 } mm_context_t;
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index de6cefb329dd..a0b3cac0547c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
 
 static inline void check_context(struct mm_struct *mm)
 {
+	/*
+	 * This code is executed with interrupts enabled. Therefore,
+	 * mm->context.id cannot be updated to the latest ASID version
+	 * on a different CPU (and condition below not triggered)
+	 * without first getting an IPI to reset the context. The
+	 * alternative is to take a read_lock on mm->context.id_lock
+	 * (after changing its type to rwlock_t).
+	 */
 	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
 		__new_context(mm);
 
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#ifdef CONFIG_SMP
+		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
+		*crt_mm = next;
+#endif
 		check_context(next);
 		cpu_switch_mm(next->pgd, next);
 		if (cache_is_vivt())