Diffstat (limited to 'arch/arm/include/asm/mmu_context.h')
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 15 +++++++++++++++
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index de6cefb329dd..a0b3cac0547c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
 
 static inline void check_context(struct mm_struct *mm)
 {
+	/*
+	 * This code is executed with interrupts enabled. Therefore,
+	 * mm->context.id cannot be updated to the latest ASID version
+	 * on a different CPU (and condition below not triggered)
+	 * without first getting an IPI to reset the context. The
+	 * alternative is to take a read_lock on mm->context.id_lock
+	 * (after changing its type to rwlock_t).
+	 */
 	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
 		__new_context(mm);
 
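The comment added to check_context() relies on how mm->context.id is laid out: the low ASID_BITS carry the hardware ASID and the bits above them carry a version counter, so the XOR-and-shift in the condition is non-zero exactly when the mm's version lags behind cpu_last_asid. The stand-alone model below (ordinary userspace C with made-up values, taking ASID_BITS as 8 and using a hypothetical needs_new_context() helper in place of the inline test) reproduces just that check:

/*
 * Stand-alone model (plain userspace C, not kernel code) of the version
 * test used by check_context().  The low ASID_BITS of context.id hold the
 * hardware ASID; the bits above them count ASID versions.  XOR-ing with
 * cpu_last_asid and discarding the ASID bits leaves a non-zero value
 * exactly when the two version fields differ.
 */
#include <stdio.h>

#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1u << ASID_BITS)

static unsigned int cpu_last_asid = ASID_FIRST_VERSION;	/* version 1 */

/* Hypothetical helper mirroring the condition inside check_context(). */
static int needs_new_context(unsigned int context_id)
{
	return (context_id ^ cpu_last_asid) >> ASID_BITS;
}

int main(void)
{
	unsigned int current_id = ASID_FIRST_VERSION | 0x2a;	/* version 1, ASID 0x2a */
	unsigned int stale_id   = 0x2a;				/* version 0, ASID 0x2a */

	printf("current id needs new context? %d\n", needs_new_context(current_id));	/* prints 0 */
	printf("stale id needs new context?   %d\n", needs_new_context(stale_id));	/* prints 1 */
	return 0;
}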
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#ifdef CONFIG_SMP
+		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
+		*crt_mm = next;
+#endif
 		check_context(next);
 		cpu_switch_mm(next->pgd, next);
 		if (cache_is_vivt())
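The per-CPU current_mm pointer recorded in switch_mm() gives the ASID rollover path a way to find the mm that is live on each CPU. The sketch below is a hypothetical IPI callback that consumes it; the actual handler and its locking live in arch/arm/mm/context.c, so this only illustrates the intended use of per_cpu(current_mm, cpu), not the real code:

#include <linux/mm_types.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/mmu_context.h>

/*
 * Hypothetical sketch of an IPI callback run on each CPU after an ASID
 * version rollover.  It looks up the mm currently live on this CPU via the
 * per-CPU current_mm pointer added above and re-installs its page tables.
 */
static void example_asid_rollover_ipi(void *info)
{
	struct mm_struct *mm = per_cpu(current_mm, smp_processor_id());

	/* A CPU running only a kernel thread has no user mm to re-tag. */
	if (!mm)
		return;

	/*
	 * Re-install the live mm so the hardware picks up the ASID chosen
	 * for the new version.
	 */
	cpu_switch_mm(mm->pgd, mm);
}

A callback of this shape could be broadcast from __new_context() with smp_call_function() when the version counter wraps, which is what lets the comment added to check_context() above assume that an IPI resets the context before the stale-version condition could be missed.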