author		Will Deacon <will.deacon@arm.com>	2013-02-28 11:47:36 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2013-03-03 17:54:14 -0500
commit		8a4e3a9ead7e37ce1505602b564c15da09ac039f (patch)
tree		5cc8ad188810649ea0739c769977451b825e4325 /arch
parent		37f47e3d62533c931b04cb409f2eb299e6342331 (diff)
ARM: 7659/1: mm: make mm->context.id an atomic64_t variable
mm->context.id is updated under asid_lock when a new ASID is allocated
to an mm_struct. However, it is also read without the lock when a task
is being scheduled and checking whether or not the current ASID
generation is up-to-date.

If two threads of the same process are being scheduled in parallel and
the bottom bits of the generation in their mm->context.id match the
current generation (that is, the mm_struct has not been used for ~2^24
rollovers) then the non-atomic, lockless access to mm->context.id may
yield the incorrect ASID.

This patch fixes the issue by making mm->context.id an atomic64_t,
ensuring that the generation is always read consistently. For code that
only requires access to the ASID bits (e.g. TLB flushing by mm), the
value is accessed directly, which GCC converts to an ldrb.

Cc: <stable@vger.kernel.org> # 3.8
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
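The failure mode is easiest to see in miniature. The following is a
minimal userspace analogue of the fix, using C11 atomics rather than
the kernel's atomic64_t API (GEN_BITS and the variable names here are
illustrative, not kernel identifiers): on a 32-bit machine a plain
64-bit load may be split into two word accesses, so only an atomic
load is guaranteed to observe the generation and ASID halves of the
id consistently.

/* Minimal userspace sketch of the torn-read hazard and its fix.
 * Illustration only: GEN_BITS mirrors ASID_BITS but is hypothetical. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define GEN_BITS	8	/* low bits: ASID; high bits: generation */

static _Atomic uint64_t context_id;			/* was: plain u64 */
static _Atomic uint64_t generation = 1ULL << GEN_BITS;

/* Fast-path check: does our cached id carry the current generation? */
static int id_is_current(void)
{
	/* atomic load: both 32-bit halves observed consistently */
	uint64_t id = atomic_load(&context_id);

	return !((id ^ atomic_load(&generation)) >> GEN_BITS);
}

int main(void)
{
	atomic_store(&context_id, atomic_load(&generation) | 0x2a);
	printf("up to date: %d\n", id_is_current());	/* prints 1 */

	/* rollover: bump the generation, leaving the cached id stale */
	atomic_fetch_add(&generation, 1ULL << GEN_BITS);
	printf("up to date: %d\n", id_is_current());	/* prints 0 */
	return 0;
}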
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/include/asm/mmu.h	8
-rw-r--r--	arch/arm/include/asm/mmu_context.h	2
-rw-r--r--	arch/arm/kernel/asm-offsets.c	2
-rw-r--r--	arch/arm/mm/context.c	21
4 files changed, 19 insertions, 14 deletions
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 9f77e7804f3b..e3d55547e755 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,15 +5,15 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-	u64 id;
+	atomic64_t	id;
 #endif
-	unsigned int vmalloc_seq;
+	unsigned int	vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
 #define ASID_BITS	8
 #define ASID_MASK	((~0ULL) << ASID_BITS)
-#define ASID(mm)	((mm)->context.id & ~ASID_MASK)
+#define ASID(mm)	((mm)->context.id.counter & ~ASID_MASK)
 #else
 #define ASID(mm)	(0)
 #endif
@@ -26,7 +26,7 @@ typedef struct {
  *  modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
  */
 typedef struct {
-	unsigned long end_brk;
+	unsigned long	end_brk;
 } mm_context_t;
 
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index e1f644bc7cc5..863a6611323c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 #ifdef CONFIG_CPU_HAS_ASID
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm)	({ mm->context.id = 0; })
+#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
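A subtlety in the hunk above: init_new_context() must evaluate to 0 on
success. The old body happened to do so only because an assignment's
value is its right-hand side; atomic64_set() returns void, hence the
explicit trailing 0 in the GNU C statement expression. A small
standalone illustration (set_id() is a hypothetical stand-in for
atomic64_set()):

/* GNU C statement expressions evaluate to their last expression.
 * Illustration only; build with gcc or clang. */
#include <assert.h>

static int id;

static void set_id(int v)	/* returns void, like atomic64_set() */
{
	id = v;
}

int main(void)
{
	/* old form: the assignment itself evaluates to 0 */
	int a = ({ id = 0; });

	/* new form: set_id() yields no value, so the trailing 0 does */
	int b = ({ set_id(0); 0; });

	assert(a == 0 && b == 0);
	return a | b;
}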
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 5ce738b43508..923eec7105cf 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -110,7 +110,7 @@ int main(void)
   BLANK();
 #endif
 #ifdef CONFIG_CPU_HAS_ASID
-  DEFINE(MM_CONTEXT_ID,	offsetof(struct mm_struct, context.id));
+  DEFINE(MM_CONTEXT_ID,	offsetof(struct mm_struct, context.id.counter));
   BLANK();
 #endif
   DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 03ba181e359c..44d4ee52f3e2 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
 	return 0;
 }
 
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
-	u64 asid = mm->context.id;
+	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0 && is_reserved_asid(asid)) {
@@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
 		cpumask_clear(mm_cpumask(mm));
 	}
 
-	mm->context.id = asid;
+	return asid;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
+	u64 asid;
 
 	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 		__check_vmalloc_seq(mm);
@@ -198,19 +199,23 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	 */
 	cpu_set_reserved_ttbr0();
 
-	if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+	asid = atomic64_read(&mm->context.id);
+	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
 		goto switch_mm_fastpath;
 
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 	/* Check that our ASID belongs to the current generation. */
-	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-		new_context(mm, cpu);
+	asid = atomic64_read(&mm->context.id);
+	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+		asid = new_context(mm, cpu);
+		atomic64_set(&mm->context.id, asid);
+	}
 
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
 		local_flush_tlb_all();
 
-	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
+	atomic64_set(&per_cpu(active_asids, cpu), asid);
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 