author		Russell King <rmk+kernel@arm.linux.org.uk>	2012-11-19 06:30:49 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-11-19 06:30:49 -0500
commit		2079f30e9e83887ca95fa129d0bc734b2c4b406d (patch)
tree		91dbfd05c27d757e4e05a265308542bf54d8d3b7 /arch/arm/mm
parent		f27d9b7198a0a0ffbd872a4b795c7613cd759ea3 (diff)
parent		bf51bb82ccd9a74e9702d06107b23e54b27a5707 (diff)
Merge branch 'asid-allocation' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/context.c	207
1 file changed, 108 insertions, 99 deletions
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 4e07eec1270d..7a27d7363be2 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -2,6 +2,9 @@
  * linux/arch/arm/mm/context.c
  *
  * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -14,14 +17,40 @@
 #include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
+#include <asm/smp_plat.h>
 #include <asm/thread_notify.h>
 #include <asm/tlbflush.h>
 
+/*
+ * On ARMv6, we have the following structure in the Context ID:
+ *
+ * 31                         7          0
+ * +-------------------------+-----------+
+ * |      process ID         |   ASID    |
+ * +-------------------------+-----------+
+ * |              context ID             |
+ * +-------------------------------------+
+ *
+ * The ASID is used to tag entries in the CPU caches and TLBs.
+ * The context ID is used by debuggers and trace logic, and
+ * should be unique within all running processes.
+ */
+#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
+
+#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
+#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)
+
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
-unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
+
+static DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(u64, reserved_asids);
+static cpumask_t tlb_flush_pending;
 
 #ifdef CONFIG_ARM_LPAE
-void cpu_set_reserved_ttbr0(void)
+static void cpu_set_reserved_ttbr0(void)
 {
 	unsigned long ttbl = __pa(swapper_pg_dir);
 	unsigned long ttbh = 0;
@@ -37,7 +66,7 @@ void cpu_set_reserved_ttbr0(void)
 	isb();
 }
 #else
-void cpu_set_reserved_ttbr0(void)
+static void cpu_set_reserved_ttbr0(void)
 {
 	u32 ttb;
 	/* Copy TTBR1 into TTBR0 */
@@ -84,124 +113,104 @@ static int __init contextidr_notifier_init(void)
 arch_initcall(contextidr_notifier_init);
 #endif
 
-/*
- * We fork()ed a process, and we need a new context for the child
- * to run in.
- */
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+static void flush_context(unsigned int cpu)
 {
-	mm->context.id = 0;
-	raw_spin_lock_init(&mm->context.id_lock);
-}
+	int i;
+	u64 asid;
+
+	/* Update the list of reserved ASIDs and the ASID bitmap. */
+	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+	for_each_possible_cpu(i) {
+		if (i == cpu) {
+			asid = 0;
+		} else {
+			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+			__set_bit(ASID_TO_IDX(asid), asid_map);
+		}
+		per_cpu(reserved_asids, i) = asid;
+	}
 
-static void flush_context(void)
-{
-	cpu_set_reserved_ttbr0();
-	local_flush_tlb_all();
-	if (icache_is_vivt_asid_tagged()) {
+	/* Queue a TLB invalidate and flush the I-cache if necessary. */
+	if (!tlb_ops_need_broadcast())
+		cpumask_set_cpu(cpu, &tlb_flush_pending);
+	else
+		cpumask_setall(&tlb_flush_pending);
+
+	if (icache_is_vivt_asid_tagged())
 		__flush_icache_all();
-		dsb();
-	}
 }
 
-#ifdef CONFIG_SMP
+static int is_reserved_asid(u64 asid)
+{
+	int cpu;
+	for_each_possible_cpu(cpu)
+		if (per_cpu(reserved_asids, cpu) == asid)
+			return 1;
+	return 0;
+}
 
-static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+static void new_context(struct mm_struct *mm, unsigned int cpu)
 {
-	unsigned long flags;
+	u64 asid = mm->context.id;
+	u64 generation = atomic64_read(&asid_generation);
 
-	/*
-	 * Locking needed for multi-threaded applications where the
-	 * same mm->context.id could be set from different CPUs during
-	 * the broadcast. This function is also called via IPI so the
-	 * mm->context.id_lock has to be IRQ-safe.
-	 */
-	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
-	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+	if (asid != 0 && is_reserved_asid(asid)) {
 		/*
-		 * Old version of ASID found. Set the new one and
-		 * reset mm_cpumask(mm).
+		 * Our current ASID was active during a rollover, we can
+		 * continue to use it and this was just a false alarm.
 		 */
-		mm->context.id = asid;
+		asid = generation | (asid & ~ASID_MASK);
+	} else {
+		/*
+		 * Allocate a free ASID. If we can't find one, take a
+		 * note of the currently active ASIDs and mark the TLBs
+		 * as requiring flushes.
+		 */
+		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+		if (asid == NUM_USER_ASIDS) {
+			generation = atomic64_add_return(ASID_FIRST_VERSION,
+							 &asid_generation);
+			flush_context(cpu);
+			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+		}
+		__set_bit(asid, asid_map);
+		asid = generation | IDX_TO_ASID(asid);
 		cpumask_clear(mm_cpumask(mm));
 	}
-	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
 
-	/*
-	 * Set the mm_cpumask(mm) bit for the current CPU.
-	 */
-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+	mm->context.id = asid;
 }
 
-/*
- * Reset the ASID on the current CPU. This function call is broadcast
- * from the CPU handling the ASID rollover and holding cpu_asid_lock.
- */
-static void reset_context(void *info)
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
-	unsigned int asid;
+	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
-	struct mm_struct *mm = current->active_mm;
 
-	smp_rmb();
-	asid = cpu_last_asid + cpu + 1;
+	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
+		__check_kvm_seq(mm);
 
-	flush_context();
-	set_mm_context(mm, asid);
-
-	/* set the new ASID */
-	cpu_switch_mm(mm->pgd, mm);
-}
+	/*
	 * Required during context switch to avoid speculative page table
+	 * walking with the wrong TTBR.
+	 */
+	cpu_set_reserved_ttbr0();
 
-#else
+	if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+		goto switch_mm_fastpath;
 
-static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
-{
-	mm->context.id = asid;
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-}
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+	/* Check that our ASID belongs to the current generation. */
+	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+		new_context(mm, cpu);
 
-#endif
+	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 
-void __new_context(struct mm_struct *mm)
-{
-	unsigned int asid;
+	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+		local_flush_tlb_all();
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
-	raw_spin_lock(&cpu_asid_lock);
-#ifdef CONFIG_SMP
-	/*
-	 * Check the ASID again, in case the change was broadcast from
-	 * another CPU before we acquired the lock.
-	 */
-	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
-		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-		raw_spin_unlock(&cpu_asid_lock);
-		return;
-	}
-#endif
-	/*
-	 * At this point, it is guaranteed that the current mm (with
-	 * an old ASID) isn't active on any other CPU since the ASIDs
-	 * are changed simultaneously via IPI.
-	 */
-	asid = ++cpu_last_asid;
-	if (asid == 0)
-		asid = cpu_last_asid = ASID_FIRST_VERSION;
-
-	/*
-	 * If we've used up all our ASIDs, we need
-	 * to start a new version and flush the TLB.
-	 */
-	if (unlikely((asid & ~ASID_MASK) == 0)) {
-		asid = cpu_last_asid + smp_processor_id() + 1;
-		flush_context();
-#ifdef CONFIG_SMP
-		smp_wmb();
-		smp_call_function(reset_context, NULL, 1);
-#endif
-		cpu_last_asid += NR_CPUS;
-	}
-
-	set_mm_context(mm, asid);
-	raw_spin_unlock(&cpu_asid_lock);
-}
+switch_mm_fastpath:
+	cpu_switch_mm(mm->pgd, mm);
 }
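
The ASID_TO_IDX()/IDX_TO_ASID() macros and the generation field added by this diff are easier to follow with concrete numbers. The short standalone program below is plain user-space C, not kernel code; it assumes ASID_BITS is 8 and ASID_MASK is ~0ULL << ASID_BITS, which is how the accompanying asm/mmu_context.h defined them at the time, so treat the constants as illustrative rather than authoritative.

/* Standalone sketch of the ASID encoding used above; not kernel code.
 * ASID_BITS and ASID_MASK are assumed values mirroring asm/mmu_context.h.
 */
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS		8
#define ASID_MASK		(~0ULL << ASID_BITS)
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)

/* Bitmap index <-> hardware ASID: ASID 0 stays reserved, so index 0 maps to ASID 1. */
#define ASID_TO_IDX(asid)	(((asid) & ~ASID_MASK) - 1)
#define IDX_TO_ASID(idx)	(((idx) + 1) & ~ASID_MASK)

int main(void)
{
	uint64_t generation = 3 * ASID_FIRST_VERSION;	/* e.g. after the third rollover */
	uint64_t idx = 41;				/* free bit found in asid_map */
	uint64_t ctx = generation | IDX_TO_ASID(idx);	/* value stored in mm->context.id */

	printf("context.id = 0x%llx (generation 0x%llx, hw ASID %llu)\n",
	       (unsigned long long)ctx,
	       (unsigned long long)(ctx & ASID_MASK),
	       (unsigned long long)(ctx & ~ASID_MASK));
	printf("bitmap index recovered: %llu\n",
	       (unsigned long long)ASID_TO_IDX(ctx));

	/* The generation test used on the switch_mm() path: non-zero means stale. */
	uint64_t current_generation = 4 * ASID_FIRST_VERSION;
	printf("stale? %s\n",
	       ((ctx ^ current_generation) >> ASID_BITS) ? "yes" : "no");
	return 0;
}

The upper bits of mm->context.id carry the generation, so a single XOR and shift is enough for check_and_switch_context() to decide whether the cached ASID can be reused.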
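The rollover path in new_context() replaces the old IPI broadcast (reset_context() via smp_call_function()) with a bitmap plus a 64-bit generation counter. The sketch below is a minimal, single-threaded model of just that slow path, with a deliberately tiny ASID space so the rollover triggers quickly; the per-CPU active/reserved ASID tracking, cpu_asid_lock and the deferred TLB flushes of the real code are omitted, and all names and constants here are illustrative.

/* Single-threaded model of generation + bitmap ASID allocation; not kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ASID_BITS	4			/* tiny space so rollover happens quickly */
#define ASID_MASK	(~0ULL << ASID_BITS)
#define FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_ASIDS	(FIRST_VERSION - 1)	/* ASID 0 stays reserved */

static uint64_t generation = FIRST_VERSION;
static unsigned char asid_map[NUM_ASIDS];	/* one flag per ASID, bytes instead of a bitmap for clarity */

static uint64_t new_context(uint64_t old_id)
{
	/* An id from the current generation is still valid: reuse it. */
	if (old_id && !((old_id ^ generation) >> ASID_BITS))
		return old_id;

	/* Find a free slot; on exhaustion bump the generation ("rollover")
	 * and clear the map, which is where the kernel also marks the TLBs
	 * as needing a flush. */
	for (int pass = 0; pass < 2; pass++) {
		for (uint64_t idx = 0; idx < NUM_ASIDS; idx++) {
			if (!asid_map[idx]) {
				asid_map[idx] = 1;
				return generation | ((idx + 1) & ~ASID_MASK);
			}
		}
		generation += FIRST_VERSION;
		memset(asid_map, 0, sizeof(asid_map));
		printf("rollover -> generation 0x%llx\n",
		       (unsigned long long)generation);
	}
	return 0;	/* unreachable: the map was just cleared */
}

int main(void)
{
	uint64_t id[20] = { 0 };

	/* Allocate more contexts than there are ASIDs to force a rollover. */
	for (int i = 0; i < 20; i++) {
		id[i] = new_context(id[i]);
		printf("mm %2d -> context.id 0x%llx (hw ASID %llu)\n",
		       i, (unsigned long long)id[i],
		       (unsigned long long)(id[i] & ~ASID_MASK));
	}
	return 0;
}

When the map fills up, the generation is bumped and the map cleared; in the kernel this is the point where flush_context() records the currently active ASIDs as reserved and queues TLB invalidation, so no cross-CPU IPI is needed any more.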