author		Will Deacon <will.deacon@arm.com>	2014-11-14 05:37:34 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2014-11-21 10:24:46 -0500
commit		a391263cd84e6ae2da26a54383f3abf80c18d9df (patch)
tree		d2bff30f0aa5a36787b9a735b081637b392003fa	/arch/arm/mm/context.c
parent		2b94fe2ac97fdd2ae7521004e857e33016720eb7 (diff)
ARM: 8203/1: mm: try to re-use old ASID assignments following a rollover
Rather than unconditionally allocating a fresh ASID to an mm from an older
generation, attempt to re-use the old assignment where possible. This can
bring performance benefits on systems where the ASID is used to tag things
other than the TLB (e.g. branch prediction resources).

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
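For illustration only, here is a hypothetical userspace model of the allocator
this patch touches: a bitmap of ASIDs plus a generation counter, with the new
"try to re-claim the old number first" step. The names (asid_map,
asid_generation, new_context, ...) mirror the kernel's, but locking, per-CPU
reserved ASIDs and the actual TLB flushing are omitted, so treat it as a
sketch of the idea rather than the kernel code.

/*
 * Hypothetical model of an ASID allocator with generations and the
 * re-use-after-rollover step added by this patch.  Not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ASID_BITS		8
#define NUM_USER_ASIDS		(1u << ASID_BITS)
#define ASID_MASK		((~0ull) << ASID_BITS)
#define ASID_FIRST_VERSION	((uint64_t)1 << ASID_BITS)
#define BITS_PER_LONG		(8 * sizeof(unsigned long))

static uint64_t asid_generation = ASID_FIRST_VERSION;
static unsigned long asid_map[NUM_USER_ASIDS / BITS_PER_LONG];
static unsigned int cur_idx = 1;

struct mm {
	uint64_t context_id;	/* generation | asid, 0 if never assigned */
};

static void set_asid_bit(unsigned int n)
{
	asid_map[n / BITS_PER_LONG] |= 1ul << (n % BITS_PER_LONG);
}

static bool test_and_set_asid_bit(unsigned int n)
{
	bool old = asid_map[n / BITS_PER_LONG] & (1ul << (n % BITS_PER_LONG));

	set_asid_bit(n);
	return old;
}

static unsigned int find_next_zero_asid(unsigned int start)
{
	for (unsigned int n = start; n < NUM_USER_ASIDS; n++)
		if (!(asid_map[n / BITS_PER_LONG] & (1ul << (n % BITS_PER_LONG))))
			return n;
	return NUM_USER_ASIDS;
}

/* Rollover: start a new generation with an (almost) empty ASID space. */
static void flush_context(void)
{
	/* The kernel re-reserves ASIDs that are live on other CPUs here and
	 * marks the TLBs for flushing; the model simply starts over. */
	memset(asid_map, 0, sizeof(asid_map));
	cur_idx = 1;
}

static uint64_t new_context(struct mm *mm)
{
	uint64_t asid = mm->context_id;
	uint64_t generation = asid_generation;

	if (asid != 0) {
		/*
		 * The step introduced by this patch: the mm owned an ASID in
		 * an earlier generation, so try to re-claim the same number
		 * before falling back to a fresh allocation.
		 */
		asid &= ~ASID_MASK;
		if (!test_and_set_asid_bit((unsigned int)asid))
			goto bump_gen;
	}

	/* Allocate a fresh ASID, rolling the generation over when the space
	 * is exhausted.  ASID #0 stays reserved, as in the kernel. */
	asid = find_next_zero_asid(cur_idx);
	if (asid == NUM_USER_ASIDS) {
		asid_generation += ASID_FIRST_VERSION;
		generation = asid_generation;
		flush_context();
		asid = find_next_zero_asid(1);
	}
	set_asid_bit((unsigned int)asid);
	cur_idx = (unsigned int)asid;

bump_gen:
	mm->context_id = asid | generation;
	return mm->context_id;
}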
Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--	arch/arm/mm/context.c	58
1 file changed, 34 insertions(+), 24 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 6eb97b3a7481..91892569710f 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -184,36 +184,46 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
-	if (asid != 0 && is_reserved_asid(asid)) {
+	if (asid != 0) {
 		/*
-		 * Our current ASID was active during a rollover, we can
-		 * continue to use it and this was just a false alarm.
+		 * If our current ASID was active during a rollover, we
+		 * can continue to use it and this was just a false alarm.
 		 */
-		asid = generation | (asid & ~ASID_MASK);
-	} else {
+		if (is_reserved_asid(asid))
+			return generation | (asid & ~ASID_MASK);
+
 		/*
-		 * Allocate a free ASID. If we can't find one, take a
-		 * note of the currently active ASIDs and mark the TLBs
-		 * as requiring flushes. We always count from ASID #1,
-		 * as we reserve ASID #0 to switch via TTBR0 and to
-		 * avoid speculative page table walks from hitting in
-		 * any partial walk caches, which could be populated
-		 * from overlapping level-1 descriptors used to map both
-		 * the module area and the userspace stack.
+		 * We had a valid ASID in a previous life, so try to re-use
+		 * it if possible.,
 		 */
-		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
-		if (asid == NUM_USER_ASIDS) {
-			generation = atomic64_add_return(ASID_FIRST_VERSION,
-							 &asid_generation);
-			flush_context(cpu);
-			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
-		}
-		__set_bit(asid, asid_map);
-		cur_idx = asid;
-		asid |= generation;
-		cpumask_clear(mm_cpumask(mm));
+		asid &= ~ASID_MASK;
+		if (!__test_and_set_bit(asid, asid_map))
+			goto bump_gen;
 	}
 
+	/*
+	 * Allocate a free ASID. If we can't find one, take a note of the
+	 * currently active ASIDs and mark the TLBs as requiring flushes.
+	 * We always count from ASID #1, as we reserve ASID #0 to switch
+	 * via TTBR0 and to avoid speculative page table walks from hitting
+	 * in any partial walk caches, which could be populated from
+	 * overlapping level-1 descriptors used to map both the module
+	 * area and the userspace stack.
+	 */
+	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+	if (asid == NUM_USER_ASIDS) {
+		generation = atomic64_add_return(ASID_FIRST_VERSION,
+						 &asid_generation);
+		flush_context(cpu);
+		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+	}
+
+	__set_bit(asid, asid_map);
+	cur_idx = asid;
+
+bump_gen:
+	asid |= generation;
+	cpumask_clear(mm_cpumask(mm));
 	return asid;
 }
 
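The motivation, per the commit message, is that the ASID may tag more than TLB
entries (e.g. branch prediction resources), so an mm that keeps the same
number across a rollover keeps that state useful. A short, hypothetical driver
for the userspace sketch above illustrates the behaviour:

/* Hypothetical driver for the sketch above: force a rollover and show
 * that an uncontended mm keeps its old ASID number. */
#include <stdio.h>

int main(void)
{
	struct mm a = {0}, b = {0}, c = {0};

	new_context(&a);	/* a: ASID 1, generation 1 */
	new_context(&b);	/* b: ASID 2, generation 1 */

	/* Pretend the rest of the ASID space was handed out, so the next
	 * allocation triggers a rollover. */
	for (unsigned int i = 3; i < NUM_USER_ASIDS; i++)
		set_asid_bit(i);
	new_context(&c);	/* rollover; c happens to take ASID 1 */

	/* b re-claims ASID 2 under the new generation; a collides with c
	 * and is handed a fresh number instead. */
	printf("b: %#llx\n", (unsigned long long)new_context(&b));	/* 0x202 */
	printf("a: %#llx\n", (unsigned long long)new_context(&a));	/* 0x203 */
	return 0;
}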