author	Will Deacon <will.deacon@arm.com>	2013-12-17 13:17:31 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2013-12-29 07:46:48 -0500
commit	a7a04105068e9bb4cba43d97613c4f19b9e90b0c (patch)
tree	7b321f537f2a9b532f3ae8b482797c4d7c672d1b /arch/arm/mm/context.c
parent	e1a5848e3398dca135f3ae77fe2e01145f9d8826 (diff)
ARM: 7925/1: mm: keep track of last ASID allocation to improve bitmap searching
Since we only clear entries in the ASID bitmap on a rollover event, the
bitmap tends to consist of a block of consecutive set bits followed by a
block of consecutive clear bits. The exception to this rule is for ASIDs
which have been carried over from a previous generation, but these are
bound by the number of CPUs.

This patch optimises our bitmap searching strategy, so that we search
from the last successful allocation, rather than search from index 1
each time we allocate a new ASID.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--	arch/arm/mm/context.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 3ad0fdaa5cc1..52e6f13ac9c7 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -180,6 +180,7 @@ static int is_reserved_asid(u64 asid)
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
+	static u32 cur_idx = 1;
 	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
@@ -197,7 +198,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	 * as we reserve ASID #0 to switch via TTBR0 and indicate
 	 * rollover events.
 	 */
-	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
 	if (asid == NUM_USER_ASIDS) {
 		generation = atomic64_add_return(ASID_FIRST_VERSION,
 						 &asid_generation);
@@ -205,6 +206,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 	}
 	__set_bit(asid, asid_map);
+	cur_idx = asid;
 	asid |= generation;
 	cpumask_clear(mm_cpumask(mm));
 	}
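
For readers outside the kernel tree, the following is a minimal user-space sketch of the idea the patch implements: remember where the last allocation succeeded and resume the bitmap search from there. NUM_ASIDS, set_bit_in_map() and the simplified bit-by-bit find_next_zero_bit() below are hypothetical stand-ins for the kernel's asid_map machinery, and the rollover path simply clears the whole map rather than bumping a generation counter and carrying over the per-CPU reserved ASIDs as the real code does.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the kernel's asid_map and NUM_USER_ASIDS,
 * sized small so a rollover is easy to trigger. */
#define NUM_ASIDS	64
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long asid_map[(NUM_ASIDS + BITS_PER_LONG - 1) / BITS_PER_LONG];

/* Simplified stand-in for the kernel's find_next_zero_bit(): scan bit by
 * bit from 'start' and return 'size' if no zero bit is found. */
static unsigned int find_next_zero_bit(const unsigned long *map,
				       unsigned int size, unsigned int start)
{
	unsigned int i;

	for (i = start; i < size; i++)
		if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
			return i;
	return size;
}

static void set_bit_in_map(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

/* Allocate the next free ASID. Resuming the search at cur_idx (the last
 * successful allocation) skips the block of consecutive set bits that
 * accumulates between rollovers, instead of rescanning it from index 1
 * on every allocation. ASID #0 stays reserved, as in the patch above. */
static unsigned int alloc_asid(void)
{
	static unsigned int cur_idx = 1;
	unsigned int asid;

	asid = find_next_zero_bit(asid_map, NUM_ASIDS, cur_idx);
	if (asid == NUM_ASIDS) {
		/* Rollover: the real code bumps the generation and keeps
		 * ASIDs still live on other CPUs; here we just clear the
		 * map and retry from index 1. */
		memset(asid_map, 0, sizeof(asid_map));
		asid = find_next_zero_bit(asid_map, NUM_ASIDS, 1);
	}
	set_bit_in_map(asid_map, asid);
	cur_idx = asid;
	return asid;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 5; i++)
		printf("allocated ASID %u\n", alloc_asid());
	return 0;
}

Over a full generation this amortises the search: the leading block of set bits is skipped in one step rather than re-walked on every call, which is what the cur_idx hint in the patch buys.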