Diffstat (limited to 'arch/arm64/mm/context.c')
-rw-r--r--  arch/arm64/mm/context.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b7b397802088..efcf1f7ef1e4 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -179,7 +179,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 						 &asid_generation);
 	flush_context(cpu);
 
-	/* We have at least 1 ASID per CPU, so this will always succeed */
+	/* We have more ASIDs than CPUs, so this will always succeed */
 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 
 set_asid:
@@ -227,8 +227,11 @@ switch_mm_fastpath:
 static int asids_init(void)
 {
 	asid_bits = get_cpu_asid_bits();
-	/* If we end up with more CPUs than ASIDs, expect things to crash */
-	WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
+	/*
+	 * Expect allocation after rollover to fail if we don't have at least
+	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+	 */
+	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
 	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
 	asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
 			   GFP_KERNEL);
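
Why the tightened bound: after a rollover, flush_context() re-reserves the ASID that is live on each possible CPU, and ASID #0 stays reserved for init_mm, so the post-rollover find_next_zero_bit() in new_context() is only guaranteed a free slot when NUM_USER_ASIDS - 1 is strictly greater than num_possible_cpus(). The standalone userspace sketch below (not kernel code; NUM_CPUS and rollover_can_allocate() are invented for illustration) models that worst case, assuming every CPU holds a distinct live ASID at rollover time:

/*
 * Model of the headroom check added above, not the kernel implementation.
 * Bit 0 stands for the init_mm reservation; one further bit is pinned per
 * CPU, mirroring what flush_context() keeps reserved across a rollover.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_CPUS	4

/* Return the first clear bit in map[0..nbits), starting at 'start'. */
static unsigned int find_next_zero_bit(const bool *map, unsigned int nbits,
				       unsigned int start)
{
	for (unsigned int i = start; i < nbits; i++)
		if (!map[i])
			return i;
	return nbits;		/* "not found", as in the kernel helper */
}

/* Can a fresh ASID still be found immediately after a rollover? */
static bool rollover_can_allocate(unsigned int num_user_asids)
{
	bool map[256] = { false };

	map[0] = true;				/* ASID #0: init_mm */
	for (unsigned int cpu = 0; cpu < NUM_CPUS; cpu++)
		map[1 + cpu] = true;		/* live ASID kept per CPU */

	return find_next_zero_bit(map, num_user_asids, 1) != num_user_asids;
}

int main(void)
{
	/* NUM_USER_ASIDS - 1 == NUM_CPUS: the new WARN_ON fires, and the
	 * allocation after rollover indeed fails. */
	printf("%u ASIDs, %u CPUs -> %s\n", NUM_CPUS + 1, NUM_CPUS,
	       rollover_can_allocate(NUM_CPUS + 1) ? "ok" : "allocation fails");

	/* Strictly more ASIDs than CPUs plus the reserved #0: always succeeds. */
	printf("%u ASIDs, %u CPUs -> %s\n", NUM_CPUS + 2, NUM_CPUS,
	       rollover_can_allocate(NUM_CPUS + 2) ? "ok" : "allocation fails");
	return 0;
}

With 4 CPUs, 5 ASIDs satisfies the old check (NUM_USER_ASIDS >= num_possible_cpus()) yet leaves no free slot after rollover; 6 ASIDs always leaves at least one, which is what the new WARN_ON condition expresses.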