aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/mm/context.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--arch/arm/mm/context.c26
1 files changed, 11 insertions, 15 deletions
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 91892569710f..845769e41332 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
 	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
 	for_each_possible_cpu(i) {
-		if (i == cpu) {
-			asid = 0;
-		} else {
-			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-			/*
-			 * If this CPU has already been through a
-			 * rollover, but hasn't run another task in
-			 * the meantime, we must preserve its reserved
-			 * ASID, as this is the only trace we have of
-			 * the process it is still running.
-			 */
-			if (asid == 0)
-				asid = per_cpu(reserved_asids, i);
-			__set_bit(asid & ~ASID_MASK, asid_map);
-		}
+		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+		/*
+		 * If this CPU has already been through a
+		 * rollover, but hasn't run another task in
+		 * the meantime, we must preserve its reserved
+		 * ASID, as this is the only trace we have of
+		 * the process it is still running.
+		 */
+		if (asid == 0)
+			asid = per_cpu(reserved_asids, i);
+		__set_bit(asid & ~ASID_MASK, asid_map);
 		per_cpu(reserved_asids, i) = asid;
 	}
 