diff options
| author | Thomas Gleixner <tglx@linutronix.de> | 2016-01-12 05:01:12 -0500 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2016-01-12 05:01:12 -0500 |
| commit | 1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch) | |
| tree | 44db563f64cf5f8d62af8f99a61e2b248c44ea3a /arch/arm/mm/context.c | |
| parent | 03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff) | |
| parent | f9eccf24615672896dc13251410c3f2f33a14f95 (diff) | |
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:
- Fix the vt8500 timer that leads to a system lockup when dealing with a too
small delta (Roman Volkov)
- Select the CLKSRC_MMIO when the fsl_ftm_timer is enabled with COMPILE_TEST
(Daniel Lezcano)
- Prevent compiling timers that use the 'iomem' API when the architecture does
not have HAS_IOMEM set (Richard Weinberger)
Diffstat (limited to 'arch/arm/mm/context.c')
| -rw-r--r-- | arch/arm/mm/context.c | 38 |
1 file changed, 26 insertions, 12 deletions
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 845769e41332..c8c8b9ed02e0 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
| @@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu) | |||
| 165 | __flush_icache_all(); | 165 | __flush_icache_all(); |
| 166 | } | 166 | } |
| 167 | 167 | ||
| 168 | static int is_reserved_asid(u64 asid) | 168 | static bool check_update_reserved_asid(u64 asid, u64 newasid) |
| 169 | { | 169 | { |
| 170 | int cpu; | 170 | int cpu; |
| 171 | for_each_possible_cpu(cpu) | 171 | bool hit = false; |
| 172 | if (per_cpu(reserved_asids, cpu) == asid) | 172 | |
| 173 | return 1; | 173 | /* |
| 174 | return 0; | 174 | * Iterate over the set of reserved ASIDs looking for a match. |
| 175 | * If we find one, then we can update our mm to use newasid | ||
| 176 | * (i.e. the same ASID in the current generation) but we can't | ||
| 177 | * exit the loop early, since we need to ensure that all copies | ||
| 178 | * of the old ASID are updated to reflect the mm. Failure to do | ||
| 179 | * so could result in us missing the reserved ASID in a future | ||
| 180 | * generation. | ||
| 181 | */ | ||
| 182 | for_each_possible_cpu(cpu) { | ||
| 183 | if (per_cpu(reserved_asids, cpu) == asid) { | ||
| 184 | hit = true; | ||
| 185 | per_cpu(reserved_asids, cpu) = newasid; | ||
| 186 | } | ||
| 187 | } | ||
| 188 | |||
| 189 | return hit; | ||
| 175 | } | 190 | } |
| 176 | 191 | ||
| 177 | static u64 new_context(struct mm_struct *mm, unsigned int cpu) | 192 | static u64 new_context(struct mm_struct *mm, unsigned int cpu) |
| @@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
| 181 | u64 generation = atomic64_read(&asid_generation); | 196 | u64 generation = atomic64_read(&asid_generation); |
| 182 | 197 | ||
| 183 | if (asid != 0) { | 198 | if (asid != 0) { |
| 199 | u64 newasid = generation | (asid & ~ASID_MASK); | ||
| 200 | |||
| 184 | /* | 201 | /* |
| 185 | * If our current ASID was active during a rollover, we | 202 | * If our current ASID was active during a rollover, we |
| 186 | * can continue to use it and this was just a false alarm. | 203 | * can continue to use it and this was just a false alarm. |
| 187 | */ | 204 | */ |
| 188 | if (is_reserved_asid(asid)) | 205 | if (check_update_reserved_asid(asid, newasid)) |
| 189 | return generation | (asid & ~ASID_MASK); | 206 | return newasid; |
| 190 | 207 | ||
| 191 | /* | 208 | /* |
| 192 | * We had a valid ASID in a previous life, so try to re-use | 209 | * We had a valid ASID in a previous life, so try to re-use |
| @@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
| 194 | */ | 211 | */ |
| 195 | asid &= ~ASID_MASK; | 212 | asid &= ~ASID_MASK; |
| 196 | if (!__test_and_set_bit(asid, asid_map)) | 213 | if (!__test_and_set_bit(asid, asid_map)) |
| 197 | goto bump_gen; | 214 | return newasid; |
| 198 | } | 215 | } |
| 199 | 216 | ||
| 200 | /* | 217 | /* |
| @@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
| 216 | 233 | ||
| 217 | __set_bit(asid, asid_map); | 234 | __set_bit(asid, asid_map); |
| 218 | cur_idx = asid; | 235 | cur_idx = asid; |
| 219 | |||
| 220 | bump_gen: | ||
| 221 | asid |= generation; | ||
| 222 | cpumask_clear(mm_cpumask(mm)); | 236 | cpumask_clear(mm_cpumask(mm)); |
| 223 | return asid; | 237 | return asid | generation; |
| 224 | } | 238 | } |
| 225 | 239 | ||
| 226 | void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) | 240 | void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) |
