aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorWill Deacon <will.deacon@arm.com>2013-12-17 13:17:54 -0500
committerRussell King <rmk+kernel@arm.linux.org.uk>2013-12-29 07:46:49 -0500
commit5d49750933210457ec2d5e8507823e365b2604fb (patch)
treea7894fc890c1ba6ab4da678505f1a8f60fd8d033
parenta7a04105068e9bb4cba43d97613c4f19b9e90b0c (diff)
ARM: 7926/1: mm: flesh out and fix the comments in the ASID allocator
The ASID allocator has to deal with some pretty horrible behaviours by the CPU, so expand on some of the comments in there so I remember why we can never allocate ASID zero to a userspace task. Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--arch/arm/mm/context.c16
1 file changed, 10 insertions, 6 deletions
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 52e6f13ac9c7..6eb97b3a7481 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -36,8 +36,8 @@
36 * The context ID is used by debuggers and trace logic, and 36 * The context ID is used by debuggers and trace logic, and
37 * should be unique within all running processes. 37 * should be unique within all running processes.
38 * 38 *
39 * In big endian operation, the two 32 bit words are swapped if accesed by 39 * In big endian operation, the two 32 bit words are swapped if accessed
40 * non 64-bit operations. 40 * by non-64-bit operations.
41 */ 41 */
42#define ASID_FIRST_VERSION (1ULL << ASID_BITS) 42#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
43#define NUM_USER_ASIDS ASID_FIRST_VERSION 43#define NUM_USER_ASIDS ASID_FIRST_VERSION
@@ -195,8 +195,11 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
195 * Allocate a free ASID. If we can't find one, take a 195 * Allocate a free ASID. If we can't find one, take a
196 * note of the currently active ASIDs and mark the TLBs 196 * note of the currently active ASIDs and mark the TLBs
197 * as requiring flushes. We always count from ASID #1, 197 * as requiring flushes. We always count from ASID #1,
198 * as we reserve ASID #0 to switch via TTBR0 and indicate 198 * as we reserve ASID #0 to switch via TTBR0 and to
199 * rollover events. 199 * avoid speculative page table walks from hitting in
200 * any partial walk caches, which could be populated
201 * from overlapping level-1 descriptors used to map both
202 * the module area and the userspace stack.
200 */ 203 */
201 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); 204 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
202 if (asid == NUM_USER_ASIDS) { 205 if (asid == NUM_USER_ASIDS) {
@@ -224,8 +227,9 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
224 __check_vmalloc_seq(mm); 227 __check_vmalloc_seq(mm);
225 228
226 /* 229 /*
227 * Required during context switch to avoid speculative page table 230 * We cannot update the pgd and the ASID atomicly with classic
228 * walking with the wrong TTBR. 231 * MMU, so switch exclusively to global mappings to avoid
232 * speculative page table walking with the wrong TTBR.
229 */ 233 */
230 cpu_set_reserved_ttbr0(); 234 cpu_set_reserved_ttbr0();
231 235