author     Marc Zyngier <Marc.Zyngier@arm.com>          2013-06-21 07:06:55 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2013-06-24 10:26:41 -0400
commit     b8e4a4740fa2b17c0a447b3ab783b3dc10702e27
tree       d6315409786d92102af8e057c8c4183d2d002101     /arch/arm/mm/context.c
parent     ae120d9edfe96628f03d87634acda0bfa7110632
ARM: 7768/1: prevent risks of out-of-bound access in ASID allocator
On a CPU that never ran anything, both the active and reserved ASID
fields are set to zero. In this case the ASID_TO_IDX() macro will
return -1, which is not a very useful value to index a bitmap.
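
For illustration, a minimal user-space sketch of the failure mode, using
simplified stand-ins for the old macros (the 8-bit ASID width and the exact
macro bodies are assumptions made for this example, not a quote of the
kernel source):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the old definitions (assumed ASID_BITS == 8). */
#define ASID_BITS		8
#define ASID_MASK		((~0ULL) << ASID_BITS)
#define ASID_TO_IDX(asid)	(((asid) & ~ASID_MASK) - 1)

int main(void)
{
	uint64_t asid = 0;	/* a CPU that has never run anything */

	/*
	 * (0 & 0xff) - 1 underflows to 0xffffffffffffffff, i.e. -1 when
	 * read as a signed value: useless (and dangerous) as a bit index
	 * into the asid_map bitmap.
	 */
	printf("ASID_TO_IDX(0) = 0x%llx\n",
	       (unsigned long long)ASID_TO_IDX(asid));
	return 0;
}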
Instead of trying to offset the ASID so that ASID #1 is actually
bit 0 in the asid_map bitmap, just always ignore bit 0 and start
the search from bit 1. This makes the code a bit more readable
and removes the risk of out-of-bounds access.
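
A minimal sketch of the new scheme as a user-space model (the bitmap helper
below is a simplified stand-in for the kernel's find_next_zero_bit(), and
the 256-entry ASID space is an assumption for the example):

#include <stdio.h>
#include <stdbool.h>

#define ASID_BITS	8
#define NUM_USER_ASIDS	(1u << ASID_BITS)

static bool asid_map[NUM_USER_ASIDS];	/* stand-in for the kernel bitmap */

/* Return the first clear slot at or after 'start', or 'size' if none. */
static unsigned int next_zero(const bool *map, unsigned int size,
			      unsigned int start)
{
	unsigned int i;

	for (i = start; i < size; i++)
		if (!map[i])
			return i;
	return size;
}

int main(void)
{
	/* Bit 0 is never handed out: ASID #0 is kept for TTBR0 switching. */
	unsigned int asid = next_zero(asid_map, NUM_USER_ASIDS, 1);

	if (asid == NUM_USER_ASIDS) {
		/*
		 * Bitmap full: the real allocator bumps the generation,
		 * flushes, and searches again from bit 1.
		 */
		return 1;
	}
	asid_map[asid] = true;

	/* The bit index is the ASID itself; no off-by-one translation. */
	printf("allocated ASID %u\n", asid);
	return 0;
}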
Cc: <stable@vger.kernel.org> # 3.9
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--   arch/arm/mm/context.c   17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 8e12fcbb2c63..83e09058f96f 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -39,10 +39,7 @@
  * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
-
-#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
-#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)
+#define NUM_USER_ASIDS		ASID_FIRST_VERSION
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
@@ -137,7 +134,7 @@ static void flush_context(unsigned int cpu)
 			 */
 			if (asid == 0)
 				asid = per_cpu(reserved_asids, i);
-			__set_bit(ASID_TO_IDX(asid), asid_map);
+			__set_bit(asid & ~ASID_MASK, asid_map);
 		}
 		per_cpu(reserved_asids, i) = asid;
 	}
@@ -176,17 +173,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		/*
 		 * Allocate a free ASID. If we can't find one, take a
 		 * note of the currently active ASIDs and mark the TLBs
-		 * as requiring flushes.
+		 * as requiring flushes. We always count from ASID #1,
+		 * as we reserve ASID #0 to switch via TTBR0 and indicate
+		 * rollover events.
 		 */
-		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		if (asid == NUM_USER_ASIDS) {
 			generation = atomic64_add_return(ASID_FIRST_VERSION,
 							 &asid_generation);
 			flush_context(cpu);
-			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		}
 		__set_bit(asid, asid_map);
-		asid = generation | IDX_TO_ASID(asid);
+		asid |= generation;
 		cpumask_clear(mm_cpumask(mm));
 	}
 
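
A note on the last hunk: because the generation counter occupies the bits
above the ASID field, the bitmap index can be ORed into it directly, with no
IDX_TO_ASID() translation. A small illustration with made-up values
(assuming an 8-bit ASID width; not kernel code):

#include <stdio.h>
#include <stdint.h>

#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)	/* 0x100 */

int main(void)
{
	uint64_t generation = 3 * ASID_FIRST_VERSION;	/* third rollover */
	uint64_t asid = 0x2a;				/* bit index from the map */

	/* With the fix, the bit index is the ASID itself. */
	uint64_t context_id = generation | asid;

	printf("context id = 0x%llx (generation 0x%llx, asid 0x%llx)\n",
	       (unsigned long long)context_id,
	       (unsigned long long)(context_id & ~(ASID_FIRST_VERSION - 1)),
	       (unsigned long long)(context_id & (ASID_FIRST_VERSION - 1)));
	return 0;
}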