author     Will Deacon <will.deacon@arm.com>    2012-08-01 09:57:49 -0400
committer  Will Deacon <will.deacon@arm.com>    2012-11-05 11:25:25 -0500
commit     bf51bb82ccd9a74e9702d06107b23e54b27a5707 (patch)
tree       dcc0ab6b18523602bf308c88926e85fcd6b19f30 /arch/arm/mm
parent     4b883160835faf38c9356f0885cf491a1e661e88 (diff)
ARM: mm: use bitmap operations when allocating new ASIDs
When allocating a new ASID, we must take care not to re-assign a reserved ASID value to a new mm. This requires us to check each candidate ASID against those currently reserved by other cores before assigning a new ASID to the current mm.

This patch improves the ASID allocation algorithm by using a bitmap-based approach. Rather than iterating over the reserved ASID array for each candidate ASID, we simply find the first zero bit, ensuring that those indices corresponding to reserved ASIDs are set when flushing during a rollover event.

Tested-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
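To make the scheme concrete, here is a minimal user-space sketch of generation + bitmap ASID allocation. It is illustrative only, not the kernel code: NUM_ASIDS, new_asid(), first_free_idx() and the byte-array bitmap are stand-ins for the kernel's NUM_USER_ASIDS, new_context(), find_first_zero_bit() and DECLARE_BITMAP(), and the locking, per-CPU reserved ASIDs and TLB maintenance done by flush_context() are deliberately left out.

/* Minimal user-space model of generation + bitmap ASID allocation.
 * Index i in the map corresponds to hardware ASID i + 1, mirroring the
 * ASID_TO_IDX()/IDX_TO_ASID() helpers added by the patch; ASID 0 is
 * never handed out.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ASID_BITS       8
#define NUM_ASIDS       ((1u << ASID_BITS) - 1)

static uint64_t generation = 1ull << ASID_BITS; /* like ASID_FIRST_VERSION */
static uint8_t asid_map[(NUM_ASIDS + 7) / 8];

static int test_bit_idx(unsigned int idx)
{
        return (asid_map[idx / 8] >> (idx % 8)) & 1;
}

static void set_bit_idx(unsigned int idx)
{
        asid_map[idx / 8] |= 1u << (idx % 8);
}

/* Stand-in for find_first_zero_bit(): returns NUM_ASIDS when the map is full. */
static unsigned int first_free_idx(void)
{
        unsigned int i;

        for (i = 0; i < NUM_ASIDS; i++)
                if (!test_bit_idx(i))
                        return i;
        return NUM_ASIDS;
}

/* Allocate a (generation | asid) value; a full map triggers a "rollover". */
static uint64_t new_asid(void)
{
        unsigned int idx = first_free_idx();

        if (idx == NUM_ASIDS) {
                /* Rollover: bump the generation and clear the bitmap. In the
                 * kernel, flush_context() also re-marks the ASIDs still live
                 * on other CPUs and queues the TLB flush.
                 */
                generation += 1ull << ASID_BITS;
                memset(asid_map, 0, sizeof(asid_map));
                idx = first_free_idx();
        }
        set_bit_idx(idx);
        return generation | (idx + 1);  /* IDX_TO_ASID() */
}

int main(void)
{
        int i;

        for (i = 0; i < 4; i++)
                printf("allocated ASID %#llx\n", (unsigned long long)new_asid());
        return 0;
}

The point of the bitmap is that a rollover no longer probes every candidate ASID against each CPU's reserved value; the reserved ASIDs are folded into the map once per rollover, and allocation itself becomes a single find-first-zero scan.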
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/context.c  |  54
1 file changed, 35 insertions, 19 deletions
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 5ac09e8b4030..7a27d7363be2 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -36,9 +36,14 @@
  * should be unique within all running processes.
  */
 #define ASID_FIRST_VERSION      (1ULL << ASID_BITS)
+#define NUM_USER_ASIDS          (ASID_FIRST_VERSION - 1)
+
+#define ASID_TO_IDX(asid)       ((asid & ~ASID_MASK) - 1)
+#define IDX_TO_ASID(idx)        ((idx + 1) & ~ASID_MASK)
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
-static atomic64_t cpu_last_asid = ATOMIC64_INIT(ASID_FIRST_VERSION);
+static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
 static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
@@ -111,12 +116,19 @@ arch_initcall(contextidr_notifier_init);
 static void flush_context(unsigned int cpu)
 {
         int i;
-
-        /* Update the list of reserved ASIDs. */
-        for_each_possible_cpu(i)
-                per_cpu(reserved_asids, i) =
-                        atomic64_xchg(&per_cpu(active_asids, i), 0);
-        per_cpu(reserved_asids, cpu) = 0;
+        u64 asid;
+
+        /* Update the list of reserved ASIDs and the ASID bitmap. */
+        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+        for_each_possible_cpu(i) {
+                if (i == cpu) {
+                        asid = 0;
+                } else {
+                        asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+                        __set_bit(ASID_TO_IDX(asid), asid_map);
+                }
+                per_cpu(reserved_asids, i) = asid;
+        }
 
         /* Queue a TLB invalidate and flush the I-cache if necessary. */
         if (!tlb_ops_need_broadcast())
@@ -128,11 +140,11 @@ static void flush_context(unsigned int cpu)
         __flush_icache_all();
 }
 
-static int is_reserved_asid(u64 asid, u64 mask)
+static int is_reserved_asid(u64 asid)
 {
         int cpu;
         for_each_possible_cpu(cpu)
-                if ((per_cpu(reserved_asids, cpu) & mask) == (asid & mask))
+                if (per_cpu(reserved_asids, cpu) == asid)
                         return 1;
         return 0;
 }
@@ -140,25 +152,29 @@ static int is_reserved_asid(u64 asid, u64 mask)
 static void new_context(struct mm_struct *mm, unsigned int cpu)
 {
         u64 asid = mm->context.id;
+        u64 generation = atomic64_read(&asid_generation);
 
-        if (asid != 0 && is_reserved_asid(asid, ULLONG_MAX)) {
+        if (asid != 0 && is_reserved_asid(asid)) {
                 /*
                  * Our current ASID was active during a rollover, we can
                  * continue to use it and this was just a false alarm.
                  */
-                asid = (atomic64_read(&cpu_last_asid) & ASID_MASK) | \
-                        (asid & ~ASID_MASK);
+                asid = generation | (asid & ~ASID_MASK);
         } else {
                 /*
                  * Allocate a free ASID. If we can't find one, take a
                  * note of the currently active ASIDs and mark the TLBs
                  * as requiring flushes.
                  */
-                do {
-                        asid = atomic64_inc_return(&cpu_last_asid);
-                        if ((asid & ~ASID_MASK) == 0)
-                                flush_context(cpu);
-                } while (is_reserved_asid(asid, ~ASID_MASK));
+                asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+                if (asid == NUM_USER_ASIDS) {
+                        generation = atomic64_add_return(ASID_FIRST_VERSION,
+                                                         &asid_generation);
+                        flush_context(cpu);
+                        asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+                }
+                __set_bit(asid, asid_map);
+                asid = generation | IDX_TO_ASID(asid);
                 cpumask_clear(mm_cpumask(mm));
         }
 
@@ -179,13 +195,13 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
          */
         cpu_set_reserved_ttbr0();
 
-        if (!((mm->context.id ^ atomic64_read(&cpu_last_asid)) >> ASID_BITS)
+        if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
             && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
                 goto switch_mm_fastpath;
 
         raw_spin_lock_irqsave(&cpu_asid_lock, flags);
         /* Check that our ASID belongs to the current generation. */
-        if ((mm->context.id ^ atomic64_read(&cpu_last_asid)) >> ASID_BITS)
+        if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
                 new_context(mm, cpu);
 
         atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
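One remark on the fast path in check_and_switch_context() above: context IDs keep the hardware ASID in the low ASID_BITS and the generation in the bits above, so ((mm->context.id ^ asid_generation) >> ASID_BITS) is zero exactly when the mm's ASID was allocated under the current generation. A tiny standalone check of that property (not kernel code; same_generation() is an illustrative helper):

#include <assert.h>
#include <stdint.h>

#define ASID_BITS       8

/* Non-zero iff ctx_id was allocated under the generation 'gen'. */
static int same_generation(uint64_t ctx_id, uint64_t gen)
{
        return ((ctx_id ^ gen) >> ASID_BITS) == 0;
}

int main(void)
{
        uint64_t gen1 = 1ull << ASID_BITS;      /* ASID_FIRST_VERSION */
        uint64_t gen2 = 2ull << ASID_BITS;      /* after one rollover */

        assert(same_generation(gen1 | 0x2a, gen1));     /* current: fast path */
        assert(!same_generation(gen1 | 0x2a, gen2));    /* stale: slow path   */
        return 0;
}

When the test is non-zero the ASID is stale from before a rollover, and the slow path takes cpu_asid_lock and calls new_context() to allocate a fresh one from the bitmap.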