Diffstat (limited to 'arch/arm/mm/context.c')

 arch/arm/mm/context.c | 41 ++++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 17 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 84e6f772e204..6eb97b3a7481 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -36,8 +36,8 @@
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
  *
- * In big endian operation, the two 32 bit words are swapped if accesed by
- * non 64-bit operations.
+ * In big endian operation, the two 32 bit words are swapped if accessed
+ * by non-64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		ASID_FIRST_VERSION
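
A note on the arithmetic behind these two defines: the 64-bit context ID keeps the hardware ASID in its low ASID_BITS and treats everything above as a rollover generation counter, which is why ASID_FIRST_VERSION doubles as the per-rollover generation increment. A minimal userspace sketch of the packing, assuming ASID_BITS is 8 as on ARMv7 (the ctx_* helpers are illustrative names, not kernel API):

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS          8
#define ASID_MASK          ((1ULL << ASID_BITS) - 1)
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)

/* Illustrative helpers: split a packed context ID into its two fields. */
static uint64_t ctx_generation(uint64_t id) { return id & ~ASID_MASK; }
static uint32_t ctx_hw_asid(uint64_t id)    { return (uint32_t)(id & ASID_MASK); }

int main(void)
{
	/* generation 3, hardware ASID 42 */
	uint64_t id = 3 * ASID_FIRST_VERSION | 42;

	printf("generation %llu, hw asid %u\n",
	       (unsigned long long)(ctx_generation(id) >> ASID_BITS),
	       ctx_hw_asid(id));
	return 0;
}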
@@ -78,20 +78,21 @@ void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
 #endif
 
 #ifdef CONFIG_ARM_LPAE
-static void cpu_set_reserved_ttbr0(void)
-{
-	/*
-	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
-	 * ASID is set to 0.
-	 */
-	cpu_set_ttbr(0, __pa(swapper_pg_dir));
-	isb();
-}
+/*
+ * With LPAE, the ASID and page tables are updated atomically, so there is
+ * no need for a reserved set of tables (the active ASID tracking prevents
+ * any issues across a rollover).
+ */
+#define cpu_set_reserved_ttbr0()
 #else
 static void cpu_set_reserved_ttbr0(void)
 {
 	u32 ttb;
-	/* Copy TTBR1 into TTBR0 */
+	/*
+	 * Copy TTBR1 into TTBR0.
+	 * This points at swapper_pg_dir, which contains only global
+	 * entries so any speculative walks are perfectly safe.
+	 */
 	asm volatile(
 	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
 	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
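
For context on why the LPAE version can collapse to an empty macro: the 64-bit LPAE TTBR0 carries the table base address and the ASID (bits 55:48) in a single register, so one write installs both atomically, whereas the classic MMU keeps the ASID in the separate CONTEXTIDR register and therefore has a window between the two writes. A hedged sketch of that packing (make_ttbr0 is an illustrative name, not a kernel function):

#include <stdint.h>

/*
 * Illustrative only: on LPAE, one 64-bit TTBR0 write installs the page
 * table base and the ASID together, closing the window that the classic
 * MMU has between its two separate register writes.
 */
static inline uint64_t make_ttbr0(uint64_t table_pa, uint8_t asid)
{
	return table_pa | ((uint64_t)asid << 48);	/* ASID in bits 55:48 */
}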
@@ -179,6 +180,7 @@ static int is_reserved_asid(u64 asid)
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
+	static u32 cur_idx = 1;
 	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
@@ -193,10 +195,13 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		 * Allocate a free ASID. If we can't find one, take a
 		 * note of the currently active ASIDs and mark the TLBs
 		 * as requiring flushes. We always count from ASID #1,
-		 * as we reserve ASID #0 to switch via TTBR0 and indicate
-		 * rollover events.
+		 * as we reserve ASID #0 to switch via TTBR0 and to
+		 * avoid speculative page table walks from hitting in
+		 * any partial walk caches, which could be populated
+		 * from overlapping level-1 descriptors used to map both
+		 * the module area and the userspace stack.
 		 */
-		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
 		if (asid == NUM_USER_ASIDS) {
 			generation = atomic64_add_return(ASID_FIRST_VERSION,
 							 &asid_generation);
@@ -204,6 +209,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		}
 		__set_bit(asid, asid_map);
+		cur_idx = asid;
 		asid |= generation;
 		cpumask_clear(mm_cpumask(mm));
 	}
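
Taken together, the new_context() hunks turn the allocator into a round-robin search: instead of rescanning from bit 1 on every call, the next search resumes just past the most recently allocated ASID, so a mostly-full prefix of the bitmap is not walked repeatedly. A standalone sketch of the same pattern, with a plain byte array standing in for the kernel bitmap and the rollover flush reduced to a memset (all names here are illustrative):

#include <stdint.h>
#include <string.h>

#define NUM_ASIDS 256

static uint8_t asid_map[NUM_ASIDS];	/* 0 = free, 1 = in use */
static uint32_t cur_idx = 1;		/* resume point; ASID #0 is reserved */

/* Linear scan standing in for the kernel's find_next_zero_bit(). */
static uint32_t find_next_zero(const uint8_t *map, uint32_t size, uint32_t start)
{
	for (uint32_t i = start; i < size; i++)
		if (!map[i])
			return i;
	return size;			/* "not found", like the kernel helper */
}

static uint32_t alloc_asid(void)
{
	uint32_t asid = find_next_zero(asid_map, NUM_ASIDS, cur_idx);

	if (asid == NUM_ASIDS) {
		/*
		 * Rollover: the kernel bumps the generation and flushes
		 * the TLBs here; this sketch just recycles the whole map.
		 */
		memset(asid_map, 0, sizeof(asid_map));
		asid = find_next_zero(asid_map, NUM_ASIDS, 1);
	}
	asid_map[asid] = 1;
	cur_idx = asid;			/* next search starts just past here */
	return asid;
}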
@@ -221,8 +227,9 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 		__check_vmalloc_seq(mm);
 
 	/*
-	 * Required during context switch to avoid speculative page table
-	 * walking with the wrong TTBR.
+	 * We cannot update the pgd and the ASID atomically with classic
+	 * MMU, so switch exclusively to global mappings to avoid
+	 * speculative page table walking with the wrong TTBR.
 	 */
 	cpu_set_reserved_ttbr0();
 
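
Finally, the ordering this comment relies on can be outlined in pseudo-C: park TTBR0 on the global-only tables first, then perform the two non-atomic updates, so a speculative walk can never pair the new ASID with the old user tables or vice versa. A loose sketch of the flow under that assumption (the types and helpers below are stand-ins, not the kernel's real check_and_switch_context() internals):

#include <stdint.h>

/* Illustrative stand-ins; not the kernel's real types or helpers. */
struct mm_sketch {
	uint64_t pgd_pa;	/* physical address of the page tables */
	uint8_t  asid;		/* hardware ASID */
};

static void cpu_set_reserved_ttbr0_sketch(void) { /* TTBR1 -> TTBR0 */ }
static void set_context_id(uint8_t asid)        { (void)asid; }
static void set_ttbr0(uint64_t pgd_pa)          { (void)pgd_pa; }

/*
 * While TTBR0 points at global-only tables, a speculative walk cannot
 * pair the new ASID with the old user tables (or vice versa), so the
 * two separate register writes in between are safe.
 */
static void switch_mm_classic(const struct mm_sketch *next)
{
	cpu_set_reserved_ttbr0_sketch();  /* 1. park TTBR0 on global tables */
	set_context_id(next->asid);       /* 2. install the new ASID */
	set_ttbr0(next->pgd_pa);          /* 3. install the new page tables */
}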