author     Linus Torvalds <torvalds@linux-foundation.org>  2017-08-04 15:11:48 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-08-04 15:11:48 -0400
commit     b3c6858fb172512f63838523ae7817ae8adec564 (patch)
tree       4f2aff0bde6433a00f35e1685d97be68eda6404c
parent     0a23ea65ce9f10ec2ea392571006b781b150327f (diff)
parent     82cd588052815eb4146f9f7c5347ca5e32c56360 (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
"Here are some more arm64 fixes for 4.13. The main one is the PTE race
with the hardware walker, but there are a couple of other things too.
- Report correct timer frequency to userspace when trapping
CNTFRQ_EL0
- Fix race with hardware page table updates when updating access
flags
- Silence clang overflow warning in VA_START and PAGE_OFFSET
calculations"
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: avoid overflow in VA_START and PAGE_OFFSET
arm64: Fix potential race with hardware DBM in ptep_set_access_flags()
arm64: Use arch_timer_get_rate when trapping CNTFRQ_EL0
-rw-r--r--  arch/arm64/include/asm/memory.h |  6
-rw-r--r--  arch/arm64/kernel/traps.c       |  2
-rw-r--r--  arch/arm64/mm/fault.c           | 15
3 files changed, 13 insertions, 10 deletions
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 32f82723338a..ef39dcb9ca6a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -64,8 +64,10 @@
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
  */
 #define VA_BITS			(CONFIG_ARM64_VA_BITS)
-#define VA_START		(UL(0xffffffffffffffff) << VA_BITS)
-#define PAGE_OFFSET		(UL(0xffffffffffffffff) << (VA_BITS - 1))
+#define VA_START		(UL(0xffffffffffffffff) - \
+	(UL(1) << VA_BITS) + 1)
+#define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
+	(UL(1) << (VA_BITS - 1)) + 1)
 #define KIMAGE_VADDR		(MODULES_END)
 #define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
 #define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
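The subtraction form produces exactly the same constants as the old shifted form; the rewrite only avoids the overflow clang warns about when the all-ones constant is shifted left. A minimal user-space sanity check, assuming VA_BITS = 48 purely for illustration (the kernel takes it from CONFIG_ARM64_VA_BITS, and the kernel's UL() comes from its own headers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define UL(x)   x##UL   /* simplified stand-in for the kernel's UL() macro */
#define VA_BITS 48      /* illustrative value only */

int main(void)
{
	/* old definitions: shift the all-ones constant left */
	uint64_t va_start_old    = UL(0xffffffffffffffff) << VA_BITS;
	uint64_t page_offset_old = UL(0xffffffffffffffff) << (VA_BITS - 1);

	/* new definitions: subtract the span instead of shifting */
	uint64_t va_start_new    = UL(0xffffffffffffffff) - (UL(1) << VA_BITS) + 1;
	uint64_t page_offset_new = UL(0xffffffffffffffff) - (UL(1) << (VA_BITS - 1)) + 1;

	/* both forms yield 0xffff000000000000 and 0xffff800000000000 for VA_BITS=48 */
	assert(va_start_old == va_start_new);
	assert(page_offset_old == page_offset_new);
	printf("VA_START    = 0x%016llx\n", (unsigned long long)va_start_new);
	printf("PAGE_OFFSET = 0x%016llx\n", (unsigned long long)page_offset_new);
	return 0;
}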
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index d48f47080213..8a62648848e5 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -523,7 +523,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
 {
 	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
 
-	pt_regs_write_reg(regs, rt, read_sysreg(cntfrq_el0));
+	pt_regs_write_reg(regs, rt, arch_timer_get_rate());
 	regs->pc += 4;
 }
 
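The handler above runs when an EL0 read of CNTFRQ_EL0 traps into the kernel; with this change userspace is handed arch_timer_get_rate(), the frequency the kernel actually uses, rather than whatever firmware left in the register. A sketch of the userspace read that can end up in this handler, compilable on arm64 only (the trap-vs-direct-read behaviour depends on how the kernel configured the timer access controls):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t freq;

	/* Read the generic timer frequency register. If EL0 access is trapped,
	 * the kernel's cntfrq_read_handler() emulates the read and writes the
	 * value back into the destination register. */
	asm volatile("mrs %0, cntfrq_el0" : "=r" (freq));

	printf("generic timer frequency: %llu Hz\n", (unsigned long long)freq);
	return 0;
}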
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c7861c9864e6..2509e4fe6992 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -163,26 +163,27 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 	/* only preserve the access flags and write permission */
 	pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;
 
-	/*
-	 * PTE_RDONLY is cleared by default in the asm below, so set it in
-	 * back if necessary (read-only or clean PTE).
-	 */
+	/* set PTE_RDONLY if actual read-only or clean PTE */
 	if (!pte_write(entry) || !pte_sw_dirty(entry))
 		pte_val(entry) |= PTE_RDONLY;
 
 	/*
 	 * Setting the flags must be done atomically to avoid racing with the
-	 * hardware update of the access/dirty state.
+	 * hardware update of the access/dirty state. The PTE_RDONLY bit must
+	 * be set to the most permissive (lowest value) of *ptep and entry
+	 * (calculated as: a & b == ~(~a | ~b)).
 	 */
+	pte_val(entry) ^= PTE_RDONLY;
 	asm volatile("// ptep_set_access_flags\n"
 	"	prfm	pstl1strm, %2\n"
 	"1:	ldxr	%0, %2\n"
-	"	and	%0, %0, %3		// clear PTE_RDONLY\n"
+	"	eor	%0, %0, %3		// negate PTE_RDONLY in *ptep\n"
 	"	orr	%0, %0, %4		// set flags\n"
+	"	eor	%0, %0, %3		// negate final PTE_RDONLY\n"
 	"	stxr	%w1, %0, %2\n"
 	"	cbnz	%w1, 1b\n"
 	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
-	: "L" (~PTE_RDONLY), "r" (pte_val(entry)));
+	: "L" (PTE_RDONLY), "r" (pte_val(entry)));
 
 	flush_tlb_fix_spurious_fault(vma, address);
 	return 1;
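The point of the eor/orr/eor sequence is that every flag except PTE_RDONLY is OR-ed between the live PTE and the new entry, while PTE_RDONLY itself is AND-ed, so a concurrent hardware DBM update that cleared PTE_RDONLY (marking the page writable/dirty) is never re-set by software; the AND is built from the identity a & b == ~(~a | ~b), with the XORs acting as the negations on that one bit. A small stand-alone C model of the sequence, using PTE_RDONLY as bit 7 (AP[2], as on arm64) and arbitrary illustrative PTE values:

#include <assert.h>
#include <stdint.h>

#define PTE_RDONLY (UINT64_C(1) << 7)	/* AP[2] on arm64; values below are made up */

/* Mirrors the fixed code path: flip PTE_RDONLY in both operands, OR, flip back. */
static uint64_t merge_pte(uint64_t old_pte, uint64_t entry)
{
	entry ^= PTE_RDONLY;			/* done in C before the ldxr/stxr loop */
	uint64_t v = old_pte ^ PTE_RDONLY;	/* eor: negate PTE_RDONLY in *ptep */
	v |= entry;				/* orr: set flags */
	v ^= PTE_RDONLY;			/* eor: negate final PTE_RDONLY */
	return v;
}

int main(void)
{
	uint64_t old_pte = UINT64_C(0x0040000000000f53) | PTE_RDONLY;  /* live PTE: read-only/clean */
	uint64_t entry   = UINT64_C(0x0040000000000e53);               /* new flags: writable */
	uint64_t merged  = merge_pte(old_pte, entry);

	/* PTE_RDONLY is the AND of both copies (most permissive wins),
	 * every other bit is the OR. */
	assert((merged & PTE_RDONLY) == (old_pte & entry & PTE_RDONLY));
	assert((merged & ~PTE_RDONLY) == ((old_pte | entry) & ~PTE_RDONLY));
	return 0;
}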