author	Laura Abbott <labbott@redhat.com>	2016-09-21 18:25:04 -0400
committer	Will Deacon <will.deacon@arm.com>	2016-09-22 05:17:22 -0400
commit	ca219452c6b8a6cd1369b6a78b1cf069d0386865 (patch)
tree	b2b7de70e9429cf33bbe50698dbbdfa25362fe2e
parent	0edfa8391664a4f795c67d0e07480fbe801a0e1d (diff)
arm64: Correctly bounds check virt_addr_valid
virt_addr_valid is supposed to return true if and only if virt_to_page
returns a valid page structure. The current macro does math on whatever
address it is given and passes that to pfn_valid to verify. vmalloc and
module addresses can generate a pfn that 'happens' to be valid. Fix this
by only performing the pfn_valid check on addresses that have the
potential to be valid.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
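To see why the unguarded math was unsafe, here is a minimal user-space C
sketch of the old and new checks. The layout constants (PAGE_OFFSET,
PHYS_OFFSET), the valid pfn range, and the sample addresses are all
illustrative values chosen for this sketch, not real arm64 ones; the point
is only that an address outside the linear map can alias onto a valid pfn
once the linear-map arithmetic is applied blindly.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT   12
#define PAGE_OFFSET  0xffff800000000000ULL  /* start of the linear map (illustrative) */
#define PHYS_OFFSET  0x40000000ULL          /* base of physical RAM (illustrative) */

/* Pretend RAM covers pfns [0x40000, 0x80000) -- made-up range. */
static bool pfn_valid(uint64_t pfn)
{
	return pfn >= 0x40000 && pfn < 0x80000;
}

/* Old check: run the linear-map arithmetic on whatever address we get. */
static bool old_virt_addr_valid(uint64_t kaddr)
{
	return pfn_valid(((kaddr & ~PAGE_OFFSET) + PHYS_OFFSET) >> PAGE_SHIFT);
}

/* New check: only linear-map addresses may reach pfn_valid() at all. */
static bool new_virt_addr_valid(uint64_t kaddr)
{
	return kaddr >= PAGE_OFFSET && old_virt_addr_valid(kaddr);
}

int main(void)
{
	uint64_t linear  = PAGE_OFFSET + 0x1000;   /* genuine linear-map address */
	uint64_t vmalloc = 0xffff000008000000ULL;  /* below PAGE_OFFSET: vmalloc/module-like */

	/* Expected output: linear old=1 new=1; vmalloc old=1 new=0 --
	 * the old macro wrongly accepts the non-linear address because
	 * the masked-and-offset math lands on a pfn that happens to be
	 * valid; the new linear-range guard rejects it up front. */
	printf("linear : old=%d new=%d\n",
	       old_virt_addr_valid(linear), new_virt_addr_valid(linear));
	printf("vmalloc: old=%d new=%d\n",
	       old_virt_addr_valid(vmalloc), new_virt_addr_valid(vmalloc));
	return 0;
}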
-rw-r--r--	arch/arm64/include/asm/memory.h	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 31b73227b41f..ba62df8c6e35 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -214,7 +214,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 #else
 #define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
 #define __page_to_voff(kaddr)	(((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
@@ -222,11 +222,15 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
 #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
 
-#define virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
-					   + PHYS_OFFSET) >> PAGE_SHIFT)
+#define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
+					   + PHYS_OFFSET) >> PAGE_SHIFT)
 #endif
 #endif
 
+#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
+#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
+					 _virt_addr_valid(kaddr))
+
 #include <asm-generic/memory_model.h>
 
 #endif