author	Will Deacon <will.deacon@arm.com>	2012-07-23 09:18:13 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-07-31 07:04:47 -0400
commit	b74253f78400f9a4b42da84bb1de7540b88ce7c4 (patch)
tree	27a91700b0dafa9119ed774ad29f481152491ead /arch/arm/include/asm
parent	15ac49b65024f55c4371a53214879a9c77c4fbf9 (diff)
ARM: 7479/1: mm: avoid NULL dereference when flushing gate_vma with VIVT caches
The vivt_flush_cache_{range,page} functions check that the mm_struct
of the VMA being flushed has been active on the current CPU before
performing the cache maintenance. The gate_vma has a NULL mm_struct
pointer and, as such, will cause a kernel fault if we try to flush it
with the above operations. This happens during ELF core dumps, which
include the gate_vma as it may be useful for debugging purposes.

This patch adds checks to the VIVT cache flushing functions so that
VMAs with a NULL mm_struct are flushed unconditionally (the vectors
page may be dirty if we use it to store the current TLS pointer).

Cc: <stable@vger.kernel.org> # 3.4+
Reported-by: Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
Tested-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
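A minimal user-space model of the failure mode described above (this is
not the kernel code: mm_struct, mm_cpumask() and cpumask_test_cpu() are
simplified stand-ins here, and flush_range() only mimics the shape of
the patched check):

#include <stdio.h>
#include <stddef.h>

struct mm_struct { unsigned long cpu_vm_mask; };
struct vm_area_struct { struct mm_struct *vm_mm; };

/* stand-in: returns a pointer *into* mm, so using it with a NULL mm
 * dereferences a NULL-based address, just like the pre-patch kernel */
static unsigned long *mm_cpumask(struct mm_struct *mm)
{
	return &mm->cpu_vm_mask;
}

static int cpumask_test_cpu(int cpu, unsigned long *mask)
{
	return (*mask >> cpu) & 1;	/* reads through the pointer */
}

static void flush_range(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/* the patched check: a NULL mm short-circuits before
	 * mm_cpumask() can be dereferenced, so we flush unconditionally */
	if (!mm || cpumask_test_cpu(0, mm_cpumask(mm)))
		puts("flushing");
}

int main(void)
{
	/* the gate_vma carries vm_mm == NULL */
	struct vm_area_struct gate_vma = { .vm_mm = NULL };

	flush_range(&gate_vma);	/* faults without the !mm check */
	return 0;
}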
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--	arch/arm/include/asm/cacheflush.h	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 004c1bc95d2b..e4448e16046d 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm)
 static inline void
 vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
 					vma->vm_flags);
 }
@@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 static inline void
 vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
 		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 	}
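For reference, a sketch of how the two helpers read once the patch is
applied, reconstructed from the hunks above (the surrounding
cacheflush.h context is elided):

/* reconstructed from the diff above; kernel definitions of PAGE_MASK,
 * PAGE_ALIGN, smp_processor_id(), mm_cpumask() and
 * __cpuc_flush_user_range() are assumed */
static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	/* !mm: the gate_vma has no mm_struct; flush unconditionally */
	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;

	/* same NULL-mm short-circuit as above */
	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}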