aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/mm
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2009-12-18 11:31:38 -0500
committerRussell King <rmk+kernel@arm.linux.org.uk>2010-01-20 08:48:30 -0500
commited42acaef1a9d51631a31b55e9ed52d400430492 (patch)
tree62b93dda8ee5eb1c31a3233df3e0f244dd83b367 /arch/arm/mm
parent56dd47098abe1fdde598a8d8b7c04d775506f456 (diff)
ARM: make_coherent: avoid recalculating the pfn for the modified page
We already know the pfn for the page to be modified in make_coherent, so let's stop recalculating it unnecessarily. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--arch/arm/mm/fault-armv.c12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 8e9bc517132e..ae88f2c3a6df 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -37,7 +37,7 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
  * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
 static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
-	pte_t *ptep)
+	unsigned long pfn, pte_t *ptep)
 {
 	pte_t entry = *ptep;
 	int ret;
@@ -52,7 +52,6 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
 	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
-		unsigned long pfn = pte_pfn(entry);
 		flush_cache_page(vma, address, pfn);
 		outer_flush_range((pfn << PAGE_SHIFT),
 				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
@@ -65,7 +64,8 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	return ret;
 }
 
-static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
+static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
+	unsigned long pfn)
 {
 	spinlock_t *ptl;
 	pgd_t *pgd;
@@ -90,7 +90,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	pte = pte_offset_map_nested(pmd, address);
 	spin_lock(ptl);
 
-	ret = do_adjust_pte(vma, address, pte);
+	ret = do_adjust_pte(vma, address, pfn, pte);
 
 	spin_unlock(ptl);
 	pte_unmap_nested(pte);
@@ -127,11 +127,11 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne
 		if (!(mpnt->vm_flags & VM_MAYSHARE))
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
+		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
 	}
 	flush_dcache_mmap_unlock(mapping);
 	if (aliases)
-		adjust_pte(vma, addr);
+		adjust_pte(vma, addr, pfn);
 	else
 		flush_cache_page(vma, addr, pfn);
 }