diffstat:
 arch/arm/mm/fault-armv.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 8e9bc517132e..ae88f2c3a6df 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -37,7 +37,7 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
  * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
 static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
-	pte_t *ptep)
+	unsigned long pfn, pte_t *ptep)
 {
 	pte_t entry = *ptep;
 	int ret;
@@ -52,7 +52,6 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
 	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
-		unsigned long pfn = pte_pfn(entry);
 		flush_cache_page(vma, address, pfn);
 		outer_flush_range((pfn << PAGE_SHIFT),
 				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
@@ -65,7 +64,8 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	return ret;
 }
 
-static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
+static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
+	unsigned long pfn)
 {
 	spinlock_t *ptl;
 	pgd_t *pgd;
@@ -90,7 +90,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	pte = pte_offset_map_nested(pmd, address);
 	spin_lock(ptl);
 
-	ret = do_adjust_pte(vma, address, pte);
+	ret = do_adjust_pte(vma, address, pfn, pte);
 
 	spin_unlock(ptl);
 	pte_unmap_nested(pte);
@@ -127,11 +127,11 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned
 		if (!(mpnt->vm_flags & VM_MAYSHARE))
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
+		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
 	}
 	flush_dcache_mmap_unlock(mapping);
 	if (aliases)
-		adjust_pte(vma, addr);
+		adjust_pte(vma, addr, pfn);
 	else
 		flush_cache_page(vma, addr, pfn);
 }
