Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/fault-armv.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index be4ab3d73c91..7fc1b35a6746 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -26,6 +26,11 @@ static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
 /*
  * We take the easy way out of this problem - we make the
  * PTE uncacheable. However, we leave the write buffer on.
+ *
+ * Note that the pte lock held when calling update_mmu_cache must also
+ * guard the pte (somewhere else in the same mm) that we modify here.
+ * Therefore those configurations which might call adjust_pte (those
+ * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 {
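
For context, the pte walk the new comment is about looks roughly like the sketch below. This is a condensed reconstruction of the 2.6-era adjust_pte(), not code quoted from this tree, so treat the exact names and error handling as approximate. The point it illustrates: the function rewrites a pte belonging to another vma in the same mm without taking any pte lock of its own, relying entirely on the lock its caller already holds.

/* Condensed, illustrative sketch of the era's adjust_pte() */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, entry;
	int ret = 0;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	/*
	 * No lock is taken here: the pte lock the caller holds across
	 * update_mmu_cache() is assumed to cover this pte as well.
	 * That assumption only holds when the whole mm is serialized by
	 * a single page_table_lock, i.e. with split ptlock disabled.
	 */
	pte = pte_offset_map(pmd, address);
	entry = *pte;

	if (pte_present(entry) && (pte_val(entry) & shared_pte_mask)) {
		flush_cache_page(vma, address, pte_pfn(entry));
		pte_val(entry) &= ~shared_pte_mask;	/* make it uncacheable */
		set_pte_at(vma->vm_mm, address, pte, entry);
		flush_tlb_page(vma, address);
		ret = 1;
	}
	pte_unmap(pte);
	return ret;
}
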
@@ -127,7 +132,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page);
  * 2. If we have multiple shared mappings of the same space in
  *    an object, we need to deal with the cache aliasing issues.
  *
- * Note that the page_table_lock will be held.
+ * Note that the pte lock will be held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
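
The restriction stated in the new comment is enforced in generic code rather than in this file: when split ptlock is disabled, pte_lockptr() degrades to the single mm->page_table_lock, which is what makes the caller's lock cover the pte that adjust_pte() modifies. Below is a minimal sketch of that selection, modelled on 2.6-era include/linux/mm.h; the macro shape and the mm/Kconfig cutoff are from memory, so the details are approximate.

/*
 * Sketch of the generic pte-lock selection.  mm/Kconfig sets
 * CONFIG_SPLIT_PTLOCK_CPUS to a huge value (e.g. 4096) for ARM
 * without CPU_CACHE_VIPT, so the #else branch is always taken
 * there and every pte in the mm shares one lock -- exactly what
 * adjust_pte() relies on.
 */
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/* split ptlock: one spinlock per page-table page */
#define pte_lockptr(mm, pmd)	({ (void)(mm); &pmd_page(*(pmd))->ptl; })
#else
/* no split ptlock: the whole mm serializes on page_table_lock */
#define pte_lockptr(mm, pmd)	({ (void)(pmd); &(mm)->page_table_lock; })
#endif
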