author		David Miller <davem@davemloft.net>	2012-10-08 19:34:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:23:05 -0400
commit		b113da65785d5f3f9ff1451ec0fe43d6d76da25b
tree		826a1e6c00faa177299d484a517c4724d87fda14
parent		dbc9fdf063dc4f12af71d7858bd216170129822e
mm: Add and use update_mmu_cache_pmd() in transparent huge page code.
The transparent huge page code passes a PMD pointer in as the third argument of update_mmu_cache(), which expects a PTE pointer.

This never got noticed because X86 implements update_mmu_cache() as a macro and thus we don't get any type checking, and X86 is the only architecture which currently supports transparent huge pages.

Before other architectures can support transparent huge pages properly we need to add a new interface which takes a PMD pointer as the third argument rather than a PTE pointer.

[akpm@linux-foundation.org: implement update_mmu_cache_pmd() for s390]
Signed-off-by: David S. Miller <davem@davemloft.net>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
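As a rough illustration (not part of this patch, and not the verbatim kernel headers), the sketch below shows why the x86 macro hid the type mismatch and what the PMD-flavoured hook looks like; the macro body and the prototype are inferred from the commit message and from the update_mmu_cache_pmd(vma, address, pmd) call sites in the diff, so treat them as assumptions rather than the patched headers themselves.

/*
 * Sketch only: on x86, update_mmu_cache() is a do-nothing macro, so
 * passing a pmd_t * where a pte_t * is expected compiles silently.
 */
#define update_mmu_cache(vma, address, ptep) do { } while (0)

/*
 * An architecture that actually primes its MMU/TLB state needs a
 * separately typed entry point for huge page mappings, roughly:
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmd);

Per the bracketed note in the commit message, an s390 implementation of the new hook was folded in as well.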
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3a8d6b7d95db..68a3c93036f6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -900,7 +900,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		entry = pmd_mkyoung(orig_pmd);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
-			update_mmu_cache(vma, address, pmd);
+			update_mmu_cache_pmd(vma, address, pmd);
 		ret |= VM_FAULT_WRITE;
 		goto out_unlock;
 	}
@@ -956,7 +956,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		pmdp_clear_flush(vma, haddr, pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr);
 		set_pmd_at(mm, haddr, pmd, entry);
-		update_mmu_cache(vma, address, pmd);
+		update_mmu_cache_pmd(vma, address, pmd);
 		page_remove_rmap(page);
 		put_page(page);
 		ret |= VM_FAULT_WRITE;
@@ -2041,7 +2041,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	BUG_ON(!pmd_none(*pmd));
 	page_add_new_anon_rmap(new_page, vma, address);
 	set_pmd_at(mm, address, pmd, _pmd);
-	update_mmu_cache(vma, address, pmd);
+	update_mmu_cache_pmd(vma, address, pmd);
 	pgtable_trans_huge_deposit(mm, pgtable);
 	spin_unlock(&mm->page_table_lock);
 