author     Gerald Schaefer <gerald.schaefer@de.ibm.com>    2013-04-29 18:07:23 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 18:54:33 -0400
commit     106c992a5ebef28193cf5958e49ceff5e4aebb04 (patch)
tree       86f6a69dce858449e9f02d43b65cd2c2743c57dd /mm/hugetlb.c
parent     146732ce104ddfed3d4d82722c0b336074016b92 (diff)
mm/hugetlb: add more arch-defined huge_pte functions
Commit abf09bed3cce ("s390/mm: implement software dirty bits") introduced another difference in the pte layout vs. the pmd layout on s390, thoroughly breaking the s390 support for hugetlbfs. This requires replacing some more pte_xxx functions in mm/hugetlb.c with a huge_pte_xxx version.

This patch introduces those huge_pte_xxx functions and their generic implementations in asm-generic/hugetlb.h, which will now be included on all architectures supporting hugetlbfs apart from s390. This change is a no-op for those architectures.

[akpm@linux-foundation.org: fix warning]
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Hillf Danton <dhillf@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.cz> [for !s390 parts]
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
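Editor's note: the new huge_pte_xxx helpers are intended to be trivial on every architecture that picks up the generic header, so the call-site conversion in the hunks below changes nothing outside s390. The following is a minimal sketch of what those generic wrappers are assumed to look like in asm-generic/hugetlb.h (s390 instead provides its own definitions matching its pmd-based huge pte layout); it is an illustration, not text from the patch itself.

/*
 * Assumed shape of the generic wrappers in asm-generic/hugetlb.h:
 * on !s390 each helper simply forwards to the regular pte helper.
 */
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
	return mk_pte(page, pgprot);
}

static inline int huge_pte_write(pte_t pte)
{
	return pte_write(pte);
}

static inline int huge_pte_dirty(pte_t pte)
{
	return pte_dirty(pte);
}

static inline pte_t huge_pte_mkwrite(pte_t pte)
{
	return pte_mkwrite(pte);
}

static inline pte_t huge_pte_mkdirty(pte_t pte)
{
	return pte_mkdirty(pte);
}

static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
	return pte_modify(pte, newprot);
}

static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep)
{
	pte_clear(mm, addr, ptep);
}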
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--   mm/hugetlb.c   24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1a12f5b9a0ab..73b864a32017 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2247,10 +2247,11 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
 	pte_t entry;
 
 	if (writable) {
-		entry =
-		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
+		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
+					 vma->vm_page_prot)));
 	} else {
-		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+		entry = huge_pte_wrprotect(mk_huge_pte(page,
+					   vma->vm_page_prot));
 	}
 	entry = pte_mkyoung(entry);
 	entry = pte_mkhuge(entry);
@@ -2264,7 +2265,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 {
 	pte_t entry;
 
-	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
+	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
 		update_mmu_cache(vma, address, ptep);
 }
@@ -2379,7 +2380,7 @@ again:
 		 * HWPoisoned hugepage is already unmapped and dropped reference
 		 */
 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
-			pte_clear(mm, address, ptep);
+			huge_pte_clear(mm, address, ptep);
 			continue;
 		}
 
@@ -2403,7 +2404,7 @@ again:
 
 		pte = huge_ptep_get_and_clear(mm, address, ptep);
 		tlb_remove_tlb_entry(tlb, ptep, address);
-		if (pte_dirty(pte))
+		if (huge_pte_dirty(pte))
 			set_page_dirty(page);
 
 		page_remove_rmap(page);
@@ -2856,7 +2857,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * page now as it is used to determine if a reservation has been
 	 * consumed.
 	 */
-	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
+	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
 		if (vma_needs_reservation(h, vma, address) < 0) {
 			ret = VM_FAULT_OOM;
 			goto out_mutex;
@@ -2886,12 +2887,12 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 
 	if (flags & FAULT_FLAG_WRITE) {
-		if (!pte_write(entry)) {
+		if (!huge_pte_write(entry)) {
 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
 							pagecache_page);
 			goto out_page_table_lock;
 		}
-		entry = pte_mkdirty(entry);
+		entry = huge_pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
@@ -2972,7 +2973,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * directly from any kind of swap entries.
 		 */
 		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
-		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
+		    ((flags & FOLL_WRITE) &&
+		     !huge_pte_write(huge_ptep_get(pte)))) {
 			int ret;
 
 			spin_unlock(&mm->page_table_lock);
@@ -3042,7 +3044,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		}
 		if (!huge_pte_none(huge_ptep_get(ptep))) {
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
-			pte = pte_mkhuge(pte_modify(pte, newprot));
+			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
 			pte = arch_make_huge_pte(pte, vma, NULL, 0);
 			set_huge_pte_at(mm, address, ptep, pte);
 			pages++;