author		Gerald Schaefer <gerald.schaefer@de.ibm.com>	2008-04-28 05:13:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 11:58:25 -0400
commit		7f2e9525ba55b1c42ad6c4a5a59d7eb7bdd9be72
tree		10792496f50b4b0fed8a2c356a8819ae8f9ce47f /mm/hugetlb.c
parent		8fe627ec5b7c47b1654dff50536d9709863295a3
hugetlbfs: common code update for s390
Huge ptes have a special type on s390 and cannot be handled with the standard
pte functions in certain cases, e.g. because of a different location of the
invalid bit. This patch adds some new architecture-specific functions to
hugetlb common code, as a prerequisite for the s390 large page support.
This won't affect other architectures in functionality, but I need to add some
new dummy inline functions to the headers.
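These dummies simply fall back to the standard pte handling. As a sketch, on an architecture with no special huge pte requirements the new hooks reduce to something like the following (the per-architecture asm headers are not part of the mm/hugetlb.c diff below):

static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;	/* nothing to set up, allocation may proceed */
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;	/* a huge pte is just an ordinary pte here */
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

On s390 these hooks will instead operate on the special huge pte type, which is why the common code below must never dereference a huge pte pointer directly.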
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	36
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 262d0a93d2b..df28c1773fb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -132,6 +132,7 @@ static void update_and_free_page(struct page *page)
 	}
 	set_compound_page_dtor(page, NULL);
 	set_page_refcounted(page);
+	arch_release_hugepage(page);
 	__free_pages(page, HUGETLB_PAGE_ORDER);
 }
 
@@ -201,6 +202,10 @@ static struct page *alloc_fresh_huge_page_node(int nid)
 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
 		HUGETLB_PAGE_ORDER);
 	if (page) {
+		if (arch_prepare_hugepage(page)) {
+			__free_pages(page, HUGETLB_PAGE_ORDER);
+			return 0;
+		}
 		set_compound_page_dtor(page, free_huge_page);
 		spin_lock(&hugetlb_lock);
 		nr_huge_pages++;
@@ -735,7 +740,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
 		entry =
 		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 	} else {
-		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
 	}
 	entry = pte_mkyoung(entry);
 	entry = pte_mkhuge(entry);
@@ -748,8 +753,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 {
 	pte_t entry;
 
-	entry = pte_mkwrite(pte_mkdirty(*ptep));
-	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
+	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
 		update_mmu_cache(vma, address, entry);
 	}
 }
@@ -779,10 +784,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
 		spin_lock(&dst->page_table_lock);
 		spin_lock(&src->page_table_lock);
-		if (!pte_none(*src_pte)) {
+		if (!huge_pte_none(huge_ptep_get(src_pte))) {
 			if (cow)
-				ptep_set_wrprotect(src, addr, src_pte);
-			entry = *src_pte;
+				huge_ptep_set_wrprotect(src, addr, src_pte);
+			entry = huge_ptep_get(src_pte);
 			ptepage = pte_page(entry);
 			get_page(ptepage);
 			set_huge_pte_at(dst, addr, dst_pte, entry);
@@ -826,7 +831,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			continue;
 
 		pte = huge_ptep_get_and_clear(mm, address, ptep);
-		if (pte_none(pte))
+		if (huge_pte_none(pte))
 			continue;
 
 		page = pte_page(pte);
@@ -890,7 +895,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_lock(&mm->page_table_lock);
 
 	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
-	if (likely(pte_same(*ptep, pte))) {
+	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
 		/* Break COW */
 		huge_ptep_clear_flush(vma, address, ptep);
 		set_huge_pte_at(mm, address, ptep,
@@ -960,7 +965,7 @@ retry:
 		goto backout;
 
 	ret = 0;
-	if (!pte_none(*ptep))
+	if (!huge_pte_none(huge_ptep_get(ptep)))
 		goto backout;
 
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
@@ -1002,8 +1007,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * the same page in the page cache.
 	 */
 	mutex_lock(&hugetlb_instantiation_mutex);
-	entry = *ptep;
-	if (pte_none(entry)) {
+	entry = huge_ptep_get(ptep);
+	if (huge_pte_none(entry)) {
 		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
 		mutex_unlock(&hugetlb_instantiation_mutex);
 		return ret;
@@ -1013,7 +1018,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
-	if (likely(pte_same(entry, *ptep)))
+	if (likely(pte_same(entry, huge_ptep_get(ptep))))
 		if (write_access && !pte_write(entry))
 			ret = hugetlb_cow(mm, vma, address, ptep, entry);
 	spin_unlock(&mm->page_table_lock);
@@ -1043,7 +1048,8 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 */
 		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
 
-		if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
+		if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
+		    (write && !pte_write(huge_ptep_get(pte)))) {
 			int ret;
 
 			spin_unlock(&mm->page_table_lock);
@@ -1059,7 +1065,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 
 		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
-		page = pte_page(*pte);
+		page = pte_page(huge_ptep_get(pte));
 same_page:
 		if (pages) {
 			get_page(page);
@@ -1108,7 +1114,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 			continue;
 		if (huge_pmd_unshare(mm, &address, ptep))
 			continue;
-		if (!pte_none(*ptep)) {
+		if (!huge_pte_none(huge_ptep_get(ptep))) {
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
 			pte = pte_mkhuge(pte_modify(pte, newprot));
 			set_huge_pte_at(mm, address, ptep, pte);
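The reason every access above goes through huge_ptep_get() rather than *ptep is that an architecture can normalize its huge pte representation before common code tests it. A purely hypothetical illustration of that pattern (ARCH_HUGE_INVALID and _PAGE_INVALID_GENERIC are invented names, not the real s390 definitions, which are outside this diff):

/*
 * Hypothetical override for an architecture that, like s390, keeps
 * the invalid bit of a huge pte in a different position: translate
 * it to the bit position the generic predicates expect, so tests
 * like huge_pte_none() see a conventionally-shaped pte value.
 */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_val(pte) & ARCH_HUGE_INVALID)		/* invented name */
		pte_val(pte) |= _PAGE_INVALID_GENERIC;	/* invented name */
	return pte;
}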