-rw-r--r--  include/asm-ia64/hugetlb.h     | 37
-rw-r--r--  include/asm-powerpc/hugetlb.h  | 37
-rw-r--r--  include/asm-sh/hugetlb.h       | 37
-rw-r--r--  include/asm-sparc64/hugetlb.h  | 37
-rw-r--r--  include/asm-x86/hugetlb.h      | 37
-rw-r--r--  mm/hugetlb.c                   | 36
6 files changed, 206 insertions(+), 15 deletions(-)
diff --git a/include/asm-ia64/hugetlb.h b/include/asm-ia64/hugetlb.h
index 5f5434374972..f28a9701f1cf 100644
--- a/include/asm-ia64/hugetlb.h
+++ b/include/asm-ia64/hugetlb.h
@@ -39,4 +39,41 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 }
 
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+	return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
 #endif /* _ASM_IA64_HUGETLB_H */
diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h
index bead2ff78493..649c6c3b87b3 100644
--- a/include/asm-powerpc/hugetlb.h
+++ b/include/asm-powerpc/hugetlb.h
@@ -39,4 +39,41 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 }
 
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+	return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
 #endif /* _ASM_POWERPC_HUGETLB_H */
diff --git a/include/asm-sh/hugetlb.h b/include/asm-sh/hugetlb.h
index d1ed476467a1..02402303d89b 100644
--- a/include/asm-sh/hugetlb.h
+++ b/include/asm-sh/hugetlb.h
@@ -51,4 +51,41 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 }
 
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+	return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
 #endif /* _ASM_SH_HUGETLB_H */
diff --git a/include/asm-sparc64/hugetlb.h b/include/asm-sparc64/hugetlb.h
index 0b9e44c85c5d..412af58926a0 100644
--- a/include/asm-sparc64/hugetlb.h
+++ b/include/asm-sparc64/hugetlb.h
@@ -44,4 +44,41 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 }
 
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+	return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
 #endif /* _ASM_SPARC64_HUGETLB_H */
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h
index f57236dfc8f4..14171a4924f6 100644
--- a/include/asm-x86/hugetlb.h
+++ b/include/asm-x86/hugetlb.h
@@ -51,4 +51,41 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 }
 
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+	return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
 #endif /* _ASM_X86_HUGETLB_H */
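The five asm headers above all gain the same generic fallbacks, so behaviour on these architectures is unchanged; the point of the hooks is that an architecture with extra requirements can supply real implementations in its own asm/hugetlb.h. A minimal, purely hypothetical sketch of such an override follows; the arch_*_hugepage_state() helpers are invented for illustration and are not part of this patch.

/* Hypothetical override in an architecture's asm/hugetlb.h -- not from this patch. */
static inline int arch_prepare_hugepage(struct page *page)
{
	/* Set up per-page hardware state; the helper name is illustrative only. */
	if (arch_init_hugepage_state(page))
		return -ENOMEM;	/* any non-zero value makes the allocator discard the page */
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
	/* Undo whatever arch_prepare_hugepage() set up before the page is freed. */
	arch_clear_hugepage_state(page);
}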
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 262d0a93d2b6..df28c1773fb2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -132,6 +132,7 @@ static void update_and_free_page(struct page *page)
 	}
 	set_compound_page_dtor(page, NULL);
 	set_page_refcounted(page);
+	arch_release_hugepage(page);
 	__free_pages(page, HUGETLB_PAGE_ORDER);
 }
 
@@ -201,6 +202,10 @@ static struct page *alloc_fresh_huge_page_node(int nid)
 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
 		HUGETLB_PAGE_ORDER);
 	if (page) {
+		if (arch_prepare_hugepage(page)) {
+			__free_pages(page, HUGETLB_PAGE_ORDER);
+			return 0;
+		}
 		set_compound_page_dtor(page, free_huge_page);
 		spin_lock(&hugetlb_lock);
 		nr_huge_pages++;
@@ -735,7 +740,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
 		entry =
 		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 	} else {
-		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
 	}
 	entry = pte_mkyoung(entry);
 	entry = pte_mkhuge(entry);
@@ -748,8 +753,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 {
 	pte_t entry;
 
-	entry = pte_mkwrite(pte_mkdirty(*ptep));
-	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
+	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
 		update_mmu_cache(vma, address, entry);
 	}
 }
@@ -779,10 +784,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
 		spin_lock(&dst->page_table_lock);
 		spin_lock(&src->page_table_lock);
-		if (!pte_none(*src_pte)) {
+		if (!huge_pte_none(huge_ptep_get(src_pte))) {
 			if (cow)
-				ptep_set_wrprotect(src, addr, src_pte);
-			entry = *src_pte;
+				huge_ptep_set_wrprotect(src, addr, src_pte);
+			entry = huge_ptep_get(src_pte);
 			ptepage = pte_page(entry);
 			get_page(ptepage);
 			set_huge_pte_at(dst, addr, dst_pte, entry);
@@ -826,7 +831,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			continue;
 
 		pte = huge_ptep_get_and_clear(mm, address, ptep);
-		if (pte_none(pte))
+		if (huge_pte_none(pte))
 			continue;
 
 		page = pte_page(pte);
@@ -890,7 +895,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_lock(&mm->page_table_lock);
 
 	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
-	if (likely(pte_same(*ptep, pte))) {
+	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
 		/* Break COW */
 		huge_ptep_clear_flush(vma, address, ptep);
 		set_huge_pte_at(mm, address, ptep,
@@ -960,7 +965,7 @@ retry:
 		goto backout;
 
 	ret = 0;
-	if (!pte_none(*ptep))
+	if (!huge_pte_none(huge_ptep_get(ptep)))
 		goto backout;
 
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
@@ -1002,8 +1007,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * the same page in the page cache.
 	 */
 	mutex_lock(&hugetlb_instantiation_mutex);
-	entry = *ptep;
-	if (pte_none(entry)) {
+	entry = huge_ptep_get(ptep);
+	if (huge_pte_none(entry)) {
 		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
 		mutex_unlock(&hugetlb_instantiation_mutex);
 		return ret;
@@ -1013,7 +1018,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
-	if (likely(pte_same(entry, *ptep)))
+	if (likely(pte_same(entry, huge_ptep_get(ptep))))
 		if (write_access && !pte_write(entry))
 			ret = hugetlb_cow(mm, vma, address, ptep, entry);
 	spin_unlock(&mm->page_table_lock);
@@ -1043,7 +1048,8 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 */
 		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
 
-		if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
+		if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
+		    (write && !pte_write(huge_ptep_get(pte)))) {
 			int ret;
 
 			spin_unlock(&mm->page_table_lock);
@@ -1059,7 +1065,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 
 		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
-		page = pte_page(*pte);
+		page = pte_page(huge_ptep_get(pte));
 same_page:
 		if (pages) {
 			get_page(page);
@@ -1108,7 +1114,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 			continue;
 		if (huge_pmd_unshare(mm, &address, ptep))
 			continue;
-		if (!pte_none(*ptep)) {
+		if (!huge_pte_none(huge_ptep_get(ptep))) {
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
 			pte = pte_mkhuge(pte_modify(pte, newprot));
 			set_huge_pte_at(mm, address, ptep, pte);
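After these hunks, mm/hugetlb.c never dereferences a huge PTE pointer or tests it with the small-page helpers directly: reads go through huge_ptep_get() and checks through the huge_pte_*() wrappers. On the architectures touched above the accessors compile down to the old code, but an architecture whose huge-page entries are not plain pte_ts could redefine the accessor. A hypothetical sketch only, with an invented decode helper, not taken from this patch:

/* Hypothetical arch override -- not part of this patch. */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	/*
	 * If the entry stored at ptep is not a normal pte_t (for example,
	 * it lives at PMD level in a different format), translate it here
	 * so the generic code in mm/hugetlb.c can keep treating it as a
	 * pte_t.  arch_decode_huge_pte() is an illustrative name only.
	 */
	return arch_decode_huge_pte(ptep);
}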