-rw-r--r--  arch/ia64/include/asm/hugetlb.h    |  1
-rw-r--r--  arch/mips/include/asm/hugetlb.h    |  1
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h |  1
-rw-r--r--  arch/s390/include/asm/hugetlb.h    | 56
-rw-r--r--  arch/s390/include/asm/pgtable.h    | 95
-rw-r--r--  arch/s390/mm/hugetlbpage.c         |  2
-rw-r--r--  arch/sh/include/asm/hugetlb.h      |  1
-rw-r--r--  arch/sparc/include/asm/hugetlb.h   |  1
-rw-r--r--  arch/tile/include/asm/hugetlb.h    |  1
-rw-r--r--  arch/x86/include/asm/hugetlb.h     |  1
-rw-r--r--  include/asm-generic/hugetlb.h      | 40
-rw-r--r--  mm/hugetlb.c                       | 24
12 files changed, 156 insertions(+), 68 deletions(-)
diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h
index 94eaa5bd5d0c..aa910054b8e7 100644
--- a/arch/ia64/include/asm/hugetlb.h
+++ b/arch/ia64/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
 #define _ASM_IA64_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index ef99db994c2f..fe0d15d32660 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -10,6 +10,7 @@
 #define __ASM_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 62e11a32c4c2..4fcbd6b14a3a 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -3,6 +3,7 @@
 
 #ifdef CONFIG_HUGETLB_PAGE
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 extern struct kmem_cache *hugepte_cache;
 
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 593753ee07f3..bd90359d6d22 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -114,7 +114,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 #define huge_ptep_set_wrprotect(__mm, __addr, __ptep)           \
 ({                                                              \
         pte_t __pte = huge_ptep_get(__ptep);                    \
-        if (pte_write(__pte)) {                                 \
+        if (huge_pte_write(__pte)) {                            \
                 huge_ptep_invalidate(__mm, __addr, __ptep);     \
                 set_huge_pte_at(__mm, __addr, __ptep,           \
                                 huge_pte_wrprotect(__pte));     \
@@ -127,4 +127,58 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
         huge_ptep_invalidate(vma->vm_mm, address, ptep);
 }
 
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+        pte_t pte;
+        pmd_t pmd;
+
+        pmd = mk_pmd_phys(page_to_phys(page), pgprot);
+        pte_val(pte) = pmd_val(pmd);
+        return pte;
+}
+
+static inline int huge_pte_write(pte_t pte)
+{
+        pmd_t pmd;
+
+        pmd_val(pmd) = pte_val(pte);
+        return pmd_write(pmd);
+}
+
+static inline int huge_pte_dirty(pte_t pte)
+{
+        /* No dirty bit in the segment table entry. */
+        return 0;
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+        pmd_t pmd;
+
+        pmd_val(pmd) = pte_val(pte);
+        pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
+        return pte;
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+        /* No dirty bit in the segment table entry. */
+        return pte;
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+        pmd_t pmd;
+
+        pmd_val(pmd) = pte_val(pte);
+        pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
+        return pte;
+}
+
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *ptep)
+{
+        pmd_clear((pmd_t *) ptep);
+}
+
 #endif /* _ASM_S390_HUGETLB_H */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4a64c0e5428f..b4622915bd15 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -424,6 +424,13 @@ extern unsigned long MODULES_END;
 #define __S110  PAGE_RW
 #define __S111  PAGE_RW
 
+/*
+ * Segment entry (large page) protection definitions.
+ */
+#define SEGMENT_NONE    __pgprot(_HPAGE_TYPE_NONE)
+#define SEGMENT_RO      __pgprot(_HPAGE_TYPE_RO)
+#define SEGMENT_RW      __pgprot(_HPAGE_TYPE_RW)
+
 static inline int mm_exclusive(struct mm_struct *mm)
 {
         return likely(mm == current->active_mm &&
@@ -914,26 +921,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 #ifdef CONFIG_HUGETLB_PAGE
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-        /*
-         * PROT_NONE needs to be remapped from the pte type to the ste type.
-         * The HW invalid bit is also different for pte and ste. The pte
-         * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
-         * bit, so we don't have to clear it.
-         */
-        if (pte_val(pte) & _PAGE_INVALID) {
-                if (pte_val(pte) & _PAGE_SWT)
-                        pte_val(pte) |= _HPAGE_TYPE_NONE;
-                pte_val(pte) |= _SEGMENT_ENTRY_INV;
-        }
-        /*
-         * Clear SW pte bits, there are no SW bits in a segment table entry.
-         */
-        pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC |
-                          _PAGE_SWR | _PAGE_SWW);
-        /*
-         * Also set the change-override bit because we don't need dirty bit
-         * tracking for hugetlbfs pages.
-         */
         pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
         return pte;
 }
@@ -1278,31 +1265,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
         }
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-
-#define SEGMENT_NONE    __pgprot(_HPAGE_TYPE_NONE)
-#define SEGMENT_RO      __pgprot(_HPAGE_TYPE_RO)
-#define SEGMENT_RW      __pgprot(_HPAGE_TYPE_RW)
-
-#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
-
-#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
-
-static inline int pmd_trans_splitting(pmd_t pmd)
-{
-        return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
-}
-
-static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-                              pmd_t *pmdp, pmd_t entry)
-{
-        if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
-                pmd_val(entry) |= _SEGMENT_ENTRY_CO;
-        *pmdp = entry;
-}
-
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
         /*
@@ -1323,10 +1286,11 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
         return pmd;
 }
 
-static inline pmd_t pmd_mkhuge(pmd_t pmd)
+static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
 {
-        pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
-        return pmd;
+        pmd_t __pmd;
+        pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
+        return __pmd;
 }
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
@@ -1336,6 +1300,34 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
         pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
         return pmd;
 }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+        return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                              pmd_t *pmdp, pmd_t entry)
+{
+        if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+                pmd_val(entry) |= _SEGMENT_ENTRY_CO;
+        *pmdp = entry;
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+        pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
+        return pmd;
+}
 
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
@@ -1432,13 +1424,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
         }
 }
 
-static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
-{
-        pmd_t __pmd;
-        pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
-        return __pmd;
-}
-
 #define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
 #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
 
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 532525ec88c1..121089d57802 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -39,7 +39,7 @@ int arch_prepare_hugepage(struct page *page)
         if (!ptep)
                 return -ENOMEM;
 
-        pte = mk_pte(page, PAGE_RW);
+        pte_val(pte) = addr;
         for (i = 0; i < PTRS_PER_PTE; i++) {
                 set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
                 pte_val(pte) += PAGE_SIZE;
diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h
index b3808c7d67b2..699255d6d1c6 100644
--- a/arch/sh/include/asm/hugetlb.h
+++ b/arch/sh/include/asm/hugetlb.h
@@ -3,6 +3,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 7eb57d245044..e4cab465b81f 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
 #define _ASM_SPARC64_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h
index 0f885af2b621..3257733003f8 100644
--- a/arch/tile/include/asm/hugetlb.h
+++ b/arch/tile/include/asm/hugetlb.h
@@ -16,6 +16,7 @@
 #define _ASM_TILE_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index bdd35dbd0605..a8091216963b 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
new file mode 100644
index 000000000000..d06079c774a0
--- /dev/null
+++ b/include/asm-generic/hugetlb.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_GENERIC_HUGETLB_H
+#define _ASM_GENERIC_HUGETLB_H
+
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+        return mk_pte(page, pgprot);
+}
+
+static inline int huge_pte_write(pte_t pte)
+{
+        return pte_write(pte);
+}
+
+static inline int huge_pte_dirty(pte_t pte)
+{
+        return pte_dirty(pte);
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+        return pte_mkwrite(pte);
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+        return pte_mkdirty(pte);
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+        return pte_modify(pte, newprot);
+}
+
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *ptep)
+{
+        pte_clear(mm, addr, ptep);
+}
+
+#endif /* _ASM_GENERIC_HUGETLB_H */
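
For orientation, a minimal sketch of how the split above is meant to be consumed (illustration only, not part of the patch; hugetlb_write_fault_needs_cow is a hypothetical helper): generic hugetlb code calls only the huge_pte_* names, which resolve either to the asm-generic fallbacks just introduced, where a huge pte is an ordinary pte, or to an architecture's own definitions such as the s390 ones earlier in this patch, where a huge pte is really a segment-table (pmd) entry.

/* Hypothetical caller, for illustration only -- not part of the patch. */
static bool hugetlb_write_fault_needs_cow(pte_t *ptep, unsigned int flags)
{
        pte_t entry = huge_ptep_get(ptep);      /* arch-defined accessor */

        /* Write fault on a read-only huge mapping means copy-on-write. */
        return (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry);
}

The same source builds on every architecture because the pte-vs-segment-entry decision is hidden behind the huge_pte_* wrappers, which is exactly the substitution the mm/hugetlb.c hunks below perform.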
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1a12f5b9a0ab..73b864a32017 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2247,10 +2247,11 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
         pte_t entry;
 
         if (writable) {
-                entry =
-                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
+                entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
+                                         vma->vm_page_prot)));
         } else {
-                entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+                entry = huge_pte_wrprotect(mk_huge_pte(page,
+                                           vma->vm_page_prot));
         }
         entry = pte_mkyoung(entry);
         entry = pte_mkhuge(entry);
@@ -2264,7 +2265,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 {
         pte_t entry;
 
-        entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
+        entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
                 update_mmu_cache(vma, address, ptep);
 }
@@ -2379,7 +2380,7 @@ again:
                  * HWPoisoned hugepage is already unmapped and dropped reference
                  */
                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
-                        pte_clear(mm, address, ptep);
+                        huge_pte_clear(mm, address, ptep);
                         continue;
                 }
 
@@ -2403,7 +2404,7 @@ again:
 
                 pte = huge_ptep_get_and_clear(mm, address, ptep);
                 tlb_remove_tlb_entry(tlb, ptep, address);
-                if (pte_dirty(pte))
+                if (huge_pte_dirty(pte))
                         set_page_dirty(page);
 
                 page_remove_rmap(page);
@@ -2856,7 +2857,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
          * page now as it is used to determine if a reservation has been
          * consumed.
          */
-        if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
+        if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
                 if (vma_needs_reservation(h, vma, address) < 0) {
                         ret = VM_FAULT_OOM;
                         goto out_mutex;
@@ -2886,12 +2887,12 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 
         if (flags & FAULT_FLAG_WRITE) {
-                if (!pte_write(entry)) {
+                if (!huge_pte_write(entry)) {
                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
                                           pagecache_page);
                         goto out_page_table_lock;
                 }
-                entry = pte_mkdirty(entry);
+                entry = huge_pte_mkdirty(entry);
         }
         entry = pte_mkyoung(entry);
         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
@@ -2972,7 +2973,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                  * directly from any kind of swap entries.
                  */
                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
-                    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
+                    ((flags & FOLL_WRITE) &&
+                      !huge_pte_write(huge_ptep_get(pte)))) {
                         int ret;
 
                         spin_unlock(&mm->page_table_lock);
@@ -3042,7 +3044,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                 }
                 if (!huge_pte_none(huge_ptep_get(ptep))) {
                         pte = huge_ptep_get_and_clear(mm, address, ptep);
-                        pte = pte_mkhuge(pte_modify(pte, newprot));
+                        pte = pte_mkhuge(huge_pte_modify(pte, newprot));
                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
                         set_huge_pte_at(mm, address, ptep, pte);
                         pages++;