author     Gerald Schaefer <gerald.schaefer@de.ibm.com>      2013-04-29 18:07:23 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2013-04-29 18:54:33 -0400
commit     106c992a5ebef28193cf5958e49ceff5e4aebb04 (patch)
tree       86f6a69dce858449e9f02d43b65cd2c2743c57dd /arch/s390/include/asm
parent     146732ce104ddfed3d4d82722c0b336074016b92 (diff)
mm/hugetlb: add more arch-defined huge_pte functions
Commit abf09bed3cce ("s390/mm: implement software dirty bits")
introduced another difference in the pte layout vs. the pmd layout on
s390, thoroughly breaking the s390 support for hugetlbfs. This requires
replacing some more pte_xxx functions in mm/hugetlbfs.c with a
huge_pte_xxx version.
This patch introduces those huge_pte_xxx functions and their generic
implementation in asm-generic/hugetlb.h, which will now be included on
all architectures supporting hugetlbfs apart from s390. This change
will be a no-op for those architectures.
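For architectures other than s390, the generic header simply forwards each
huge_pte_xxx helper to the corresponding pte_xxx helper, which is why
including it is a no-op there. A minimal sketch of that forwarding pattern
(abbreviated illustration of asm-generic/hugetlb.h, not the complete set of
functions introduced by this patch):

/*
 * Sketch: generic huge_pte_xxx wrappers that just call the normal
 * pte_xxx helpers (valid where the pte and huge pte layouts match).
 */
static inline int huge_pte_write(pte_t pte)
{
	return pte_write(pte);
}

static inline int huge_pte_dirty(pte_t pte)
{
	return pte_dirty(pte);
}

static inline pte_t huge_pte_mkwrite(pte_t pte)
{
	return pte_mkwrite(pte);
}

static inline pte_t huge_pte_mkdirty(pte_t pte)
{
	return pte_mkdirty(pte);
}

static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
	return pte_modify(pte, newprot);
}

static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep)
{
	pte_clear(mm, addr, ptep);
}

s390 instead provides its own versions (below) that operate on the segment
table (pmd) layout.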
[akpm@linux-foundation.org: fix warning]
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Hillf Danton <dhillf@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.cz> [for !s390 parts]
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--  arch/s390/include/asm/hugetlb.h | 56
-rw-r--r--  arch/s390/include/asm/pgtable.h | 95
2 files changed, 95 insertions, 56 deletions
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 593753ee07f3..bd90359d6d22 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -114,7 +114,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 #define huge_ptep_set_wrprotect(__mm, __addr, __ptep)		\
 ({								\
 	pte_t __pte = huge_ptep_get(__ptep);			\
-	if (pte_write(__pte)) {					\
+	if (huge_pte_write(__pte)) {				\
 		huge_ptep_invalidate(__mm, __addr, __ptep);	\
 		set_huge_pte_at(__mm, __addr, __ptep,		\
 				huge_pte_wrprotect(__pte));	\
@@ -127,4 +127,58 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 	huge_ptep_invalidate(vma->vm_mm, address, ptep);
 }
 
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+	pte_t pte;
+	pmd_t pmd;
+
+	pmd = mk_pmd_phys(page_to_phys(page), pgprot);
+	pte_val(pte) = pmd_val(pmd);
+	return pte;
+}
+
+static inline int huge_pte_write(pte_t pte)
+{
+	pmd_t pmd;
+
+	pmd_val(pmd) = pte_val(pte);
+	return pmd_write(pmd);
+}
+
+static inline int huge_pte_dirty(pte_t pte)
+{
+	/* No dirty bit in the segment table entry. */
+	return 0;
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+	pmd_t pmd;
+
+	pmd_val(pmd) = pte_val(pte);
+	pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
+	return pte;
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+	/* No dirty bit in the segment table entry. */
+	return pte;
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pmd_t pmd;
+
+	pmd_val(pmd) = pte_val(pte);
+	pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
+	return pte;
+}
+
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+				  pte_t *ptep)
+{
+	pmd_clear((pmd_t *) ptep);
+}
+
 #endif /* _ASM_S390_HUGETLB_H */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4a64c0e5428f..b4622915bd15 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -424,6 +424,13 @@ extern unsigned long MODULES_END;
 #define __S110	PAGE_RW
 #define __S111	PAGE_RW
 
+/*
+ * Segment entry (large page) protection definitions.
+ */
+#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
+#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
+#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)
+
 static inline int mm_exclusive(struct mm_struct *mm)
 {
 	return likely(mm == current->active_mm &&
@@ -914,26 +921,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 #ifdef CONFIG_HUGETLB_PAGE
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	/*
-	 * PROT_NONE needs to be remapped from the pte type to the ste type.
-	 * The HW invalid bit is also different for pte and ste. The pte
-	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
-	 * bit, so we don't have to clear it.
-	 */
-	if (pte_val(pte) & _PAGE_INVALID) {
-		if (pte_val(pte) & _PAGE_SWT)
-			pte_val(pte) |= _HPAGE_TYPE_NONE;
-		pte_val(pte) |= _SEGMENT_ENTRY_INV;
-	}
-	/*
-	 * Clear SW pte bits, there are no SW bits in a segment table entry.
-	 */
-	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC |
-			  _PAGE_SWR | _PAGE_SWW);
-	/*
-	 * Also set the change-override bit because we don't need dirty bit
-	 * tracking for hugetlbfs pages.
-	 */
 	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
 	return pte;
 }
@@ -1278,31 +1265,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
 	}
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-
-#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
-#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
-#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)
-
-#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
-
-#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
-
-static inline int pmd_trans_splitting(pmd_t pmd)
-{
-	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
-}
-
-static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-			      pmd_t *pmdp, pmd_t entry)
-{
-	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
-		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
-	*pmdp = entry;
-}
-
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
 	/*
@@ -1323,10 +1286,11 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 	return pmd;
 }
 
-static inline pmd_t pmd_mkhuge(pmd_t pmd)
+static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
 {
-	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
-	return pmd;
+	pmd_t __pmd;
+	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
+	return __pmd;
 }
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
@@ -1336,6 +1300,34 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
 	return pmd;
 }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t entry)
+{
+	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
+	*pmdp = entry;
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
+	return pmd;
+}
 
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
@@ -1432,13 +1424,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 	}
 }
 
-static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
-{
-	pmd_t __pmd;
-	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
-	return __pmd;
-}
-
 #define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
 #define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
 