path: root/arch/s390/mm
authorGerald Schaefer <gerald.schaefer@de.ibm.com>2016-07-18 08:35:13 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2016-07-31 05:27:57 -0400
commitbc29b7ac1d9f09f5024b0e257e91bf5df611ccd4 (patch)
treef32fddeba065a46d018f6c427f220317b1dc5b89 /arch/s390/mm
parentbad60e6f259a01cf9f29a1ef8d435ab6c60b2de9 (diff)
s390/mm: clean up pte/pmd encoding
The hugetlbfs pte<->pmd conversion functions currently assume that the pmd bit layout is consistent with the pte layout, which is not really true. The SW read and write bits are encoded as the sequence "wr" in a pte, but in a pmd it is "rw". The hugetlbfs conversion assumes that the sequence is identical in both cases, which results in swapped read and write bits in the pmd. In practice this is not a problem, because those pmd bits are only relevant for THP pmds and not for hugetlbfs pmds; the hugetlbfs code works on (fake) ptes, and the converted pte bits are correct.

There is another variation in pte/pmd encoding which affects dirty prot-none ptes/pmds. In this case, a pmd has both its HW read-only and invalid bits set, while a pte has only the invalid bit set. This also has no effect in practice, but the two encodings should be consistent.

This patch fixes both inconsistencies by changing the SW read/write bit layout for pmds as well as the PAGE_NONE encoding for ptes. It also makes the hugetlbfs conversion functions more robust by introducing a move_set_bit() macro that uses the pte/pmd bit #defines instead of constant shifts.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
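To see the fragility the commit message describes, here is a minimal userspace sketch of the move_set_bit() idea. All FAKE_* mask names and values are made up for illustration and do not reflect the real s390 bit layout, and ilog2() is stood in by __builtin_ctzl(), which matches the kernel macro only for single-bit masks. When the read/write bits sit in swapped relative positions on the pmd side, a constant shift lands a bit on the wrong flag, while move_set_bit() keys off the #defines and cannot get the ordering wrong.

#include <stdio.h>

/* Userspace stand-in for the kernel's ilog2(); valid for single-bit masks. */
#define ilog2(m)	__builtin_ctzl(m)

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
 * it to the position indicated by single-bit bitmask "b".
 */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))

/* Hypothetical pte layout: write at bit 1, read at bit 2 ("wr", low to high). */
#define FAKE_PAGE_WRITE	0x002UL
#define FAKE_PAGE_READ	0x004UL
/* Hypothetical pmd layout: read at bit 5, write at bit 6 ("rw", i.e. swapped). */
#define FAKE_SEG_READ	0x020UL
#define FAKE_SEG_WRITE	0x040UL

int main(void)
{
	unsigned long pte = FAKE_PAGE_READ;	/* a read-only "pte" */

	/* Old style: one constant shift for both bits (<< 4 here). The read
	 * bit (bit 2) lands on bit 6, which is the pmd WRITE bit: wrong. */
	unsigned long bad = (pte & FAKE_PAGE_READ) << 4;

	/* New style: the bit is moved by name and lands on FAKE_SEG_READ. */
	unsigned long good = move_set_bit(pte, FAKE_PAGE_READ, FAKE_SEG_READ);

	printf("constant shift: 0x%02lx (pmd write bit, wrong)\n", bad);
	printf("move_set_bit:   0x%02lx (pmd read bit, correct)\n", good);
	return 0;
}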
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/hugetlbpage.c  52
1 file changed, 38 insertions(+), 14 deletions(-)
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index e19d853883be..cd404aa3931c 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -11,6 +11,12 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 
+/*
+ * If the bit selected by single-bit bitmask "a" is set within "x", move
+ * it to the position indicated by single-bit bitmask "b".
+ */
+#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))
+
 static inline unsigned long __pte_to_rste(pte_t pte)
 {
 	unsigned long rste;
@@ -37,13 +43,22 @@ static inline unsigned long __pte_to_rste(pte_t pte)
 	 */
 	if (pte_present(pte)) {
 		rste = pte_val(pte) & PAGE_MASK;
-		rste |= (pte_val(pte) & _PAGE_READ) >> 4;
-		rste |= (pte_val(pte) & _PAGE_WRITE) >> 4;
-		rste |= (pte_val(pte) & _PAGE_INVALID) >> 5;
-		rste |= (pte_val(pte) & _PAGE_PROTECT);
-		rste |= (pte_val(pte) & _PAGE_DIRTY) << 10;
-		rste |= (pte_val(pte) & _PAGE_YOUNG) << 10;
-		rste |= (pte_val(pte) & _PAGE_SOFT_DIRTY) << 13;
+		rste |= move_set_bit(pte_val(pte), _PAGE_READ,
+				     _SEGMENT_ENTRY_READ);
+		rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
+				     _SEGMENT_ENTRY_WRITE);
+		rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
+				     _SEGMENT_ENTRY_INVALID);
+		rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
+				     _SEGMENT_ENTRY_PROTECT);
+		rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
+				     _SEGMENT_ENTRY_DIRTY);
+		rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
+				     _SEGMENT_ENTRY_YOUNG);
+#ifdef CONFIG_MEM_SOFT_DIRTY
+		rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
+				     _SEGMENT_ENTRY_SOFT_DIRTY);
+#endif
 	} else
 		rste = _SEGMENT_ENTRY_INVALID;
 	return rste;
@@ -82,13 +97,22 @@ static inline pte_t __rste_to_pte(unsigned long rste)
 	if (present) {
 		pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
 		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
-		pte_val(pte) |= (rste & _SEGMENT_ENTRY_READ) << 4;
-		pte_val(pte) |= (rste & _SEGMENT_ENTRY_WRITE) << 4;
-		pte_val(pte) |= (rste & _SEGMENT_ENTRY_INVALID) << 5;
-		pte_val(pte) |= (rste & _SEGMENT_ENTRY_PROTECT);
-		pte_val(pte) |= (rste & _SEGMENT_ENTRY_DIRTY) >> 10;
-		pte_val(pte) |= (rste & _SEGMENT_ENTRY_YOUNG) >> 10;
-		pte_val(pte) |= (rste & _SEGMENT_ENTRY_SOFT_DIRTY) >> 13;
+		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_READ,
+					     _PAGE_READ);
+		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE,
+					     _PAGE_WRITE);
+		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID,
+					     _PAGE_INVALID);
+		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT,
+					     _PAGE_PROTECT);
+		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY,
+					     _PAGE_DIRTY);
+		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG,
+					     _PAGE_YOUNG);
+#ifdef CONFIG_MEM_SOFT_DIRTY
+		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
+					     _PAGE_SOFT_DIRTY);
+#endif
 	} else
 		pte_val(pte) = _PAGE_INVALID;
 	return pte;
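As a usage illustration, a round trip through a pair of toy conversion helpers shows the property the patch is after: because both directions are driven by the same bit #defines, the conversion is lossless no matter how the bits are ordered on either side. This is a sketch of the pattern only; the FAKE_* masks and helper names are hypothetical stand-ins, not the kernel's __pte_to_rste()/__rste_to_pte().

#include <assert.h>
#include <stdio.h>

#define ilog2(m)	__builtin_ctzl(m)	/* single-bit masks only */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))

#define FAKE_PAGE_WRITE	0x002UL	/* hypothetical pte bits ("wr" order) */
#define FAKE_PAGE_READ	0x004UL
#define FAKE_SEG_READ	0x020UL	/* hypothetical pmd bits ("rw" order) */
#define FAKE_SEG_WRITE	0x040UL

/* Move each SW bit from its pte position to its segment-entry position. */
static unsigned long fake_pte_to_rste(unsigned long pte)
{
	unsigned long rste = 0;

	rste |= move_set_bit(pte, FAKE_PAGE_READ, FAKE_SEG_READ);
	rste |= move_set_bit(pte, FAKE_PAGE_WRITE, FAKE_SEG_WRITE);
	return rste;
}

/* The exact inverse: the same #defines, with source and target swapped. */
static unsigned long fake_rste_to_pte(unsigned long rste)
{
	unsigned long pte = 0;

	pte |= move_set_bit(rste, FAKE_SEG_READ, FAKE_PAGE_READ);
	pte |= move_set_bit(rste, FAKE_SEG_WRITE, FAKE_PAGE_WRITE);
	return pte;
}

int main(void)
{
	unsigned long pte = FAKE_PAGE_READ | FAKE_PAGE_WRITE;

	/* The round trip must preserve every bit in both directions. */
	assert(fake_rste_to_pte(fake_pte_to_rste(pte)) == pte);
	printf("round trip ok: 0x%02lx\n", pte);
	return 0;
}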