about summary refs log tree commit diff stats
path: root/arch/s390/mm
diff options
context:
space:
mode:
authorHeiko Carstens <heiko.carstens@de.ibm.com>2012-10-01 06:58:34 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2012-10-09 08:16:56 -0400
commit378b1e7a80a59325ca1036e892462db728126f84 (patch)
tree6d25959ac7401eee48f76441ca42c2759aef15b4 /arch/s390/mm
parent521b3d790c16fad9d83c72d610c1e416ad3f7ae3 (diff)
s390/mm: fix pmd_huge() usage for kernel mapping
pmd_huge() will always return 0 on !HUGETLBFS, however we use that helper function when walking the kernel page tables to decide if we have a 1MB page frame or not. Since we create 1MB frames for the kernel 1:1 mapping independently of HUGETLBFS this can lead to incorrect storage accesses since the code can assume that we have a pointer to a page table instead of a pointer to a 1MB frame. Fix this by adding a pmd_large() primitive like other architectures have it already and remove all references to HUGETLBFS/HUGETLBPAGE from the code that walks kernel page tables.

Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--arch/s390/mm/pageattr.c2
-rw-r--r--arch/s390/mm/vmem.c29
2 files changed, 16 insertions, 15 deletions
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index b36537a5f43e..0f33bab3e984 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -21,7 +21,7 @@ static void change_page_attr(unsigned long addr, int numpages,
21 pgdp = pgd_offset(&init_mm, addr); 21 pgdp = pgd_offset(&init_mm, addr);
22 pudp = pud_offset(pgdp, addr); 22 pudp = pud_offset(pgdp, addr);
23 pmdp = pmd_offset(pudp, addr); 23 pmdp = pmd_offset(pudp, addr);
24 if (pmd_huge(*pmdp)) { 24 if (pmd_large(*pmdp)) {
25 WARN_ON_ONCE(1); 25 WARN_ON_ONCE(1);
26 continue; 26 continue;
27 } 27 }
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index c22abf900c9e..5b70393911bd 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -79,7 +79,8 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
79 */ 79 */
80static int vmem_add_mem(unsigned long start, unsigned long size, int ro) 80static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
81{ 81{
82 unsigned long address; 82 unsigned long end = start + size;
83 unsigned long address = start;
83 pgd_t *pg_dir; 84 pgd_t *pg_dir;
84 pud_t *pu_dir; 85 pud_t *pu_dir;
85 pmd_t *pm_dir; 86 pmd_t *pm_dir;
@@ -87,7 +88,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
87 pte_t pte; 88 pte_t pte;
88 int ret = -ENOMEM; 89 int ret = -ENOMEM;
89 90
90 for (address = start; address < start + size; address += PAGE_SIZE) { 91 while (address < end) {
91 pg_dir = pgd_offset_k(address); 92 pg_dir = pgd_offset_k(address);
92 if (pgd_none(*pg_dir)) { 93 if (pgd_none(*pg_dir)) {
93 pu_dir = vmem_pud_alloc(); 94 pu_dir = vmem_pud_alloc();
@@ -108,12 +109,11 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
108 pm_dir = pmd_offset(pu_dir, address); 109 pm_dir = pmd_offset(pu_dir, address);
109 110
110#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) 111#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
111 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) && 112 if (MACHINE_HAS_EDAT1 && address && !(address & ~PMD_MASK) &&
112 (address + HPAGE_SIZE <= start + size) && 113 (address + PMD_SIZE <= end)) {
113 (address >= HPAGE_SIZE)) {
114 pte_val(pte) |= _SEGMENT_ENTRY_LARGE; 114 pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
115 pmd_val(*pm_dir) = pte_val(pte); 115 pmd_val(*pm_dir) = pte_val(pte);
116 address += HPAGE_SIZE - PAGE_SIZE; 116 address += PMD_SIZE;
117 continue; 117 continue;
118 } 118 }
119#endif 119#endif
@@ -126,10 +126,11 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
126 126
127 pt_dir = pte_offset_kernel(pm_dir, address); 127 pt_dir = pte_offset_kernel(pm_dir, address);
128 *pt_dir = pte; 128 *pt_dir = pte;
129 address += PAGE_SIZE;
129 } 130 }
130 ret = 0; 131 ret = 0;
131out: 132out:
132 flush_tlb_kernel_range(start, start + size); 133 flush_tlb_kernel_range(start, end);
133 return ret; 134 return ret;
134} 135}
135 136
@@ -139,7 +140,8 @@ out:
139 */ 140 */
140static void vmem_remove_range(unsigned long start, unsigned long size) 141static void vmem_remove_range(unsigned long start, unsigned long size)
141{ 142{
142 unsigned long address; 143 unsigned long end = start + size;
144 unsigned long address = start;
143 pgd_t *pg_dir; 145 pgd_t *pg_dir;
144 pud_t *pu_dir; 146 pud_t *pu_dir;
145 pmd_t *pm_dir; 147 pmd_t *pm_dir;
@@ -147,7 +149,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
147 pte_t pte; 149 pte_t pte;
148 150
149 pte_val(pte) = _PAGE_TYPE_EMPTY; 151 pte_val(pte) = _PAGE_TYPE_EMPTY;
150 for (address = start; address < start + size; address += PAGE_SIZE) { 152 while (address < end) {
151 pg_dir = pgd_offset_k(address); 153 pg_dir = pgd_offset_k(address);
152 pu_dir = pud_offset(pg_dir, address); 154 pu_dir = pud_offset(pg_dir, address);
153 if (pud_none(*pu_dir)) 155 if (pud_none(*pu_dir))
@@ -155,17 +157,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
155 pm_dir = pmd_offset(pu_dir, address); 157 pm_dir = pmd_offset(pu_dir, address);
156 if (pmd_none(*pm_dir)) 158 if (pmd_none(*pm_dir))
157 continue; 159 continue;
158 160 if (pmd_large(*pm_dir)) {
159 if (pmd_huge(*pm_dir)) {
160 pmd_clear(pm_dir); 161 pmd_clear(pm_dir);
161 address += HPAGE_SIZE - PAGE_SIZE; 162 address += PMD_SIZE;
162 continue; 163 continue;
163 } 164 }
164
165 pt_dir = pte_offset_kernel(pm_dir, address); 165 pt_dir = pte_offset_kernel(pm_dir, address);
166 *pt_dir = pte; 166 *pt_dir = pte;
167 address += PAGE_SIZE;
167 } 168 }
168 flush_tlb_kernel_range(start, start + size); 169 flush_tlb_kernel_range(start, end);
169} 170}
170 171
171/* 172/*