author	Catalin Marinas <catalin.marinas@arm.com>	2015-11-26 10:42:41 -0500
committer	Catalin Marinas <catalin.marinas@arm.com>	2015-11-26 10:42:41 -0500
commit	667c27597ca823f20c524bbd486c6709f905673b (patch)
tree	31c7925b3532fe99303ce1172547909803ee25a1 /arch/arm64/mm
parent	0ebea8088095f1c18c1d1de284ccc4c479ca21c1 (diff)
Revert "arm64: Mark kernel page ranges contiguous"
This reverts commit 348a65cdcbbf243073ee39d1f7d4413081ad7eab.

Incorrect page table manipulation that does not respect the ARM ARM recommended break-before-make sequence may lead to TLB conflicts. The contiguous PTE patch makes the system even more susceptible to such errors by changing the mapping from a single page to a contiguous range of pages. An additional TLB invalidation would reduce the risk window; however, the correct fix is to switch to a temporary swapper_pg_dir. Once the correct workaround is done, the reverted commit will be re-applied.

Reported-by: Jeremy Linton <jeremy.linton@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
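For reference, the break-before-make sequence mentioned above replaces a live translation by first invalidating the old entry and flushing the TLB, and only then writing the new entry, so the hardware walker can never observe two conflicting valid translations for the same address. A minimal sketch of the sequence for a single kernel page, using the generic kernel helpers pte_clear(), flush_tlb_kernel_range() and set_pte(); the surrounding variables (ptep, addr, pfn, prot) are assumed from context:

	/*
	 * Break-before-make: never let the walker see two different
	 * valid translations for the same virtual address.
	 */
	pte_clear(&init_mm, addr, ptep);		/* break: old entry now invalid */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);	/* flush any stale TLB entries */
	set_pte(ptep, pfn_pte(pfn, prot));		/* make: install the new entry */

The reverted code rewrote live kernel mappings without the break and flush steps; as the message says, the planned correct fix is to perform the rewrite from a temporary swapper_pg_dir instead.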
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/mmu.c	69
1 file changed, 8 insertions(+), 61 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 78d91b1eab84..873e363048c6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -85,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
 	do {
 		/*
 		 * Need to have the least restrictive permissions available
-		 * permissions will be fixed up later. Default the new page
-		 * range as contiguous ptes.
+		 * permissions will be fixed up later
 		 */
-		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
+		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
 		pfn++;
 	} while (pte++, i++, i < PTRS_PER_PTE);
 }
 
-/*
- * Given a PTE with the CONT bit set, determine where the CONT range
- * starts, and clear the entire range of PTE CONT bits.
- */
-static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
-{
-	int i;
-
-	pte -= CONT_RANGE_OFFSET(addr);
-	for (i = 0; i < CONT_PTES; i++) {
-		set_pte(pte, pte_mknoncont(*pte));
-		pte++;
-	}
-	flush_tlb_all();
-}
-
-/*
- * Given a range of PTEs set the pfn and provided page protection flags
- */
-static void __populate_init_pte(pte_t *pte, unsigned long addr,
-				unsigned long end, phys_addr_t phys,
-				pgprot_t prot)
-{
-	unsigned long pfn = __phys_to_pfn(phys);
-
-	do {
-		/* clear all the bits except the pfn, then apply the prot */
-		set_pte(pte, pfn_pte(pfn, prot));
-		pte++;
-		pfn++;
-		addr += PAGE_SIZE;
-	} while (addr != end);
-}
-
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
-				  unsigned long end, phys_addr_t phys,
+				  unsigned long end, unsigned long pfn,
 				  pgprot_t prot,
 				  void *(*alloc)(unsigned long size))
 {
 	pte_t *pte;
-	unsigned long next;
 
 	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
 		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
@@ -146,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		next = min(end, (addr + CONT_SIZE) & CONT_MASK);
-		if (((addr | next | phys) & ~CONT_MASK) == 0) {
-			/* a block of CONT_PTES */
-			__populate_init_pte(pte, addr, next, phys,
-					    __pgprot(pgprot_val(prot) | PTE_CONT));
-		} else {
-			/*
-			 * If the range being split is already inside of a
-			 * contiguous range but this PTE isn't going to be
-			 * contiguous, then we want to unmark the adjacent
-			 * ranges, then update the portion of the range we
-			 * are interrested in.
-			 */
-			clear_cont_pte_range(pte, addr);
-			__populate_init_pte(pte, addr, next, phys, prot);
-		}
-
-		pte += (next - addr) >> PAGE_SHIFT;
-		phys += next - addr;
-		addr = next;
-	} while (addr != end);
+		set_pte(pte, pfn_pte(pfn, prot));
+		pfn++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
 static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -227,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 				}
 			}
 		} else {
-			alloc_init_pte(pmd, addr, next, phys, prot, alloc);
+			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
+				       prot, alloc);
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
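
As the removed hunks show, the contiguous path only applied PTE_CONT when the virtual start, virtual end and physical base were all CONT_SIZE-aligned; otherwise it had to clear the hint on the surrounding block via clear_cont_pte_range() and flush the TLB. A self-contained sketch of that eligibility test, with constants mirroring the arm64 definitions for 4KB pages (the helper name cont_range_ok() is illustrative, not a kernel function):

	#include <stdbool.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12					/* assumes 4KB pages */
	#define CONT_PTES	16					/* PTEs per contiguous block */
	#define CONT_SIZE	((uint64_t)CONT_PTES << PAGE_SHIFT)	/* 64KB */
	#define CONT_MASK	(~(CONT_SIZE - 1))

	/* May the mapping [addr, next) -> phys legally carry the PTE_CONT hint? */
	static bool cont_range_ok(uint64_t addr, uint64_t next, uint64_t phys)
	{
		/*
		 * Virtual start, virtual end and physical base must all be
		 * CONT_SIZE-aligned; this is the test the revert removes.
		 */
		return ((addr | next | phys) & ~CONT_MASK) == 0;
	}

Sixteen such aligned PTEs can be cached as a single 64KB TLB entry, which is why the commit message notes that a break-before-make violation here affects a whole contiguous range rather than a single page.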