about summary refs log tree commit diff stats
path: root/arch/arm64/mm
diff options
context:
space:
mode:
authorJames Morse <james.morse@arm.com>2016-08-24 13:27:30 -0400
committerWill Deacon <will.deacon@arm.com>2016-08-25 13:00:30 -0400
commit5ebe3a44cc744d11cb60d8438106a9322b7c04dc (patch)
tree24cc917fb9078acdc2b1deee131c4744d625cf1e /arch/arm64/mm
parentb61130381120398876b86282082ad9f24976dfcf (diff)
arm64: hibernate: Support DEBUG_PAGEALLOC
DEBUG_PAGEALLOC removes the valid bit of page table entries to prevent any access to unallocated memory. Hibernate uses this as a hint that those pages don't need to be saved/restored. This patch adds the kernel_page_present() function it uses. hibernate.c copies the resume kernel's linear map for use during restore. Add _copy_pte() to fill-in the holes made by DEBUG_PAGEALLOC in the resume kernel, so we can restore data the original kernel had at these addresses. Finally, DEBUG_PAGEALLOC means the linear-map alias of KERNEL_START to KERNEL_END may have holes in it, so we can't lazily clean this whole area to the PoC. Only clean the new mmuoff region, and the kernel/kvm idmaps. This reverts commit da24eb1f3f9e2c7b75c5f8c40d8e48e2c4789596. Reported-by: Will Deacon <will.deacon@arm.com> Signed-off-by: James Morse <james.morse@arm.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--arch/arm64/mm/pageattr.c41
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index ca6d268e3313..8def55e7249b 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -139,4 +139,43 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
139		__pgprot(0),
140		__pgprot(PTE_VALID));
141}
142#ifdef CONFIG_HIBERNATION
143/*
144 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
145 * is used to determine if a linear map page has been marked as not-valid by
146 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
147 * This is based on kern_addr_valid(), which almost does what we need.
148 *
149 * Because this is only called on the kernel linear map, p?d_sect() implies
150 * p?d_present(). When debug_pagealloc is enabled, sections mappings are
151 * disabled.
152 */
153bool kernel_page_present(struct page *page)
154{
155 pgd_t *pgd;
156 pud_t *pud;
157 pmd_t *pmd;
158 pte_t *pte;
159 unsigned long addr = (unsigned long)page_address(page);
160
161 pgd = pgd_offset_k(addr);
162 if (pgd_none(*pgd))
163 return false;
164
165 pud = pud_offset(pgd, addr);
166 if (pud_none(*pud))
167 return false;
168 if (pud_sect(*pud))
169 return true;
170
171 pmd = pmd_offset(pud, addr);
172 if (pmd_none(*pmd))
173 return false;
174 if (pmd_sect(*pmd))
175 return true;
176
177 pte = pte_offset_kernel(pmd, addr);
178 return pte_valid(*pte);
179}
180#endif /* CONFIG_HIBERNATION */
181#endif /* CONFIG_DEBUG_PAGEALLOC */