author     Jeremy Fitzhardinge <jeremy@goop.org>	2009-02-04 21:33:38 -0500
committer  H. Peter Anvin <hpa@zytor.com>	2009-02-05 00:33:09 -0500
commit     b534816b552d35bbd3c60702139ed5c7da2f55c2
tree       1e48095368b05b498264105757e6d79e78dace62
parent     4560839939f4b4a96e21e80584f87308ac93c1da
x86: don't apply __supported_pte_mask to non-present ptes
On an x86 system which doesn't support global mappings,
__supported_pte_mask has _PAGE_GLOBAL clear, to make sure it never
appears in the PTE.  pfn_pte() and friends enforce this with:

	static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
	{
		return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
			      pgprot_val(pgprot)) & __supported_pte_mask);
	}

However, we overload _PAGE_GLOBAL with _PAGE_PROTNONE on non-present
ptes to distinguish them from swap entries, so applying
__supported_pte_mask indiscriminately clears that bit and corrupts the
pte.

The fix is to apply __supported_pte_mask only to present ptes.  This is
the right solution because it lets us completely ignore any overlap
between the present pte bits and the non-present pte-as-swap-entry use
of the same bits.

__supported_pte_mask contains the set of flags we support on the
current hardware.  We also use bits in the pte for things like
logically present ptes with no permissions, and swap entries for
swapped-out pages.  We should only apply __supported_pte_mask to
present ptes, because otherwise we may destroy other information being
stored in them.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
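To make the failure mode concrete, here is a small standalone sketch
(not kernel code; the bit values and the simplified present-only check
are hard-coded assumptions mirroring the x86 layout, where _PAGE_PRESENT
is bit 0 and _PAGE_PROTNONE reuses _PAGE_GLOBAL, bit 8).  It shows how
masking a non-present PROT_NONE pte with __supported_pte_mask loses
_PAGE_PROTNONE, while masking only present ptes keeps it:

	/*
	 * Standalone illustration, not kernel code.  The bit values are
	 * hard-coded assumptions mirroring the x86 pte layout.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define _PAGE_PRESENT	0x001ULL
	#define _PAGE_GLOBAL	0x100ULL
	#define _PAGE_PROTNONE	_PAGE_GLOBAL	/* overloaded on non-present ptes */

	int main(void)
	{
		/* Hardware without global pages: _PAGE_GLOBAL is unsupported. */
		uint64_t supported_pte_mask = ~_PAGE_GLOBAL;

		/* A PROT_NONE pte: logically present, but _PAGE_PRESENT clear. */
		uint64_t pte = _PAGE_PROTNONE;

		/* Old behaviour: the mask is applied unconditionally. */
		uint64_t old_pte = pte & supported_pte_mask;

		/* New behaviour: only present ptes are masked. */
		uint64_t new_pte = (pte & _PAGE_PRESENT)
					? (pte & supported_pte_mask) : pte;

		printf("old: %#llx  (_PAGE_PROTNONE lost, pte corrupted)\n",
		       (unsigned long long)old_pte);
		printf("new: %#llx  (_PAGE_PROTNONE preserved)\n",
		       (unsigned long long)new_pte);
		return 0;
	}

The present-only test above is what the patch centralizes in
massage_pgprot(), so every constructor touched below (pfn_pte, pfn_pmd,
pte_modify, canon_pgprot, mfn_pte) gets the same behaviour.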
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/pgtable.h	26
-rw-r--r--	arch/x86/include/asm/xen/page.h	2
2 files changed, 21 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 06bbcbd66e9c..4f5af8447d54 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -302,16 +302,30 @@ static inline pte_t pte_mkspecial(pte_t pte)
 
 extern pteval_t __supported_pte_mask;
 
+/*
+ * Mask out unsupported bits in a present pgprot.  Non-present pgprots
+ * can use those bits for other purposes, so leave them be.
+ */
+static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
+{
+	pgprotval_t protval = pgprot_val(pgprot);
+
+	if (protval & _PAGE_PRESENT)
+		protval &= __supported_pte_mask;
+
+	return protval;
+}
+
 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 {
-	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
-		      pgprot_val(pgprot)) & __supported_pte_mask);
+	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
+		     massage_pgprot(pgprot));
 }
 
 static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 {
-	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
-		      pgprot_val(pgprot)) & __supported_pte_mask);
+	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
+		     massage_pgprot(pgprot));
 }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -323,7 +337,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	 * the newprot (if present):
 	 */
 	val &= _PAGE_CHG_MASK;
-	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;
+	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
 
 	return __pte(val);
 }
@@ -339,7 +353,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 #define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
 
-#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
+#define canon_pgprot(p) __pgprot(massage_pgprot(p))
 
 static inline int is_new_memtype_allowed(unsigned long flags,
 					 unsigned long new_flags)
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 7ef617ef1df3..4bd990ee43df 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -137,7 +137,7 @@ static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
 	pte_t pte;
 
 	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
-			(pgprot_val(pgprot) & __supported_pte_mask);
+			massage_pgprot(pgprot);
 
 	return pte;
 }