Diffstat (limited to 'arch/x86/mm/pageattr.c')
 arch/x86/mm/pageattr.c | 232 ++++++++++++++++-------------
 1 file changed, 127 insertions(+), 105 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4119379f80ff..464d8fc21ce6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -16,6 +16,7 @@
 #include <asm/sections.h>
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
+#include <asm/proto.h>
 
 /*
  * The current flushing context - we pass it instead of 5 arguments:
@@ -26,8 +27,23 @@ struct cpa_data {
 	pgprot_t	mask_clr;
 	int		numpages;
 	int		flushtlb;
+	unsigned long	pfn;
 };
 
+#ifdef CONFIG_X86_64
+
+static inline unsigned long highmap_start_pfn(void)
+{
+	return __pa(_text) >> PAGE_SHIFT;
+}
+
+static inline unsigned long highmap_end_pfn(void)
+{
+	return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
+}
+
+#endif
+
 static inline int
 within(unsigned long addr, unsigned long start, unsigned long end)
 {
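
An aside on what the new helpers bracket: a hypothetical, standalone C sketch of the pfn computation (PMD_SIZE of 2 MB and PAGE_SHIFT of 12 as on x86-64, a 64-bit host assumed; the physical addresses are made up):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(2UL << 20)			/* 2 MB, as on x86-64 */
#define round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))	/* y: power of 2 */

int main(void)
{
	/* hypothetical __pa(_text) and __pa(_end) values */
	unsigned long pa_text = 0x200000;
	unsigned long pa_end  = 0x5f4321;

	/* the kernel image, rounded up to a full large page, in pfns */
	unsigned long start_pfn = pa_text >> PAGE_SHIFT;
	unsigned long end_pfn   = round_up(pa_end, PMD_SIZE) >> PAGE_SHIFT;

	printf("highmap pfns: [%#lx, %#lx)\n", start_pfn, end_pfn);
	return 0;	/* prints [0x200, 0x600) */
}
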
@@ -123,29 +139,14 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
 	}
 }
 
-#define HIGH_MAP_START	__START_KERNEL_map
-#define HIGH_MAP_END	(__START_KERNEL_map + KERNEL_TEXT_SIZE)
-
-
-/*
- * Converts a virtual address to a X86-64 highmap address
- */
-static unsigned long virt_to_highmap(void *address)
-{
-#ifdef CONFIG_X86_64
-	return __pa((unsigned long)address) + HIGH_MAP_START - phys_base;
-#else
-	return (unsigned long)address;
-#endif
-}
-
 /*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
  * right (again, ioremap() on BIOS memory is not uncommon) so this function
  * checks and fixes these known static required protection bits.
  */
-static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
+static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+					  unsigned long pfn)
 {
 	pgprot_t forbidden = __pgprot(0);
 
@@ -153,30 +154,23 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
 	 * The BIOS area between 640k and 1Mb needs to be executable for
 	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
 	 */
-	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
+	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_NX;
 
 	/*
 	 * The kernel text needs to be executable for obvious reasons
-	 * Does not cover __inittext since that is gone later on
+	 * Does not cover __inittext since that is gone later on. On
+	 * 64bit we do not enforce !NX on the low mapping
 	 */
 	if (within(address, (unsigned long)_text, (unsigned long)_etext))
 		pgprot_val(forbidden) |= _PAGE_NX;
-	/*
-	 * Do the same for the x86-64 high kernel mapping
-	 */
-	if (within(address, virt_to_highmap(_text), virt_to_highmap(_etext)))
-		pgprot_val(forbidden) |= _PAGE_NX;
 
-	/* The .rodata section needs to be read-only */
-	if (within(address, (unsigned long)__start_rodata,
-				(unsigned long)__end_rodata))
-		pgprot_val(forbidden) |= _PAGE_RW;
 	/*
-	 * Do the same for the x86-64 high kernel mapping
+	 * The .rodata section needs to be read-only. Using the pfn
+	 * catches all aliases.
 	 */
-	if (within(address, virt_to_highmap(__start_rodata),
-				virt_to_highmap(__end_rodata)))
+	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
+		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
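
The comment above ("using the pfn catches all aliases") is the heart of this patch. A hypothetical standalone sketch shows why: a pfn test fires for every virtual alias of a page, while an address test only covers the one mapping it was written against. within() is copied from this file; all addresses are invented and the layout constants only mimic x86-64 (64-bit host assumed):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xffff810000000000UL	/* direct mapping base */
#define KERNEL_MAP	0xffffffff80000000UL	/* high kernel mapping base */

static int within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

int main(void)
{
	unsigned long rodata_pfn = 0x400, rodata_end_pfn = 0x410; /* made up */
	unsigned long direct = PAGE_OFFSET + (rodata_pfn << PAGE_SHIFT);
	unsigned long high   = KERNEL_MAP  + (rodata_pfn << PAGE_SHIFT);

	/* pfn check: true no matter which alias the caller used */
	printf("pfn:    %d\n", within(rodata_pfn, 0x400, rodata_end_pfn));

	/* address check against the direct map: misses the high alias */
	printf("direct: %d\n", within(direct,
				      PAGE_OFFSET + (0x400UL << PAGE_SHIFT),
				      PAGE_OFFSET + (0x410UL << PAGE_SHIFT)));
	printf("high:   %d\n", within(high,
				      PAGE_OFFSET + (0x400UL << PAGE_SHIFT),
				      PAGE_OFFSET + (0x410UL << PAGE_SHIFT)));
	return 0;	/* prints 1, 1, 0 */
}
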
@@ -253,7 +247,7 @@ static int
 try_preserve_large_page(pte_t *kpte, unsigned long address,
 			struct cpa_data *cpa)
 {
-	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr;
+	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
 	pte_t new_pte, old_pte, *tmp;
 	pgprot_t old_prot, new_prot;
 	int i, do_split = 1;
@@ -301,7 +295,15 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 
 	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
 	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
-	new_prot = static_protections(new_prot, address);
+
+	/*
+	 * old_pte points to the large page base address. So we need
+	 * to add the offset of the virtual address:
+	 */
+	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
+	cpa->pfn = pfn;
+
+	new_prot = static_protections(new_prot, address, pfn);
 
 	/*
 	 * We need to check the full range, whether
@@ -309,8 +311,9 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * the pages in the range we try to preserve:
 	 */
 	addr = address + PAGE_SIZE;
-	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE) {
-		pgprot_t chk_prot = static_protections(new_prot, addr);
+	pfn++;
+	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
+		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);
 
 		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
 			goto out_unlock;
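
The pfn arithmetic for the large page offset is easy to verify by hand. A hypothetical standalone example (made-up base pfn and virtual address, a 2 MB large page, PAGE_SHIFT 12, 64-bit host assumed):

#include <stdio.h>

int main(void)
{
	unsigned long psize    = 2UL << 20;	/* 2 MB large page */
	unsigned long base_pfn = 0x800;		/* pte_pfn(old_pte), made up */
	unsigned long address  = 0xffff810001003000UL;

	/* offset of the virtual address inside the large page, in pages:
	 * address mod 2 MB is 0x3000, i.e. 3 pages into the mapping */
	unsigned long pfn = base_pfn + ((address & (psize - 1)) >> 12);

	printf("pfn = %#lx\n", pfn);	/* prints 0x803 */
	return 0;
}
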
@@ -505,46 +508,46 @@ out_unlock:
 	return 0;
 }
 
-static int __change_page_attr(unsigned long address, struct cpa_data *cpa)
+static int __change_page_attr(struct cpa_data *cpa, int primary)
 {
+	unsigned long address = cpa->vaddr;
 	int do_split, err;
 	unsigned int level;
-	struct page *kpte_page;
-	pte_t *kpte;
+	pte_t *kpte, old_pte;
 
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
-		return -EINVAL;
+		return primary ? -EINVAL : 0;
 
-	kpte_page = virt_to_page(kpte);
-	BUG_ON(PageLRU(kpte_page));
-	BUG_ON(PageCompound(kpte_page));
+	old_pte = *kpte;
+	if (!pte_val(old_pte)) {
+		if (!primary)
+			return 0;
+		printk(KERN_WARNING "CPA: called for zero pte. "
+		       "vaddr = %lx cpa->vaddr = %lx\n", address,
+		       cpa->vaddr);
+		WARN_ON(1);
+		return -EINVAL;
+	}
 
 	if (level == PG_LEVEL_4K) {
-		pte_t new_pte, old_pte = *kpte;
+		pte_t new_pte;
 		pgprot_t new_prot = pte_pgprot(old_pte);
-
-		if(!pte_val(old_pte)) {
-			printk(KERN_WARNING "CPA: called for zero pte. "
-			       "vaddr = %lx cpa->vaddr = %lx\n", address,
-			       cpa->vaddr);
-			WARN_ON(1);
-			return -EINVAL;
-		}
+		unsigned long pfn = pte_pfn(old_pte);
 
 		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
 		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
 
-		new_prot = static_protections(new_prot, address);
+		new_prot = static_protections(new_prot, address, pfn);
 
 		/*
 		 * We need to keep the pfn from the existing PTE,
 		 * after all we're only going to change it's attributes
 		 * not the memory it points to
 		 */
-		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
-
+		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
+		cpa->pfn = pfn;
 		/*
 		 * Do we really change anything ?
 		 */
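
In short, the new primary flag distinguishes the caller's own range from internally generated alias fix-ups; summarizing the hunk above:

/*
 * primary == 1 (user-requested range):
 *	missing pte          -> -EINVAL
 *	zero pte             -> warn once, return -EINVAL
 * primary == 0 (alias fix-up generated internally):
 *	missing or zero pte  -> silently ignored, returns 0
 */
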
@@ -581,67 +584,59 @@ repeat:
 	return err;
 }
 
-/**
- * change_page_attr_addr - Change page table attributes in linear mapping
- * @address: Virtual address in linear mapping.
- * @prot: New page table attribute (PAGE_*)
- *
- * Change page attributes of a page in the direct mapping. This is a variant
- * of change_page_attr() that also works on memory holes that do not have
- * mem_map entry (pfn_valid() is false).
- *
- * See change_page_attr() documentation for more details.
- *
- * Modules and drivers should use the set_memory_* APIs instead.
- */
-static int change_page_attr_addr(struct cpa_data *cpa)
+static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
+
+static int cpa_process_alias(struct cpa_data *cpa)
 {
-	int err;
-	unsigned long address = cpa->vaddr;
+	struct cpa_data alias_cpa;
+	int ret = 0;
 
-#ifdef CONFIG_X86_64
-	unsigned long phys_addr = __pa(address);
+	if (cpa->pfn > max_pfn_mapped)
+		return 0;
 
 	/*
-	 * If we are inside the high mapped kernel range, then we
-	 * fixup the low mapping first. __va() returns the virtual
-	 * address in the linear mapping:
+	 * No need to redo, when the primary call touched the direct
+	 * mapping already:
 	 */
-	if (within(address, HIGH_MAP_START, HIGH_MAP_END))
-		address = (unsigned long) __va(phys_addr);
-#endif
+	if (!within(cpa->vaddr, PAGE_OFFSET,
+		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
 
-	err = __change_page_attr(address, cpa);
-	if (err)
-		return err;
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
+
+		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+	}
 
 #ifdef CONFIG_X86_64
+	if (ret)
+		return ret;
+	/*
+	 * No need to redo, when the primary call touched the high
+	 * mapping already:
+	 */
+	if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
+		return 0;
+
 	/*
 	 * If the physical address is inside the kernel map, we need
 	 * to touch the high mapped kernel as well:
 	 */
-	if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) {
-		/*
-		 * Calc the high mapping address. See __phys_addr()
-		 * for the non obvious details.
-		 *
-		 * Note that NX and other required permissions are
-		 * checked in static_protections().
-		 */
-		address = phys_addr + HIGH_MAP_START - phys_base;
+	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
+		return 0;
 
-		/*
-		 * Our high aliases are imprecise, because we check
-		 * everything between 0 and KERNEL_TEXT_SIZE, so do
-		 * not propagate lookup failures back to users:
-		 */
-		__change_page_attr(address, cpa);
-	}
+	alias_cpa = *cpa;
+	alias_cpa.vaddr =
+		(cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
+
+	/*
+	 * The high mapping range is imprecise, so ignore the return value.
+	 */
+	__change_page_attr_set_clr(&alias_cpa, 0);
 #endif
-	return err;
+	return ret;
 }
 
-static int __change_page_attr_set_clr(struct cpa_data *cpa)
+static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 {
 	int ret, numpages = cpa->numpages;
 
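
A hypothetical standalone sketch of the two alias addresses that cpa_process_alias() derives from a single pfn (the constants mimic the x86-64 layout of this era; phys_base is taken as 0, i.e. no kernel relocation; 64-bit host assumed):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xffff810000000000UL	/* direct mapping base */
#define KERNEL_MAP	0xffffffff80000000UL	/* __START_KERNEL_map */

int main(void)
{
	unsigned long pfn = 0x800;		/* physical 8 MB, made up */
	unsigned long phys_base = 0;		/* no relocation assumed */

	unsigned long direct = PAGE_OFFSET + (pfn << PAGE_SHIFT);
	unsigned long high   = (pfn << PAGE_SHIFT) + KERNEL_MAP - phys_base;

	printf("direct map alias: %#lx\n", direct);	/* 0xffff810000800000 */
	printf("high map alias:   %#lx\n", high);	/* 0xffffffff80800000 */
	return 0;
}
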
@@ -651,10 +646,17 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa)
 	 * preservation check.
 	 */
 	cpa->numpages = numpages;
-	ret = change_page_attr_addr(cpa);
+
+	ret = __change_page_attr(cpa, checkalias);
 	if (ret)
 		return ret;
 
+	if (checkalias) {
+		ret = cpa_process_alias(cpa);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * Adjust the number of pages with the result of the
 	 * CPA operation. Either a large page has been
@@ -677,7 +679,7 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 				    pgprot_t mask_set, pgprot_t mask_clr)
 {
 	struct cpa_data cpa;
-	int ret, cache;
+	int ret, cache, checkalias;
 
 	/*
 	 * Check, if we are requested to change a not supported
@@ -703,7 +705,10 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	cpa.mask_clr = mask_clr;
 	cpa.flushtlb = 0;
 
-	ret = __change_page_attr_set_clr(&cpa);
+	/* No alias checking for _NX bit modifications */
+	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
+
+	ret = __change_page_attr_set_clr(&cpa, checkalias);
 
 	/*
 	 * Check whether we really changed something:
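
The checkalias expression deserves a second look: the alias walk is skipped only when the requested change is exactly a _PAGE_NX set or clear and nothing else. A hypothetical standalone check (flag bit positions match the x86 PTE encoding; 64-bit host assumed):

#include <stdio.h>

#define _PAGE_RW	(1UL << 1)
#define _PAGE_NX	(1UL << 63)

static int checkalias(unsigned long mask_set, unsigned long mask_clr)
{
	return (mask_set | mask_clr) != _PAGE_NX;
}

int main(void)
{
	printf("%d\n", checkalias(_PAGE_NX, 0));	/* 0: NX-only, skip */
	printf("%d\n", checkalias(0, _PAGE_NX));	/* 0: NX-only, skip */
	printf("%d\n", checkalias(0, _PAGE_RW));	/* 1: check aliases */
	printf("%d\n", checkalias(_PAGE_NX | _PAGE_RW, 0)); /* 1: mixed */
	return 0;
}
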
@@ -841,7 +846,7 @@ static int __set_pages_p(struct page *page, int numpages)
 				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
 				.mask_clr = __pgprot(0)};
 
-	return __change_page_attr_set_clr(&cpa);
+	return __change_page_attr_set_clr(&cpa, 1);
 }
 
 static int __set_pages_np(struct page *page, int numpages)
@@ -851,7 +856,7 @@ static int __set_pages_np(struct page *page, int numpages)
 				.mask_set = __pgprot(0),
 				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};
 
-	return __change_page_attr_set_clr(&cpa);
+	return __change_page_attr_set_clr(&cpa, 1);
 }
 
 void kernel_map_pages(struct page *page, int numpages, int enable)
@@ -894,7 +899,24 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	 */
 	cpa_fill_pool();
 }
-#endif
+
+#ifdef CONFIG_HIBERNATION
+
+bool kernel_page_present(struct page *page)
+{
+	unsigned int level;
+	pte_t *pte;
+
+	if (PageHighMem(page))
+		return false;
+
+	pte = lookup_address((unsigned long)page_address(page), &level);
+	return (pte_val(*pte) & _PAGE_PRESENT);
+}
+
+#endif /* CONFIG_HIBERNATION */
+
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 /*
  * The testcases use internal knowledge of the implementation that shouldn't