Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r--	arch/x86/mm/pageattr.c	104
1 file changed, 67 insertions(+), 37 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3cfe9ced8a4c..7e600c1962db 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/pfn.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -590,9 +591,12 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 	unsigned int level;
 	pte_t *kpte, old_pte;
 
-	if (cpa->flags & CPA_PAGES_ARRAY)
-		address = (unsigned long)page_address(cpa->pages[cpa->curpage]);
-	else if (cpa->flags & CPA_ARRAY)
+	if (cpa->flags & CPA_PAGES_ARRAY) {
+		struct page *page = cpa->pages[cpa->curpage];
+		if (unlikely(PageHighMem(page)))
+			return 0;
+		address = (unsigned long)page_address(page);
+	} else if (cpa->flags & CPA_ARRAY)
 		address = cpa->vaddr[cpa->curpage];
 	else
 		address = *cpa->vaddr;
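Note on the PageHighMem() bail-out added above: page_address() only yields a usable kernel virtual address for lowmem pages (or highmem pages that happen to be kmapped), so a highmem page has nothing in the linear mapping whose attributes could be changed. The stand-alone C sketch below uses hypothetical stand-in types rather than the kernel's own and only illustrates that guard:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in types and helpers, for illustration only. */
struct page { bool highmem; void *lowmem_vaddr; };

static bool PageHighMem(const struct page *p)
{
	return p->highmem;
}

/* Highmem pages that are not currently kmapped have no kernel virtual address. */
static void *page_address(const struct page *p)
{
	return p->highmem ? NULL : p->lowmem_vaddr;
}

static int change_attr_for(const struct page *p)
{
	unsigned long address;

	if (PageHighMem(p))
		return 0;	/* nothing in the linear mapping to modify */

	address = (unsigned long)page_address(p);
	printf("would change attributes of mapping at %#lx\n", address);
	return 0;
}

int main(void)
{
	static char lowmem_backing[64];
	struct page lowmem  = { .highmem = false, .lowmem_vaddr = lowmem_backing };
	struct page highmem = { .highmem = true };

	change_attr_for(&lowmem);
	change_attr_for(&highmem);
	return 0;
}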
@@ -681,8 +685,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
 static int cpa_process_alias(struct cpa_data *cpa)
 {
 	struct cpa_data alias_cpa;
-	int ret = 0;
-	unsigned long temp_cpa_vaddr, vaddr;
+	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
+	unsigned long vaddr, remapped;
+	int ret;
 
 	if (cpa->pfn >= max_pfn_mapped)
 		return 0;
@@ -695,9 +700,12 @@ static int cpa_process_alias(struct cpa_data *cpa)
 	 * No need to redo, when the primary call touched the direct
 	 * mapping already:
 	 */
-	if (cpa->flags & CPA_PAGES_ARRAY)
-		vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]);
-	else if (cpa->flags & CPA_ARRAY)
+	if (cpa->flags & CPA_PAGES_ARRAY) {
+		struct page *page = cpa->pages[cpa->curpage];
+		if (unlikely(PageHighMem(page)))
+			return 0;
+		vaddr = (unsigned long)page_address(page);
+	} else if (cpa->flags & CPA_ARRAY)
 		vaddr = cpa->vaddr[cpa->curpage];
 	else
 		vaddr = *cpa->vaddr;
@@ -706,42 +714,55 @@ static int cpa_process_alias(struct cpa_data *cpa)
 		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
 
 		alias_cpa = *cpa;
-		temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
-		alias_cpa.vaddr = &temp_cpa_vaddr;
+		alias_cpa.vaddr = &laddr;
 		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
 
-
 		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+		if (ret)
+			return ret;
 	}
 
 #ifdef CONFIG_X86_64
-	if (ret)
-		return ret;
-	/*
-	 * No need to redo, when the primary call touched the high
-	 * mapping already:
-	 */
-	if (within(vaddr, (unsigned long) _text, _brk_end))
-		return 0;
-
 	/*
-	 * If the physical address is inside the kernel map, we need
+	 * If the primary call didn't touch the high mapping already
+	 * and the physical address is inside the kernel map, we need
 	 * to touch the high mapped kernel as well:
 	 */
-	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
-		return 0;
+	if (!within(vaddr, (unsigned long)_text, _brk_end) &&
+	    within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
+		unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
+					       __START_KERNEL_map - phys_base;
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = &temp_cpa_vaddr;
+		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
 
-	alias_cpa = *cpa;
-	temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
-	alias_cpa.vaddr = &temp_cpa_vaddr;
-	alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+		/*
+		 * The high mapping range is imprecise, so ignore the
+		 * return value.
+		 */
+		__change_page_attr_set_clr(&alias_cpa, 0);
+	}
+#endif
 
 	/*
-	 * The high mapping range is imprecise, so ignore the return value.
+	 * If the PMD page was partially used for per-cpu remapping,
+	 * the recycled area needs to be split and modified.  Because
+	 * the area is always proper subset of a PMD page
+	 * cpa->numpages is guaranteed to be 1 for these areas, so
+	 * there's no need to loop over and check for further remaps.
 	 */
-	__change_page_attr_set_clr(&alias_cpa, 0);
-#endif
-	return ret;
+	remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
+	if (remapped) {
+		WARN_ON(cpa->numpages > 1);
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = &remapped;
+		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
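For reference, the within() checks used throughout cpa_process_alias() are half-open interval tests (addr >= start && addr < end). The minimal user-space sketch below, with made-up address bounds, mirrors how the direct-mapping check decides whether an alias still needs to be processed; it is an illustration, not the kernel implementation:

#include <stdio.h>

/* Half-open interval test mirroring within() in arch/x86/mm/pageattr.c. */
static inline int within(unsigned long addr, unsigned long start,
			 unsigned long end)
{
	return addr >= start && addr < end;
}

int main(void)
{
	/* Made-up bounds; the real ones come from PAGE_OFFSET and max_pfn_mapped. */
	unsigned long page_offset = 0xffff880000000000UL;
	unsigned long direct_end  = page_offset + (1UL << 30);
	unsigned long vaddr       = 0xffffffffa0001000UL;	/* e.g. a module address */

	if (within(vaddr, page_offset, direct_end))
		printf("primary call already covers the direct mapping\n");
	else
		printf("process the direct-mapping alias separately\n");
	return 0;
}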
@@ -982,12 +1003,15 @@ EXPORT_SYMBOL(set_memory_array_uc);
 int _set_memory_wc(unsigned long addr, int numpages)
 {
 	int ret;
+	unsigned long addr_copy = addr;
+
 	ret = change_page_attr_set(&addr, numpages,
 				    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
-
 	if (!ret) {
-		ret = change_page_attr_set(&addr, numpages,
-				    __pgprot(_PAGE_CACHE_WC), 0);
+		ret = change_page_attr_set_clr(&addr_copy, numpages,
+					       __pgprot(_PAGE_CACHE_WC),
+					       __pgprot(_PAGE_CACHE_MASK),
+					       0, 0, NULL);
 	}
 	return ret;
 }
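The reworked _set_memory_wc() now clears every cache-attribute bit before setting the WC encoding, instead of OR-ing WC on top of the UC- bits left by the first step. Below is a small illustrative sketch of that clear-then-set sequence; the bit values mimic the x86 PWT/PCD/PAT layout but are defined locally for the example, and the authoritative definitions live in the kernel's <asm/pgtable_types.h>:

#include <stdio.h>

/* Locally defined, illustrative page-attribute bits (not the kernel headers). */
#define _PAGE_PWT		(1UL << 3)
#define _PAGE_PCD		(1UL << 4)
#define _PAGE_PAT		(1UL << 7)
#define _PAGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_WC		(_PAGE_PWT)

int main(void)
{
	unsigned long pte = _PAGE_CACHE_UC_MINUS;	/* state after the first step */

	/*
	 * Second step: clear all cache bits, then set the WC encoding,
	 * like the clr/set pair passed to change_page_attr_set_clr() above.
	 * Merely OR-ing in _PAGE_CACHE_WC would leave PCD|PWT set, i.e. UC.
	 */
	pte &= ~_PAGE_CACHE_MASK;
	pte |= _PAGE_CACHE_WC;

	printf("cache bits: %#lx (WC encoding)\n", pte & _PAGE_CACHE_MASK);
	return 0;
}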
@@ -1104,7 +1128,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
 	int free_idx;
 
 	for (i = 0; i < addrinarray; i++) {
-		start = (unsigned long)page_address(pages[i]);
+		if (PageHighMem(pages[i]))
+			continue;
+		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
 		if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
 			goto err_out;
@@ -1117,7 +1143,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
 err_out:
 	free_idx = i;
 	for (i = 0; i < free_idx; i++) {
-		start = (unsigned long)page_address(pages[i]);
+		if (PageHighMem(pages[i]))
+			continue;
+		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
 		free_memtype(start, end);
 	}
@@ -1146,7 +1174,9 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
 		return retval;
 
 	for (i = 0; i < addrinarray; i++) {
-		start = (unsigned long)page_address(pages[i]);
+		if (PageHighMem(pages[i]))
+			continue;
+		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
 		free_memtype(start, end);
 	}
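In the set_pages_array_*() hunks above, the address handed to reserve_memtype()/free_memtype() is now derived from the pfn rather than from page_address(), and highmem pages, which have no fixed kernel virtual address, are skipped entirely. The user-space sketch below reproduces that loop shape with simplified stand-ins for struct page and its helpers; it is illustrative only:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Simplified stand-ins for struct page and its helpers, for illustration only. */
struct page { unsigned long pfn; bool highmem; };

static bool PageHighMem(const struct page *p)		{ return p->highmem; }
static unsigned long page_to_pfn(const struct page *p)	{ return p->pfn; }

/*
 * Walk an array of pages the way the reworked set_pages_array_wb() does:
 * skip highmem pages (no linear mapping to modify) and hand a pfn-derived
 * range to the memtype bookkeeping.
 */
static void release_memtypes(struct page **pages, int addrinarray)
{
	for (int i = 0; i < addrinarray; i++) {
		unsigned long start, end;

		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		printf("free_memtype(%#lx, %#lx)\n", start, end);
	}
}

int main(void)
{
	struct page lowmem  = { .pfn = 0x01234, .highmem = false };
	struct page highmem = { .pfn = 0x90000, .highmem = true };
	struct page *pages[] = { &lowmem, &highmem };

	release_memtypes(pages, 2);
	return 0;
}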