aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm/pageattr.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r--  arch/x86/mm/pageattr.c  |  30 +++++----------------------
1 file changed, 10 insertions(+), 20 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 7e600c1962db..dd38bfbefd1f 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -12,6 +12,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/pfn.h>
+#include <linux/percpu.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -143,6 +144,7 @@ void clflush_cache_range(void *vaddr, unsigned int size)
 
 	mb();
 }
+EXPORT_SYMBOL_GPL(clflush_cache_range);
 
 static void __cpa_flush_all(void *arg)
 {
@@ -686,7 +688,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
 {
 	struct cpa_data alias_cpa;
 	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
-	unsigned long vaddr, remapped;
+	unsigned long vaddr;
 	int ret;
 
 	if (cpa->pfn >= max_pfn_mapped)
@@ -744,24 +746,6 @@ static int cpa_process_alias(struct cpa_data *cpa)
744 } 746 }
745#endif 747#endif
746 748
747 /*
748 * If the PMD page was partially used for per-cpu remapping,
749 * the recycled area needs to be split and modified. Because
750 * the area is always proper subset of a PMD page
751 * cpa->numpages is guaranteed to be 1 for these areas, so
752 * there's no need to loop over and check for further remaps.
753 */
754 remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
755 if (remapped) {
756 WARN_ON(cpa->numpages > 1);
757 alias_cpa = *cpa;
758 alias_cpa.vaddr = &remapped;
759 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
760 ret = __change_page_attr_set_clr(&alias_cpa, 0);
761 if (ret)
762 return ret;
763 }
764
765 return 0; 749 return 0;
766} 750}
767 751
@@ -822,6 +806,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 {
 	struct cpa_data cpa;
 	int ret, cache, checkalias;
+	unsigned long baddr = 0;
 
 	/*
 	 * Check, if we are requested to change a not supported
@@ -853,6 +838,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 			 */
 			WARN_ON_ONCE(1);
 		}
+		/*
+		 * Save address for cache flush. *addr is modified in the call
+		 * to __change_page_attr_set_clr() below.
+		 */
+		baddr = *addr;
 	}
 
 	/* Must avoid aliasing mappings in the highmem code */
@@ -900,7 +890,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 			cpa_flush_array(addr, numpages, cache,
 					cpa.flags, pages);
 		} else
-			cpa_flush_range(*addr, numpages, cache);
+			cpa_flush_range(baddr, numpages, cache);
 	} else
 		cpa_flush_all(cache);
 