aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBalbir Singh <bsingharora@gmail.com>2018-02-07 01:35:51 -0500
committerMichael Ellerman <mpe@ellerman.id.au>2018-02-08 07:56:11 -0500
commit4dd5f8a99e791a8c6500e3592f3ce81ae7edcde1 (patch)
treed81242a8d88ac881c8cd503239f44f60cf42f258
parenteeb715c3e995fbdda0cc05e61216c6c5609bce66 (diff)
powerpc/mm/radix: Split linear mapping on hot-unplug
This patch splits the linear mapping if the hot-unplug range is smaller than the mapping size. The code detects if the mapping needs to be split into a smaller size and if so, uses the stop machine infrastructure to clear the existing mapping and then remap the remaining range using a smaller page size. The code will skip any region of the mapping that overlaps with kernel text and warn about it once. We don't want to remove a mapping where the kernel text and the LMB we intend to remove overlap in the same TLB mapping, as it may affect the currently executing code. I've tested these changes under a kvm guest with 2 vcpus; from a split-mapping point of view, some of the caveats mentioned above applied to the testing I did. Fixes: 4b5d62ca17a1 ("powerpc/mm: add radix__remove_section_mapping()") Signed-off-by: Balbir Singh <bsingharora@gmail.com> [mpe: Tweak change log to match updated behaviour] Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--arch/powerpc/mm/pgtable-radix.c95
1 files changed, 74 insertions, 21 deletions
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 96e07d1f673d..328ff9abc333 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -17,6 +17,7 @@
17#include <linux/of_fdt.h> 17#include <linux/of_fdt.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/string_helpers.h> 19#include <linux/string_helpers.h>
20#include <linux/stop_machine.h>
20 21
21#include <asm/pgtable.h> 22#include <asm/pgtable.h>
22#include <asm/pgalloc.h> 23#include <asm/pgalloc.h>
@@ -685,6 +686,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
685 pud_clear(pud); 686 pud_clear(pud);
686} 687}
687 688
689struct change_mapping_params {
690 pte_t *pte;
691 unsigned long start;
692 unsigned long end;
693 unsigned long aligned_start;
694 unsigned long aligned_end;
695};
696
697static int stop_machine_change_mapping(void *data)
698{
699 struct change_mapping_params *params =
700 (struct change_mapping_params *)data;
701
702 if (!data)
703 return -1;
704
705 spin_unlock(&init_mm.page_table_lock);
706 pte_clear(&init_mm, params->aligned_start, params->pte);
707 create_physical_mapping(params->aligned_start, params->start);
708 create_physical_mapping(params->end, params->aligned_end);
709 spin_lock(&init_mm.page_table_lock);
710 return 0;
711}
712
688static void remove_pte_table(pte_t *pte_start, unsigned long addr, 713static void remove_pte_table(pte_t *pte_start, unsigned long addr,
689 unsigned long end) 714 unsigned long end)
690{ 715{
@@ -713,6 +738,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
713 } 738 }
714} 739}
715 740
741/*
742 * Helper to clear the pte and potentially split the mapping
743 */
744static void split_kernel_mapping(unsigned long addr, unsigned long end,
745 unsigned long size, pte_t *pte)
746{
747 unsigned long mask = ~(size - 1);
748 unsigned long aligned_start = addr & mask;
749 unsigned long aligned_end = addr + size;
750 struct change_mapping_params params;
751 bool split_region = false;
752
753 if ((end - addr) < size) {
754 		/*
755 		 * We're going to clear the PTE, but have not yet
756 		 * flushed the mapping, so it is time to remap and
757 		 * flush. The effects are visible outside the
758 		 * processor, and if we are running in code close to
759 		 * the mapping we cleared, we are in trouble.
760 		 */
761 if (overlaps_kernel_text(aligned_start, addr) ||
762 overlaps_kernel_text(end, aligned_end)) {
763 /*
764 * Hack, just return, don't pte_clear
765 */
766 WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
767 "text, not splitting\n", addr, end);
768 return;
769 }
770 split_region = true;
771 }
772
773 if (split_region) {
774 params.pte = pte;
775 params.start = addr;
776 params.end = end;
777 params.aligned_start = addr & ~(size - 1);
778 params.aligned_end = min_t(unsigned long, aligned_end,
779 (unsigned long)__va(memblock_end_of_DRAM()));
780 stop_machine(stop_machine_change_mapping, &params, NULL);
781 return;
782 }
783
784 pte_clear(&init_mm, addr, pte);
785}
786
716static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, 787static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
717 unsigned long end) 788 unsigned long end)
718{ 789{
@@ -728,13 +799,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
728 continue; 799 continue;
729 800
730 if (pmd_huge(*pmd)) { 801 if (pmd_huge(*pmd)) {
731 if (!IS_ALIGNED(addr, PMD_SIZE) || 802 split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
732 !IS_ALIGNED(next, PMD_SIZE)) {
733 WARN_ONCE(1, "%s: unaligned range\n", __func__);
734 continue;
735 }
736
737 pte_clear(&init_mm, addr, (pte_t *)pmd);
738 continue; 803 continue;
739 } 804 }
740 805
@@ -759,13 +824,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
759 continue; 824 continue;
760 825
761 if (pud_huge(*pud)) { 826 if (pud_huge(*pud)) {
762 if (!IS_ALIGNED(addr, PUD_SIZE) || 827 split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
763 !IS_ALIGNED(next, PUD_SIZE)) {
764 WARN_ONCE(1, "%s: unaligned range\n", __func__);
765 continue;
766 }
767
768 pte_clear(&init_mm, addr, (pte_t *)pud);
769 continue; 828 continue;
770 } 829 }
771 830
@@ -791,13 +850,7 @@ static void remove_pagetable(unsigned long start, unsigned long end)
791 continue; 850 continue;
792 851
793 if (pgd_huge(*pgd)) { 852 if (pgd_huge(*pgd)) {
794 if (!IS_ALIGNED(addr, PGDIR_SIZE) || 853 split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
795 !IS_ALIGNED(next, PGDIR_SIZE)) {
796 WARN_ONCE(1, "%s: unaligned range\n", __func__);
797 continue;
798 }
799
800 pte_clear(&init_mm, addr, (pte_t *)pgd);
801 continue; 854 continue;
802 } 855 }
803 856