diff options
author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2015-03-30 01:11:03 -0400 |
---|---|---|
committer | Michael Ellerman <mpe@ellerman.id.au> | 2015-04-16 21:23:39 -0400 |
commit | 691e95fd7396905a38d98919e9c150dbc3ea21a3 (patch) | |
tree | d89b898d4f42d167f0da169f482d7104b46870d8 /arch/powerpc/kernel | |
parent | dac5657067919161eb3273ca787d8ae9814801e7 (diff) |
powerpc/mm/thp: Make page table walk safe against thp split/collapse
We can disable a THP split or a hugepage collapse by disabling irq.
We do send IPI to all the cpus in the early part of split/collapse,
and disabling local irq ensures we don't make progress with
split/collapse. If the THP is getting split we return NULL from
find_linux_pte_or_hugepte(). For all the current callers it should be ok.
We need to be careful if we want to use returned pte_t pointer outside
the irq disabled region. W.r.t. THP split, the pfn remains the same,
but then a hugepage collapse will result in a pfn change. There are
a few steps we can take to avoid a hugepage collapse. One way is to take page
reference inside the irq disable region. Other option is to take
mmap_sem so that a parallel collapse will not happen. We can also
disable collapse by taking pmd_lock. Another method used by kvm
subsystem is to check whether we had an mmu_notifier update in between
using mmu_notifier_retry().
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r-- | arch/powerpc/kernel/eeh.c | 6 | ||||
-rw-r--r-- | arch/powerpc/kernel/io-workarounds.c | 10 |
2 files changed, 9 insertions, 7 deletions
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index a4c62eb0ee48..44b480e3a5af 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
@@ -334,9 +334,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) | |||
334 | int hugepage_shift; | 334 | int hugepage_shift; |
335 | 335 | ||
336 | /* | 336 | /* |
337 | * We won't find hugepages here, iomem | 337 | * We won't find hugepages here(this is iomem). Hence we are not |
338 | * worried about _PAGE_SPLITTING/collapse. Also we will not hit | ||
339 | * page table free, because of init_mm. | ||
338 | */ | 340 | */ |
339 | ptep = find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift); | 341 | ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift); |
340 | if (!ptep) | 342 | if (!ptep) |
341 | return token; | 343 | return token; |
342 | WARN_ON(hugepage_shift); | 344 | WARN_ON(hugepage_shift); |
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index 24b968f8e4d8..63d9cc4d7366 100644 --- a/arch/powerpc/kernel/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c | |||
@@ -71,15 +71,15 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr) | |||
71 | vaddr = (unsigned long)PCI_FIX_ADDR(addr); | 71 | vaddr = (unsigned long)PCI_FIX_ADDR(addr); |
72 | if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END) | 72 | if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END) |
73 | return NULL; | 73 | return NULL; |
74 | 74 | /* | |
75 | ptep = find_linux_pte_or_hugepte(init_mm.pgd, vaddr, | 75 | * We won't find huge pages here (iomem). Also can't hit |
76 | * a page table free due to init_mm | ||
77 | */ | ||
78 | ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr, | ||
76 | &hugepage_shift); | 79 | &hugepage_shift); |
77 | if (ptep == NULL) | 80 | if (ptep == NULL) |
78 | paddr = 0; | 81 | paddr = 0; |
79 | else { | 82 | else { |
80 | /* | ||
81 | * we don't have hugepages backing iomem | ||
82 | */ | ||
83 | WARN_ON(hugepage_shift); | 83 | WARN_ON(hugepage_shift); |
84 | paddr = pte_pfn(*ptep) << PAGE_SHIFT; | 84 | paddr = pte_pfn(*ptep) << PAGE_SHIFT; |
85 | } | 85 | } |