author		Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2013-06-12 17:05:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-06-12 19:29:46 -0400
commit		30dad30922ccc733cfdbfe232090cf674dc374dc
tree		7663089f93435ea0c298eb69058ca834c109f530 /mm
parent		27749f2ff0717e115680922000839ad6a576eddf
mm: migration: add migration_entry_wait_huge()
When we have a page fault on an address backed by a hugepage under migration, the kernel can't wait correctly and busy-loops on the hugepage fault until the migration finishes. As a result, users who try to kick off hugepage migration (via soft offlining, for example) occasionally experience a long delay or a soft lockup. This is because pte_offset_map_lock() can't get the correct migration entry or the correct page table lock for a hugepage. This patch introduces migration_entry_wait_huge() to solve this.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: <stable@vger.kernel.org> [2.6.35+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
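The core of the fix: for a hugepage, the ptep checked in the hugetlb.c hunk below points at a pud- or pmd-level entry, so casting it to pmd_t * and letting the old helper derive the lock via pte_offset_map_lock() walks the wrong page table level and takes the wrong lock; the migration entry is never seen, the fault returns immediately, and the task re-faults in a loop. Splitting the helper lets each caller pass the lock that actually guards its entry. A minimal sketch of the before/after call shape, using only names from the patch:

	/* Before: the lock is always derived from a pmd inside the helper.
	 * For hugepages the (pmd_t *) cast is bogus, so the wait is a
	 * no-op and the fault path spins until migration completes. */
	migration_entry_wait(mm, (pmd_t *)ptep, address);

	/* After: the shared helper takes the lock explicitly, and each
	 * caller supplies the lock its page table level really uses. */
	__migration_entry_wait(mm, ptep, pte_lockptr(mm, pmd));   /* normal pages */
	__migration_entry_wait(mm, pte, &mm->page_table_lock);    /* hugepages    */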
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	 2
-rw-r--r--	mm/migrate.c	23
2 files changed, 19 insertions(+), 6 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f8feeeca6686..e2bfbf73a551 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2839,7 +2839,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (ptep) {
 		entry = huge_ptep_get(ptep);
 		if (unlikely(is_hugetlb_entry_migration(entry))) {
-			migration_entry_wait(mm, (pmd_t *)ptep, address);
+			migration_entry_wait_huge(mm, ptep);
 			return 0;
 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
 			return VM_FAULT_HWPOISON_LARGE |
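For context, the check that gates the new call distinguishes a hugepage migration entry from a present, empty, or hwpoisoned one. A sketch of such a check, assuming the standard helpers huge_pte_none(), pte_present(), pte_to_swp_entry(), non_swap_entry(), and is_migration_entry() (all present in kernels of this era); the exact mm/hugetlb.c body is not part of this diff:

	static int is_hugetlb_entry_migration(pte_t pte)
	{
		swp_entry_t swp;

		/* A present or empty huge PTE cannot be a migration entry. */
		if (huge_pte_none(pte) || pte_present(pte))
			return 0;
		/* Decode the swap entry and test for the migration type. */
		swp = pte_to_swp_entry(pte);
		if (non_swap_entry(swp) && is_migration_entry(swp))
			return 1;
		else
			return 0;
	}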
diff --git a/mm/migrate.c b/mm/migrate.c
index b1f57501de9c..6f0c24438bba 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -200,15 +200,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
  * get to the page and wait until migration is finished.
  * When we return from this function the fault will be retried.
  */
-void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-				unsigned long address)
+static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+				spinlock_t *ptl)
 {
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
+	pte_t pte;
 	swp_entry_t entry;
 	struct page *page;
 
-	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+	spin_lock(ptl);
 	pte = *ptep;
 	if (!is_swap_pte(pte))
 		goto out;
@@ -236,6 +235,20 @@ out:
 	pte_unmap_unlock(ptep, ptl);
 }
 
+void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+				unsigned long address)
+{
+	spinlock_t *ptl = pte_lockptr(mm, pmd);
+	pte_t *ptep = pte_offset_map(pmd, address);
+	__migration_entry_wait(mm, ptep, ptl);
+}
+
+void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
+{
+	spinlock_t *ptl = &(mm)->page_table_lock;
+	__migration_entry_wait(mm, pte, ptl);
+}
+
 #ifdef CONFIG_BLOCK
 /* Returns true if all buffers are successfully locked */
 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
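The hunks above elide the middle of __migration_entry_wait(); as its comment says, it has to "get to the page and wait until migration is finished." A sketch of that elided stretch, assuming the standard helpers pte_to_swp_entry(), is_migration_entry(), migration_entry_to_page(), get_page_unless_zero(), and wait_on_page_locked() (all real in this kernel; the exact body is not shown in the diff):

	/* Decode the swap PTE; bail out unless it is a migration entry. */
	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	/* Pin the page under migration, drop the page table lock, and
	 * sleep until the migration path unlocks the page.  The caller
	 * then retries the fault against the updated page table. */
	page = migration_entry_to_page(entry);
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);

Because the lock is now a parameter, this one body serves both callers: the pte-level lock from pte_lockptr() for normal pages, and mm->page_table_lock, which hugetlb_fault() holds when updating huge PTEs, for hugepages.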