about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorAndrea Arcangeli <aarcange@redhat.com>2011-01-13 18:46:55 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2011-01-13 20:32:42 -0500
commit500d65d471018d9a13b0d51b7e141ed2a3555c1d (patch)
tree046dc2337f87a1a365fde126fab7f4ac9ae82793 /mm
parent0af4e98b6b095c74588af04872f83d333c958c32 (diff)
thp: pmd_trans_huge migrate bugcheck
No pmd_trans_huge should ever materialize in migration ptes areas, because we split the hugepage before migration ptes are instantiated.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memory.c5
-rw-r--r--mm/migrate.c7
2 files changed, 11 insertions, 1 deletion
diff --git a/mm/memory.c b/mm/memory.c
index c1a80e00458d..12ee1ea237f5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1305,6 +1305,10 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1305 goto out; 1305 goto out;
1306 } 1306 }
1307 if (pmd_trans_huge(*pmd)) { 1307 if (pmd_trans_huge(*pmd)) {
1308 if (flags & FOLL_SPLIT) {
1309 split_huge_page_pmd(mm, pmd);
1310 goto split_fallthrough;
1311 }
1308 spin_lock(&mm->page_table_lock); 1312 spin_lock(&mm->page_table_lock);
1309 if (likely(pmd_trans_huge(*pmd))) { 1313 if (likely(pmd_trans_huge(*pmd))) {
1310 if (unlikely(pmd_trans_splitting(*pmd))) { 1314 if (unlikely(pmd_trans_splitting(*pmd))) {
@@ -1320,6 +1324,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1320 spin_unlock(&mm->page_table_lock); 1324 spin_unlock(&mm->page_table_lock);
1321 /* fall through */ 1325 /* fall through */
1322 } 1326 }
1327split_fallthrough:
1323 if (unlikely(pmd_bad(*pmd))) 1328 if (unlikely(pmd_bad(*pmd)))
1324 goto no_page_table; 1329 goto no_page_table;
1325 1330
diff --git a/mm/migrate.c b/mm/migrate.c
index 690d0de993af..1a531b760b3b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -113,6 +113,8 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
113 goto out; 113 goto out;
114 114
115 pmd = pmd_offset(pud, addr); 115 pmd = pmd_offset(pud, addr);
116 if (pmd_trans_huge(*pmd))
117 goto out;
116 if (!pmd_present(*pmd)) 118 if (!pmd_present(*pmd))
117 goto out; 119 goto out;
118 120
@@ -632,6 +634,9 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
632 /* page was freed from under us. So we are done. */ 634 /* page was freed from under us. So we are done. */
633 goto move_newpage; 635 goto move_newpage;
634 } 636 }
637 if (unlikely(PageTransHuge(page)))
638 if (unlikely(split_huge_page(page)))
639 goto move_newpage;
635 640
636 /* prepare cgroup just returns 0 or -ENOMEM */ 641 /* prepare cgroup just returns 0 or -ENOMEM */
637 rc = -EAGAIN; 642 rc = -EAGAIN;
@@ -1063,7 +1068,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
1063 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma)) 1068 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
1064 goto set_status; 1069 goto set_status;
1065 1070
1066 page = follow_page(vma, pp->addr, FOLL_GET); 1071 page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);
1067 1072
1068 err = PTR_ERR(page); 1073 err = PTR_ERR(page);
1069 if (IS_ERR(page)) 1074 if (IS_ERR(page))