aboutsummaryrefslogtreecommitdiffstats
path: root/mm/huge_memory.c
diff options
context:
space:
mode:
authorMel Gorman <mgorman@suse.de>2013-10-07 06:28:43 -0400
committerIngo Molnar <mingo@kernel.org>2013-10-09 06:39:41 -0400
commitff9042b11a71c81238c70af168cd36b98a6d5a3c (patch)
tree26a09b0c1e8bf5cd091bed61475c6e0433571924 /mm/huge_memory.c
parent0c3a775e1e0b069bf765f8355b723ce0d18dcc6c (diff)
mm: Wait for THP migrations to complete during NUMA hinting faults
The locking for migrating THP is unusual. While normal page migration prevents parallel accesses using a migration PTE, THP migration relies on a combination of the page_table_lock, the page lock and the existence of the NUMA hinting PTE to guarantee safety but there is a bug in the scheme. If a THP page is currently being migrated and another thread traps a fault on the same page it checks if the page is misplaced. If it is not, then pmd_numa is cleared. The problem is that it checks if the page is misplaced without holding the page lock meaning that the racing thread can be migrating the THP when the second thread clears the NUMA bit and faults a stale page. This patch checks if the page is potentially being migrated and stalls using the lock_page if it is potentially being migrated before checking if the page is misplaced or not. Signed-off-by: Mel Gorman <mgorman@suse.de> Reviewed-by: Rik van Riel <riel@redhat.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1381141781-10992-6-git-send-email-mgorman@suse.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--mm/huge_memory.c23
1 files changed, 16 insertions, 7 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index dab2bab9d33e..f362363c0fad 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1295,13 +1295,14 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1295 if (current_nid == numa_node_id()) 1295 if (current_nid == numa_node_id())
1296 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 1296 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
1297 1297
1298 target_nid = mpol_misplaced(page, vma, haddr); 1298 /*
1299 if (target_nid == -1) { 1299 * Acquire the page lock to serialise THP migrations but avoid dropping
1300 put_page(page); 1300 * page_table_lock if at all possible
1301 goto clear_pmdnuma; 1301 */
1302 } 1302 if (trylock_page(page))
1303 goto got_lock;
1303 1304
1304 /* Acquire the page lock to serialise THP migrations */ 1305 /* Serialise against migrationa and check placement check placement */
1305 spin_unlock(&mm->page_table_lock); 1306 spin_unlock(&mm->page_table_lock);
1306 lock_page(page); 1307 lock_page(page);
1307 1308
@@ -1312,9 +1313,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1312 put_page(page); 1313 put_page(page);
1313 goto out_unlock; 1314 goto out_unlock;
1314 } 1315 }
1315 spin_unlock(&mm->page_table_lock); 1316
1317got_lock:
1318 target_nid = mpol_misplaced(page, vma, haddr);
1319 if (target_nid == -1) {
1320 unlock_page(page);
1321 put_page(page);
1322 goto clear_pmdnuma;
1323 }
1316 1324
1317 /* Migrate the THP to the requested node */ 1325 /* Migrate the THP to the requested node */
1326 spin_unlock(&mm->page_table_lock);
1318 migrated = migrate_misplaced_transhuge_page(mm, vma, 1327 migrated = migrate_misplaced_transhuge_page(mm, vma,
1319 pmdp, pmd, addr, page, target_nid); 1328 pmdp, pmd, addr, page, target_nid);
1320 if (!migrated) 1329 if (!migrated)