diff options
author | Mel Gorman <mgorman@suse.de> | 2013-10-07 06:28:43 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2013-10-29 06:37:19 -0400 |
commit | 42836f5f8baa33085f547098b74aa98991ee9216 (patch) | |
tree | a085681ed2bb266b28bc94210eecd95c0ec2ea0c /mm | |
parent | 1dd49bfa3465756b3ce72214b58a33e4afb67aa3 (diff) |
mm: Wait for THP migrations to complete during NUMA hinting faults
The locking for migrating THP is unusual. While normal page migration
prevents parallel accesses using a migration PTE, THP migration relies on
a combination of the page_table_lock, the page lock and the existence of
the NUMA hinting PTE to guarantee safety but there is a bug in the scheme.
If a THP page is currently being migrated and another thread traps a
fault on the same page it checks if the page is misplaced. If it is not,
then pmd_numa is cleared. The problem is that it checks if the page is
misplaced without holding the page lock meaning that the racing thread
can be migrating the THP when the second thread clears the NUMA bit
and faults a stale page.
This patch checks if the page is potentially being migrated and stalls
using the lock_page if it is potentially being migrated before checking
if the page is misplaced or not.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: <stable@kernel.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-6-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/huge_memory.c | 23 |
1 files changed, 16 insertions, 7 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 33ee637648ba..e10d780c4781 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1295,13 +1295,14 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1295 | if (current_nid == numa_node_id()) | 1295 | if (current_nid == numa_node_id()) |
1296 | count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); | 1296 | count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); |
1297 | 1297 | ||
1298 | target_nid = mpol_misplaced(page, vma, haddr); | 1298 | /* |
1299 | if (target_nid == -1) { | 1299 | * Acquire the page lock to serialise THP migrations but avoid dropping |
1300 | put_page(page); | 1300 | * page_table_lock if at all possible |
1301 | goto clear_pmdnuma; | 1301 | */ |
1302 | } | 1302 | if (trylock_page(page)) |
1303 | goto got_lock; | ||
1303 | 1304 | ||
1304 | /* Acquire the page lock to serialise THP migrations */ | 1305 | /* Serialise against migration and check placement */ ||
1305 | spin_unlock(&mm->page_table_lock); | 1306 | spin_unlock(&mm->page_table_lock); |
1306 | lock_page(page); | 1307 | lock_page(page); |
1307 | 1308 | ||
@@ -1312,9 +1313,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1312 | put_page(page); | 1313 | put_page(page); |
1313 | goto out_unlock; | 1314 | goto out_unlock; |
1314 | } | 1315 | } |
1315 | spin_unlock(&mm->page_table_lock); | 1316 | |
1317 | got_lock: | ||
1318 | target_nid = mpol_misplaced(page, vma, haddr); | ||
1319 | if (target_nid == -1) { | ||
1320 | unlock_page(page); | ||
1321 | put_page(page); | ||
1322 | goto clear_pmdnuma; | ||
1323 | } | ||
1316 | 1324 | ||
1317 | /* Migrate the THP to the requested node */ | 1325 | /* Migrate the THP to the requested node */ |
1326 | spin_unlock(&mm->page_table_lock); | ||
1318 | migrated = migrate_misplaced_transhuge_page(mm, vma, | 1327 | migrated = migrate_misplaced_transhuge_page(mm, vma, |
1319 | pmdp, pmd, addr, page, target_nid); | 1328 | pmdp, pmd, addr, page, target_nid); |
1320 | if (!migrated) | 1329 | if (!migrated) |