diff options
author | Peter Zijlstra <peterz@infradead.org> | 2013-10-07 06:29:24 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2013-10-09 08:47:53 -0400 |
commit | 6688cc05473b36a0a3d3971e1adf1712919b32eb (patch) | |
tree | 2305dca14e2df669b48138088bd93cf51db37721 /mm/huge_memory.c | |
parent | 7851a45cd3f6198bf542c30e27b330e8eeb3736c (diff) |
mm: numa: Do not group on RO pages
And here's a little something to make sure that the whole world does not
end up in a single group.
While we don't migrate shared executable pages, we do scan/fault on
them. And since everybody links to libc, everybody ends up in the same
group.
Suggested-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1381141781-10992-47-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r-- | mm/huge_memory.c | 15 |
1 file changed, 13 insertions, 2 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index becf92ca54f3..7ab4e32afe12 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1285,6 +1285,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1285 | int target_nid, last_cpupid = -1; | 1285 | int target_nid, last_cpupid = -1; |
1286 | bool page_locked; | 1286 | bool page_locked; |
1287 | bool migrated = false; | 1287 | bool migrated = false; |
1288 | int flags = 0; | ||
1288 | 1289 | ||
1289 | spin_lock(&mm->page_table_lock); | 1290 | spin_lock(&mm->page_table_lock); |
1290 | if (unlikely(!pmd_same(pmd, *pmdp))) | 1291 | if (unlikely(!pmd_same(pmd, *pmdp))) |
@@ -1299,6 +1300,14 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1299 | count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); | 1300 | count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); |
1300 | 1301 | ||
1301 | /* | 1302 | /* |
1303 | * Avoid grouping on DSO/COW pages in specific and RO pages | ||
1304 | * in general, RO pages shouldn't hurt as much anyway since | ||
1305 | * they can be in shared cache state. | ||
1306 | */ | ||
1307 | if (!pmd_write(pmd)) | ||
1308 | flags |= TNF_NO_GROUP; | ||
1309 | |||
1310 | /* | ||
1302 | * Acquire the page lock to serialise THP migrations but avoid dropping | 1311 | * Acquire the page lock to serialise THP migrations but avoid dropping |
1303 | * page_table_lock if at all possible | 1312 | * page_table_lock if at all possible |
1304 | */ | 1313 | */ |
@@ -1343,8 +1352,10 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1343 | spin_unlock(&mm->page_table_lock); | 1352 | spin_unlock(&mm->page_table_lock); |
1344 | migrated = migrate_misplaced_transhuge_page(mm, vma, | 1353 | migrated = migrate_misplaced_transhuge_page(mm, vma, |
1345 | pmdp, pmd, addr, page, target_nid); | 1354 | pmdp, pmd, addr, page, target_nid); |
1346 | if (migrated) | 1355 | if (migrated) { |
1356 | flags |= TNF_MIGRATED; | ||
1347 | page_nid = target_nid; | 1357 | page_nid = target_nid; |
1358 | } | ||
1348 | 1359 | ||
1349 | goto out; | 1360 | goto out; |
1350 | clear_pmdnuma: | 1361 | clear_pmdnuma: |
@@ -1362,7 +1373,7 @@ out: | |||
1362 | page_unlock_anon_vma_read(anon_vma); | 1373 | page_unlock_anon_vma_read(anon_vma); |
1363 | 1374 | ||
1364 | if (page_nid != -1) | 1375 | if (page_nid != -1) |
1365 | task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, migrated); | 1376 | task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); |
1366 | 1377 | ||
1367 | return 0; | 1378 | return 0; |
1368 | } | 1379 | } |