path: root/mm/memory.c
author    Mel Gorman <mgorman@suse.de>    2012-11-02 07:33:45 -0400
committer Mel Gorman <mgorman@suse.de>    2012-12-11 09:42:42 -0500
commit    4daae3b4b9e49b7e0935499a352f1c59d90287d2 (patch)
tree      2ac600b955c89e3b1b2070110a9b7293a4511b19 /mm/memory.c
parent    149c33e1c98f83050870514f380902dc6d617bd5 (diff)
mm: mempolicy: Use _PAGE_NUMA to migrate pages
Note: Based on "mm/mpol: Use special PROT_NONE to migrate pages" but
      sufficiently different that the signed-off-bys were dropped

Combine our previous _PAGE_NUMA, mpol_misplaced and migrate_misplaced_page()
pieces into an effective migrate on fault scheme.

Note that (on x86) we rely on PROT_NONE pages being !present and avoid the
TLB flush from try_to_unmap(TTU_MIGRATION). This greatly improves the
page-migration performance.

Based-on-work-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mel Gorman <mgorman@suse.de>
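For orientation, do_numa_page() in the diff below is reached from the generic
fault path when a task touches a pte that has been marked _PAGE_NUMA. A
minimal sketch of that dispatch, assuming the pte_numa() check introduced
earlier in this series (abridged; not part of this patch):

	/* Sketch of the handle_pte_fault() dispatch; the real function
	 * also handles swap, anonymous and file faults. Abridged for
	 * illustration only. */
	static int handle_pte_fault_sketch(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			pte_t *pte, pmd_t *pmd, unsigned int flags)
	{
		pte_t entry = *pte;

		/* A _PAGE_NUMA pte is !present on x86, so the NUMA
		 * hinting check runs before normal pte handling. */
		if (pte_numa(entry))
			return do_numa_page(mm, vma, address, entry, pte, pmd);

		/* ... regular fault handling continues here ... */
		return 0;
	}

That !present property is also what lets the migration below avoid the
try_to_unmap(TTU_MIGRATION) TLB flush mentioned above.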
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 32
1 file changed, 27 insertions(+), 5 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index e30616f2cc3d..d52542680e10 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -57,6 +57,7 @@
 #include <linux/swapops.h>
 #include <linux/elf.h>
 #include <linux/gfp.h>
+#include <linux/migrate.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -3451,8 +3452,9 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		   unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd)
 {
-	struct page *page;
+	struct page *page = NULL;
 	spinlock_t *ptl;
+	int current_nid, target_nid;
 
 	/*
 	 * The "pte" at this point cannot be used safely without
@@ -3465,8 +3467,11 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	ptl = pte_lockptr(mm, pmd);
 	spin_lock(ptl);
-	if (unlikely(!pte_same(*ptep, pte)))
-		goto out_unlock;
+	if (unlikely(!pte_same(*ptep, pte))) {
+		pte_unmap_unlock(ptep, ptl);
+		goto out;
+	}
+
 	pte = pte_mknonnuma(pte);
 	set_pte_at(mm, addr, ptep, pte);
 	update_mmu_cache(vma, addr, ptep);
@@ -3477,8 +3482,25 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return 0;
 	}
 
-out_unlock:
+	get_page(page);
+	current_nid = page_to_nid(page);
+	target_nid = mpol_misplaced(page, vma, addr);
 	pte_unmap_unlock(ptep, ptl);
+	if (target_nid == -1) {
+		/*
+		 * Account for the fault against the current node if it not
+		 * being replaced regardless of where the page is located.
+		 */
+		current_nid = numa_node_id();
+		put_page(page);
+		goto out;
+	}
+
+	/* Migrate to the requested node */
+	if (migrate_misplaced_page(page, target_nid))
+		current_nid = target_nid;
+
+out:
 	return 0;
 }
 
@@ -3655,7 +3677,7 @@ retry:
 	barrier();
 	if (pmd_trans_huge(orig_pmd)) {
 		if (pmd_numa(*pmd))
-			return do_huge_pmd_numa_page(mm, address,
+			return do_huge_pmd_numa_page(mm, vma, address,
 						     orig_pmd, pmd);
 
 		if ((flags & FAULT_FLAG_WRITE) && !pmd_write(orig_pmd)) {
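Taken together, the new tail of do_numa_page() is the heart of the
migrate-on-fault scheme: consult the memory policy, then either account the
fault locally or push the page toward the node the policy prefers. A
condensed sketch of that decision, with the pte manipulation and locking
elided (mpol_misplaced() and migrate_misplaced_page() as introduced elsewhere
in this series):

	/* Condensed sketch of the policy check added above; assumes the
	 * caller already holds a reference on page (the get_page() in
	 * the diff) and has dropped the pte lock. Illustration only. */
	static int numa_migrate_decision_sketch(struct page *page,
			struct vm_area_struct *vma, unsigned long addr)
	{
		int current_nid = page_to_nid(page);
		int target_nid = mpol_misplaced(page, vma, addr);

		if (target_nid == -1) {
			/* Policy is satisfied with the current placement;
			 * account the fault against the faulting node. */
			current_nid = numa_node_id();
			put_page(page);
			return current_nid;
		}

		/* migrate_misplaced_page() consumes the page reference
		 * and returns nonzero if the page actually moved. */
		if (migrate_misplaced_page(page, target_nid))
			current_nid = target_nid;

		return current_nid;
	}

Note that current_nid is computed but not yet consumed by this patch; later
patches in the series wire it into NUMA fault accounting.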