author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2012-10-25 08:16:43 -0400
committer  Mel Gorman <mgorman@suse.de>               2012-12-11 09:42:45 -0500
commit     cbee9f88ec1b8dd6b58f25f54e4f52c82ed77690 (patch)
tree       d4cfbcfa3e89742216cd792d4aa914356406b532 /mm/memory.c
parent     a720094ded8cbb303111035be91858011d2eac71 (diff)
mm: numa: Add fault driven placement and migration
NOTE: This patch is based on "sched, numa, mm: Add fault driven placement and migration policy" but as it throws away all the policy to just leave a basic foundation I had to drop the signed-offs-by.

This patch creates a bare-bones method for setting PTEs pte_numa in the context of the scheduler that, when faulted later, will be faulted onto the node the CPU is running on. In itself this does nothing useful, but any placement policy will fundamentally depend on receiving hints on placement from fault context and doing something intelligent about it.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
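The diff below only wires in the accounting hook. To make "receiving hints on placement from fault context" concrete, here is a minimal, userspace-only C sketch of the kind of per-task, per-node fault counting that task_numa_fault() feeds. Everything in it (record_numa_fault, struct task_numa_stats, preferred_node, MAX_NODES) is invented for illustration and is not the kernel API; only the idea of counting hinting faults per node comes from this patch.

    #include <stdio.h>

    #define MAX_NODES 4

    /* Mock per-task NUMA fault statistics: one counter per node. */
    struct task_numa_stats {
            unsigned long faults[MAX_NODES];
    };

    /*
     * Loose analogue of task_numa_fault(nid, pages): record that this task
     * took a NUMA hinting fault against 'pages' pages resident on node 'nid'.
     */
    static void record_numa_fault(struct task_numa_stats *stats, int nid, int pages)
    {
            if (nid >= 0 && nid < MAX_NODES)
                    stats->faults[nid] += pages;
    }

    /*
     * A placement policy built on top of this foundation could later consult
     * the counters, e.g. to find the node the task faults against most often.
     */
    static int preferred_node(const struct task_numa_stats *stats)
    {
            int nid, best = 0;

            for (nid = 1; nid < MAX_NODES; nid++)
                    if (stats->faults[nid] > stats->faults[best])
                            best = nid;
            return best;
    }

    int main(void)
    {
            struct task_numa_stats stats = { { 0 } };

            /* Simulate hinting faults: pages on node 1 dominate. */
            record_numa_fault(&stats, 0, 1);
            record_numa_fault(&stats, 1, 1);
            record_numa_fault(&stats, 1, 1);
            record_numa_fault(&stats, 1, 1);

            printf("preferred node: %d\n", preferred_node(&stats));
            return 0;
    }

In the kernel, the hints are raised by the pte_numa faults handled below; what a policy then does with the per-node counts is left to later patches in the series.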
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/mm/memory.c b/mm/memory.c
index d52542680e10..8012c1907895 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3454,7 +3454,8 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
         struct page *page = NULL;
         spinlock_t *ptl;
-        int current_nid, target_nid;
+        int current_nid = -1;
+        int target_nid;
 
         /*
          * The "pte" at this point cannot be used safely without
@@ -3501,6 +3502,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 current_nid = target_nid;
 
 out:
+        task_numa_fault(current_nid, 1);
         return 0;
 }
 
@@ -3537,6 +3539,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
         for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
                 pte_t pteval = *pte;
                 struct page *page;
+                int curr_nid;
                 if (!pte_present(pteval))
                         continue;
                 if (!pte_numa(pteval))
@@ -3554,6 +3557,15 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 page = vm_normal_page(vma, addr, pteval);
                 if (unlikely(!page))
                         continue;
+                /* only check non-shared pages */
+                if (unlikely(page_mapcount(page) != 1))
+                        continue;
+                pte_unmap_unlock(pte, ptl);
+
+                curr_nid = page_to_nid(page);
+                task_numa_fault(curr_nid, 1);
+
+                pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
         }
         pte_unmap_unlock(orig_pte, ptl);
 