author	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2015-02-11 18:27:54 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 20:06:06 -0500
commit	d85f4d6d3bfe3b82e2903ac51a2f837eab7115d7 (patch)
tree	524315f5d1a51e97c8abc4bb516cc003e29dfccd /fs/proc
parent	632fd60fe46f9159f059ed7612eb529e475302a9 (diff)
numa_maps: remove numa_maps->vma
pagewalk.c can handle the vma on its own, so we don't have to pass the vma
via walk->private. And show_numa_map() walks pages on a per-vma basis, so
using walk_page_vma() is preferable.
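
The gist of the change, as a minimal sketch (struct example_stats,
example_pmd_entry() and example_walk() are hypothetical names invented for
illustration; the mm_walk fields and the walk_page_vma() signature are the
ones this series provides): the callback reads the current vma from
walk->vma, so walk->private only has to carry the statistics.

	#include <linux/mm.h>

	/* hypothetical stand-in for struct numa_maps */
	struct example_stats {
		unsigned long pages;
	};

	static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
				     unsigned long end, struct mm_walk *walk)
	{
		struct vm_area_struct *vma = walk->vma;	/* set by pagewalk.c */
		struct example_stats *stats = walk->private; /* stats only, no vma */

		/* ... inspect [addr, end) against vma, update stats ... */
		return 0;
	}

	static void example_walk(struct vm_area_struct *vma,
				 struct example_stats *stats)
	{
		struct mm_walk walk = {
			.pmd_entry = example_pmd_entry,
			.private = stats,
			.mm = vma->vm_mm,
		};

		/* caller holds mmap_sem, as m_start() does for show_numa_map() */
		walk_page_vma(vma, &walk);
	}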
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')
-rw-r--r--	fs/proc/task_mmu.c	29
1 file changed, 13 insertions, 16 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ae4bc2960077..a36db4ad140b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1283,7 +1283,6 @@ const struct file_operations proc_pagemap_operations = {
 #ifdef CONFIG_NUMA
 
 struct numa_maps {
-	struct vm_area_struct *vma;
 	unsigned long pages;
 	unsigned long anon;
 	unsigned long active;
@@ -1352,18 +1351,17 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 		unsigned long end, struct mm_walk *walk)
 {
-	struct numa_maps *md;
+	struct numa_maps *md = walk->private;
+	struct vm_area_struct *vma = walk->vma;
 	spinlock_t *ptl;
 	pte_t *orig_pte;
 	pte_t *pte;
 
-	md = walk->private;
-
-	if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		pte_t huge_pte = *(pte_t *)pmd;
 		struct page *page;
 
-		page = can_gather_numa_stats(huge_pte, md->vma, addr);
+		page = can_gather_numa_stats(huge_pte, vma, addr);
 		if (page)
 			gather_stats(page, md, pte_dirty(huge_pte),
 				HPAGE_PMD_SIZE/PAGE_SIZE);
@@ -1375,7 +1373,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 		return 0;
 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	do {
-		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
+		struct page *page = can_gather_numa_stats(*pte, vma, addr);
 		if (!page)
 			continue;
 		gather_stats(page, md, pte_dirty(*pte), 1);
@@ -1422,7 +1420,12 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
 	struct numa_maps *md = &numa_priv->md;
 	struct file *file = vma->vm_file;
 	struct mm_struct *mm = vma->vm_mm;
-	struct mm_walk walk = {};
+	struct mm_walk walk = {
+		.hugetlb_entry = gather_hugetlb_stats,
+		.pmd_entry = gather_pte_stats,
+		.private = md,
+		.mm = mm,
+	};
 	struct mempolicy *pol;
 	char buffer[64];
 	int nid;
@@ -1433,13 +1436,6 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
 	/* Ensure we start with an empty set of numa_maps statistics. */
 	memset(md, 0, sizeof(*md));
 
-	md->vma = vma;
-
-	walk.hugetlb_entry = gather_hugetlb_stats;
-	walk.pmd_entry = gather_pte_stats;
-	walk.private = md;
-	walk.mm = mm;
-
 	pol = __get_vma_policy(vma, vma->vm_start);
 	if (pol) {
 		mpol_to_str(buffer, sizeof(buffer), pol);
@@ -1473,7 +1469,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
 	if (is_vm_hugetlb_page(vma))
 		seq_puts(m, " huge");
 
-	walk_page_range(vma->vm_start, vma->vm_end, &walk);
+	/* mmap_sem is held by m_start */
+	walk_page_vma(vma, &walk);
 
 	if (!md->pages)
 		goto out;
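
For context, walk_page_vma() lives in mm/pagewalk.c and was added by the
parent series; it is what makes walk->vma available to the callbacks above.
The following is a paraphrased sketch of the v4.0-era helper, not part of
this commit:

	/* Paraphrased sketch of walk_page_vma(), not part of this patch. */
	int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
	{
		int err;

		if (!walk->mm)
			return -EINVAL;

		/* record the vma so callbacks can reach it via walk->vma */
		walk->vma = vma;
		err = walk_page_test(vma->vm_start, vma->vm_end, walk);
		if (err > 0)
			return 0;	/* callback asked to skip this vma */
		if (err < 0)
			return err;
		return __walk_page_range(vma->vm_start, vma->vm_end, walk);
	}

Because the helper walks exactly [vma->vm_start, vma->vm_end), the explicit
walk_page_range(vma->vm_start, vma->vm_end, &walk) call in show_numa_map()
becomes redundant and can be replaced as the last hunk does.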