author		Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2012-03-21 19:33:59 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 20:54:57 -0400
commit		092b50bacd1cdbffef2643b7a46f2a215407919c
tree		a2501b424d37c3757e9369ac36231874756bf990
parent		807f0ccfe15551afd514c062585045c88ca62037
pagemap: introduce data structure for pagemap entry
Currently the local variable holding the pagemap entry in pagemap_pte_range() is
named pfn and typed as u64, which is misleading (a pfn should be unsigned long).

This patch introduces a dedicated type for pagemap entries and converts the code
to use it.
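Wrapping the u64 in a single-member struct gives pagemap entries a distinct C
type, so the compiler rejects code that passes a bare 64-bit value where an
entry is expected. A minimal userspace sketch of the same pattern (it reuses the
names pagemap_entry_t and make_pme from this patch; the stdint types and the
demo consume()/main() are illustrative only, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as the type added by this patch: a u64 wrapped in a struct. */
    typedef struct {
    	uint64_t pme;
    } pagemap_entry_t;

    /* Mirrors make_pme(): build an entry from a raw 64-bit value. */
    static inline pagemap_entry_t make_pme(uint64_t val)
    {
    	return (pagemap_entry_t) { .pme = val };
    }

    /* Hypothetical consumer that only accepts real pagemap entries. */
    static void consume(pagemap_entry_t e)
    {
    	printf("entry: 0x%llx\n", (unsigned long long)e.pme);
    }

    int main(void)
    {
    	pagemap_entry_t e = make_pme(42);

    	consume(e);		/* fine: a genuine pagemap_entry_t */
    	/* consume(42ULL); */	/* would not compile: a raw u64 is no longer silently accepted */
    	return 0;
    }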
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	fs/proc/task_mmu.c | 69
1 file changed, 38 insertions(+), 31 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 328843de6e9f..c7e3a163295c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -594,9 +594,13 @@ const struct file_operations proc_clear_refs_operations = {
 	.llseek		= noop_llseek,
 };
 
+typedef struct {
+	u64 pme;
+} pagemap_entry_t;
+
 struct pagemapread {
 	int pos, len;
-	u64 *buffer;
+	pagemap_entry_t *buffer;
 };
 
 #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
@@ -619,10 +623,15 @@ struct pagemapread {
 #define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
 #define PM_END_OF_BUFFER    1
 
-static int add_to_pagemap(unsigned long addr, u64 pfn,
+static inline pagemap_entry_t make_pme(u64 val)
+{
+	return (pagemap_entry_t) { .pme = val };
+}
+
+static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
 			  struct pagemapread *pm)
 {
-	pm->buffer[pm->pos++] = pfn;
+	pm->buffer[pm->pos++] = *pme;
 	if (pm->pos >= pm->len)
 		return PM_END_OF_BUFFER;
 	return 0;
@@ -634,8 +643,10 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
 	struct pagemapread *pm = walk->private;
 	unsigned long addr;
 	int err = 0;
+	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);
+
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
-		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
+		err = add_to_pagemap(addr, &pme, pm);
 		if (err)
 			break;
 	}
@@ -648,36 +659,33 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte)
 	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
 }
 
-static u64 pte_to_pagemap_entry(pte_t pte)
+static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte)
 {
-	u64 pme = 0;
 	if (is_swap_pte(pte))
-		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
-			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
+		*pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte))
+				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP);
 	else if (pte_present(pte))
-		pme = PM_PFRAME(pte_pfn(pte))
-			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
-	return pme;
+		*pme = make_pme(PM_PFRAME(pte_pfn(pte))
+				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static u64 thp_pmd_to_pagemap_entry(pmd_t pmd, int offset)
+static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
+					pmd_t pmd, int offset)
 {
-	u64 pme = 0;
 	/*
 	 * Currently pmd for thp is always present because thp can not be
 	 * swapped-out, migrated, or HWPOISONed (split in such cases instead.)
 	 * This if-check is just to prepare for future implementation.
 	 */
 	if (pmd_present(pmd))
-		pme = PM_PFRAME(pmd_pfn(pmd) + offset)
-			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
-	return pme;
+		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
+				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
 }
 #else
-static inline u64 thp_pmd_to_pagemap_entry(pmd_t pmd, int offset)
+static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
+						pmd_t pmd, int offset)
 {
-	return 0;
 }
 #endif
 
@@ -688,7 +696,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	struct pagemapread *pm = walk->private;
 	pte_t *pte;
 	int err = 0;
-	u64 pfn = PM_NOT_PRESENT;
+	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);
 
 	if (pmd_trans_unstable(pmd))
 		return 0;
@@ -702,8 +710,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 
 			offset = (addr & ~PAGEMAP_WALK_MASK) >>
 					PAGE_SHIFT;
-			pfn = thp_pmd_to_pagemap_entry(*pmd, offset);
-			err = add_to_pagemap(addr, pfn, pm);
+			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
+			err = add_to_pagemap(addr, &pme, pm);
 			if (err)
 				break;
 		}
@@ -723,11 +731,11 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		if (vma && (vma->vm_start <= addr) &&
 		    !is_vm_hugetlb_page(vma)) {
 			pte = pte_offset_map(pmd, addr);
-			pfn = pte_to_pagemap_entry(*pte);
+			pte_to_pagemap_entry(&pme, *pte);
 			/* unmap before userspace copy */
 			pte_unmap(pte);
 		}
-		err = add_to_pagemap(addr, pfn, pm);
+		err = add_to_pagemap(addr, &pme, pm);
 		if (err)
 			return err;
 	}
@@ -738,13 +746,12 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
-static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
+static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
+					pte_t pte, int offset)
 {
-	u64 pme = 0;
 	if (pte_present(pte))
-		pme = PM_PFRAME(pte_pfn(pte) + offset)
-			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
-	return pme;
+		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
+				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
 }
 
 /* This function walks within one hugetlb entry in the single call */
@@ -754,12 +761,12 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
 {
 	struct pagemapread *pm = walk->private;
 	int err = 0;
-	u64 pfn;
+	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);
 
 	for (; addr != end; addr += PAGE_SIZE) {
 		int offset = (addr & ~hmask) >> PAGE_SHIFT;
-		pfn = huge_pte_to_pagemap_entry(*pte, offset);
-		err = add_to_pagemap(addr, pfn, pm);
+		huge_pte_to_pagemap_entry(&pme, *pte, offset);
+		err = add_to_pagemap(addr, &pme, pm);
 		if (err)
 			return err;
 	}
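For context only (not part of this patch): each pagemap_entry_t queued by
add_to_pagemap() is eventually copied to userspace as one 64-bit word per
virtual page of /proc/PID/pagemap. A rough consumer sketch, assuming the entry
layout documented in Documentation/vm/pagemap.txt at the time (bit 63 = page
present, bits 0-54 = page frame number for present pages); the program below is
an illustration, not code from this series:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	/* Query the entry for one page that is certainly mapped: our own stack. */
    	long page_size = sysconf(_SC_PAGESIZE);
    	uintptr_t addr = (uintptr_t)&page_size;
    	uint64_t entry;

    	int fd = open("/proc/self/pagemap", O_RDONLY);
    	if (fd < 0)
    		return 1;

    	/* One 64-bit entry per virtual page, indexed by virtual page number. */
    	off_t offset = (addr / page_size) * sizeof(entry);
    	if (pread(fd, &entry, sizeof(entry), offset) != sizeof(entry)) {
    		close(fd);
    		return 1;
    	}
    	close(fd);

    	if (entry & (1ULL << 63))	/* present bit */
    		printf("present, pfn = %llu\n",
    		       (unsigned long long)(entry & ((1ULL << 55) - 1)));
    	else
    		printf("not present\n");
    	return 0;
    }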