diff options
author | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2014-06-04 19:10:55 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-04 19:54:12 -0400 |
commit | 1fdb412bd825998efbced3a16f6ce7e0329728cf (patch) | |
tree | f682bad282240bfb01b8f66ee2d9765915227e8d /mm/memory.c | |
parent | a9b0f8618d46ba027243b8ecb5c2468a7112d235 (diff) |
mm: document do_fault_around() feature
Some clarification on how faultaround works.
[akpm@linux-foundation.org: tweak comment text]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 27 |
1 files changed, 27 insertions, 0 deletions
diff --git a/mm/memory.c b/mm/memory.c index 62a08a7badc4..d67fd9fcf1f2 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2760,6 +2760,10 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address, | |||
2760 | 2760 | ||
2761 | static unsigned long fault_around_bytes = 65536; | 2761 | static unsigned long fault_around_bytes = 65536; |
2762 | 2762 | ||
2763 | /* | ||
2764 | * fault_around_pages() and fault_around_mask() round down fault_around_bytes | ||
2765 | * to nearest page order. It's what do_fault_around() expects to see. | ||
2766 | */ | ||
2763 | static inline unsigned long fault_around_pages(void) | 2767 | static inline unsigned long fault_around_pages(void) |
2764 | { | 2768 | { |
2765 | return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE; | 2769 | return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE; |
@@ -2801,6 +2805,29 @@ static int __init fault_around_debugfs(void) | |||
2801 | late_initcall(fault_around_debugfs); | 2805 | late_initcall(fault_around_debugfs); |
2802 | #endif | 2806 | #endif |
2803 | 2807 | ||
2808 | /* | ||
2809 | * do_fault_around() tries to map a few pages around the fault address. The hope | ||
2810 | * is that the pages will be needed soon and this will lower the number of | ||
2811 | * faults to handle. | ||
2812 | * | ||
2813 | * It uses vm_ops->map_pages() to map the pages, which skips the page if it's | ||
2814 | * not ready to be mapped: not up-to-date, locked, etc. | ||
2815 | * | ||
2816 | * This function is called with the page table lock taken. In the split ptlock | ||
2817 | * case the page table lock protects only those entries which belong to | ||
2818 | * the page table corresponding to the fault address. | ||
2819 | * | ||
2820 | * This function doesn't cross the VMA boundaries, in order to call map_pages() | ||
2821 | * only once. | ||
2822 | * | ||
2823 | * fault_around_pages() defines how many pages we'll try to map. | ||
2824 | * do_fault_around() expects it to return a power of two less than or equal to | ||
2825 | * PTRS_PER_PTE. | ||
2826 | * | ||
2827 | * The virtual address of the area that we map is naturally aligned to the | ||
2828 | * fault_around_pages() value (and therefore to page order). This way it's | ||
2829 | * easier to guarantee that we don't cross page table boundaries. | ||
2830 | */ | ||
2804 | static void do_fault_around(struct vm_area_struct *vma, unsigned long address, | 2831 | static void do_fault_around(struct vm_area_struct *vma, unsigned long address, |
2805 | pte_t *pte, pgoff_t pgoff, unsigned int flags) | 2832 | pte_t *pte, pgoff_t pgoff, unsigned int flags) |
2806 | { | 2833 | { |