diff options
author | Michel Lespinasse <walken@google.com> | 2012-10-08 19:31:25 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-09 03:22:39 -0400 |
commit | 6b2dbba8b6ac4df26f72eda1e5ea7bab9f950e08 (patch) | |
tree | 422ed8d7ac2fe45069f20cfba84a9a097bf444af /mm/rmap.c | |
parent | fff3fd8a1210a165252cd7cd01206da7a90d3a06 (diff) |
mm: replace vma prio_tree with an interval tree
Implement an interval tree as a replacement for the VMA prio_tree. The
algorithms are similar to lib/interval_tree.c; however that code can't be
directly reused as the interval endpoints are not explicitly stored in the
VMA. So instead, the common algorithm is moved into a template and the
details (node type, how to get interval endpoints from the node, etc) are
filled in using the C preprocessor.
Once the interval tree functions are available, using them as a
replacement to the VMA prio tree is a relatively simple, mechanical job.
Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 18 |
1 file changed, 7 insertions, 11 deletions
@@ -820,7 +820,6 @@ static int page_referenced_file(struct page *page, | |||
820 | struct address_space *mapping = page->mapping; | 820 | struct address_space *mapping = page->mapping; |
821 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | 821 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); |
822 | struct vm_area_struct *vma; | 822 | struct vm_area_struct *vma; |
823 | struct prio_tree_iter iter; | ||
824 | int referenced = 0; | 823 | int referenced = 0; |
825 | 824 | ||
826 | /* | 825 | /* |
@@ -846,7 +845,7 @@ static int page_referenced_file(struct page *page, | |||
846 | */ | 845 | */ |
847 | mapcount = page_mapcount(page); | 846 | mapcount = page_mapcount(page); |
848 | 847 | ||
849 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { | 848 | vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { |
850 | unsigned long address = vma_address(page, vma); | 849 | unsigned long address = vma_address(page, vma); |
851 | if (address == -EFAULT) | 850 | if (address == -EFAULT) |
852 | continue; | 851 | continue; |
@@ -945,13 +944,12 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page) | |||
945 | { | 944 | { |
946 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | 945 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); |
947 | struct vm_area_struct *vma; | 946 | struct vm_area_struct *vma; |
948 | struct prio_tree_iter iter; | ||
949 | int ret = 0; | 947 | int ret = 0; |
950 | 948 | ||
951 | BUG_ON(PageAnon(page)); | 949 | BUG_ON(PageAnon(page)); |
952 | 950 | ||
953 | mutex_lock(&mapping->i_mmap_mutex); | 951 | mutex_lock(&mapping->i_mmap_mutex); |
954 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { | 952 | vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { |
955 | if (vma->vm_flags & VM_SHARED) { | 953 | if (vma->vm_flags & VM_SHARED) { |
956 | unsigned long address = vma_address(page, vma); | 954 | unsigned long address = vma_address(page, vma); |
957 | if (address == -EFAULT) | 955 | if (address == -EFAULT) |
@@ -1547,7 +1545,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) | |||
1547 | struct address_space *mapping = page->mapping; | 1545 | struct address_space *mapping = page->mapping; |
1548 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | 1546 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); |
1549 | struct vm_area_struct *vma; | 1547 | struct vm_area_struct *vma; |
1550 | struct prio_tree_iter iter; | ||
1551 | int ret = SWAP_AGAIN; | 1548 | int ret = SWAP_AGAIN; |
1552 | unsigned long cursor; | 1549 | unsigned long cursor; |
1553 | unsigned long max_nl_cursor = 0; | 1550 | unsigned long max_nl_cursor = 0; |
@@ -1555,7 +1552,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) | |||
1555 | unsigned int mapcount; | 1552 | unsigned int mapcount; |
1556 | 1553 | ||
1557 | mutex_lock(&mapping->i_mmap_mutex); | 1554 | mutex_lock(&mapping->i_mmap_mutex); |
1558 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { | 1555 | vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { |
1559 | unsigned long address = vma_address(page, vma); | 1556 | unsigned long address = vma_address(page, vma); |
1560 | if (address == -EFAULT) | 1557 | if (address == -EFAULT) |
1561 | continue; | 1558 | continue; |
@@ -1576,7 +1573,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) | |||
1576 | goto out; | 1573 | goto out; |
1577 | 1574 | ||
1578 | list_for_each_entry(vma, &mapping->i_mmap_nonlinear, | 1575 | list_for_each_entry(vma, &mapping->i_mmap_nonlinear, |
1579 | shared.vm_set.list) { | 1576 | shared.nonlinear) { |
1580 | cursor = (unsigned long) vma->vm_private_data; | 1577 | cursor = (unsigned long) vma->vm_private_data; |
1581 | if (cursor > max_nl_cursor) | 1578 | if (cursor > max_nl_cursor) |
1582 | max_nl_cursor = cursor; | 1579 | max_nl_cursor = cursor; |
@@ -1608,7 +1605,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) | |||
1608 | 1605 | ||
1609 | do { | 1606 | do { |
1610 | list_for_each_entry(vma, &mapping->i_mmap_nonlinear, | 1607 | list_for_each_entry(vma, &mapping->i_mmap_nonlinear, |
1611 | shared.vm_set.list) { | 1608 | shared.nonlinear) { |
1612 | cursor = (unsigned long) vma->vm_private_data; | 1609 | cursor = (unsigned long) vma->vm_private_data; |
1613 | while ( cursor < max_nl_cursor && | 1610 | while ( cursor < max_nl_cursor && |
1614 | cursor < vma->vm_end - vma->vm_start) { | 1611 | cursor < vma->vm_end - vma->vm_start) { |
@@ -1631,7 +1628,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) | |||
1631 | * in locked vmas). Reset cursor on all unreserved nonlinear | 1628 | * in locked vmas). Reset cursor on all unreserved nonlinear |
1632 | * vmas, now forgetting on which ones it had fallen behind. | 1629 | * vmas, now forgetting on which ones it had fallen behind. |
1633 | */ | 1630 | */ |
1634 | list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) | 1631 | list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear) |
1635 | vma->vm_private_data = NULL; | 1632 | vma->vm_private_data = NULL; |
1636 | out: | 1633 | out: |
1637 | mutex_unlock(&mapping->i_mmap_mutex); | 1634 | mutex_unlock(&mapping->i_mmap_mutex); |
@@ -1748,13 +1745,12 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *, | |||
1748 | struct address_space *mapping = page->mapping; | 1745 | struct address_space *mapping = page->mapping; |
1749 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | 1746 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); |
1750 | struct vm_area_struct *vma; | 1747 | struct vm_area_struct *vma; |
1751 | struct prio_tree_iter iter; | ||
1752 | int ret = SWAP_AGAIN; | 1748 | int ret = SWAP_AGAIN; |
1753 | 1749 | ||
1754 | if (!mapping) | 1750 | if (!mapping) |
1755 | return ret; | 1751 | return ret; |
1756 | mutex_lock(&mapping->i_mmap_mutex); | 1752 | mutex_lock(&mapping->i_mmap_mutex); |
1757 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { | 1753 | vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { |
1758 | unsigned long address = vma_address(page, vma); | 1754 | unsigned long address = vma_address(page, vma); |
1759 | if (address == -EFAULT) | 1755 | if (address == -EFAULT) |
1760 | continue; | 1756 | continue; |