author    Konstantin Khlebnikov <khlebnikov@openvz.org>   2012-03-28 17:42:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-28 20:14:37 -0400
commit    0fc9d1040313047edf6a39fd4d7c7defdca97c62 (patch)
tree      4503c084da1097e1cf66d8bac1ec548bc69ce69a /mm
parent    cebbd29e1c2f7a969919f19f74583070840163d7 (diff)
radix-tree: use iterators in find_get_pages* functions
Replace radix_tree_gang_lookup_slot() and radix_tree_gang_lookup_tag_slot() in the page-cache lookup functions with the brand-new radix-tree direct iterating. This avoids the double scanning and the pointer copying.

The iterator does not stop after nr_pages consecutive page-get failures; it continues the lookup until the end of the radix tree, so we can safely remove those restart conditions.

Unfortunately, the old implementation did not forbid nr_pages == 0. This corner case does not fit into the new code (with nr_pages == 0 the "++ret == nr_pages" check could never stop the loop before the pages[] array overflowed), so the patch adds an extra check at the beginning.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Tested-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
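[Editor's note] For readability, here is what the new find_get_pages() looks like once the hunks below are applied. This is a reconstruction assembled from the patch context, not an authoritative copy of the tree: the page_cache_get_speculative() call sits between two hunks and is not visible in the diff, so it is restored here from the surrounding kernel source of that era, and the block only builds inside the kernel tree (3.4-rc radix-tree iterator API assumed). The contig and tag variants follow the same shape, using radix_tree_for_each_contig() and radix_tree_for_each_tagged().

#include <linux/pagemap.h>      /* struct address_space, page_cache_release() */
#include <linux/radix-tree.h>   /* struct radix_tree_iter, radix_tree_for_each_slot() */

unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages)
{
        struct radix_tree_iter iter;
        void **slot;
        unsigned ret = 0;

        /* New corner-case check: with nr_pages == 0 the "++ret == nr_pages"
         * test below could never stop the loop before pages[] overflowed. */
        if (unlikely(!nr_pages))
                return 0;

        rcu_read_lock();
restart:
        /* One direct pass over the tree; no intermediate slot array. */
        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
                struct page *page;
repeat:
                page = radix_tree_deref_slot(slot);
                if (unlikely(!page))
                        continue;               /* hole in the tree */

                if (radix_tree_exception(page)) {
                        if (radix_tree_deref_retry(page)) {
                                /* Entry at index 0 moved out of or back to
                                 * root; none gotten yet, safe to restart. */
                                WARN_ON(iter.index);
                                goto restart;
                        }
                        /* shmem/tmpfs swap entry: skip over it. */
                        continue;
                }

                if (!page_cache_get_speculative(page))
                        goto repeat;            /* line restored from kernel source */

                /* Has the page moved? */
                if (unlikely(page != *slot)) {
                        page_cache_release(page);
                        goto repeat;
                }

                pages[ret] = page;
                if (++ret == nr_pages)
                        break;                  /* replaces the old restart logic */
        }
        rcu_read_unlock();
        return ret;
}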
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c  86
1 file changed, 38 insertions(+), 48 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index c3811bc6b9e..79c4b2b0b14 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -813,20 +813,19 @@ EXPORT_SYMBOL(find_or_create_page);
 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                             unsigned int nr_pages, struct page **pages)
 {
-        unsigned int i;
-        unsigned int ret;
-        unsigned int nr_found, nr_skip;
+        struct radix_tree_iter iter;
+        void **slot;
+        unsigned ret = 0;
+
+        if (unlikely(!nr_pages))
+                return 0;
 
         rcu_read_lock();
 restart:
-        nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
-                                (void ***)pages, NULL, start, nr_pages);
-        ret = 0;
-        nr_skip = 0;
-        for (i = 0; i < nr_found; i++) {
+        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
                 struct page *page;
 repeat:
-                page = radix_tree_deref_slot((void **)pages[i]);
+                page = radix_tree_deref_slot(slot);
                 if (unlikely(!page))
                         continue;
 
@@ -837,7 +836,7 @@ repeat:
                                  * when entry at index 0 moves out of or back
                                  * to root: none yet gotten, safe to restart.
                                  */
-                                WARN_ON(start | i);
+                                WARN_ON(iter.index);
                                 goto restart;
                         }
                         /*
@@ -845,7 +844,6 @@ repeat:
                          * here as an exceptional entry: so skip over it -
                          * we only reach this from invalidate_mapping_pages().
                          */
-                        nr_skip++;
                         continue;
                 }
 
@@ -853,21 +851,16 @@ repeat:
                         goto repeat;
 
                 /* Has the page moved? */
-                if (unlikely(page != *((void **)pages[i]))) {
+                if (unlikely(page != *slot)) {
                         page_cache_release(page);
                         goto repeat;
                 }
 
                 pages[ret] = page;
-                ret++;
+                if (++ret == nr_pages)
+                        break;
         }
 
-        /*
-         * If all entries were removed before we could secure them,
-         * try again, because callers stop trying once 0 is returned.
-         */
-        if (unlikely(!ret && nr_found > nr_skip))
-                goto restart;
         rcu_read_unlock();
         return ret;
 }
@@ -887,21 +880,22 @@ repeat:
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
                                unsigned int nr_pages, struct page **pages)
 {
-        unsigned int i;
-        unsigned int ret;
-        unsigned int nr_found;
+        struct radix_tree_iter iter;
+        void **slot;
+        unsigned int ret = 0;
+
+        if (unlikely(!nr_pages))
+                return 0;
 
         rcu_read_lock();
 restart:
-        nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
-                        (void ***)pages, NULL, index, nr_pages);
-        ret = 0;
-        for (i = 0; i < nr_found; i++) {
+        radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
                 struct page *page;
 repeat:
-                page = radix_tree_deref_slot((void **)pages[i]);
+                page = radix_tree_deref_slot(slot);
+                /* The hole, there no reason to continue */
                 if (unlikely(!page))
-                        continue;
+                        break;
 
                 if (radix_tree_exception(page)) {
                         if (radix_tree_deref_retry(page)) {
@@ -924,7 +918,7 @@ repeat:
                         goto repeat;
 
                 /* Has the page moved? */
-                if (unlikely(page != *((void **)pages[i]))) {
+                if (unlikely(page != *slot)) {
                         page_cache_release(page);
                         goto repeat;
                 }
@@ -934,14 +928,14 @@ repeat:
                  * otherwise we can get both false positives and false
                  * negatives, which is just confusing to the caller.
                  */
-                if (page->mapping == NULL || page->index != index) {
+                if (page->mapping == NULL || page->index != iter.index) {
                         page_cache_release(page);
                         break;
                 }
 
                 pages[ret] = page;
-                ret++;
-                index++;
+                if (++ret == nr_pages)
+                        break;
         }
         rcu_read_unlock();
         return ret;
@@ -962,19 +956,20 @@ EXPORT_SYMBOL(find_get_pages_contig);
 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                         int tag, unsigned int nr_pages, struct page **pages)
 {
-        unsigned int i;
-        unsigned int ret;
-        unsigned int nr_found;
+        struct radix_tree_iter iter;
+        void **slot;
+        unsigned ret = 0;
+
+        if (unlikely(!nr_pages))
+                return 0;
 
         rcu_read_lock();
 restart:
-        nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree,
-                                (void ***)pages, *index, nr_pages, tag);
-        ret = 0;
-        for (i = 0; i < nr_found; i++) {
+        radix_tree_for_each_tagged(slot, &mapping->page_tree,
+                                   &iter, *index, tag) {
                 struct page *page;
 repeat:
-                page = radix_tree_deref_slot((void **)pages[i]);
+                page = radix_tree_deref_slot(slot);
                 if (unlikely(!page))
                         continue;
 
@@ -998,21 +993,16 @@ repeat:
                         goto repeat;
 
                 /* Has the page moved? */
-                if (unlikely(page != *((void **)pages[i]))) {
+                if (unlikely(page != *slot)) {
                         page_cache_release(page);
                         goto repeat;
                 }
 
                 pages[ret] = page;
-                ret++;
+                if (++ret == nr_pages)
+                        break;
         }
 
-        /*
-         * If all entries were removed before we could secure them,
-         * try again, because callers stop trying once 0 is returned.
-         */
-        if (unlikely(!ret && nr_found))
-                goto restart;
         rcu_read_unlock();
 
         if (ret)