author	Hugh Dickins <hughd@google.com>	2011-07-25 20:12:33 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-25 23:57:11 -0400
commit	71f0e07a605fad1fb6b288e4dc1dd8dfa78f4872 (patch)
tree	1eac7c25f32ac88bba52a31179989e62773aa079
parent	708e3508c2a2204cc276dcdb543009a441bfe91b (diff)
tmpfs: refine shmem_file_splice_read
Tidy up shmem_file_splice_read():

Remove readahead: okay, we could implement shmem readahead on swap,
but have never done so before, swap being the slow exceptional path.

Use shmem_getpage() instead of find_or_create_page() plus ->readpage().

Remove several comments: sorry, I found them more distracting than
helpful, and this will not be the reference version of splice_read().

Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
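[Editor's note, not part of the commit message: after this patch the page-filling loop reduces to one shmem_getpage() call per page. shmem_getpage() returns the page locked and uptodate, so the caller only has to unlock it before handing it to the pipe. A lightly annotated excerpt of the new code:

	while (spd.nr_pages < nr_pages) {
		page = NULL;
		/* find, swap in, or allocate the page at 'index' of this shmem inode */
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		/* shmem_getpage() hands the page back locked; unlock before splicing */
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}
]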
-rw-r--r--	mm/shmem.c	138
1 file changed, 19 insertions(+), 119 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index d176e488f04d..f96614526d1c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1850,6 +1850,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 				unsigned int flags)
 {
 	struct address_space *mapping = in->f_mapping;
+	struct inode *inode = mapping->host;
 	unsigned int loff, nr_pages, req_pages;
 	struct page *pages[PIPE_DEF_BUFFERS];
 	struct partial_page partial[PIPE_DEF_BUFFERS];
@@ -1865,7 +1866,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 		.spd_release = spd_release_page,
 	};
 
-	isize = i_size_read(in->f_mapping->host);
+	isize = i_size_read(inode);
 	if (unlikely(*ppos >= isize))
 		return 0;
 
@@ -1881,153 +1882,57 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	nr_pages = min(req_pages, pipe->buffers);
 
-	/*
-	 * Lookup the (hopefully) full range of pages we need.
-	 */
 	spd.nr_pages = find_get_pages_contig(mapping, index,
 						nr_pages, spd.pages);
 	index += spd.nr_pages;
-
-	/*
-	 * If find_get_pages_contig() returned fewer pages than we needed,
-	 * readahead/allocate the rest and fill in the holes.
-	 */
-	if (spd.nr_pages < nr_pages)
-		page_cache_sync_readahead(mapping, &in->f_ra, in,
-				index, req_pages - spd.nr_pages);
-
 	error = 0;
-	while (spd.nr_pages < nr_pages) {
-		/*
-		 * Page could be there, find_get_pages_contig() breaks on
-		 * the first hole.
-		 */
-		page = find_get_page(mapping, index);
-		if (!page) {
-			/*
-			 * page didn't exist, allocate one.
-			 */
-			page = page_cache_alloc_cold(mapping);
-			if (!page)
-				break;
-
-			error = add_to_page_cache_lru(page, mapping, index,
-						GFP_KERNEL);
-			if (unlikely(error)) {
-				page_cache_release(page);
-				if (error == -EEXIST)
-					continue;
-				break;
-			}
-			/*
-			 * add_to_page_cache() locks the page, unlock it
-			 * to avoid convoluting the logic below even more.
-			 */
-			unlock_page(page);
-		}
 
+	while (spd.nr_pages < nr_pages) {
+		page = NULL;
+		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
+		if (error)
+			break;
+		unlock_page(page);
 		spd.pages[spd.nr_pages++] = page;
 		index++;
 	}
 
-	/*
-	 * Now loop over the map and see if we need to start IO on any
-	 * pages, fill in the partial map, etc.
-	 */
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	nr_pages = spd.nr_pages;
 	spd.nr_pages = 0;
+
 	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
 		unsigned int this_len;
 
 		if (!len)
 			break;
 
-		/*
-		 * this_len is the max we'll use from this page
-		 */
 		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
 		page = spd.pages[page_nr];
 
-		if (PageReadahead(page))
-			page_cache_async_readahead(mapping, &in->f_ra, in,
-					page, index, req_pages - page_nr);
-
-		/*
-		 * If the page isn't uptodate, we may need to start io on it
-		 */
-		if (!PageUptodate(page)) {
-			lock_page(page);
-
-			/*
-			 * Page was truncated, or invalidated by the
-			 * filesystem. Redo the find/create, but this time the
-			 * page is kept locked, so there's no chance of another
-			 * race with truncate/invalidate.
-			 */
-			if (!page->mapping) {
-				unlock_page(page);
-				page = find_or_create_page(mapping, index,
-						mapping_gfp_mask(mapping));
-
-				if (!page) {
-					error = -ENOMEM;
-					break;
-				}
-				page_cache_release(spd.pages[page_nr]);
-				spd.pages[page_nr] = page;
-			}
-			/*
-			 * page was already under io and is now done, great
-			 */
-			if (PageUptodate(page)) {
-				unlock_page(page);
-				goto fill_it;
-			}
-
-			/*
-			 * need to read in the page
-			 */
-			error = mapping->a_ops->readpage(in, page);
-			if (unlikely(error)) {
-				/*
-				 * We really should re-lookup the page here,
-				 * but it complicates things a lot. Instead
-				 * lets just do what we already stored, and
-				 * we'll get it the next time we are called.
-				 */
-				if (error == AOP_TRUNCATED_PAGE)
-					error = 0;
-
+		if (!PageUptodate(page) || page->mapping != mapping) {
+			page = NULL;
+			error = shmem_getpage(inode, index, &page,
+							SGP_CACHE, NULL);
+			if (error)
 				break;
-			}
+			unlock_page(page);
+			page_cache_release(spd.pages[page_nr]);
+			spd.pages[page_nr] = page;
 		}
-fill_it:
-		/*
-		 * i_size must be checked after PageUptodate.
-		 */
-		isize = i_size_read(mapping->host);
+
+		isize = i_size_read(inode);
 		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
 		if (unlikely(!isize || index > end_index))
 			break;
 
-		/*
-		 * if this is the last page, see if we need to shrink
-		 * the length and stop
-		 */
 		if (end_index == index) {
 			unsigned int plen;
 
-			/*
-			 * max good bytes in this page
-			 */
 			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
 			if (plen <= loff)
 				break;
 
-			/*
-			 * force quit after adding this page
-			 */
 			this_len = min(this_len, plen - loff);
 			len = this_len;
 		}
@@ -2040,13 +1945,8 @@ fill_it:
 		index++;
 	}
 
-	/*
-	 * Release any pages at the end, if we quit early. 'page_nr' is how far
-	 * we got, 'nr_pages' is how many pages are in the map.
-	 */
 	while (page_nr < nr_pages)
 		page_cache_release(spd.pages[page_nr++]);
-	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 
 	if (spd.nr_pages)
 		error = splice_to_pipe(pipe, &spd);
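
[Editor's note, for context beyond the patch itself: shmem_file_splice_read() is the splice_read implementation used when the source descriptor of splice(2) lives on tmpfs. A minimal userspace sketch that exercises this path is below; the /dev/shm path, message, and byte counts are illustrative only and not taken from the commit.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/dev/shm/splice-demo";	/* any tmpfs mount will do */
	char buf[64];
	int pipefd[2];
	int fd;
	ssize_t n;

	fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0 || pipe(pipefd) < 0)
		return 1;

	/* put some data into the tmpfs page cache */
	if (write(fd, "hello from tmpfs\n", 17) != 17)
		return 1;
	lseek(fd, 0, SEEK_SET);

	/* splice from the tmpfs file into the pipe; this ends up in shmem_file_splice_read() */
	n = splice(fd, NULL, pipefd[1], NULL, 17, 0);
	if (n < 0)
		return 1;

	/* drain the pipe to show the data made it through */
	n = read(pipefd[0], buf, sizeof(buf));
	if (n > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	close(fd);
	unlink(path);
	return 0;
}
]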