author     Hugh Dickins <hughd@google.com>                 2011-07-25 20:12:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-07-25 23:57:11 -0400
commit     708e3508c2a2204cc276dcdb543009a441bfe91b (patch)
tree       9e301ba4ebf3b34a00228c26977feebfba8ad9ef /mm/shmem.c
parent     2efaca927f5cd7ecd0f1554b8f9b6a9a2c329c03 (diff)
tmpfs: clone shmem_file_splice_read()
Copy __generic_file_splice_read() and generic_file_splice_read() from
fs/splice.c to shmem_file_splice_read() in mm/shmem.c. Make
page_cache_pipe_buf_ops and spd_release_page() accessible to it.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Jens Axboe <jaxboe@fusionio.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
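[Editor's note, not part of the commit: the new code path can be exercised from userspace with splice(2), which hands file-to-pipe requests to the file's ->splice_read hook. A minimal sketch follows; it assumes /dev/shm is a tmpfs mount, and the file name is illustrative only.]

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm/splice-demo", O_RDONLY);
	int pfd[2];
	ssize_t n;

	if (fd < 0 || pipe(pfd) < 0) {
		perror("setup");
		return 1;
	}
	/* file -> pipe: this side goes through ->splice_read */
	while ((n = splice(fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE)) > 0) {
		/* pipe -> stdout: drain what the first splice queued */
		ssize_t left = n;
		while (left > 0) {
			ssize_t m = splice(pfd[0], NULL, STDOUT_FILENO, NULL,
					   left, SPLICE_F_MOVE);
			if (m <= 0) {
				perror("splice out");
				return 1;
			}
			left -= m;
		}
	}
	if (n < 0)
		perror("splice in");
	close(fd);
	return n < 0 ? 1 : 0;
}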
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--  mm/shmem.c | 218
1 file changed, 217 insertions(+), 1 deletion(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index c1db11cf220d..d176e488f04d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -51,6 +51,7 @@ static struct vfsmount *shm_mnt;
 #include <linux/shmem_fs.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
+#include <linux/splice.h>
 #include <linux/security.h>
 #include <linux/swapops.h>
 #include <linux/mempolicy.h>
@@ -1844,6 +1845,221 @@ static ssize_t shmem_file_aio_read(struct kiocb *iocb,
 	return retval;
 }
 
+static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
+				struct pipe_inode_info *pipe, size_t len,
+				unsigned int flags)
+{
+	struct address_space *mapping = in->f_mapping;
+	unsigned int loff, nr_pages, req_pages;
+	struct page *pages[PIPE_DEF_BUFFERS];
+	struct partial_page partial[PIPE_DEF_BUFFERS];
+	struct page *page;
+	pgoff_t index, end_index;
+	loff_t isize, left;
+	int error, page_nr;
+	struct splice_pipe_desc spd = {
+		.pages = pages,
+		.partial = partial,
+		.flags = flags,
+		.ops = &page_cache_pipe_buf_ops,
+		.spd_release = spd_release_page,
+	};
+
+	isize = i_size_read(in->f_mapping->host);
+	if (unlikely(*ppos >= isize))
+		return 0;
+
+	left = isize - *ppos;
+	if (unlikely(left < len))
+		len = left;
+
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
+
+	index = *ppos >> PAGE_CACHE_SHIFT;
+	loff = *ppos & ~PAGE_CACHE_MASK;
+	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	nr_pages = min(req_pages, pipe->buffers);
+
+	/*
+	 * Lookup the (hopefully) full range of pages we need.
+	 */
+	spd.nr_pages = find_get_pages_contig(mapping, index,
+						nr_pages, spd.pages);
+	index += spd.nr_pages;
+
+	/*
+	 * If find_get_pages_contig() returned fewer pages than we needed,
+	 * readahead/allocate the rest and fill in the holes.
+	 */
+	if (spd.nr_pages < nr_pages)
+		page_cache_sync_readahead(mapping, &in->f_ra, in,
+				index, req_pages - spd.nr_pages);
+
+	error = 0;
+	while (spd.nr_pages < nr_pages) {
+		/*
+		 * Page could be there, find_get_pages_contig() breaks on
+		 * the first hole.
+		 */
+		page = find_get_page(mapping, index);
+		if (!page) {
+			/*
+			 * page didn't exist, allocate one.
+			 */
+			page = page_cache_alloc_cold(mapping);
+			if (!page)
+				break;
+
+			error = add_to_page_cache_lru(page, mapping, index,
+						GFP_KERNEL);
+			if (unlikely(error)) {
+				page_cache_release(page);
+				if (error == -EEXIST)
+					continue;
+				break;
+			}
+			/*
+			 * add_to_page_cache() locks the page, unlock it
+			 * to avoid convoluting the logic below even more.
+			 */
+			unlock_page(page);
+		}
+
+		spd.pages[spd.nr_pages++] = page;
+		index++;
+	}
+
+	/*
+	 * Now loop over the map and see if we need to start IO on any
+	 * pages, fill in the partial map, etc.
+	 */
+	index = *ppos >> PAGE_CACHE_SHIFT;
+	nr_pages = spd.nr_pages;
+	spd.nr_pages = 0;
+	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
+		unsigned int this_len;
+
+		if (!len)
+			break;
+
+		/*
+		 * this_len is the max we'll use from this page
+		 */
+		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
+		page = spd.pages[page_nr];
+
+		if (PageReadahead(page))
+			page_cache_async_readahead(mapping, &in->f_ra, in,
+					page, index, req_pages - page_nr);
+
+		/*
+		 * If the page isn't uptodate, we may need to start io on it
+		 */
+		if (!PageUptodate(page)) {
+			lock_page(page);
+
+			/*
+			 * Page was truncated, or invalidated by the
+			 * filesystem. Redo the find/create, but this time the
+			 * page is kept locked, so there's no chance of another
+			 * race with truncate/invalidate.
+			 */
+			if (!page->mapping) {
+				unlock_page(page);
+				page = find_or_create_page(mapping, index,
+						mapping_gfp_mask(mapping));
+
+				if (!page) {
+					error = -ENOMEM;
+					break;
+				}
+				page_cache_release(spd.pages[page_nr]);
+				spd.pages[page_nr] = page;
+			}
+			/*
+			 * page was already under io and is now done, great
+			 */
+			if (PageUptodate(page)) {
+				unlock_page(page);
+				goto fill_it;
+			}
+
+			/*
+			 * need to read in the page
+			 */
+			error = mapping->a_ops->readpage(in, page);
+			if (unlikely(error)) {
+				/*
+				 * We really should re-lookup the page here,
+				 * but it complicates things a lot. Instead
+				 * lets just do what we already stored, and
+				 * we'll get it the next time we are called.
+				 */
+				if (error == AOP_TRUNCATED_PAGE)
+					error = 0;
+
+				break;
+			}
+		}
+fill_it:
+		/*
+		 * i_size must be checked after PageUptodate.
+		 */
+		isize = i_size_read(mapping->host);
+		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+		if (unlikely(!isize || index > end_index))
+			break;
+
+		/*
+		 * if this is the last page, see if we need to shrink
+		 * the length and stop
+		 */
+		if (end_index == index) {
+			unsigned int plen;
+
+			/*
+			 * max good bytes in this page
+			 */
+			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+			if (plen <= loff)
+				break;
+
+			/*
+			 * force quit after adding this page
+			 */
+			this_len = min(this_len, plen - loff);
+			len = this_len;
+		}
+
+		spd.partial[page_nr].offset = loff;
+		spd.partial[page_nr].len = this_len;
+		len -= this_len;
+		loff = 0;
+		spd.nr_pages++;
+		index++;
+	}
+
+	/*
+	 * Release any pages at the end, if we quit early. 'page_nr' is how far
+	 * we got, 'nr_pages' is how many pages are in the map.
+	 */
+	while (page_nr < nr_pages)
+		page_cache_release(spd.pages[page_nr++]);
+	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+
+	if (spd.nr_pages)
+		error = splice_to_pipe(pipe, &spd);
+
+	splice_shrink_spd(pipe, &spd);
+
+	if (error > 0) {
+		*ppos += error;
+		file_accessed(in);
+	}
+	return error;
+}
+
 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -2699,7 +2915,7 @@ static const struct file_operations shmem_file_operations = {
 	.aio_read = shmem_file_aio_read,
 	.aio_write = generic_file_aio_write,
 	.fsync = noop_fsync,
-	.splice_read = generic_file_splice_read,
+	.splice_read = shmem_file_splice_read,
 	.splice_write = generic_file_splice_write,
 #endif
 };
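[Editor's note, not part of the commit: the one-line file_operations change at the end is what switches every splice-based consumer over to the new path. In kernels of this era sendfile(2) is also implemented on top of the splice machinery, so it too ends up in the source file's ->splice_read. A hedged sketch; file names are illustrative and /dev/shm is assumed to be tmpfs.]

#include <fcntl.h>
#include <stdio.h>
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int in = open("/dev/shm/splice-demo", O_RDONLY);
	int out = open("/tmp/splice-demo.out", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	struct stat st;
	off_t off = 0;

	if (in < 0 || out < 0 || fstat(in, &st) < 0) {
		perror("setup");
		return 1;
	}
	/* sendfile() drives the kernel's splice path, which for a tmpfs
	 * source now means shmem_file_splice_read() rather than
	 * generic_file_splice_read(). */
	while (off < st.st_size) {
		ssize_t n = sendfile(out, in, &off, st.st_size - off);
		if (n <= 0) {
			perror("sendfile");
			return 1;
		}
	}
	close(in);
	close(out);
	return 0;
}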