path: root/fs/cifs
author    Jeff Layton <jlayton@redhat.com>    2011-10-19 15:30:16 -0400
committer Jeff Layton <jlayton@redhat.com>    2011-10-19 15:30:16 -0400
commit    690c5e3163502f229e5b5d455e5212e28c20cd6d (patch)
tree      fd19fcb81d852b7cb8069b11d8aea1a70e0aac30 /fs/cifs
parent    e28bc5b1fdbd6e850488234d6072e6b66fc46146 (diff)
cifs: convert cifs_readpages to use async reads
Now that we have code in place to do asynchronous reads, convert
cifs_readpages to use it. The new cifs_readpages walks the page_list
that gets passed in, locks and adds the pages to the pagecache and sets
up cifs_readdata to handle the reads. The rest is handled by the
cifs_async_readv infrastructure.

Reviewed-and-Tested-by: Pavel Shilovsky <piastry@etersoft.ru>
Signed-off-by: Jeff Layton <jlayton@redhat.com>
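For orientation, the core of the new cifs_readpages() boils down to the batching loop sketched below: take the lowest-indexed page off page_list, pull in contiguous followers until the rsize limit is hit, wrap the batch in a cifs_readdata, and submit it with cifs_async_readv(), retrying on -EAGAIN. This is a condensed, hypothetical sketch, not code from the patch: the function name readpages_batch_sketch is invented, and the page-cache insertion, fscache read, cifsFileInfo reference counting, and error-path page cleanup that the real function performs are all omitted (see the diff below for those). It assumes the cifs_readdata/cifs_async_readv infrastructure added by the parent commit.

/*
 * Hypothetical illustration only (not part of the patch). Assumes the
 * cifs_readdata/cifs_async_readv infrastructure from the parent commit;
 * page-cache insertion and cifsFileInfo refcounting are omitted.
 */
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include "cifsglob.h"
#include "cifsproto.h"

static int readpages_batch_sketch(struct cifsFileInfo *open_file,
				  struct address_space *mapping,
				  struct list_head *page_list,
				  unsigned int rsize, pid_t pid)
{
	int rc = 0;

	/* page_list arrives in order of declining page index */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int nr_pages = 1;
		unsigned int expected_index;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		loff_t offset;
		LIST_HEAD(tmplist);

		/* start a batch with the lowest-indexed page */
		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* pull in contiguous followers until rsize is reached */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			if (page->index != expected_index ||
			    bytes + PAGE_CACHE_SIZE > rsize)
				break;
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		/* hand the batch to the async read machinery */
		rdata = cifs_readdata_alloc(nr_pages);
		if (!rdata)
			return -ENOMEM;
		rdata->cfile = open_file;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		list_splice_init(&tmplist, &rdata->pages);

		do {
			rc = cifs_async_readv(rdata);
		} while (rc == -EAGAIN);

		if (rc) {
			cifs_readdata_free(rdata);
			break;
		}
	}
	return rc;
}

Note that tmplist ends up in increasing index order (page_list arrives in declining order and list_move_tail reverses it), which is the order rdata->pages needs, as the comment in the patch below explains.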
Diffstat (limited to 'fs/cifs')
-rw-r--r--  fs/cifs/file.c  |  281
1 file changed, 113 insertions(+), 168 deletions(-)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 852d1f39ada..8f6917816fe 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -32,6 +32,7 @@
 #include <linux/delay.h>
 #include <linux/mount.h>
 #include <linux/slab.h>
+#include <linux/swap.h>
 #include <asm/div64.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -2000,82 +2001,24 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	return rc;
 }
 
-
-static void cifs_copy_cache_pages(struct address_space *mapping,
-	struct list_head *pages, int bytes_read, char *data)
-{
-	struct page *page;
-	char *target;
-
-	while (bytes_read > 0) {
-		if (list_empty(pages))
-			break;
-
-		page = list_entry(pages->prev, struct page, lru);
-		list_del(&page->lru);
-
-		if (add_to_page_cache_lru(page, mapping, page->index,
-					  GFP_KERNEL)) {
-			page_cache_release(page);
-			cFYI(1, "Add page cache failed");
-			data += PAGE_CACHE_SIZE;
-			bytes_read -= PAGE_CACHE_SIZE;
-			continue;
-		}
-		page_cache_release(page);
-
-		target = kmap_atomic(page, KM_USER0);
-
-		if (PAGE_CACHE_SIZE > bytes_read) {
-			memcpy(target, data, bytes_read);
-			/* zero the tail end of this partial page */
-			memset(target + bytes_read, 0,
-			       PAGE_CACHE_SIZE - bytes_read);
-			bytes_read = 0;
-		} else {
-			memcpy(target, data, PAGE_CACHE_SIZE);
-			bytes_read -= PAGE_CACHE_SIZE;
-		}
-		kunmap_atomic(target, KM_USER0);
-
-		flush_dcache_page(page);
-		SetPageUptodate(page);
-		unlock_page(page);
-		data += PAGE_CACHE_SIZE;
-
-		/* add page to FS-Cache */
-		cifs_readpage_to_fscache(mapping->host, page);
-	}
-	return;
-}
-
 static int cifs_readpages(struct file *file, struct address_space *mapping,
 	struct list_head *page_list, unsigned num_pages)
 {
-	int rc = -EACCES;
-	int xid;
-	loff_t offset;
-	struct page *page;
-	struct cifs_sb_info *cifs_sb;
-	struct cifs_tcon *pTcon;
-	unsigned int bytes_read = 0;
-	unsigned int read_size, i;
-	char *smb_read_data = NULL;
-	struct smb_com_read_rsp *pSMBr;
-	struct cifsFileInfo *open_file;
-	struct cifs_io_parms io_parms;
-	int buf_type = CIFS_NO_BUFFER;
-	__u32 pid;
+	int rc;
+	struct list_head tmplist;
+	struct cifsFileInfo *open_file = file->private_data;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+	unsigned int rsize = cifs_sb->rsize;
+	pid_t pid;
 
-	xid = GetXid();
-	if (file->private_data == NULL) {
-		rc = -EBADF;
-		FreeXid(xid);
-		return rc;
-	}
-	open_file = file->private_data;
-	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-	pTcon = tlink_tcon(open_file->tlink);
+	/*
+	 * Give up immediately if rsize is too small to read an entire page.
+	 * The VFS will fall back to readpage. We should never reach this
+	 * point however since we set ra_pages to 0 when the rsize is smaller
+	 * than a cache page.
+	 */
+	if (unlikely(rsize < PAGE_CACHE_SIZE))
+		return 0;
 
 	/*
 	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
@@ -2084,125 +2027,127 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
 					 &num_pages);
 	if (rc == 0)
-		goto read_complete;
+		return rc;
 
-	cFYI(DBG2, "rpages: num pages %d", num_pages);
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 		pid = open_file->pid;
 	else
 		pid = current->tgid;
 
-	for (i = 0; i < num_pages; ) {
-		unsigned contig_pages;
-		struct page *tmp_page;
-		unsigned long expected_index;
+	rc = 0;
+	INIT_LIST_HEAD(&tmplist);
 
-		if (list_empty(page_list))
-			break;
+	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
+		mapping, num_pages);
+
+	/*
+	 * Start with the page at end of list and move it to private
+	 * list. Do the same with any following pages until we hit
+	 * the rsize limit, hit an index discontinuity, or run out of
+	 * pages. Issue the async read and then start the loop again
+	 * until the list is empty.
+	 *
+	 * Note that list order is important. The page_list is in
+	 * the order of declining indexes. When we put the pages in
+	 * the rdata->pages, then we want them in increasing order.
+	 */
+	while (!list_empty(page_list)) {
+		unsigned int bytes = PAGE_CACHE_SIZE;
+		unsigned int expected_index;
+		unsigned int nr_pages = 1;
+		loff_t offset;
+		struct page *page, *tpage;
+		struct cifs_readdata *rdata;
 
 		page = list_entry(page_list->prev, struct page, lru);
+
+		/*
+		 * Lock the page and put it in the cache. Since no one else
+		 * should have access to this page, we're safe to simply set
+		 * PG_locked without checking it first.
+		 */
+		__set_page_locked(page);
+		rc = add_to_page_cache_locked(page, mapping,
+					      page->index, GFP_KERNEL);
+
+		/* give up if we can't stick it in the cache */
+		if (rc) {
+			__clear_page_locked(page);
+			break;
+		}
+
+		/* move first page to the tmplist */
 		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+		list_move_tail(&page->lru, &tmplist);
+
+		/* now try and add more pages onto the request */
+		expected_index = page->index + 1;
+		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
+			/* discontinuity ? */
+			if (page->index != expected_index)
+				break;
+
+			/* would this page push the read over the rsize? */
+			if (bytes + PAGE_CACHE_SIZE > rsize)
+				break;
 
-		/* count adjacent pages that we will read into */
-		contig_pages = 0;
-		expected_index =
-			list_entry(page_list->prev, struct page, lru)->index;
-		list_for_each_entry_reverse(tmp_page, page_list, lru) {
-			if (tmp_page->index == expected_index) {
-				contig_pages++;
-				expected_index++;
-			} else
+			__set_page_locked(page);
+			if (add_to_page_cache_locked(page, mapping,
+						page->index, GFP_KERNEL)) {
+				__clear_page_locked(page);
 				break;
+			}
+			list_move_tail(&page->lru, &tmplist);
+			bytes += PAGE_CACHE_SIZE;
+			expected_index++;
+			nr_pages++;
 		}
-		if (contig_pages + i > num_pages)
-			contig_pages = num_pages - i;
-
-		/* for reads over a certain size could initiate async
-		   read ahead */
-
-		read_size = contig_pages * PAGE_CACHE_SIZE;
-		/* Read size needs to be in multiples of one page */
-		read_size = min_t(const unsigned int, read_size,
-				  cifs_sb->rsize & PAGE_CACHE_MASK);
-		cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
-				read_size, contig_pages);
-		rc = -EAGAIN;
-		while (rc == -EAGAIN) {
+
+		rdata = cifs_readdata_alloc(nr_pages);
+		if (!rdata) {
+			/* best to give up if we're out of mem */
+			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
+				list_del(&page->lru);
+				lru_cache_add_file(page);
+				unlock_page(page);
+				page_cache_release(page);
+			}
+			rc = -ENOMEM;
+			break;
+		}
+
+		spin_lock(&cifs_file_list_lock);
+		cifsFileInfo_get(open_file);
+		spin_unlock(&cifs_file_list_lock);
+		rdata->cfile = open_file;
+		rdata->mapping = mapping;
+		rdata->offset = offset;
+		rdata->bytes = bytes;
+		rdata->pid = pid;
+		list_splice_init(&tmplist, &rdata->pages);
+
+		do {
 			if (open_file->invalidHandle) {
 				rc = cifs_reopen_file(open_file, true);
 				if (rc != 0)
-					break;
+					continue;
 			}
-			io_parms.netfid = open_file->netfid;
-			io_parms.pid = pid;
-			io_parms.tcon = pTcon;
-			io_parms.offset = offset;
-			io_parms.length = read_size;
-			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
-					 &smb_read_data, &buf_type);
-			/* BB more RC checks ? */
-			if (rc == -EAGAIN) {
-				if (smb_read_data) {
-					if (buf_type == CIFS_SMALL_BUFFER)
-						cifs_small_buf_release(smb_read_data);
-					else if (buf_type == CIFS_LARGE_BUFFER)
-						cifs_buf_release(smb_read_data);
-					smb_read_data = NULL;
-				}
-			}
-		}
-		if ((rc < 0) || (smb_read_data == NULL)) {
-			cFYI(1, "Read error in readpages: %d", rc);
-			break;
-		} else if (bytes_read > 0) {
-			task_io_account_read(bytes_read);
-			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
-			cifs_copy_cache_pages(mapping, page_list, bytes_read,
-				smb_read_data + 4 /* RFC1001 hdr */ +
-				le16_to_cpu(pSMBr->DataOffset));
-
-			i += bytes_read >> PAGE_CACHE_SHIFT;
-			cifs_stats_bytes_read(pTcon, bytes_read);
-			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
-				i++; /* account for partial page */
-
-				/* server copy of file can have smaller size
-				   than client */
-				/* BB do we need to verify this common case ?
-				   this case is ok - if we are at server EOF
-				   we will hit it on next read */
+			rc = cifs_async_readv(rdata);
+		} while (rc == -EAGAIN);
 
-				/* break; */
+		if (rc != 0) {
+			list_for_each_entry_safe(page, tpage, &rdata->pages,
+						 lru) {
+				list_del(&page->lru);
+				lru_cache_add_file(page);
+				unlock_page(page);
+				page_cache_release(page);
 			}
-		} else {
-			cFYI(1, "No bytes read (%d) at offset %lld . "
-				"Cleaning remaining pages from readahead list",
-				bytes_read, offset);
-			/* BB turn off caching and do new lookup on
-			   file size at server? */
+			cifs_readdata_free(rdata);
 			break;
 		}
-		if (smb_read_data) {
-			if (buf_type == CIFS_SMALL_BUFFER)
-				cifs_small_buf_release(smb_read_data);
-			else if (buf_type == CIFS_LARGE_BUFFER)
-				cifs_buf_release(smb_read_data);
-			smb_read_data = NULL;
-		}
-		bytes_read = 0;
-	}
-
-/* need to free smb_read_data buf before exit */
-	if (smb_read_data) {
-		if (buf_type == CIFS_SMALL_BUFFER)
-			cifs_small_buf_release(smb_read_data);
-		else if (buf_type == CIFS_LARGE_BUFFER)
-			cifs_buf_release(smb_read_data);
-		smb_read_data = NULL;
 	}
 
-read_complete:
-	FreeXid(xid);
 	return rc;
 }
 