summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2018-06-01 12:03:05 -0400
committerDarrick J. Wong <darrick.wong@oracle.com>2018-06-01 21:37:32 -0400
commitc534aa3fdd149fab18b094375f334b4bb3635cbf (patch)
treeddedbe71f726ff1921b95dcae15fcb8f83b7ead2 /mm
parent836978b35fcd402a31323a16ac0b4c8242890a84 (diff)
mm: return an unsigned int from __do_page_cache_readahead
We never return an error, so switch to returning an unsigned int.  Most
callers already did implicit casts to an unsigned type, and the one that
didn't can be simplified now.

Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Diffstat (limited to 'mm')
-rw-r--r--mm/internal.h2
-rw-r--r--mm/readahead.c15
2 files changed, 6 insertions, 11 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 62d8c34e63d5..954003ac766a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -53,7 +53,7 @@ void unmap_page_range(struct mmu_gather *tlb,
 		unsigned long addr, unsigned long end,
 		struct zap_details *details);
 
-extern int __do_page_cache_readahead(struct address_space *mapping,
+extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
 		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
 		unsigned long lookahead_size);
 
diff --git a/mm/readahead.c b/mm/readahead.c
index 16d0cb1e2616..fa4d4b767130 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -147,16 +147,16 @@ out:
  *
  * Returns the number of pages requested, or the maximum amount of I/O allowed.
  */
-int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-		pgoff_t offset, unsigned long nr_to_read,
+unsigned int __do_page_cache_readahead(struct address_space *mapping,
+		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
 		unsigned long lookahead_size)
 {
 	struct inode *inode = mapping->host;
 	struct page *page;
 	unsigned long end_index;	/* The last page we want to read */
 	LIST_HEAD(page_pool);
 	int page_idx;
-	int nr_pages = 0;
+	unsigned int nr_pages = 0;
 	loff_t isize = i_size_read(inode);
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
 
@@ -223,16 +223,11 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
 	nr_to_read = min(nr_to_read, max_pages);
 	while (nr_to_read) {
-		int err;
-
 		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
 
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
-		err = __do_page_cache_readahead(mapping, filp,
-				offset, this_chunk, 0);
-		if (err < 0)
-			return err;
+		__do_page_cache_readahead(mapping, filp, offset, this_chunk, 0);
 
 		offset += this_chunk;
 		nr_to_read -= this_chunk;