about summary refs log tree commit diff stats
path: root/fs
diff options
context:
space:
mode:
Diffstat (limited to 'fs')
-rw-r--r--  fs/nfs/pagelist.c  44
1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 5344371a257c..7913961aff22 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -204,6 +204,21 @@ nfs_wait_on_request(struct nfs_page *req)
204 TASK_UNINTERRUPTIBLE); 204 TASK_UNINTERRUPTIBLE);
205} 205}
206 206
207static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
208{
209 /*
210 * FIXME: ideally we should be able to coalesce all requests
211 * that are not block boundary aligned, but currently this
212 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
213 * since nfs_flush_multi and nfs_pagein_multi assume you
214 * can have only one struct nfs_page.
215 */
216 if (desc->pg_bsize < PAGE_SIZE)
217 return 0;
218
219 return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
220}
221
207/** 222/**
208 * nfs_pageio_init - initialise a page io descriptor 223 * nfs_pageio_init - initialise a page io descriptor
209 * @desc: pointer to descriptor 224 * @desc: pointer to descriptor
@@ -229,7 +244,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
229 desc->pg_ioflags = io_flags; 244 desc->pg_ioflags = io_flags;
230 desc->pg_error = 0; 245 desc->pg_error = 0;
231 desc->pg_lseg = NULL; 246 desc->pg_lseg = NULL;
232 desc->pg_test = NULL; 247 desc->pg_test = nfs_generic_pg_test;
233 pnfs_pageio_init(desc, inode); 248 pnfs_pageio_init(desc, inode);
234} 249}
235 250
@@ -260,13 +275,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
260 return false; 275 return false;
261 if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE) 276 if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
262 return false; 277 return false;
263 /* 278 return pgio->pg_test(pgio, prev, req);
264 * Non-whole file layouts need to check that req is inside of
265 * pgio->pg_lseg.
266 */
267 if (pgio->pg_test && !pgio->pg_test(pgio, prev, req))
268 return false;
269 return true;
270} 279}
271 280
272/** 281/**
@@ -280,31 +289,18 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
280static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, 289static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
281 struct nfs_page *req) 290 struct nfs_page *req)
282{ 291{
283 size_t newlen = req->wb_bytes;
284
285 if (desc->pg_count != 0) { 292 if (desc->pg_count != 0) {
286 struct nfs_page *prev; 293 struct nfs_page *prev;
287 294
288 /*
289 * FIXME: ideally we should be able to coalesce all requests
290 * that are not block boundary aligned, but currently this
291 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
292 * since nfs_flush_multi and nfs_pagein_multi assume you
293 * can have only one struct nfs_page.
294 */
295 if (desc->pg_bsize < PAGE_SIZE)
296 return 0;
297 newlen += desc->pg_count;
298 if (newlen > desc->pg_bsize)
299 return 0;
300 prev = nfs_list_entry(desc->pg_list.prev); 295 prev = nfs_list_entry(desc->pg_list.prev);
301 if (!nfs_can_coalesce_requests(prev, req, desc)) 296 if (!nfs_can_coalesce_requests(prev, req, desc))
302 return 0; 297 return 0;
303 } else 298 } else {
304 desc->pg_base = req->wb_pgbase; 299 desc->pg_base = req->wb_pgbase;
300 }
305 nfs_list_remove_request(req); 301 nfs_list_remove_request(req);
306 nfs_list_add_request(req, &desc->pg_list); 302 nfs_list_add_request(req, &desc->pg_list);
307 desc->pg_count = newlen; 303 desc->pg_count += req->wb_bytes;
308 return 1; 304 return 1;
309} 305}
310 306