author		Jens Axboe <jens.axboe@oracle.com>	2007-06-07 03:39:42 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2007-06-08 02:34:11 -0400
commit		620a324b744a7d66c3c45a83042f8e7fc9fc5a04 (patch)
tree		07bf593206a2f38a28ba97811572d036c32b7927 /fs
parent		475ecade683566b19ebb84972de864039ac5fce3 (diff)
splice: __generic_file_splice_read: fix read/truncate race
Original patch and description from Neil Brown <neilb@suse.de>, merged and adapted to the splice branch by me. Neil's text follows:

__generic_file_splice_read() currently samples i_size at the start and doesn't do so again unless it needs to call ->readpage to load a page. After ->readpage it has to re-sample i_size, as a truncate may have caused that page to be filled with zeros, and the read() call should not see them.

However, there are other activities that might cause ->readpage to be called on a page between the time that __generic_file_splice_read() samples i_size and when it finds that it has an uptodate page. These include at least read-ahead and possibly another thread performing a read. So we must sample i_size *after* it has an uptodate page.

Thus the current sampling at the start and after a read can be replaced with a single sampling before the page is added into the spd.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
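To make the race concrete, here is a minimal userspace sketch, not part of the patch, that stresses the pattern the fix addresses: one thread splices a file of known non-zero bytes into a pipe while another thread truncates it. With the old ordering, a splice that sampled i_size before the page was uptodate could hand out zero-filled bytes from beyond the new end of file. The file name, sizes, and timing below are arbitrary choices for illustration, and the race is timing dependent, so it may take many runs to observe anything on an unpatched kernel. Build with something like: gcc -O2 -pthread splice-race.c -o splice-race.

/*
 * Hypothetical reproducer sketch (not from the patch): splice a file of
 * non-zero bytes into a pipe while a second thread truncates the file.
 * With i_size sampled only after PageUptodate, the spliced data should
 * never contain zero bytes from beyond the truncated size.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define FILE_SIZE	(1 << 20)	/* 1 MiB test file, arbitrary */

static const char *path = "splice-race-test.dat";	/* arbitrary name */

static void *truncator(void *arg)
{
	int fd = *(int *)arg;

	usleep(1000);			/* let the splice loop get going */
	if (ftruncate(fd, FILE_SIZE / 2) < 0)
		perror("ftruncate");
	return NULL;
}

int main(void)
{
	char buf[4096];
	int fd, pipefd[2];
	pthread_t thr;
	loff_t off = 0;
	ssize_t n;

	fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (fd < 0 || pipe(pipefd) < 0) {
		perror("setup");
		return 1;
	}

	/* fill the file with a known non-zero pattern */
	memset(buf, 'x', sizeof(buf));
	for (size_t done = 0; done < FILE_SIZE; done += sizeof(buf))
		if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
			perror("write");
			return 1;
		}

	pthread_create(&thr, NULL, truncator, &fd);

	/* splice file -> pipe, then drain the pipe and check for zeros */
	while ((n = splice(fd, &off, pipefd[1], NULL, sizeof(buf), 0)) > 0) {
		ssize_t left = n;

		while (left > 0) {
			ssize_t r = read(pipefd[0], buf, (size_t)left);

			if (r <= 0)
				break;
			if (memchr(buf, 0, (size_t)r)) {
				fprintf(stderr, "zero byte spliced past the truncated i_size\n");
				return 1;
			}
			left -= r;
		}
	}

	pthread_join(thr, NULL);
	close(fd);
	unlink(path);
	printf("no stale data seen (race is timing dependent)\n");
	return 0;
}

The program only demonstrates the observable symptom; the actual fix is the kernel hunk below, which moves the i_size check so that it runs after the PageUptodate test.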
Diffstat (limited to 'fs')
-rw-r--r--	fs/splice.c	46
1 file changed, 23 insertions, 23 deletions
diff --git a/fs/splice.c b/fs/splice.c
index 123fcdb2e4d9..cb211360273a 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -413,37 +413,37 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 
 				break;
 			}
+		}
+fill_it:
+		/*
+		 * i_size must be checked after PageUptodate.
+		 */
+		isize = i_size_read(mapping->host);
+		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+		if (unlikely(!isize || index > end_index))
+			break;
+
+		/*
+		 * if this is the last page, see if we need to shrink
+		 * the length and stop
+		 */
+		if (end_index == index) {
+			unsigned int plen;
 
 			/*
-			 * i_size must be checked after ->readpage().
+			 * max good bytes in this page
 			 */
-			isize = i_size_read(mapping->host);
-			end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
-			if (unlikely(!isize || index > end_index))
+			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+			if (plen <= loff)
 				break;
 
 			/*
-			 * if this is the last page, see if we need to shrink
-			 * the length and stop
+			 * force quit after adding this page
 			 */
-			if (end_index == index) {
-				unsigned int plen;
-
-				/*
-				 * max good bytes in this page
-				 */
-				plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
-				if (plen <= loff)
-					break;
-
-				/*
-				 * force quit after adding this page
-				 */
-				this_len = min(this_len, plen - loff);
-				len = this_len;
-			}
+			this_len = min(this_len, plen - loff);
+			len = this_len;
 		}
-fill_it:
+
 		partial[page_nr].offset = loff;
 		partial[page_nr].len = this_len;
 		len -= this_len;