about summary refs log tree commit diff stats
path: root/fs/direct-io.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@suse.de>2006-06-13 02:26:10 -0400
committerJens Axboe <axboe@nelson.home.kernel.dk>2006-06-23 11:10:39 -0400
commitb31dc66a54ad986b6b73bdc49c8efc17cbad1833 (patch)
tree5591383c1cbffe11512da889c971f899333f1a44 /fs/direct-io.c
parent271f18f102c789f59644bb6c53a69da1df72b2f4 (diff)
[PATCH] Kill PF_SYNCWRITE flag
A process flag to indicate whether we are doing sync io is incredibly ugly. It also causes performance problems when one does a lot of async io and then proceeds to sync it. Part of the io will go out as async, and the other part as sync. This causes a disconnect between the previously submitted io and the synced io. For io schedulers such as CFQ, this will cause us lost merges and suboptimal behaviour in scheduling. Remove PF_SYNCWRITE completely from the fsync/msync paths, and let the O_DIRECT path just directly indicate that the writes are sync by using WRITE_SYNC instead. Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'fs/direct-io.c')
-rw-r--r--fs/direct-io.c18
1 file changed, 8 insertions, 10 deletions
diff --git a/fs/direct-io.c b/fs/direct-io.c
index b05d1b218776..538fb0418fba 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -162,7 +162,7 @@ static int dio_refill_pages(struct dio *dio)
162 NULL); /* vmas */ 162 NULL); /* vmas */
163 up_read(&current->mm->mmap_sem); 163 up_read(&current->mm->mmap_sem);
164 164
165 if (ret < 0 && dio->blocks_available && (dio->rw == WRITE)) { 165 if (ret < 0 && dio->blocks_available && (dio->rw & WRITE)) {
166 struct page *page = ZERO_PAGE(dio->curr_user_address); 166 struct page *page = ZERO_PAGE(dio->curr_user_address);
167 /* 167 /*
168 * A memory fault, but the filesystem has some outstanding 168 * A memory fault, but the filesystem has some outstanding
@@ -535,7 +535,7 @@ static int get_more_blocks(struct dio *dio)
535 map_bh->b_state = 0; 535 map_bh->b_state = 0;
536 map_bh->b_size = fs_count << dio->inode->i_blkbits; 536 map_bh->b_size = fs_count << dio->inode->i_blkbits;
537 537
538 create = dio->rw == WRITE; 538 create = dio->rw & WRITE;
539 if (dio->lock_type == DIO_LOCKING) { 539 if (dio->lock_type == DIO_LOCKING) {
540 if (dio->block_in_file < (i_size_read(dio->inode) >> 540 if (dio->block_in_file < (i_size_read(dio->inode) >>
541 dio->blkbits)) 541 dio->blkbits))
@@ -867,7 +867,7 @@ do_holes:
867 loff_t i_size_aligned; 867 loff_t i_size_aligned;
868 868
869 /* AKPM: eargh, -ENOTBLK is a hack */ 869 /* AKPM: eargh, -ENOTBLK is a hack */
870 if (dio->rw == WRITE) { 870 if (dio->rw & WRITE) {
871 page_cache_release(page); 871 page_cache_release(page);
872 return -ENOTBLK; 872 return -ENOTBLK;
873 } 873 }
@@ -1045,7 +1045,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1045 } 1045 }
1046 } /* end iovec loop */ 1046 } /* end iovec loop */
1047 1047
1048 if (ret == -ENOTBLK && rw == WRITE) { 1048 if (ret == -ENOTBLK && (rw & WRITE)) {
1049 /* 1049 /*
1050 * The remaining part of the request will be 1050 * The remaining part of the request will be
1051 * be handled by buffered I/O when we return 1051 * be handled by buffered I/O when we return
@@ -1089,7 +1089,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1089 if (dio->is_async) { 1089 if (dio->is_async) {
1090 int should_wait = 0; 1090 int should_wait = 0;
1091 1091
1092 if (dio->result < dio->size && rw == WRITE) { 1092 if (dio->result < dio->size && (rw & WRITE)) {
1093 dio->waiter = current; 1093 dio->waiter = current;
1094 should_wait = 1; 1094 should_wait = 1;
1095 } 1095 }
@@ -1142,7 +1142,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1142 ret = transferred; 1142 ret = transferred;
1143 1143
1144 /* We could have also come here on an AIO file extend */ 1144 /* We could have also come here on an AIO file extend */
1145 if (!is_sync_kiocb(iocb) && rw == WRITE && 1145 if (!is_sync_kiocb(iocb) && (rw & WRITE) &&
1146 ret >= 0 && dio->result == dio->size) 1146 ret >= 0 && dio->result == dio->size)
1147 /* 1147 /*
1148 * For AIO writes where we have completed the 1148 * For AIO writes where we have completed the
@@ -1194,7 +1194,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1194 int acquire_i_mutex = 0; 1194 int acquire_i_mutex = 0;
1195 1195
1196 if (rw & WRITE) 1196 if (rw & WRITE)
1197 current->flags |= PF_SYNCWRITE; 1197 rw = WRITE_SYNC;
1198 1198
1199 if (bdev) 1199 if (bdev)
1200 bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev)); 1200 bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));
@@ -1270,7 +1270,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1270 * even for AIO, we need to wait for i/o to complete before 1270 * even for AIO, we need to wait for i/o to complete before
1271 * returning in this case. 1271 * returning in this case.
1272 */ 1272 */
1273 dio->is_async = !is_sync_kiocb(iocb) && !((rw == WRITE) && 1273 dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
1274 (end > i_size_read(inode))); 1274 (end > i_size_read(inode)));
1275 1275
1276 retval = direct_io_worker(rw, iocb, inode, iov, offset, 1276 retval = direct_io_worker(rw, iocb, inode, iov, offset,
@@ -1284,8 +1284,6 @@ out:
1284 mutex_unlock(&inode->i_mutex); 1284 mutex_unlock(&inode->i_mutex);
1285 else if (acquire_i_mutex) 1285 else if (acquire_i_mutex)
1286 mutex_lock(&inode->i_mutex); 1286 mutex_lock(&inode->i_mutex);
1287 if (rw & WRITE)
1288 current->flags &= ~PF_SYNCWRITE;
1289 return retval; 1287 return retval;
1290} 1288}
1291EXPORT_SYMBOL(__blockdev_direct_IO); 1289EXPORT_SYMBOL(__blockdev_direct_IO);