aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorJens Axboe <axboe@suse.de>2006-06-13 02:26:10 -0400
committerJens Axboe <axboe@nelson.home.kernel.dk>2006-06-23 11:10:39 -0400
commitb31dc66a54ad986b6b73bdc49c8efc17cbad1833 (patch)
tree5591383c1cbffe11512da889c971f899333f1a44 /block
parent271f18f102c789f59644bb6c53a69da1df72b2f4 (diff)
[PATCH] Kill PF_SYNCWRITE flag
A process flag to indicate whether we are doing sync io is incredibly ugly. It also causes performance problems when one does a lot of async io and then proceeds to sync it. Part of the io will go out as async, and the other part as sync. This causes a disconnect between the previously submitted io and the synced io. For io schedulers such as CFQ, this will cause us lost merges and suboptimal behaviour in scheduling. Remove PF_SYNCWRITE completely from the fsync/msync paths, and let the O_DIRECT path just directly indicate that the writes are sync by using WRITE_SYNC instead. Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'block')
-rw-r--r--block/as-iosched.c2
-rw-r--r--block/cfq-iosched.c4
-rw-r--r--block/ll_rw_blk.c3
3 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 9b13d72ffefa..56c99fa037df 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1339,7 +1339,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	arq->state = AS_RQ_NEW;
 
 	if (rq_data_dir(arq->request) == READ
-			|| current->flags&PF_SYNCWRITE)
+			|| (arq->request->flags & REQ_RW_SYNC))
 		arq->is_sync = 1;
 	else
 		arq->is_sync = 0;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c88f161d3fb3..4c4e9cc3ae26 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -277,8 +277,6 @@ static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsi
 static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
 
-#define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
-
 /*
  * lots of deadline iosched dupes, can be abstracted later...
  */
@@ -334,7 +332,7 @@ static int cfq_queue_empty(request_queue_t *q)
 
 static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
 {
-	if (rw == READ || process_sync(task))
+	if (rw == READ || rw == WRITE_SYNC)
 		return task->pid;
 
 	return CFQ_KEY_ASYNC;
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 17c42ddd31db..2270bb451385 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2827,6 +2827,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 	if (unlikely(bio_barrier(bio)))
 		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
+	if (bio_sync(bio))
+		req->flags |= REQ_RW_SYNC;
+
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
 	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);