author		Jens Axboe <jens.axboe@oracle.com>	2006-12-13 07:02:26 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2006-12-13 07:02:26 -0500
commit		7749a8d423c483a51983b666613acda1a4dd9c1b (patch)
tree		5a2a20640cca9ca519324b7933005f6fd9c4a6a5 /block/cfq-iosched.c
parent		445722f97a0ecd3aed3f53d9f0dcaacaef8c6223 (diff)
[PATCH] Propagate down request sync flag

We need to do this, otherwise the io schedulers don't get access to the
sync flag. Then they cannot tell the difference between a regular write
and an O_DIRECT write, which can cause a performance loss.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
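For reference, the sync distinction the schedulers gain here comes down to a single flag test on the request. A minimal sketch of that test, assuming the 2.6.19-era flags that appear in the diff below (REQ_RW_SYNC is the synchronous-write hint set for O_DIRECT writes); this mirrors the rq_is_sync() helper that cfq_set_request() uses further down, written out only for illustration:

	/*
	 * Illustrative sketch, not verbatim kernel code: a request counts
	 * as "sync" if it is a read, or a write that carries the
	 * REQ_RW_SYNC hint (e.g. an O_DIRECT write).
	 */
	static inline int rq_is_sync_sketch(struct request *rq)
	{
		return rq_data_dir(rq) == READ || (rq->cmd_flags & REQ_RW_SYNC);
	}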
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	|	18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 78c6b312bd30..533a2938ffd6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -219,9 +219,12 @@ static int cfq_queue_empty(request_queue_t *q)
 	return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
 {
-	if (rw == READ || rw == WRITE_SYNC)
+	/*
+	 * Use the per-process queue, for read requests and synchronous writes
+	 */
+	if (!(rw & REQ_RW) || is_sync)
 		return task->pid;
 
 	return CFQ_KEY_ASYNC;
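To see what the new keying rule does, here is a small standalone userspace model; queue_key() and the flag values are illustrative stand-ins for cfq_queue_pid() and the kernel's REQ_* bits, not actual kernel code:

	#include <stdio.h>
	#include <sys/types.h>

	#define REQ_RW        (1 << 0)	/* stand-in: direction bit, set = write */
	#define REQ_RW_SYNC   (1 << 1)	/* stand-in: synchronous-write hint */
	#define CFQ_KEY_ASYNC 0

	/*
	 * Models cfq_queue_pid(): reads and sync writes get a per-process
	 * queue key; all async writes collapse onto one shared key.
	 */
	static pid_t queue_key(pid_t pid, int rw, int is_sync)
	{
		if (!(rw & REQ_RW) || is_sync)
			return pid;
		return CFQ_KEY_ASYNC;
	}

	int main(void)
	{
		printf("read        -> key %d\n", (int)queue_key(42, 0, 0));
		printf("async write -> key %d\n", (int)queue_key(42, REQ_RW, 0));
		printf("sync write  -> key %d\n", (int)queue_key(42, REQ_RW, 1));
		return 0;
	}

Run, this prints 42, 0, 42: reads and synchronous writes stay on the caller's per-process queue, while regular buffered writes all share the async key.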
@@ -473,7 +476,7 @@ static struct request *
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
-	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
 	struct cfq_queue *cfqq;
 
 	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
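The bio_sync() call above reads the same hint at the bio level. In this kernel generation it is, as far as I recall from include/linux/bio.h, a plain bit test on bio->bi_rw:

	/* Sketch of the 2.6.19-era helper, for reference: */
	#define bio_sync(bio)	((bio)->bi_rw & (1 << BIO_RW_SYNC))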
@@ -1748,6 +1751,9 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
 	struct cfq_queue *cfqq;
+	unsigned int key;
+
+	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
 	/*
 	 * don't force setup of a queue from here, as a call to may_queue
@@ -1755,7 +1761,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
 	if (cfqq) {
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
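Note the asymmetry between the two lookup paths: cfq_may_queue() is handed only the rw flag word, so the patch masks the sync hint straight out of it with rw & REQ_RW_SYNC, while cfq_set_request() below holds a full struct request and can use rq_is_sync() instead. Both read the same underlying bit.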
@@ -1798,10 +1804,10 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	struct task_struct *tsk = current;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	pid_t key = cfq_queue_pid(tsk, rw);
+	const int is_sync = rq_is_sync(rq);
+	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
-	int is_sync = key != CFQ_KEY_ASYNC;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 