-rw-r--r--	block/cfq-iosched.c	18
-rw-r--r--	block/ll_rw_blk.c	28
2 files changed, 32 insertions, 14 deletions
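In short, the change below propagates a request's sync flag from bio submission down through the request allocator and into the io scheduler, so CFQ can tell synchronous writes apart when it picks a queue. As a reader's aid only (the helper name is made up; the macros are the ones used in the hunks below), this sketch shows how the rw_flags word that __make_request() now hands to get_request_wait() is assembled:

/*
 * Reader's sketch, not part of the patch: building the rw_flags word from
 * a bio, using bio_data_dir(), bio_sync() and REQ_RW_SYNC as in the hunks
 * below.  The helper name is illustrative only.
 */
static int build_rw_flags_sketch(struct bio *bio)
{
	int rw_flags = bio_data_dir(bio);	/* bit 0: READ (0) or WRITE (1) */

	if (bio_sync(bio))
		rw_flags |= REQ_RW_SYNC;	/* mark the request synchronous */

	return rw_flags;
}

Inside get_request() the direction bit is recovered as rw_flags & 0x01 for the request_list accounting, while elv_may_queue() and blk_alloc_request() now receive the full word.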
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 78c6b312bd30..533a2938ffd6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -219,9 +219,12 @@ static int cfq_queue_empty(request_queue_t *q)
 	return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
 {
-	if (rw == READ || rw == WRITE_SYNC)
+	/*
+	 * Use the per-process queue, for read requests and syncronous writes
+	 */
+	if (!(rw & REQ_RW) || is_sync)
 		return task->pid;
 
 	return CFQ_KEY_ASYNC;
@@ -473,7 +476,7 @@ static struct request *
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
-	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
 	struct cfq_queue *cfqq;
 
 	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
@@ -1748,6 +1751,9 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
 	struct cfq_queue *cfqq;
+	unsigned int key;
+
+	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
 	/*
 	 * don't force setup of a queue from here, as a call to may_queue
@@ -1755,7 +1761,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
 	if (cfqq) {
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
@@ -1798,10 +1804,10 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	struct task_struct *tsk = current;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	pid_t key = cfq_queue_pid(tsk, rw);
+	const int is_sync = rq_is_sync(rq);
+	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
-	int is_sync = key != CFQ_KEY_ASYNC;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
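On the CFQ side, cfq_queue_pid() now takes the sync state as an explicit argument instead of guessing it from rw, and each caller derives it from what it has at hand: bio_sync(bio) in cfq_find_rq_fmerge(), rw & REQ_RW_SYNC in cfq_may_queue(), and rq_is_sync(rq) in cfq_set_request(). A minimal restatement of the resulting keying rule (the helper name is made up, not part of the patch):

/*
 * Illustrative restatement of the cfq_queue_pid() decision after this
 * change: reads and synchronous writes map to the per-process queue,
 * everything else to the shared async key.
 */
static inline pid_t cfq_queue_key_sketch(struct task_struct *task, int rw_flags)
{
	int is_sync = rw_flags & REQ_RW_SYNC;

	if (!(rw_flags & REQ_RW) || is_sync)	/* read, or synchronous write */
		return task->pid;		/* per-process CFQ queue */

	return CFQ_KEY_ASYNC;			/* shared async queue */
}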
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a541b42c08e3..79807dbc306e 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2058,15 +2058,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
  * Returns NULL on failure, with queue_lock held.
  * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-				   gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
+	const int rw = rw_flags & 0x01;
 	int may_queue, priv;
 
-	may_queue = elv_may_queue(q, rw);
+	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -2162,12 +2163,13 @@ out:
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
 					struct bio *bio)
 {
+	const int rw = rw_flags & 0x01;
 	struct request *rq;
 
-	rq = get_request(q, rw, bio, GFP_NOIO);
+	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
 		struct request_list *rl = &q->rq;
@@ -2175,7 +2177,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw, bio, GFP_NOIO);
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 
 		if (!rq) {
 			struct io_context *ioc;
@@ -2910,6 +2912,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	int el_ret, nr_sectors, barrier, err;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
+	int rw_flags;
 
 	nr_sectors = bio_sectors(bio);
 
@@ -2984,10 +2987,19 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 
 get_rq:
 	/*
+	 * This sync check and mask will be re-done in init_request_from_bio(),
+	 * but we need to set it earlier to expose the sync flag to the
+	 * rq allocator and io schedulers.
+	 */
+	rw_flags = bio_data_dir(bio);
+	if (sync)
+		rw_flags |= REQ_RW_SYNC;
+
+	/*
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, bio_data_dir(bio), bio);
+	req = get_request_wait(q, rw_flags, bio);
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
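Since elv_may_queue() now receives the full flag word, an elevator's may_queue hook can react to the sync bit at allocation time. The fragment below is a hypothetical hook for illustration, not code from this patch; it assumes ELV_MQUEUE_MAY and ELV_MQUEUE_MUST as the usual companions of the ELV_MQUEUE_NO seen in get_request() above.

/*
 * Hypothetical elevator hook (illustration only): prefer synchronous
 * requests when deciding whether a new request may be allocated.
 */
static int example_may_queue(request_queue_t *q, int rw_flags)
{
	if (rw_flags & REQ_RW_SYNC)
		return ELV_MQUEUE_MUST;		/* always admit sync requests */

	return ELV_MQUEUE_MAY;			/* default behaviour for async */
}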