author	Jens Axboe <jens.axboe@oracle.com>	2009-10-13 06:29:45 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-10-13 06:29:45 -0400
commit	c30f33437c3f85ec48353a1ef811e148217a2aaf (patch)
tree	d5a0ca1e8d091a30fece2e9aeed285225c026049 /block
parent	132cc538cd90f60a0b5df6a512dfd4bc5fe2039a (diff)
parent	2ec24ff1d1875defa742c76c9c7d74dca06b7e1f (diff)
Merge branch 'for-linus' into for-2.6.33
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	16
-rw-r--r--	block/blk-merge.c	2
-rw-r--r--	block/blk-settings.c	2
-rw-r--r--	block/blk-tag.c	2
-rw-r--r--	block/cfq-iosched.c	259
-rw-r--r--	block/elevator.c	4
-rw-r--r--	block/genhd.c	4
7 files changed, 153 insertions, 136 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 81f34311659a..ac0fa10f8fa5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -70,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		part_stat_inc(cpu, part, merges[rw]);
 	else {
 		part_round_stats(cpu, part);
-		part_inc_in_flight(part);
+		part_inc_in_flight(part, rw);
 	}
 
 	part_stat_unlock();
@@ -1030,9 +1030,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 	if (now == part->stamp)
 		return;
 
-	if (part->in_flight) {
+	if (part_in_flight(part)) {
 		__part_stat_add(cpu, part, time_in_queue,
-				part->in_flight * (now - part->stamp));
+				part_in_flight(part) * (now - part->stamp));
 		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
 	}
 	part->stamp = now;
@@ -1739,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
 		part_round_stats(cpu, part);
-		part_dec_in_flight(part);
+		part_dec_in_flight(part, rw);
 
 		part_stat_unlock();
 	}
@@ -2492,14 +2492,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-				  struct delayed_work *work,
-				  unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, work, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b0de8574fdc8..99cb5cf1f447 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
 		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_round_stats(cpu, part);
-		part_dec_in_flight(part);
+		part_dec_in_flight(part, rq_data_dir(req));
 
 		part_stat_unlock();
 	}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index e0695bca7027..66d4aa8799b7 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -242,7 +242,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 /**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q:  the request queue for the device
- * @max_discard: maximum number of sectors to discard
+ * @max_discard_sectors: maximum number of sectors to discard
  **/
 void blk_queue_max_discard_sectors(struct request_queue *q,
 				   unsigned int max_discard_sectors)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 2e5cfeb59333..6b0f52c20964 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -359,7 +359,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 			max_depth -= 2;
 		if (!max_depth)
 			max_depth = 1;
-		if (q->in_flight[0] > max_depth)
+		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
 			return 1;
 	}
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9c4b679908f4..069a61017c02 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -150,7 +150,7 @@ struct cfq_data {
 	 * idle window management
 	 */
 	struct timer_list idle_slice_timer;
-	struct delayed_work unplug_work;
+	struct work_struct unplug_work;
 
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
@@ -230,7 +230,7 @@ CFQ_CFQQ_FNS(coop);
 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
 				       struct io_context *, gfp_t);
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
 					     struct io_context *);
@@ -241,40 +241,35 @@ static inline int rq_in_driver(struct cfq_data *cfqd)
 }
 
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
-					    int is_sync)
+					    bool is_sync)
 {
-	return cic->cfqq[!!is_sync];
+	return cic->cfqq[is_sync];
 }
 
 static inline void cic_set_cfqq(struct cfq_io_context *cic,
-				struct cfq_queue *cfqq, int is_sync)
+				struct cfq_queue *cfqq, bool is_sync)
 {
-	cic->cfqq[!!is_sync] = cfqq;
+	cic->cfqq[is_sync] = cfqq;
 }
 
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
  */
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
 {
-	if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
-		return 1;
-
-	return 0;
+	return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
 }
 
 /*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
-					 unsigned long delay)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
 	if (cfqd->busy_queues) {
 		cfq_log(cfqd, "schedule dispatch");
-		kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
-						delay);
+		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 	}
 }
 
@@ -290,7 +285,7 @@ static int cfq_queue_empty(struct request_queue *q)
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * io only, should not get full sync slice length.
  */
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 				 unsigned short prio)
 {
 	const int base_slice = cfqd->cfq_slice[sync];
@@ -318,7 +313,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  * isn't valid until the first request from the dispatch is activated
  * and the slice time set.
  */
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
 	if (cfq_cfqq_slice_new(cfqq))
 		return 0;
@@ -493,7 +488,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * we will service the queues.
  */
 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-				 int add_front)
+				 bool add_front)
 {
 	struct rb_node **p, *parent;
 	struct cfq_queue *__cfqq;
@@ -509,11 +504,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		} else
 			rb_key += jiffies;
 	} else if (!add_front) {
+		/*
+		 * Get our rb key offset. Subtract any residual slice
+		 * value carried from last service. A negative resid
+		 * count indicates slice overrun, and this should position
+		 * the next service time further away in the tree.
+		 */
 		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
-		rb_key += cfqq->slice_resid;
+		rb_key -= cfqq->slice_resid;
 		cfqq->slice_resid = 0;
-	} else
-		rb_key = 0;
+	} else {
+		rb_key = -HZ;
+		__cfqq = cfq_rb_first(&cfqd->service_tree);
+		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+	}
 
 	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
 		/*
@@ -547,7 +551,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			n = &(*p)->rb_left;
 		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
 			n = &(*p)->rb_right;
-		else if (rb_key < __cfqq->rb_key)
+		else if (time_before(rb_key, __cfqq->rb_key))
 			n = &(*p)->rb_left;
 		else
 			n = &(*p)->rb_right;
@@ -827,8 +831,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	 * reposition in fifo if next is older than rq
 	 */
 	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-	    time_before(next->start_time, rq->start_time))
+	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
 		list_move(&rq->queuelist, &next->queuelist);
+		rq_set_fifo_time(rq, rq_fifo_time(next));
+	}
 
 	cfq_remove_request(next);
 }
@@ -844,7 +850,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 	 * Disallow merge of a sync bio into an async request.
 	 */
 	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
-		return 0;
+		return false;
 
 	/*
 	 * Lookup the cfqq that this bio will be queued with. Allow
@@ -852,13 +858,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 	 */
 	cic = cfq_cic_lookup(cfqd, current->io_context);
 	if (!cic)
-		return 0;
+		return false;
 
 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-	if (cfqq == RQ_CFQQ(rq))
-		return 1;
-
-	return 0;
+	return cfqq == RQ_CFQQ(rq);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -886,7 +889,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
  */
 static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		    int timed_out)
+		    bool timed_out)
 {
 	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 
@@ -914,7 +917,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 }
 
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 
@@ -1026,7 +1029,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
  */
 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 					      struct cfq_queue *cur_cfqq,
-					      int probe)
+					      bool probe)
 {
 	struct cfq_queue *cfqq;
 
@@ -1090,6 +1093,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
 		return;
 
+	/*
+	 * If our average think time is larger than the remaining time
+	 * slice, then don't idle. This avoids overrunning the allotted
+	 * time slice.
+	 */
+	if (sample_valid(cic->ttime_samples) &&
+	    (cfqq->slice_end - jiffies < cic->ttime_mean))
+		return;
+
 	cfq_mark_cfqq_wait_request(cfqq);
 
 	/*
@@ -1129,9 +1141,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
  */
 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 {
-	struct cfq_data *cfqd = cfqq->cfqd;
-	struct request *rq;
-	int fifo;
+	struct request *rq = NULL;
 
 	if (cfq_cfqq_fifo_expire(cfqq))
 		return NULL;
@@ -1141,13 +1151,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 	if (list_empty(&cfqq->fifo))
 		return NULL;
 
-	fifo = cfq_cfqq_sync(cfqq);
 	rq = rq_entry_fifo(cfqq->fifo.next);
-
-	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+	if (time_before(jiffies, rq_fifo_time(rq)))
 		rq = NULL;
 
-	cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
+	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
 	return rq;
 }
 
@@ -1248,67 +1256,21 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	return dispatched;
 }
 
-/*
- * Dispatch a request from cfqq, moving them to the request queue
- * dispatch list.
- */
-static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	struct request *rq;
-
-	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-	/*
-	 * follow expired path, else get first next available
-	 */
-	rq = cfq_check_fifo(cfqq);
-	if (!rq)
-		rq = cfqq->next_rq;
-
-	/*
-	 * insert request into driver dispatch list
-	 */
-	cfq_dispatch_insert(cfqd->queue, rq);
-
-	if (!cfqd->active_cic) {
-		struct cfq_io_context *cic = RQ_CIC(rq);
-
-		atomic_long_inc(&cic->ioc->refcount);
-		cfqd->active_cic = cic;
-	}
-}
-
-/*
- * Find the cfqq that we need to service and move a request from that to the
- * dispatch list
- */
-static int cfq_dispatch_requests(struct request_queue *q, int force)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_queue *cfqq;
 	unsigned int max_dispatch;
 
-	if (!cfqd->busy_queues)
-		return 0;
-
-	if (unlikely(force))
-		return cfq_forced_dispatch(cfqd);
-
-	cfqq = cfq_select_queue(cfqd);
-	if (!cfqq)
-		return 0;
-
 	/*
 	 * Drain async requests before we start sync IO
 	 */
 	if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
-		return 0;
+		return false;
 
 	/*
 	 * If this is an async queue and we have sync IO in flight, let it wait
 	 */
 	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-		return 0;
+		return false;
 
 	max_dispatch = cfqd->cfq_quantum;
 	if (cfq_class_idle(cfqq))
@@ -1322,13 +1284,13 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 	 * idle queue must always only have a single IO in flight
 	 */
 	if (cfq_class_idle(cfqq))
-		return 0;
+		return false;
 
 	/*
 	 * We have other queues, don't allow more IO from this one
 	 */
 	if (cfqd->busy_queues > 1)
-		return 0;
+		return false;
 
 	/*
 	 * Sole queue user, allow bigger slice
@@ -1352,13 +1314,72 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 		max_dispatch = depth;
 	}
 
-	if (cfqq->dispatched >= max_dispatch)
+	/*
+	 * If we're below the current max, allow a dispatch
+	 */
+	return cfqq->dispatched < max_dispatch;
+}
+
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct request *rq;
+
+	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+	if (!cfq_may_dispatch(cfqd, cfqq))
+		return false;
+
+	/*
+	 * follow expired path, else get first next available
+	 */
+	rq = cfq_check_fifo(cfqq);
+	if (!rq)
+		rq = cfqq->next_rq;
+
+	/*
+	 * insert request into driver dispatch list
+	 */
+	cfq_dispatch_insert(cfqd->queue, rq);
+
+	if (!cfqd->active_cic) {
+		struct cfq_io_context *cic = RQ_CIC(rq);
+
+		atomic_long_inc(&cic->ioc->refcount);
+		cfqd->active_cic = cic;
+	}
+
+	return true;
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
+static int cfq_dispatch_requests(struct request_queue *q, int force)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_queue *cfqq;
+
+	if (!cfqd->busy_queues)
+		return 0;
+
+	if (unlikely(force))
+		return cfq_forced_dispatch(cfqd);
+
+	cfqq = cfq_select_queue(cfqd);
+	if (!cfqq)
 		return 0;
 
 	/*
-	 * Dispatch a request from this cfqq
+	 * Dispatch a request from this cfqq, if it is allowed
 	 */
-	cfq_dispatch_request(cfqd, cfqq);
+	if (!cfq_dispatch_request(cfqd, cfqq))
+		return 0;
+
 	cfqq->slice_dispatch++;
 	cfq_clear_cfqq_must_dispatch(cfqq);
 
@@ -1399,7 +1420,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
 	if (unlikely(cfqd->active_queue == cfqq)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd, 0);
+		cfq_schedule_dispatch(cfqd);
 	}
 
 	kmem_cache_free(cfq_pool, cfqq);
@@ -1494,7 +1515,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	if (unlikely(cfqq == cfqd->active_queue)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd, 0);
+		cfq_schedule_dispatch(cfqd);
 	}
 
 	cfq_put_queue(cfqq);
@@ -1658,7 +1679,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-			  pid_t pid, int is_sync)
+			  pid_t pid, bool is_sync)
 {
 	RB_CLEAR_NODE(&cfqq->rb_node);
 	RB_CLEAR_NODE(&cfqq->p_node);
@@ -1678,7 +1699,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 		     struct io_context *ioc, gfp_t gfp_mask)
 {
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1742,7 +1763,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 	      gfp_t gfp_mask)
 {
 	const int ioprio = task_ioprio(ioc);
@@ -1977,7 +1998,10 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	    (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
-		if (cic->ttime_mean > cfqd->cfq_slice_idle)
+		unsigned int slice_idle = cfqd->cfq_slice_idle;
+		if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+			slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
+		if (cic->ttime_mean > slice_idle)
 			enable_idle = 0;
 		else
 			enable_idle = 1;
@@ -1996,7 +2020,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  * Check if new_cfqq should preempt the currently active queue. Return 0 for
  * no or if we aren't sure, a 1 will cause a preempt.
  */
-static int
+static bool
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		   struct request *rq)
 {
@@ -2004,48 +2028,48 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 
 	cfqq = cfqd->active_queue;
 	if (!cfqq)
-		return 0;
+		return false;
 
 	if (cfq_slice_used(cfqq))
-		return 1;
+		return true;
 
 	if (cfq_class_idle(new_cfqq))
-		return 0;
+		return false;
 
 	if (cfq_class_idle(cfqq))
-		return 1;
+		return true;
 
 	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 */
 	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
-		return 1;
+		return true;
 
 	/*
 	 * So both queues are sync. Let the new request get disk time if
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
-		return 1;
+		return false;
 
 	/*
 	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
 	 */
 	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
-		return 1;
+		return true;
 
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
-		return 0;
+		return false;
 
 	/*
 	 * if this request is as-good as one we would expect from the
 	 * current cfqq, let it preempt
 	 */
 	if (cfq_rq_close(cfqd, rq))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 /*
@@ -2130,6 +2154,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 
 	cfq_add_rq_rb(rq);
 
+	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 
 	cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -2211,7 +2236,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	}
 
 	if (!rq_in_driver(cfqd))
-		cfq_schedule_dispatch(cfqd, 0);
+		cfq_schedule_dispatch(cfqd);
 }
 
 /*
@@ -2309,7 +2334,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	const int is_sync = rq_is_sync(rq);
+	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
 
@@ -2341,7 +2366,7 @@ queue_fail:
 	if (cic)
 		put_io_context(cic->ioc);
 
-	cfq_schedule_dispatch(cfqd, 0);
+	cfq_schedule_dispatch(cfqd);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	cfq_log(cfqd, "set_request fail");
 	return 1;
@@ -2350,7 +2375,7 @@ queue_fail:
 static void cfq_kick_queue(struct work_struct *work)
 {
 	struct cfq_data *cfqd =
-		container_of(work, struct cfq_data, unplug_work.work);
+		container_of(work, struct cfq_data, unplug_work);
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
@@ -2404,7 +2429,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 expire:
 	cfq_slice_expired(cfqd, timed_out);
 out_kick:
-	cfq_schedule_dispatch(cfqd, 0);
+	cfq_schedule_dispatch(cfqd);
 out_cont:
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
@@ -2412,7 +2437,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
 	del_timer_sync(&cfqd->idle_slice_timer);
-	cancel_delayed_work_sync(&cfqd->unplug_work);
+	cancel_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2494,7 +2519,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-	INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
+	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
diff --git a/block/elevator.c b/block/elevator.c
index bb30f0e92d4d..9ad5ccc4c5ee 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1053,9 +1053,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 		return count;
 
 	strlcpy(elevator_name, name, sizeof(elevator_name));
-	strstrip(elevator_name);
-
-	e = elevator_get(elevator_name);
+	e = elevator_get(strstrip(elevator_name));
 	if (!e) {
 		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
 		return -EINVAL;
diff --git a/block/genhd.c b/block/genhd.c
index 5a0861da324d..517e4332cb37 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -869,6 +869,7 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
 	__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -888,6 +889,7 @@ static struct attribute *disk_attrs[] = {
 	&dev_attr_alignment_offset.attr,
 	&dev_attr_capability.attr,
 	&dev_attr_stat.attr,
+	&dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 	&dev_attr_fail.attr,
 #endif
@@ -1053,7 +1055,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
 		   part_stat_read(hd, merges[1]),
 		   (unsigned long long)part_stat_read(hd, sectors[1]),
 		   jiffies_to_msecs(part_stat_read(hd, ticks[1])),
-		   hd->in_flight,
+		   part_in_flight(hd),
 		   jiffies_to_msecs(part_stat_read(hd, io_ticks)),
 		   jiffies_to_msecs(part_stat_read(hd, time_in_queue))
 		   );