author     David S. Miller <davem@davemloft.net>    2009-11-19 01:19:03 -0500
committer  David S. Miller <davem@davemloft.net>    2009-11-19 01:19:03 -0500
commit     3505d1a9fd65e2d3e00827857b6795d9d8983658 (patch)
tree       941cfafdb57c427bb6b7ebf6354ee93b2a3693b5 /block
parent     dfef948ed2ba69cf041840b5e860d6b4e16fa0b1 (diff)
parent     66b00a7c93ec782d118d2c03bd599cfd041e80a1 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
drivers/net/sfc/sfe4001.c
drivers/net/wireless/libertas/cmd.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/rtl8187se/Kconfig
drivers/staging/rtl8192e/Kconfig
Diffstat (limited to 'block')
-rw-r--r--   block/blk-core.c     |  18
-rw-r--r--   block/blk-merge.c    |   2
-rw-r--r--   block/blk-settings.c |   2
-rw-r--r--   block/blk-tag.c      |   2
-rw-r--r--   block/cfq-iosched.c  | 276
-rw-r--r--   block/elevator.c     |   4
-rw-r--r--   block/genhd.c        |   4
7 files changed, 169 insertions(+), 139 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 81f34311659a..71da5111120c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -70,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		part_stat_inc(cpu, part, merges[rw]);
 	else {
 		part_round_stats(cpu, part);
-		part_inc_in_flight(part);
+		part_inc_in_flight(part, rw);
 	}
 
 	part_stat_unlock();
@@ -1030,9 +1030,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 	if (now == part->stamp)
 		return;
 
-	if (part->in_flight) {
+	if (part_in_flight(part)) {
 		__part_stat_add(cpu, part, time_in_queue,
-				part->in_flight * (now - part->stamp));
+				part_in_flight(part) * (now - part->stamp));
 		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
 	}
 	part->stamp = now;
@@ -1161,7 +1161,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
-	if (bio_rw_flagged(bio, BIO_RW_BARRIER) && bio_has_data(bio) &&
+	if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
 	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
@@ -1739,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
 		part_round_stats(cpu, part);
-		part_dec_in_flight(part);
+		part_dec_in_flight(part, rw);
 
 		part_stat_unlock();
 	}
@@ -2492,14 +2492,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-				  struct delayed_work *work,
-				  unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, work, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
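Aside: the blk-core.c hunks above move the partition accounting from a single in-flight counter to per-direction counters, so part_inc_in_flight()/part_dec_in_flight() now take the request direction and readers go through part_in_flight(). A minimal sketch of the assumed shape of such helpers (illustrative only, not the in-tree genhd.h definitions; the part_stat_lock() protection used at the call sites is elided):

/* Sketch: per-direction in-flight accounting, as assumed from the calls above. */
struct inflight_sketch {
	unsigned int in_flight[2];	/* [0] = read, [1] = write */
};

static inline void sketch_inc_in_flight(struct inflight_sketch *part, int rw)
{
	part->in_flight[rw]++;		/* rw comes from the request/bio data direction */
}

static inline void sketch_dec_in_flight(struct inflight_sketch *part, int rw)
{
	part->in_flight[rw]--;
}

static inline unsigned int sketch_in_flight(struct inflight_sketch *part)
{
	/* Aggregate view used by part_round_stats_single() and diskstats_show(). */
	return part->in_flight[0] + part->in_flight[1];
}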
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b0de8574fdc8..99cb5cf1f447 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
 		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_round_stats(cpu, part);
-		part_dec_in_flight(part);
+		part_dec_in_flight(part, rq_data_dir(req));
 
 		part_stat_unlock();
 	}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index e0695bca7027..66d4aa8799b7 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -242,7 +242,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 /**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q: the request queue for the device
- * @max_discard: maximum number of sectors to discard
+ * @max_discard_sectors: maximum number of sectors to discard
  **/
 void blk_queue_max_discard_sectors(struct request_queue *q,
 				   unsigned int max_discard_sectors)
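Aside: the blk-settings.c hunk is a kernel-doc fix only; the @parameter tag has to name the real C parameter (max_discard_sectors), otherwise the documentation build typically warns and the description is dropped. A hypothetical illustration of the convention (not a function from the tree):

/**
 * sketch_set_limit - set an example queue limit (hypothetical)
 * @q:           the request queue for the device
 * @max_sectors: the tag must match the parameter name in the prototype
 **/
void sketch_set_limit(struct request_queue *q, unsigned int max_sectors);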
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 2e5cfeb59333..6b0f52c20964 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -359,7 +359,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 			max_depth -= 2;
 			if (!max_depth)
 				max_depth = 1;
-			if (q->in_flight[0] > max_depth)
+			if (q->in_flight[BLK_RW_ASYNC] > max_depth)
 				return 1;
 		}
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9c4b679908f4..aa1e9535e358 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -150,7 +150,7 @@ struct cfq_data {
 	 * idle window management
 	 */
 	struct timer_list idle_slice_timer;
-	struct delayed_work unplug_work;
+	struct work_struct unplug_work;
 
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
@@ -196,6 +196,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* has done a coop jump of the queue */
+	CFQ_CFQQ_FLAG_coop_preempt,	/* coop preempt */
 };
 
 #define CFQ_CFQQ_FNS(name)	\
@@ -222,6 +223,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(coop_preempt);
 #undef CFQ_CFQQ_FNS
 
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
@@ -230,7 +232,7 @@ CFQ_CFQQ_FNS(coop);
 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
 				       struct io_context *, gfp_t);
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
 					      struct io_context *);
@@ -241,40 +243,35 @@ static inline int rq_in_driver(struct cfq_data *cfqd)
 }
 
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
-					    int is_sync)
+					    bool is_sync)
 {
-	return cic->cfqq[!!is_sync];
+	return cic->cfqq[is_sync];
 }
 
 static inline void cic_set_cfqq(struct cfq_io_context *cic,
-				struct cfq_queue *cfqq, int is_sync)
+				struct cfq_queue *cfqq, bool is_sync)
 {
-	cic->cfqq[!!is_sync] = cfqq;
+	cic->cfqq[is_sync] = cfqq;
 }
 
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
  */
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
 {
-	if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
-		return 1;
-
-	return 0;
+	return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
 }
 
 /*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
-					 unsigned long delay)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
 	if (cfqd->busy_queues) {
 		cfq_log(cfqd, "schedule dispatch");
-		kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
-						delay);
+		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 	}
 }
 
@@ -290,7 +287,7 @@ static int cfq_queue_empty(struct request_queue *q)
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * io only, should not get full sync slice length.
  */
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 				 unsigned short prio)
 {
 	const int base_slice = cfqd->cfq_slice[sync];
@@ -318,7 +315,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  * isn't valid until the first request from the dispatch is activated
  * and the slice time set.
  */
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
 	if (cfq_cfqq_slice_new(cfqq))
 		return 0;
@@ -493,7 +490,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * we will service the queues.
  */
 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-				 int add_front)
+				 bool add_front)
 {
 	struct rb_node **p, *parent;
 	struct cfq_queue *__cfqq;
@@ -509,11 +506,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		} else
 			rb_key += jiffies;
 	} else if (!add_front) {
+		/*
+		 * Get our rb key offset. Subtract any residual slice
+		 * value carried from last service. A negative resid
+		 * count indicates slice overrun, and this should position
+		 * the next service time further away in the tree.
+		 */
 		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
-		rb_key += cfqq->slice_resid;
+		rb_key -= cfqq->slice_resid;
 		cfqq->slice_resid = 0;
-	} else
-		rb_key = 0;
+	} else {
+		rb_key = -HZ;
+		__cfqq = cfq_rb_first(&cfqd->service_tree);
+		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+	}
 
 	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
 		/*
@@ -547,7 +553,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			n = &(*p)->rb_left;
 		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
 			n = &(*p)->rb_right;
-		else if (rb_key < __cfqq->rb_key)
+		else if (time_before(rb_key, __cfqq->rb_key))
 			n = &(*p)->rb_left;
 		else
 			n = &(*p)->rb_right;
@@ -827,8 +833,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	 * reposition in fifo if next is older than rq
 	 */
 	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-	    time_before(next->start_time, rq->start_time))
+	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
 		list_move(&rq->queuelist, &next->queuelist);
+		rq_set_fifo_time(rq, rq_fifo_time(next));
+	}
 
 	cfq_remove_request(next);
 }
@@ -844,7 +852,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 	 * Disallow merge of a sync bio into an async request.
 	 */
 	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
-		return 0;
+		return false;
 
 	/*
 	 * Lookup the cfqq that this bio will be queued with. Allow
@@ -852,13 +860,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 	 */
 	cic = cfq_cic_lookup(cfqd, current->io_context);
 	if (!cic)
-		return 0;
+		return false;
 
 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-	if (cfqq == RQ_CFQQ(rq))
-		return 1;
-
-	return 0;
+	return cfqq == RQ_CFQQ(rq);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -886,7 +891,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
  */
 static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		    int timed_out)
+		    bool timed_out)
 {
 	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 
@@ -914,7 +919,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 }
 
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 
@@ -942,10 +947,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
 {
 	if (!cfqq) {
 		cfqq = cfq_get_next_queue(cfqd);
-		if (cfqq)
+		if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
 			cfq_clear_cfqq_coop(cfqq);
 	}
 
+	if (cfqq)
+		cfq_clear_cfqq_coop_preempt(cfqq);
+
 	__cfq_set_active_queue(cfqd, cfqq);
 	return cfqq;
 }
@@ -1026,7 +1034,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
  */
 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 					      struct cfq_queue *cur_cfqq,
-					      int probe)
+					      bool probe)
 {
 	struct cfq_queue *cfqq;
 
@@ -1090,6 +1098,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
 		return;
 
+	/*
+	 * If our average think time is larger than the remaining time
+	 * slice, then don't idle. This avoids overrunning the allotted
+	 * time slice.
+	 */
+	if (sample_valid(cic->ttime_samples) &&
+	    (cfqq->slice_end - jiffies < cic->ttime_mean))
+		return;
+
 	cfq_mark_cfqq_wait_request(cfqq);
 
 	/*
@@ -1129,9 +1146,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
  */
 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 {
-	struct cfq_data *cfqd = cfqq->cfqd;
-	struct request *rq;
-	int fifo;
+	struct request *rq = NULL;
 
 	if (cfq_cfqq_fifo_expire(cfqq))
 		return NULL;
@@ -1141,13 +1156,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 	if (list_empty(&cfqq->fifo))
 		return NULL;
 
-	fifo = cfq_cfqq_sync(cfqq);
 	rq = rq_entry_fifo(cfqq->fifo.next);
-
-	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+	if (time_before(jiffies, rq_fifo_time(rq)))
 		rq = NULL;
 
-	cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
+	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
 	return rq;
 }
 
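Aside: cfq_check_fifo() above now compares jiffies against a per-request expiry stamp instead of recomputing start_time + cfq_fifo_expire[] on every check; the stamp is written in the cfq_insert_request() hunk further down via rq_set_fifo_time(). A sketch of the assumed mechanism, using a hypothetical storage field rather than the real rq_fifo_time()/rq_set_fifo_time() definitions:

/* Sketch only: the expiry-stamp idea assumed behind rq_set_fifo_time()/rq_fifo_time(). */
struct fifo_sketch {
	unsigned long fifo_time;	/* absolute jiffies at which the request expires */
};

static inline void sketch_set_fifo_time(struct fifo_sketch *rq, unsigned long expires)
{
	rq->fifo_time = expires;	/* stamped once at insert: jiffies + cfq_fifo_expire[sync] */
}

static inline bool sketch_fifo_expired(const struct fifo_sketch *rq)
{
	/* The dispatch path then only needs a single time_before() comparison. */
	return !time_before(jiffies, rq->fifo_time);
}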
@@ -1248,67 +1261,21 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	return dispatched;
 }
 
-/*
- * Dispatch a request from cfqq, moving them to the request queue
- * dispatch list.
- */
-static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-	struct request *rq;
-
-	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-	/*
-	 * follow expired path, else get first next available
-	 */
-	rq = cfq_check_fifo(cfqq);
-	if (!rq)
-		rq = cfqq->next_rq;
-
-	/*
-	 * insert request into driver dispatch list
-	 */
-	cfq_dispatch_insert(cfqd->queue, rq);
-
-	if (!cfqd->active_cic) {
-		struct cfq_io_context *cic = RQ_CIC(rq);
-
-		atomic_long_inc(&cic->ioc->refcount);
-		cfqd->active_cic = cic;
-	}
-}
-
-/*
- * Find the cfqq that we need to service and move a request from that to the
- * dispatch list
- */
-static int cfq_dispatch_requests(struct request_queue *q, int force)
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_queue *cfqq;
 	unsigned int max_dispatch;
 
-	if (!cfqd->busy_queues)
-		return 0;
-
-	if (unlikely(force))
-		return cfq_forced_dispatch(cfqd);
-
-	cfqq = cfq_select_queue(cfqd);
-	if (!cfqq)
-		return 0;
-
 	/*
 	 * Drain async requests before we start sync IO
 	 */
 	if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
-		return 0;
+		return false;
 
 	/*
 	 * If this is an async queue and we have sync IO in flight, let it wait
 	 */
 	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-		return 0;
+		return false;
 
 	max_dispatch = cfqd->cfq_quantum;
 	if (cfq_class_idle(cfqq))
@@ -1322,13 +1289,13 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 	 * idle queue must always only have a single IO in flight
 	 */
 	if (cfq_class_idle(cfqq))
-		return 0;
+		return false;
 
 	/*
 	 * We have other queues, don't allow more IO from this one
 	 */
 	if (cfqd->busy_queues > 1)
-		return 0;
+		return false;
 
 	/*
 	 * Sole queue user, allow bigger slice
@@ -1352,13 +1319,72 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 		max_dispatch = depth;
 	}
 
-	if (cfqq->dispatched >= max_dispatch)
+	/*
+	 * If we're below the current max, allow a dispatch
+	 */
+	return cfqq->dispatched < max_dispatch;
+}
+
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct request *rq;
+
+	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+	if (!cfq_may_dispatch(cfqd, cfqq))
+		return false;
+
+	/*
+	 * follow expired path, else get first next available
+	 */
+	rq = cfq_check_fifo(cfqq);
+	if (!rq)
+		rq = cfqq->next_rq;
+
+	/*
+	 * insert request into driver dispatch list
+	 */
+	cfq_dispatch_insert(cfqd->queue, rq);
+
+	if (!cfqd->active_cic) {
+		struct cfq_io_context *cic = RQ_CIC(rq);
+
+		atomic_long_inc(&cic->ioc->refcount);
+		cfqd->active_cic = cic;
+	}
+
+	return true;
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
+static int cfq_dispatch_requests(struct request_queue *q, int force)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_queue *cfqq;
+
+	if (!cfqd->busy_queues)
+		return 0;
+
+	if (unlikely(force))
+		return cfq_forced_dispatch(cfqd);
+
+	cfqq = cfq_select_queue(cfqd);
+	if (!cfqq)
 		return 0;
 
 	/*
-	 * Dispatch a request from this cfqq
+	 * Dispatch a request from this cfqq, if it is allowed
 	 */
-	cfq_dispatch_request(cfqd, cfqq);
+	if (!cfq_dispatch_request(cfqd, cfqq))
+		return 0;
+
 	cfqq->slice_dispatch++;
 	cfq_clear_cfqq_must_dispatch(cfqq);
 
@@ -1399,7 +1425,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
 	if (unlikely(cfqd->active_queue == cfqq)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd, 0);
+		cfq_schedule_dispatch(cfqd);
 	}
 
 	kmem_cache_free(cfq_pool, cfqq);
@@ -1494,7 +1520,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	if (unlikely(cfqq == cfqd->active_queue)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd, 0);
+		cfq_schedule_dispatch(cfqd);
 	}
 
 	cfq_put_queue(cfqq);
@@ -1658,7 +1684,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-			  pid_t pid, int is_sync)
+			  pid_t pid, bool is_sync)
 {
 	RB_CLEAR_NODE(&cfqq->rb_node);
 	RB_CLEAR_NODE(&cfqq->p_node);
@@ -1678,7 +1704,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 		     struct io_context *ioc, gfp_t gfp_mask)
 {
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1742,7 +1768,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 	      gfp_t gfp_mask)
 {
 	const int ioprio = task_ioprio(ioc);
@@ -1977,7 +2003,10 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	    (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
-		if (cic->ttime_mean > cfqd->cfq_slice_idle)
+		unsigned int slice_idle = cfqd->cfq_slice_idle;
+		if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+			slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
+		if (cic->ttime_mean > slice_idle)
 			enable_idle = 0;
 		else
 			enable_idle = 1;
@@ -1996,7 +2025,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  * Check if new_cfqq should preempt the currently active queue. Return 0 for
  * no or if we aren't sure, a 1 will cause a preempt.
  */
-static int
+static bool
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		   struct request *rq)
 {
@@ -2004,48 +2033,56 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 
 	cfqq = cfqd->active_queue;
 	if (!cfqq)
-		return 0;
+		return false;
 
 	if (cfq_slice_used(cfqq))
-		return 1;
+		return true;
 
 	if (cfq_class_idle(new_cfqq))
-		return 0;
+		return false;
 
 	if (cfq_class_idle(cfqq))
-		return 1;
+		return true;
 
 	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 */
 	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
-		return 1;
+		return true;
 
 	/*
 	 * So both queues are sync. Let the new request get disk time if
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
-		return 1;
+		return true;
 
 	/*
 	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
 	 */
 	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
-		return 1;
+		return true;
 
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
-		return 0;
+		return false;
 
 	/*
 	 * if this request is as-good as one we would expect from the
 	 * current cfqq, let it preempt
 	 */
-	if (cfq_rq_close(cfqd, rq))
-		return 1;
+	if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+	    cfqd->busy_queues == 1)) {
+		/*
+		 * Mark new queue coop_preempt, so its coop flag will not be
+		 * cleared when new queue gets scheduled at the very first time
+		 */
+		cfq_mark_cfqq_coop_preempt(new_cfqq);
+		cfq_mark_cfqq_coop(new_cfqq);
+		return true;
+	}
 
-	return 0;
+	return false;
 }
 
 /*
@@ -2130,6 +2167,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 
 	cfq_add_rq_rb(rq);
 
+	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 
 	cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -2211,7 +2249,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	}
 
 	if (!rq_in_driver(cfqd))
-		cfq_schedule_dispatch(cfqd, 0);
+		cfq_schedule_dispatch(cfqd);
 }
 
 /*
@@ -2309,7 +2347,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	const int is_sync = rq_is_sync(rq);
+	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
 
@@ -2341,7 +2379,7 @@ queue_fail:
 	if (cic)
 		put_io_context(cic->ioc);
 
-	cfq_schedule_dispatch(cfqd, 0);
+	cfq_schedule_dispatch(cfqd);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	cfq_log(cfqd, "set_request fail");
 	return 1;
@@ -2350,7 +2388,7 @@ queue_fail:
 static void cfq_kick_queue(struct work_struct *work)
 {
 	struct cfq_data *cfqd =
-		container_of(work, struct cfq_data, unplug_work.work);
+		container_of(work, struct cfq_data, unplug_work);
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
@@ -2404,7 +2442,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 expire:
 	cfq_slice_expired(cfqd, timed_out);
 out_kick:
-	cfq_schedule_dispatch(cfqd, 0);
+	cfq_schedule_dispatch(cfqd);
 out_cont:
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
@@ -2412,7 +2450,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
 	del_timer_sync(&cfqd->idle_slice_timer);
-	cancel_delayed_work_sync(&cfqd->unplug_work);
+	cancel_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2494,7 +2532,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-	INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
+	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
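Aside: with cfq_schedule_dispatch() losing its delay argument, CFQ's unplug_work becomes a plain work_struct (INIT_WORK/cancel_work_sync above), and the now-unused kblockd_schedule_delayed_work() is dropped from blk-core.c earlier in this diff. The surviving kblockd_schedule_work() presumably stays a thin wrapper around the kblockd workqueue; a sketch under that assumption (illustrative only):

/* Sketch: assumed shape of the remaining kblockd helper. */
static struct workqueue_struct *sketch_kblockd_wq;

int sketch_kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
	/* q is unused here; the parameter is kept so existing call sites read the same. */
	return queue_work(sketch_kblockd_wq, work);
}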
diff --git a/block/elevator.c b/block/elevator.c
index 1975b619c86d..a847046c6e53 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1059,9 +1059,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 		return count;
 
 	strlcpy(elevator_name, name, sizeof(elevator_name));
-	strstrip(elevator_name);
-
-	e = elevator_get(elevator_name);
+	e = elevator_get(strstrip(elevator_name));
 	if (!e) {
 		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
 		return -EINVAL;
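Aside: the elevator.c change works because strstrip() trims trailing whitespace in place but skips leading whitespace only through its return value, so that return value has to be the string handed to elevator_get(). A small illustration with hypothetical buffer contents:

char buf[16] = "  cfq\n";
char *name = strstrip(buf);	/* trailing '\n' removed in place; name points at "cfq" */
/* elevator_get(name) sees "cfq"; elevator_get(buf) would still see the leading spaces. */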
diff --git a/block/genhd.c b/block/genhd.c
index 5a0861da324d..517e4332cb37 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -869,6 +869,7 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
 	__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -888,6 +889,7 @@ static struct attribute *disk_attrs[] = {
 	&dev_attr_alignment_offset.attr,
 	&dev_attr_capability.attr,
 	&dev_attr_stat.attr,
+	&dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 	&dev_attr_fail.attr,
 #endif
@@ -1053,7 +1055,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
 		   part_stat_read(hd, merges[1]),
 		   (unsigned long long)part_stat_read(hd, sectors[1]),
 		   jiffies_to_msecs(part_stat_read(hd, ticks[1])),
-		   hd->in_flight,
+		   part_in_flight(hd),
 		   jiffies_to_msecs(part_stat_read(hd, io_ticks)),
 		   jiffies_to_msecs(part_stat_read(hd, time_in_queue))
 	);
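Aside: genhd.c gains an 'inflight' sysfs attribute backed by part_inflight_show(), and diskstats_show() switches to the aggregated part_in_flight() helper. A sketch of what the show routine might look like, assuming it simply prints the split counters (the exact format and field layout are assumptions, not taken from this diff):

/* Sketch only: an assumed implementation of the new per-partition 'inflight' file. */
static ssize_t sketch_inflight_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hd_struct *p = dev_to_part(dev);

	/* Presumably one column per direction, mirroring the split in-flight counters. */
	return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]);
}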