Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c  270
1 file changed, 225 insertions(+), 45 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a4809de6fea6..0d3b70de3d80 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -56,9 +56,6 @@ static DEFINE_SPINLOCK(ioc_gone_lock);
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
 
-#define ASYNC			(0)
-#define SYNC			(1)
-
 #define sample_valid(samples)	((samples) > 80)
 
 /*
@@ -83,6 +80,14 @@ struct cfq_data {
 	 * rr list of queues with requests and the count of them
 	 */
 	struct cfq_rb_root service_tree;
+
+	/*
+	 * Each priority tree is sorted by next_request position.  These
+	 * trees are used when determining if two or more queues are
+	 * interleaving requests (see cfq_close_cooperator).
+	 */
+	struct rb_root prio_trees[CFQ_PRIO_LISTS];
+
 	unsigned int busy_queues;
 	/*
 	 * Used to track any pending rt requests so we can pre-empt current
@@ -147,6 +152,8 @@ struct cfq_queue {
 	struct rb_node rb_node;
 	/* service_tree key */
 	unsigned long rb_key;
+	/* prio tree member */
+	struct rb_node p_node;
 	/* sorted list of pending requests */
 	struct rb_root sort_list;
 	/* if fifo isn't expired, next request to serve */
@@ -185,6 +192,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
+	CFQ_CFQQ_FLAG_coop,		/* has done a coop jump of the queue */
 };
 
 #define CFQ_CFQQ_FNS(name)						\
@@ -211,6 +219,7 @@ CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
+CFQ_CFQQ_FNS(coop);
 #undef CFQ_CFQQ_FNS
 
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
@@ -419,13 +428,17 @@ static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
 	return NULL;
 }
 
+static void rb_erase_init(struct rb_node *n, struct rb_root *root)
+{
+	rb_erase(n, root);
+	RB_CLEAR_NODE(n);
+}
+
 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
 {
 	if (root->left == n)
 		root->left = NULL;
-
-	rb_erase(n, &root->rb);
-	RB_CLEAR_NODE(n);
+	rb_erase_init(n, &root->rb);
 }
 
 /*
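
The new rb_erase_init() helper pairs rb_erase() with RB_CLEAR_NODE(), which points the node back at itself; that self-reference is what lets later code test tree membership with RB_EMPTY_NODE(). A minimal sketch of the pattern, using the same guard that cfq_del_cfqq_rr() gains later in this patch:

	/*
	 * Only unlink if the queue is actually on a prio tree.
	 * RB_CLEAR_NODE() inside rb_erase_init() keeps this test
	 * valid the next time around.
	 */
	if (!RB_EMPTY_NODE(&cfqq->p_node))
		rb_erase_init(&cfqq->p_node, &cfqd->prio_trees[cfqq->ioprio]);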
@@ -470,8 +483,8 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * requests waiting to be processed. It is sorted in the order that
  * we will service the queues.
  */
-static void cfq_service_tree_add(struct cfq_data *cfqd,
-				 struct cfq_queue *cfqq, int add_front)
+static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+				 int add_front)
 {
 	struct rb_node **p, *parent;
 	struct cfq_queue *__cfqq;
@@ -544,6 +557,63 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
 	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
 }
 
+static struct cfq_queue *
+cfq_prio_tree_lookup(struct cfq_data *cfqd, int ioprio, sector_t sector,
+		     struct rb_node **ret_parent, struct rb_node ***rb_link)
+{
+	struct rb_root *root = &cfqd->prio_trees[ioprio];
+	struct rb_node **p, *parent;
+	struct cfq_queue *cfqq = NULL;
+
+	parent = NULL;
+	p = &root->rb_node;
+	while (*p) {
+		struct rb_node **n;
+
+		parent = *p;
+		cfqq = rb_entry(parent, struct cfq_queue, p_node);
+
+		/*
+		 * Sort strictly based on sector. Smallest to the left,
+		 * largest to the right.
+		 */
+		if (sector > cfqq->next_rq->sector)
+			n = &(*p)->rb_right;
+		else if (sector < cfqq->next_rq->sector)
+			n = &(*p)->rb_left;
+		else
+			break;
+		p = n;
+	}
+
+	*ret_parent = parent;
+	if (rb_link)
+		*rb_link = p;
+	return NULL;
+}
+
+static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct rb_root *root = &cfqd->prio_trees[cfqq->ioprio];
+	struct rb_node **p, *parent;
+	struct cfq_queue *__cfqq;
+
+	if (!RB_EMPTY_NODE(&cfqq->p_node))
+		rb_erase_init(&cfqq->p_node, root);
+
+	if (cfq_class_idle(cfqq))
+		return;
+	if (!cfqq->next_rq)
+		return;
+
+	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->ioprio, cfqq->next_rq->sector,
+				      &parent, &p);
+	BUG_ON(__cfqq);
+
+	rb_link_node(&cfqq->p_node, parent, p);
+	rb_insert_color(&cfqq->p_node, root);
+}
+
 /*
  * Update cfqq's position in the service tree.
  */
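
Note that, as posted, cfq_prio_tree_lookup() always returns NULL, even when the descent breaks out on an exact sector match; callers get their answer through *ret_parent (the node nearest the requested sector) and *rb_link (the insertion point). A hedged sketch of the resulting caller pattern, mirroring cfqq_close() further down (nearest_queue is a hypothetical name, not part of the patch):

static struct cfq_queue *nearest_queue(struct cfq_data *cfqd, int ioprio,
				       sector_t sector)
{
	struct rb_node *parent = NULL;

	/*
	 * The return value is always NULL here; the parent of the
	 * failed descent is the queue whose next_rq sector is closest.
	 */
	cfq_prio_tree_lookup(cfqd, ioprio, sector, &parent, NULL);
	if (!parent)
		return NULL;	/* tree was empty */
	return rb_entry(parent, struct cfq_queue, p_node);
}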
@@ -552,8 +622,10 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	/*
 	 * Resorting requires the cfqq to be on the RR list already.
 	 */
-	if (cfq_cfqq_on_rr(cfqq))
+	if (cfq_cfqq_on_rr(cfqq)) {
 		cfq_service_tree_add(cfqd, cfqq, 0);
+		cfq_prio_tree_add(cfqd, cfqq);
+	}
 }
 
 /*
@@ -584,6 +656,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
 	if (!RB_EMPTY_NODE(&cfqq->rb_node))
 		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
+	if (!RB_EMPTY_NODE(&cfqq->p_node))
+		rb_erase_init(&cfqq->p_node, &cfqd->prio_trees[cfqq->ioprio]);
 
 	BUG_ON(!cfqd->busy_queues);
 	cfqd->busy_queues--;
@@ -613,7 +687,7 @@ static void cfq_add_rq_rb(struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 	struct cfq_data *cfqd = cfqq->cfqd;
-	struct request *__alias;
+	struct request *__alias, *prev;
 
 	cfqq->queued[rq_is_sync(rq)]++;
 
@@ -630,7 +704,15 @@ static void cfq_add_rq_rb(struct request *rq)
 	/*
 	 * check if this request is a better next-serve candidate
 	 */
+	prev = cfqq->next_rq;
 	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
+
+	/*
+	 * adjust priority tree position, if ->next_rq changes
+	 */
+	if (prev != cfqq->next_rq)
+		cfq_prio_tree_add(cfqd, cfqq);
+
 	BUG_ON(!cfqq->next_rq);
 }
 
@@ -843,11 +925,15 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 /*
  * Get and set a new active queue for service.
  */
-static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
+static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
+					      struct cfq_queue *cfqq)
 {
-	struct cfq_queue *cfqq;
+	if (!cfqq) {
+		cfqq = cfq_get_next_queue(cfqd);
+		if (cfqq)
+			cfq_clear_cfqq_coop(cfqq);
+	}
 
-	cfqq = cfq_get_next_queue(cfqd);
 	__cfq_set_active_queue(cfqd, cfqq);
 	return cfqq;
 }
@@ -871,17 +957,89 @@ static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
 	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
 }
 
-static int cfq_close_cooperator(struct cfq_data *cfq_data,
-				struct cfq_queue *cfqq)
+static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
+				    struct cfq_queue *cur_cfqq)
+{
+	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->ioprio];
+	struct rb_node *parent, *node;
+	struct cfq_queue *__cfqq;
+	sector_t sector = cfqd->last_position;
+
+	if (RB_EMPTY_ROOT(root))
+		return NULL;
+
+	/*
+	 * First, if we find a request starting at the end of the last
+	 * request, choose it.
+	 */
+	__cfqq = cfq_prio_tree_lookup(cfqd, cur_cfqq->ioprio,
+				      sector, &parent, NULL);
+	if (__cfqq)
+		return __cfqq;
+
+	/*
+	 * If the exact sector wasn't found, the parent of the NULL leaf
+	 * will contain the closest sector.
+	 */
+	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
+	if (cfq_rq_close(cfqd, __cfqq->next_rq))
+		return __cfqq;
+
+	if (__cfqq->next_rq->sector < sector)
+		node = rb_next(&__cfqq->p_node);
+	else
+		node = rb_prev(&__cfqq->p_node);
+	if (!node)
+		return NULL;
+
+	__cfqq = rb_entry(node, struct cfq_queue, p_node);
+	if (cfq_rq_close(cfqd, __cfqq->next_rq))
+		return __cfqq;
+
+	return NULL;
+}
+
+/*
+ * cfqd - obvious
+ * cur_cfqq - passed in so that we don't decide that the current queue is
+ *	      closely cooperating with itself.
+ *
+ * So, basically we're assuming that cur_cfqq has dispatched at least
+ * one request, and that cfqd->last_position reflects a position on the disk
+ * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
+ * assumption.
+ */
+static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
+					      struct cfq_queue *cur_cfqq,
+					      int probe)
 {
+	struct cfq_queue *cfqq;
+
+	/*
+	 * A valid cfq_io_context is necessary to compare requests against
+	 * the seek_mean of the current cfqq.
+	 */
+	if (!cfqd->active_cic)
+		return NULL;
+
 	/*
 	 * We should notice if some of the queues are cooperating, eg
 	 * working closely on the same area of the disk. In that case,
 	 * we can group them together and don't waste time idling.
 	 */
-	return 0;
+	cfqq = cfqq_close(cfqd, cur_cfqq);
+	if (!cfqq)
+		return NULL;
+
+	if (cfq_cfqq_coop(cfqq))
+		return NULL;
+
+	if (!probe)
+		cfq_mark_cfqq_coop(cfqq);
+	return cfqq;
 }
 
+
 #define CIC_SEEKY(cic)	((cic)->seek_mean > (8 * 1024))
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
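
The probe argument separates the two uses introduced later in this patch: cfq_select_queue() passes 0 and commits to the jump, marking the chosen queue with the coop flag, while the completion path passes 1 and only asks whether a close queue exists before deciding against idling. A condensed view of those call sites:

	/* cfq_select_queue(): commit to the jump and mark the queue */
	new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
	if (new_cfqq)
		goto expire;

	/*
	 * cfq_completed_request(): probe only; skip idling if a close
	 * cooperator is ready, but leave it unmarked.
	 */
	if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) &&
	    sync && !rq_noidle(rq))
		cfq_arm_slice_timer(cfqd);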
@@ -920,13 +1078,6 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
 		return;
 
-	/*
-	 * See if this prio level has a good candidate
-	 */
-	if (cfq_close_cooperator(cfqd, cfqq) &&
-	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
-		return;
-
 	cfq_mark_cfqq_wait_request(cfqq);
 
 	/*
@@ -939,7 +1090,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-	cfq_log(cfqd, "arm_idle: %lu", sl);
+	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
 }
 
 /*
@@ -1003,7 +1154,7 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  */
 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 {
-	struct cfq_queue *cfqq;
+	struct cfq_queue *cfqq, *new_cfqq = NULL;
 
 	cfqq = cfqd->active_queue;
 	if (!cfqq)
@@ -1037,6 +1188,16 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 		goto keep_queue;
 
 	/*
+	 * If another queue has a request waiting within our mean seek
+	 * distance, let it run.  The expire code will check for close
+	 * cooperators and put the close queue at the front of the service
+	 * tree.
+	 */
+	new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
+	if (new_cfqq)
+		goto expire;
+
+	/*
 	 * No requests pending. If the active queue still has requests in
 	 * flight or is idling for a new request, allow either of these
 	 * conditions to happen (or time out) before selecting a new queue.
@@ -1050,7 +1211,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 expire:
 	cfq_slice_expired(cfqd, 0);
 new_queue:
-	cfqq = cfq_set_active_queue(cfqd);
+	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
 keep_queue:
 	return cfqq;
 }
@@ -1333,14 +1494,14 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 	if (ioc->ioc_data == cic)
 		rcu_assign_pointer(ioc->ioc_data, NULL);
 
-	if (cic->cfqq[ASYNC]) {
-		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
-		cic->cfqq[ASYNC] = NULL;
+	if (cic->cfqq[BLK_RW_ASYNC]) {
+		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
+		cic->cfqq[BLK_RW_ASYNC] = NULL;
 	}
 
-	if (cic->cfqq[SYNC]) {
-		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
-		cic->cfqq[SYNC] = NULL;
+	if (cic->cfqq[BLK_RW_SYNC]) {
+		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
+		cic->cfqq[BLK_RW_SYNC] = NULL;
 	}
 }
 
@@ -1449,17 +1610,18 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
 
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
-	cfqq = cic->cfqq[ASYNC];
+	cfqq = cic->cfqq[BLK_RW_ASYNC];
 	if (cfqq) {
 		struct cfq_queue *new_cfqq;
-		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
+		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
+						GFP_ATOMIC);
 		if (new_cfqq) {
-			cic->cfqq[ASYNC] = new_cfqq;
+			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
 			cfq_put_queue(cfqq);
 		}
 	}
 
-	cfqq = cic->cfqq[SYNC];
+	cfqq = cic->cfqq[BLK_RW_SYNC];
 	if (cfqq)
 		cfq_mark_cfqq_prio_changed(cfqq);
 
@@ -1510,6 +1672,7 @@ retry:
 	}
 
 	RB_CLEAR_NODE(&cfqq->rb_node);
+	RB_CLEAR_NODE(&cfqq->p_node);
 	INIT_LIST_HEAD(&cfqq->fifo);
 
 	atomic_set(&cfqq->ref, 0);
@@ -1905,10 +2068,20 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 * Remember that we saw a request from this process, but
 	 * don't start queuing just yet. Otherwise we risk seeing lots
 	 * of tiny requests, because we disrupt the normal plugging
-	 * and merging.
+	 * and merging. If the request is already larger than a single
+	 * page, let it rip immediately. For that case we assume that
+	 * merging is already done. Ditto for a busy system that
+	 * has other work pending, don't risk delaying until the
+	 * idle timer unplug to continue working.
 	 */
-	if (cfq_cfqq_wait_request(cfqq))
+	if (cfq_cfqq_wait_request(cfqq)) {
+		if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+		    cfqd->busy_queues > 1) {
+			del_timer(&cfqd->idle_slice_timer);
+			blk_start_queueing(cfqd->queue);
+		}
 		cfq_mark_cfqq_must_dispatch(cfqq);
+	}
 	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
 		/*
 		 * not the active queue - expire current slice if it is
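
A worked illustration of the new fast path, assuming the common 4KB PAGE_CACHE_SIZE (an assumption; the constant is per-arch):

	/*
	 * blk_rq_bytes(rq) == 8192                  -> kick the queue now,
	 *                                              merging already happened
	 * blk_rq_bytes(rq) == 512, busy_queues == 1 -> keep waiting for the
	 *                                              idle timer / more merges
	 */
	if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || cfqd->busy_queues > 1) {
		del_timer(&cfqd->idle_slice_timer);
		blk_start_queueing(cfqd->queue);
	}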
@@ -1992,16 +2165,24 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	 * or if we want to idle in case it has no pending requests.
 	 */
 	if (cfqd->active_queue == cfqq) {
+		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
+
 		if (cfq_cfqq_slice_new(cfqq)) {
 			cfq_set_prio_slice(cfqd, cfqq);
 			cfq_clear_cfqq_slice_new(cfqq);
 		}
+		/*
+		 * If there are no requests waiting in this queue, and
+		 * there are other queues ready to issue requests, AND
+		 * those other queues are issuing requests within our
+		 * mean seek distance, give them a chance to run instead
+		 * of idling.
+		 */
 		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
 			cfq_slice_expired(cfqd, 1);
-		else if (sync && !rq_noidle(rq) &&
-			 RB_EMPTY_ROOT(&cfqq->sort_list)) {
+		else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) &&
+			 sync && !rq_noidle(rq))
 			cfq_arm_slice_timer(cfqd);
-		}
 	}
 
 	if (!cfqd->rq_in_driver)
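
Worth noting about evaluation order in the rewritten condition: cfqq_empty is tested first, so the cooperator probe (an rbtree walk) is short-circuited whenever the active queue still has requests queued. The same expression, annotated:

	else if (cfqq_empty &&                           /* 1: cheap flag   */
		 !cfq_close_cooperator(cfqd, cfqq, 1) && /* 2: rbtree walk  */
		 sync && !rq_noidle(rq))                 /* 3: request bits */
		cfq_arm_slice_timer(cfqd);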
@@ -2062,7 +2243,7 @@ static int cfq_may_queue(struct request_queue *q, int rw)
 	if (!cic)
 		return ELV_MQUEUE_MAY;
 
-	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
+	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
 	if (cfqq) {
 		cfq_init_prio_data(cfqq, cic->ioc);
 		cfq_prio_boost(cfqq);
@@ -2152,11 +2333,10 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct cfq_data *cfqd =
 		container_of(work, struct cfq_data, unplug_work);
 	struct request_queue *q = cfqd->queue;
-	unsigned long flags;
 
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irq(q->queue_lock);
 	blk_start_queueing(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irq(q->queue_lock);
 }
 
 /*