Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c  202
1 file changed, 106 insertions(+), 96 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9e809345f71a..a4809de6fea6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -160,6 +160,7 @@ struct cfq_queue {
 
 	unsigned long slice_end;
 	long slice_resid;
+	unsigned int slice_dispatch;
 
 	/* pending metadata requests */
 	int meta_pending;
@@ -176,13 +177,12 @@ struct cfq_queue {
 enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
 	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
+	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
 	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
 	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
-	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
 	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
 	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
 	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
-	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 };
@@ -203,13 +203,12 @@ static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
 
 CFQ_CFQQ_FNS(on_rr);
 CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(must_alloc);
 CFQ_CFQQ_FNS(must_alloc_slice);
-CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(fifo_expire);
 CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
-CFQ_CFQQ_FNS(queue_new);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 #undef CFQ_CFQQ_FNS
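
The CFQ_CFQQ_FNS(name) macro, whose test-helper signature is visible in the hunk header above, stamps out a mark/clear/test helper triple for each flag; the patch uses all three for must_dispatch. Below is a self-contained stand-in that mirrors the kernel pattern; the struct layout and bit position are illustrative, not the verbatim source.

#include <stdio.h>

/* Minimal stand-in for the kernel pattern: one bit per flag in an
 * unsigned int, three generated helpers per flag name. */
struct cfq_queue {
	unsigned int flags;
};

enum { CFQ_CFQQ_FLAG_must_dispatch = 2 };	/* bit position, illustrative */

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(must_dispatch)

int main(void)
{
	struct cfq_queue q = { 0 };

	cfq_mark_cfqq_must_dispatch(&q);
	printf("must_dispatch=%d\n", cfq_cfqq_must_dispatch(&q));
	cfq_clear_cfqq_must_dispatch(&q);
	printf("must_dispatch=%d\n", cfq_cfqq_must_dispatch(&q));
	return 0;
}

Moving must_dispatch up in the enum and in the FNS list only renumbers the flag's bit internally; nothing depends on the bit value.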
@@ -774,10 +773,15 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active");
 		cfqq->slice_end = 0;
+		cfqq->slice_dispatch = 0;
+
+		cfq_clear_cfqq_wait_request(cfqq);
+		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_must_alloc_slice(cfqq);
 		cfq_clear_cfqq_fifo_expire(cfqq);
 		cfq_mark_cfqq_slice_new(cfqq);
-		cfq_clear_cfqq_queue_new(cfqq);
+
+		del_timer(&cfqd->idle_slice_timer);
 	}
 
 	cfqd->active_queue = cfqq;
@@ -795,7 +799,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (cfq_cfqq_wait_request(cfqq))
 		del_timer(&cfqd->idle_slice_timer);
 
-	cfq_clear_cfqq_must_dispatch(cfqq);
 	cfq_clear_cfqq_wait_request(cfqq);
 
 	/*
@@ -924,7 +927,6 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
 		return;
 
-	cfq_mark_cfqq_must_dispatch(cfqq);
 	cfq_mark_cfqq_wait_request(cfqq);
 
 	/*
@@ -1010,7 +1012,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	/*
 	 * The active queue has run out of time, expire it and select new.
 	 */
-	if (cfq_slice_used(cfqq))
+	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
 		goto expire;
 
 	/*
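
The new !cfq_cfqq_must_dispatch() clause keeps the active queue alive past an expired slice when a request arrived during idling and has not been let through yet (see the idle-timer hunk further down). A minimal sketch of the resulting expiry rule, with an invented name rather than kernel code:

#include <stdbool.h>

/* Illustrative only, not kernel code: a used-up slice forces expiry
 * unless an arrived request is still owed a dispatch. */
bool slice_should_expire(bool slice_used, bool must_dispatch)
{
	return slice_used && !must_dispatch;
}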
@@ -1053,66 +1055,6 @@ keep_queue:
 	return cfqq;
 }
 
-/*
- * Dispatch some requests from cfqq, moving them to the request queue
- * dispatch list.
- */
-static int
-__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-			int max_dispatch)
-{
-	int dispatched = 0;
-
-	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-	do {
-		struct request *rq;
-
-		/*
-		 * follow expired path, else get first next available
-		 */
-		rq = cfq_check_fifo(cfqq);
-		if (rq == NULL)
-			rq = cfqq->next_rq;
-
-		/*
-		 * finally, insert request into driver dispatch list
-		 */
-		cfq_dispatch_insert(cfqd->queue, rq);
-
-		dispatched++;
-
-		if (!cfqd->active_cic) {
-			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
-			cfqd->active_cic = RQ_CIC(rq);
-		}
-
-		if (RB_EMPTY_ROOT(&cfqq->sort_list))
-			break;
-
-		/*
-		 * If there is a non-empty RT cfqq waiting for current
-		 * cfqq's timeslice to complete, pre-empt this cfqq
-		 */
-		if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues)
-			break;
-
-	} while (dispatched < max_dispatch);
-
-	/*
-	 * expire an async queue immediately if it has used up its slice. idle
-	 * queue always expire after 1 dispatch round.
-	 */
-	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
-	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-	    cfq_class_idle(cfqq))) {
-		cfqq->slice_end = jiffies + 1;
-		cfq_slice_expired(cfqd, 0);
-	}
-
-	return dispatched;
-}
-
 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
 {
 	int dispatched = 0;
@@ -1146,11 +1088,45 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	return dispatched;
 }
 
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct request *rq;
+
+	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+	/*
+	 * follow expired path, else get first next available
+	 */
+	rq = cfq_check_fifo(cfqq);
+	if (!rq)
+		rq = cfqq->next_rq;
+
+	/*
+	 * insert request into driver dispatch list
+	 */
+	cfq_dispatch_insert(cfqd->queue, rq);
+
+	if (!cfqd->active_cic) {
+		struct cfq_io_context *cic = RQ_CIC(rq);
+
+		atomic_inc(&cic->ioc->refcount);
+		cfqd->active_cic = cic;
+	}
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
 static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq;
-	int dispatched;
+	unsigned int max_dispatch;
 
 	if (!cfqd->busy_queues)
 		return 0;
@@ -1158,29 +1134,63 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 	if (unlikely(force))
 		return cfq_forced_dispatch(cfqd);
 
-	dispatched = 0;
-	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
-		int max_dispatch;
+	cfqq = cfq_select_queue(cfqd);
+	if (!cfqq)
+		return 0;
+
+	/*
+	 * If this is an async queue and we have sync IO in flight, let it wait
+	 */
+	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+		return 0;
+
+	max_dispatch = cfqd->cfq_quantum;
+	if (cfq_class_idle(cfqq))
+		max_dispatch = 1;
 
-		max_dispatch = cfqd->cfq_quantum;
+	/*
+	 * Does this cfqq already have too much IO in flight?
+	 */
+	if (cfqq->dispatched >= max_dispatch) {
+		/*
+		 * idle queue must always only have a single IO in flight
+		 */
 		if (cfq_class_idle(cfqq))
-			max_dispatch = 1;
+			return 0;
 
-		if (cfqq->dispatched >= max_dispatch && cfqd->busy_queues > 1)
-			break;
+		/*
+		 * We have other queues, don't allow more IO from this one
+		 */
+		if (cfqd->busy_queues > 1)
+			return 0;
 
-		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-			break;
+		/*
+		 * we are the only queue, allow up to 4 times of 'quantum'
+		 */
+		if (cfqq->dispatched >= 4 * max_dispatch)
+			return 0;
+	}
 
-		cfq_clear_cfqq_must_dispatch(cfqq);
-		cfq_clear_cfqq_wait_request(cfqq);
-		del_timer(&cfqd->idle_slice_timer);
+	/*
+	 * Dispatch a request from this cfqq
+	 */
+	cfq_dispatch_request(cfqd, cfqq);
+	cfqq->slice_dispatch++;
+	cfq_clear_cfqq_must_dispatch(cfqq);
 
-		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+	/*
+	 * expire an async queue immediately if it has used up its slice. idle
+	 * queue always expire after 1 dispatch round.
+	 */
+	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
+	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+	    cfq_class_idle(cfqq))) {
+		cfqq->slice_end = jiffies + 1;
+		cfq_slice_expired(cfqd, 0);
 	}
 
-	cfq_log(cfqd, "dispatched=%d", dispatched);
-	return dispatched;
+	cfq_log(cfqd, "dispatched a request");
+	return 1;
 }
 
 /*
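
The rewritten cfq_dispatch_requests() now moves at most one request per call, and the checks above decide when a queue already has enough IO in flight that the dispatcher should back off. A small userspace simulation of that decision follows; struct fake_queue, may_dispatch() and the harness are invented for illustration and only mirror the logic of the hunk, they are not part of the patch.

#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	unsigned int dispatched;	/* requests currently in flight */
	bool is_idle_class;		/* cfq_class_idle() analogue */
};

/* Returns true if one more request may be dispatched from q. */
static bool may_dispatch(const struct fake_queue *q, unsigned int quantum,
			 unsigned int busy_queues)
{
	unsigned int max_dispatch = q->is_idle_class ? 1 : quantum;

	if (q->dispatched < max_dispatch)
		return true;
	/* idle queues may never exceed a single request in flight */
	if (q->is_idle_class)
		return false;
	/* other queues are waiting: stop feeding this one */
	if (busy_queues > 1)
		return false;
	/* sole busy queue: allow up to 4x quantum to keep the device busy */
	return q->dispatched < 4 * max_dispatch;
}

int main(void)
{
	struct fake_queue q = { .dispatched = 0, .is_idle_class = false };
	unsigned int sent = 0;

	/* with quantum=4 and a single busy queue, the cap is 16 */
	while (may_dispatch(&q, 4, 1)) {
		q.dispatched++;
		sent++;
	}
	printf("dispatched %u requests before throttling\n", sent);
	return 0;
}

With quantum = 4 and a single busy queue the harness dispatches 16 requests before throttling, which is the "up to 4 times of 'quantum'" case; with more than one busy queue it would stop at 4.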
@@ -1506,7 +1516,6 @@ retry:
 		cfqq->cfqd = cfqd;
 
 		cfq_mark_cfqq_prio_changed(cfqq);
-		cfq_mark_cfqq_queue_new(cfqq);
 
 		cfq_init_prio_data(cfqq, ioc);
 
@@ -1893,15 +1902,13 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	if (cfqq == cfqd->active_queue) {
 		/*
-		 * if we are waiting for a request for this queue, let it rip
-		 * immediately and flag that we must not expire this queue
-		 * just now
+		 * Remember that we saw a request from this process, but
+		 * don't start queuing just yet. Otherwise we risk seeing lots
+		 * of tiny requests, because we disrupt the normal plugging
+		 * and merging.
 		 */
-		if (cfq_cfqq_wait_request(cfqq)) {
+		if (cfq_cfqq_wait_request(cfqq))
 			cfq_mark_cfqq_must_dispatch(cfqq);
-			del_timer(&cfqd->idle_slice_timer);
-			blk_start_queueing(cfqd->queue);
-		}
 	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
 		/*
 		 * not the active queue - expire current slice if it is
@@ -1910,7 +1917,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		cfq_mark_cfqq_must_dispatch(cfqq);
 		blk_start_queueing(cfqd->queue);
 	}
 }
@@ -2172,6 +2178,12 @@ static void cfq_idle_slice_timer(unsigned long data)
 		timed_out = 0;
 
 		/*
+		 * We saw a request before the queue expired, let it through
+		 */
+		if (cfq_cfqq_must_dispatch(cfqq))
+			goto out_kick;
+
+		/*
 		 * expired
 		 */
 		if (cfq_slice_used(cfqq))
@@ -2187,10 +2199,8 @@ static void cfq_idle_slice_timer(unsigned long data)
 		/*
 		 * not expired and it has a request pending, let it dispatch
 		 */
-		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
-			cfq_mark_cfqq_must_dispatch(cfqq);
+		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
 			goto out_kick;
-		}
 	}
 expire:
 	cfq_slice_expired(cfqd, timed_out);
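
Taken together, the must_dispatch changes form a small handshake: cfq_rq_enqueued() now only marks the flag when a request lands on an idling active queue (instead of kicking the queue immediately, which disrupted plugging and merging), and the idle-slice timer lets that request through rather than expiring the slice. A hedged userspace sketch of that handshake; sim_queue and both handlers are invented names that only mirror the flow, not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct sim_queue {
	bool wait_request;	/* queue is idling, waiting for a request */
	bool must_dispatch;	/* a request arrived while idling */
};

/* cfq_rq_enqueued() analogue: note we no longer kick the queue here,
 * so back-to-back tiny requests can still plug and merge. */
static void on_request_arrived(struct sim_queue *q)
{
	if (q->wait_request)
		q->must_dispatch = true;
}

/* cfq_idle_slice_timer() analogue: a saved request is let through. */
static bool on_idle_timer(struct sim_queue *q)
{
	if (q->must_dispatch)
		return true;	/* kick the queue instead of expiring it */
	return false;		/* expire the slice */
}

int main(void)
{
	struct sim_queue q = { .wait_request = true, .must_dispatch = false };

	on_request_arrived(&q);
	printf("timer kicks queue: %s\n", on_idle_timer(&q) ? "yes" : "no");
	return 0;
}

Either path ends with the request going out on the next dispatch round, where cfq_dispatch_requests() clears must_dispatch again.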