author	Jens Axboe <jens.axboe@oracle.com>	2009-10-06 14:49:37 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-10-06 14:49:37 -0400
commit	0b182d617eb50762b483658dd6dd9a9fbcb25758 (patch)
tree	28c7084cb5566cb26e5c6f96b3c055304afcbbbf
parent	1b59dd511b9a36d4be3c01d7c7024aeec36dc651 (diff)
cfq-iosched: abstract out the 'may this cfqq dispatch' logic
Makes the whole thing easier to read; cfq_dispatch_requests() was a bit
messy before.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--	block/cfq-iosched.c	121
1 files changed, 67 insertions, 54 deletions
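The shape of the change: the long chain of "is this queue allowed to dispatch right now?" checks moves out of the dispatch path into a boolean helper, cfq_may_dispatch(), and cfq_dispatch_request() now reports back whether it actually dispatched anything. The fragment below is a minimal standalone sketch of that pattern; the struct, field names, and policies are invented for illustration and are not the kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the scheduler state; every field here is invented. */
struct queue {
	bool is_async;		/* async queues must wait while sync IO is in flight */
	int  in_flight;		/* requests this queue already has dispatched */
	int  quantum;		/* how many dispatches the policy allows */
	int  pending;		/* requests still waiting in the queue */
};

/*
 * The predicate: one policy question per early return, no side effects.
 * This mirrors the shape of cfq_may_dispatch(), not its actual rules.
 */
static bool queue_may_dispatch(const struct queue *q, int sync_in_flight)
{
	if (q->pending == 0)
		return false;
	if (q->is_async && sync_in_flight > 0)
		return false;
	return q->in_flight < q->quantum;
}

/* The action: check first, then do the work, and report what happened. */
static bool queue_dispatch_one(struct queue *q, int sync_in_flight)
{
	if (!queue_may_dispatch(q, sync_in_flight))
		return false;
	q->pending--;
	q->in_flight++;
	return true;
}

int main(void)
{
	struct queue q = { .is_async = true, .in_flight = 0, .quantum = 4, .pending = 2 };

	/* Denied while sync IO is in flight, allowed once it has drained. */
	printf("%s\n", queue_dispatch_one(&q, 1) ? "dispatched" : "held back");
	printf("%s\n", queue_dispatch_one(&q, 0) ? "dispatched" : "held back");
	return 0;
}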
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 690ebd96dc42..5c3cee93329a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1247,67 +1247,21 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	return dispatched;
 }
 
-/*
- * Dispatch a request from cfqq, moving them to the request queue
- * dispatch list.
- */
-static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	struct request *rq;
-
-	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-	/*
-	 * follow expired path, else get first next available
-	 */
-	rq = cfq_check_fifo(cfqq);
-	if (!rq)
-		rq = cfqq->next_rq;
-
-	/*
-	 * insert request into driver dispatch list
-	 */
-	cfq_dispatch_insert(cfqd->queue, rq);
-
-	if (!cfqd->active_cic) {
-		struct cfq_io_context *cic = RQ_CIC(rq);
-
-		atomic_long_inc(&cic->ioc->refcount);
-		cfqd->active_cic = cic;
-	}
-}
-
-/*
- * Find the cfqq that we need to service and move a request from that to the
- * dispatch list
- */
-static int cfq_dispatch_requests(struct request_queue *q, int force)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_queue *cfqq;
 	unsigned int max_dispatch;
 
-	if (!cfqd->busy_queues)
-		return 0;
-
-	if (unlikely(force))
-		return cfq_forced_dispatch(cfqd);
-
-	cfqq = cfq_select_queue(cfqd);
-	if (!cfqq)
-		return 0;
-
 	/*
 	 * Drain async requests before we start sync IO
 	 */
 	if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
-		return 0;
+		return false;
 
 	/*
 	 * If this is an async queue and we have sync IO in flight, let it wait
 	 */
 	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-		return 0;
+		return false;
 
 	max_dispatch = cfqd->cfq_quantum;
 	if (cfq_class_idle(cfqq))
@@ -1321,13 +1275,13 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 		 * idle queue must always only have a single IO in flight
 		 */
 		if (cfq_class_idle(cfqq))
-			return 0;
+			return false;
 
 		/*
 		 * We have other queues, don't allow more IO from this one
 		 */
 		if (cfqd->busy_queues > 1)
-			return 0;
+			return false;
 
 		/*
 		 * Sole queue user, allow bigger slice
@@ -1351,13 +1305,72 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 		max_dispatch = depth;
 	}
 
-	if (cfqq->dispatched >= max_dispatch)
+	/*
+	 * If we're below the current max, allow a dispatch
+	 */
+	return cfqq->dispatched < max_dispatch;
+}
+
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct request *rq;
+
+	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+	if (!cfq_may_dispatch(cfqd, cfqq))
+		return false;
+
+	/*
+	 * follow expired path, else get first next available
+	 */
+	rq = cfq_check_fifo(cfqq);
+	if (!rq)
+		rq = cfqq->next_rq;
+
+	/*
+	 * insert request into driver dispatch list
+	 */
+	cfq_dispatch_insert(cfqd->queue, rq);
+
+	if (!cfqd->active_cic) {
+		struct cfq_io_context *cic = RQ_CIC(rq);
+
+		atomic_long_inc(&cic->ioc->refcount);
+		cfqd->active_cic = cic;
+	}
+
+	return true;
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
+static int cfq_dispatch_requests(struct request_queue *q, int force)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_queue *cfqq;
+
+	if (!cfqd->busy_queues)
+		return 0;
+
+	if (unlikely(force))
+		return cfq_forced_dispatch(cfqd);
+
+	cfqq = cfq_select_queue(cfqd);
+	if (!cfqq)
 		return 0;
 
 	/*
-	 * Dispatch a request from this cfqq
+	 * Dispatch a request from this cfqq, if it is allowed
 	 */
-	cfq_dispatch_request(cfqd, cfqq);
+	if (!cfq_dispatch_request(cfqd, cfqq))
+		return 0;
+
 	cfqq->slice_dispatch++;
 	cfq_clear_cfqq_must_dispatch(cfqq);
 
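A side effect of the new bool return worth noting: before the patch, cfq_dispatch_request() returned void and cfq_dispatch_requests() incremented cfqq->slice_dispatch unconditionally; now a refused dispatch returns before any slice accounting happens. A minimal sketch of that caller-side difference, again with invented names rather than the kernel structures:

#include <stdbool.h>
#include <stdio.h>

struct queue {
	int in_flight;		/* dispatched, not yet completed */
	int budget;		/* stand-in for the computed max_dispatch */
	int slice_dispatch;	/* per-slice accounting kept by the caller */
};

static bool dispatch_request(struct queue *q)
{
	if (q->in_flight >= q->budget)	/* stand-in for cfq_may_dispatch() */
		return false;
	q->in_flight++;			/* stand-in for cfq_dispatch_insert() */
	return true;
}

static int dispatch_requests(struct queue *q)
{
	if (!dispatch_request(q))
		return 0;		/* refused: accounting stays untouched */
	q->slice_dispatch++;		/* only a real dispatch is accounted */
	return 1;
}

int main(void)
{
	struct queue q = { .in_flight = 0, .budget = 1, .slice_dispatch = 0 };

	dispatch_requests(&q);		/* succeeds: slice_dispatch becomes 1 */
	dispatch_requests(&q);		/* refused: slice_dispatch stays 1 */
	printf("slice_dispatch = %d\n", q.slice_dispatch);
	return 0;
}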