Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/cfq-iosched.c	49
1 file changed, 24 insertions(+), 25 deletions(-)
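
Most of this diff is code movement: cfq_pending_requests(), cfq_schedule_dispatch() and cfq_queue_empty() move up the file next to the request hash helpers, which lets the forward declaration of cfq_pending_requests() go away; only the final two hunks change behaviour. As a reading aid, here is a minimal userspace sketch of the dispatch guard the moved helpers implement. struct cfqd_model, pending_requests() and schedule_dispatch() are hypothetical stand-ins for struct cfq_data, cfq_pending_requests() and cfq_schedule_dispatch(), not kernel API:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for the struct cfq_data fields used here */
struct cfqd_model {
	int rq_in_driver;	/* requests currently inside the driver */
	int busy_queues;	/* cfq queues that still have work queued */
	bool dispatch_nonempty;	/* models !list_empty(&q->queue_head) */
};

/* models cfq_pending_requests(): dispatch list non-empty or busy queues */
static bool pending_requests(const struct cfqd_model *cfqd)
{
	return cfqd->dispatch_nonempty || cfqd->busy_queues;
}

/*
 * models cfq_schedule_dispatch(): kick the worker only when nothing is
 * in the driver, because a completing request restarts queueing itself
 */
static void schedule_dispatch(const struct cfqd_model *cfqd)
{
	if (!cfqd->rq_in_driver && pending_requests(cfqd))
		puts("kblockd_schedule_work(&cfqd->unplug_work)");
}

int main(void)
{
	struct cfqd_model idle = { .rq_in_driver = 0, .busy_queues = 1 };
	struct cfqd_model busy = { .rq_in_driver = 2, .busy_queues = 1 };

	schedule_dispatch(&idle);	/* fires: work pending, driver idle */
	schedule_dispatch(&busy);	/* silent: driver restarts queueing */
	return 0;
}
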
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index ff1cc968f96d..de5746e38af9 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -300,7 +300,6 @@ CFQ_CRQ_FNS(requeued);
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
 static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
 static void cfq_put_cfqd(struct cfq_data *cfqd);
-static inline int cfq_pending_requests(struct cfq_data *cfqd);
 
 #define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
 
@@ -348,6 +347,28 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
 	return NULL;
 }
 
+static inline int cfq_pending_requests(struct cfq_data *cfqd)
+{
+	return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
+}
+
+/*
+ * scheduler run of queue, if there are requests pending and no one in the
+ * driver that will restart queueing
+ */
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+{
+	if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
+		kblockd_schedule_work(&cfqd->unplug_work);
+}
+
+static int cfq_queue_empty(request_queue_t *q)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+
+	return !cfq_pending_requests(cfqd);
+}
+
 /*
  * Lifted from AS - choose which of crq1 and crq2 that is best served now.
  * We choose the request that is closest to the head right now. Distance
@@ -1072,16 +1093,6 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 }
 
 /*
- * scheduler run of queue, if there are requests pending and no one in the
- * driver that will restart queueing
- */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
-{
-	if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
-		kblockd_schedule_work(&cfqd->unplug_work);
-}
-
-/*
  * get next queue for service
  */
 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force)
@@ -1846,18 +1857,6 @@ cfq_insert_request(request_queue_t *q, struct request *rq, int where)
 	}
 }
 
-static inline int cfq_pending_requests(struct cfq_data *cfqd)
-{
-	return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
-}
-
-static int cfq_queue_empty(request_queue_t *q)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-
-	return !cfq_pending_requests(cfqd);
-}
-
 static void cfq_completed_request(request_queue_t *q, struct request *rq)
 {
 	struct cfq_rq *crq = RQ_DATA(rq);
@@ -1952,7 +1951,7 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 #if 1
 	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
-	    !cfq_cfqq_must_alloc_slice) {
+	    !cfq_cfqq_must_alloc_slice(cfqq)) {
 		cfq_mark_cfqq_must_alloc_slice(cfqq);
 		return ELV_MQUEUE_MUST;
 	}
@@ -1969,7 +1968,7 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
 	 * can quickly flood the queue with writes from a single task
 	 */
-	if (rw == READ || !cfq_cfqq_must_alloc_slice) {
+	if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
 		cfq_mark_cfqq_must_alloc_slice(cfqq);
 		return ELV_MQUEUE_MUST;
 	}
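
The two hunks above are the functional fix. cfq_cfqq_must_alloc_slice is presumably a flag-test helper generated by the same FNS macro family as the CFQ_CRQ_FNS(requeued) visible in the first hunk, paired with cfq_mark_cfqq_must_alloc_slice(). Without the (cfqq) argument, the old code negated the helper's address rather than its return value; since a function name decays to a non-null pointer, !cfq_cfqq_must_alloc_slice was always false and the one-ELV_MQUEUE_MUST-per-slice limit never engaged. A minimal standalone demo of that C pitfall, with must_alloc_slice() as a made-up stand-in:

#include <stdio.h>

/* made-up stand-in for the cfq_cfqq_must_alloc_slice() flag helper */
static int must_alloc_slice(void)
{
	return 0;	/* flag not set */
}

int main(void)
{
	/*
	 * Without the call parentheses, the function name decays to a
	 * non-null function pointer, so the negation is always 0. This
	 * is exactly the bug the hunks above fix; gcc flags the broken
	 * form with -Waddress.
	 */
	printf("!must_alloc_slice   -> %d (always 0)\n", !must_alloc_slice);
	printf("!must_alloc_slice() -> %d (tests the flag)\n",
	       !must_alloc_slice());
	return 0;
}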