 block/cfq-iosched.c | 48 +++---------------------------------------------
 1 file changed, 3 insertions(+), 45 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e2e6ad0a158e..c88f161d3fb3 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -123,8 +123,6 @@ struct cfq_data {
 	 */
 	struct hlist_head *crq_hash;
 
-	unsigned int max_queued;
-
 	mempool_t *crq_pool;
 
 	int rq_in_driver;
@@ -1910,7 +1908,6 @@ static inline int
 __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		struct task_struct *task, int rw)
 {
-#if 1
 	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
 	    !cfq_cfqq_must_alloc_slice(cfqq)) {
 		cfq_mark_cfqq_must_alloc_slice(cfqq);
@@ -1918,39 +1915,6 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 
 	return ELV_MQUEUE_MAY;
-#else
-	if (!cfqq || task->flags & PF_MEMALLOC)
-		return ELV_MQUEUE_MAY;
-	if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
-		if (cfq_cfqq_wait_request(cfqq))
-			return ELV_MQUEUE_MUST;
-
-		/*
-		 * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
-		 * can quickly flood the queue with writes from a single task
-		 */
-		if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
-			cfq_mark_cfqq_must_alloc_slice(cfqq);
-			return ELV_MQUEUE_MUST;
-		}
-
-		return ELV_MQUEUE_MAY;
-	}
-	if (cfq_class_idle(cfqq))
-		return ELV_MQUEUE_NO;
-	if (cfqq->allocated[rw] >= cfqd->max_queued) {
-		struct io_context *ioc = get_io_context(GFP_ATOMIC);
-		int ret = ELV_MQUEUE_NO;
-
-		if (ioc && ioc->nr_batch_requests)
-			ret = ELV_MQUEUE_MAY;
-
-		put_io_context(ioc);
-		return ret;
-	}
-
-	return ELV_MQUEUE_MAY;
-#endif
 }
 
 static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
@@ -1979,16 +1943,13 @@ static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
 static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct request_list *rl = &q->rq;
 
-	if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
+	if (unlikely(cfqd->rq_starved)) {
+		struct request_list *rl = &q->rq;
+
 		smp_mb();
 		if (waitqueue_active(&rl->wait[READ]))
 			wake_up(&rl->wait[READ]);
-	}
-
-	if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
-		smp_mb();
 		if (waitqueue_active(&rl->wait[WRITE]))
 			wake_up(&rl->wait[WRITE]);
 	}
@@ -2278,9 +2239,6 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 
 	cfqd->queue = q;
 
-	cfqd->max_queued = q->nr_requests / 4;
-	q->nr_batching = cfq_queued;
-
 	init_timer(&cfqd->idle_slice_timer);
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;