author    Jens Axboe <axboe@suse.de>    2005-11-12 05:09:12 -0500
committer Jens Axboe <axboe@nelson.home.kernel.dk>    2006-01-06 03:39:04 -0500
commit    88ee5ef157202624de2b43b3512fdcb54fda1ab5
tree      a95cd472fb62eab2a6bd1c651ddf44c3f248868d /block
parent    ef9be1d336378de279d4e37779f1b83cebadbcc0
[BLOCK] ll_rw_blk: fastpath get_request()
Originally from: Nick Piggin <nickpiggin@yahoo.com.au>

Move current_io_context out of the get_request fastpath. Also try to
streamline a few other things in this area.

Signed-off-by: Jens Axboe <axboe@suse.de>
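The change is easier to see outside the diff below: the io_context lookup is deferred until the slow path, where the queue is actually close to its limits. What follows is a small, self-contained C sketch of that pattern only; the types and names (mock_ioc, mock_current_io_context, mock_get_request) and the threshold values are illustrative stand-ins, not the kernel API or this patch's code.

/*
 * Illustrative sketch only -- mock types and names, not the kernel API.
 * It shows the pattern this patch applies to get_request(): resolve the
 * io_context lazily, on the slow path, instead of unconditionally at entry.
 */
#include <stdio.h>

struct mock_ioc {
	int nr_batch_requests;
};

static struct mock_ioc global_ioc = { 8 };

/* stand-in for current_io_context(); pretend this lookup is costly */
static struct mock_ioc *mock_current_io_context(void)
{
	puts("  (io_context resolved)");
	return &global_ioc;
}

/* stand-in for the post-patch allocation path */
static void mock_get_request(int count, int congestion_thresh, int nr_requests)
{
	struct mock_ioc *ioc = NULL;	/* pre-patch: resolved here, always */

	if (count + 1 >= congestion_thresh) {
		if (count + 1 >= nr_requests) {
			/* slow path: only now is the io_context needed */
			ioc = mock_current_io_context();
			/* queue-full / batching bookkeeping would go here */
		}
		/* the patch also marks the queue congested once, here */
	}

	/* ioc may still be NULL; batching accounting must tolerate that */
	if (ioc)
		ioc->nr_batch_requests--;
}

int main(void)
{
	puts("fast path, queue nearly empty:");
	mock_get_request(3, 113, 128);		/* lookup skipped entirely */

	puts("slow path, queue full:");
	mock_get_request(127, 113, 128);	/* lookup happens once */
	return 0;
}

Before the patch the lookup ran before any threshold check, so every allocation paid for it; afterwards only allocations that reach the congestion/full checks do, and the comment added in the last hunk documents that ioc may legitimately be NULL on the fast path.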
Diffstat (limited to 'block')
-rw-r--r--  block/ll_rw_blk.c  70
1 file changed, 37 insertions(+), 33 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index d4beb9a89ee0..97f4e7ecedfe 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1908,40 +1908,40 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
-	struct io_context *ioc = current_io_context(GFP_ATOMIC);
-	int priv;
+	struct io_context *ioc = NULL;
+	int may_queue, priv;
 
-	if (rl->count[rw]+1 >= q->nr_requests) {
-		/*
-		 * The queue will fill after this allocation, so set it as
-		 * full, and mark this process as "batching". This process
-		 * will be allowed to complete a batch of requests, others
-		 * will be blocked.
-		 */
-		if (!blk_queue_full(q, rw)) {
-			ioc_set_batching(q, ioc);
-			blk_set_queue_full(q, rw);
-		}
-	}
+	may_queue = elv_may_queue(q, rw, bio);
+	if (may_queue == ELV_MQUEUE_NO)
+		goto rq_starved;
 
-	switch (elv_may_queue(q, rw, bio)) {
-		case ELV_MQUEUE_NO:
-			goto rq_starved;
-		case ELV_MQUEUE_MAY:
-			break;
-		case ELV_MQUEUE_MUST:
-			goto get_rq;
-	}
-
-	if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
-		/*
-		 * The queue is full and the allocating process is not a
-		 * "batcher", and not exempted by the IO scheduler
-		 */
-		goto out;
+	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+		if (rl->count[rw]+1 >= q->nr_requests) {
+			ioc = current_io_context(GFP_ATOMIC);
+			/*
+			 * The queue will fill after this allocation, so set
+			 * it as full, and mark this process as "batching".
+			 * This process will be allowed to complete a batch of
+			 * requests, others will be blocked.
+			 */
+			if (!blk_queue_full(q, rw)) {
+				ioc_set_batching(q, ioc);
+				blk_set_queue_full(q, rw);
+			} else {
+				if (may_queue != ELV_MQUEUE_MUST
+						&& !ioc_batching(q, ioc)) {
+					/*
+					 * The queue is full and the allocating
+					 * process is not a "batcher", and not
+					 * exempted by the IO scheduler
+					 */
+					goto out;
+				}
+			}
+		}
+		set_queue_congested(q, rw);
 	}
 
-get_rq:
 	/*
 	 * Only allow batching queuers to allocate up to 50% over the defined
 	 * limit of requests, otherwise we could have thousands of requests
@@ -1952,8 +1952,6 @@ get_rq:
 
 	rl->count[rw]++;
 	rl->starved[rw] = 0;
-	if (rl->count[rw] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, rw);
 
 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 	if (priv)
@@ -1962,7 +1960,7 @@ get_rq:
 	spin_unlock_irq(q->queue_lock);
 
 	rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
-	if (!rq) {
+	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
 		 * we might have messed up.
@@ -1987,6 +1985,12 @@ rq_starved:
 		goto out;
 	}
 
+	/*
+	 * ioc may be NULL here, and ioc_batching will be false. That's
+	 * OK, if the queue is under the request limit then requests need
+	 * not count toward the nr_batch_requests limit. There will always
+	 * be some limit enforced by BLK_BATCH_TIME.
+	 */
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 