Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/ll_rw_blk.c | 29
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 67431f280154..5caebe2cf0a1 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -1867,19 +1867,20 @@ static void freed_request(request_queue_t *q, int rw)
 
 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
 /*
- * Get a free request, queue_lock must not be held
+ * Get a free request, queue_lock must be held.
+ * Returns NULL on failure, with queue_lock held.
+ * Returns !NULL on success, with queue_lock *not held*.
  */
 static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
                                    int gfp_mask)
 {
         struct request *rq = NULL;
         struct request_list *rl = &q->rq;
-        struct io_context *ioc = get_io_context(gfp_mask);
+        struct io_context *ioc = get_io_context(GFP_ATOMIC);
 
         if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
                 goto out;
 
-        spin_lock_irq(q->queue_lock);
         if (rl->count[rw]+1 >= q->nr_requests) {
                 /*
                  * The queue will fill after this allocation, so set it as
@@ -1907,7 +1908,6 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
                  * The queue is full and the allocating process is not a
                  * "batcher", and not exempted by the IO scheduler
                  */
-                spin_unlock_irq(q->queue_lock);
                 goto out;
         }
 
@@ -1950,7 +1950,6 @@ rq_starved:
                 if (unlikely(rl->count[rw] == 0))
                         rl->starved[rw] = 1;
 
-                spin_unlock_irq(q->queue_lock);
                 goto out;
         }
 
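Note on the three hunks above: together they change get_request() so that the caller enters with q->queue_lock held; on failure the lock is left held for the caller, and on success get_request() drops it before returning, exactly as the new header comment states. A minimal caller-side sketch of that contract, mirroring what blk_get_request() does later in this patch; get_request() is static to ll_rw_blk.c, and example_alloc() is a hypothetical name used only for illustration:

static struct request *example_alloc(request_queue_t *q, int rw, struct bio *bio)
{
        struct request *rq;

        spin_lock_irq(q->queue_lock);
        rq = get_request(q, rw, bio, GFP_ATOMIC);
        if (!rq) {
                /* failure: get_request() kept queue_lock held, drop it here */
                spin_unlock_irq(q->queue_lock);
                return NULL;
        }
        /* success: get_request() has already dropped queue_lock */
        return rq;
}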
@@ -1967,6 +1966,8 @@ out:
 /*
  * No available requests for this queue, unplug the device and wait for some
  * requests to become available.
+ *
+ * Called with q->queue_lock held, and returns with it unlocked.
  */
 static struct request *get_request_wait(request_queue_t *q, int rw,
                                         struct bio *bio)
@@ -1986,7 +1987,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
                 if (!rq) {
                         struct io_context *ioc;
 
-                        generic_unplug_device(q);
+                        __generic_unplug_device(q);
+                        spin_unlock_irq(q->queue_lock);
                         io_schedule();
 
                         /*
@@ -1998,6 +2000,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
                         ioc = get_io_context(GFP_NOIO);
                         ioc_set_batching(q, ioc);
                         put_io_context(ioc);
+
+                        spin_lock_irq(q->queue_lock);
                 }
                 finish_wait(&rl->wait[rw], &wait);
         }
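The two hunks above bring get_request_wait() under the same convention: it is now entered with q->queue_lock held, switches to __generic_unplug_device() (which requires the lock), drops the lock only around io_schedule(), and re-takes it before retrying. A condensed sketch of the loop's lock handling, leaving out the prepare_to_wait()/finish_wait() pair and the io_context batching shown in the hunks, and assuming the GFP_NOIO allocation mode used in this function:

        /* entered with q->queue_lock held */
        rq = get_request(q, rw, bio, GFP_NOIO);
        while (!rq) {
                __generic_unplug_device(q);     /* needs queue_lock held */
                spin_unlock_irq(q->queue_lock); /* never sleep with the lock */
                io_schedule();                  /* wait for a request to free up */
                spin_lock_irq(q->queue_lock);   /* re-take before retrying */
                rq = get_request(q, rw, bio, GFP_NOIO);
        }
        /* rq != NULL: get_request() dropped queue_lock, so return unlocked */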
@@ -2011,14 +2015,18 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
 
         BUG_ON(rw != READ && rw != WRITE);
 
-        if (gfp_mask & __GFP_WAIT)
+        spin_lock_irq(q->queue_lock);
+        if (gfp_mask & __GFP_WAIT) {
                 rq = get_request_wait(q, rw, NULL);
-        else
+        } else {
                 rq = get_request(q, rw, NULL, gfp_mask);
+                if (!rq)
+                        spin_unlock_irq(q->queue_lock);
+        }
+        /* q->queue_lock is unlocked at this point */
 
         return rq;
 }
-
 EXPORT_SYMBOL(blk_get_request);
 
 /**
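For users of the exported blk_get_request() the change is purely internal: the function now takes q->queue_lock itself and, per the new comment, always returns with it unlocked, so existing callers need no changes. A hypothetical caller (example_issue() is an invented name; request setup and submission are elided):

static int example_issue(request_queue_t *q)
{
        struct request *rq;

        rq = blk_get_request(q, WRITE, GFP_ATOMIC);     /* non-blocking variant */
        if (!rq)
                return -EBUSY;  /* with GFP_KERNEL (__GFP_WAIT set) this cannot happen */

        /* ... fill in the request and hand it to the driver/elevator ... */

        blk_put_request(rq);
        return 0;
}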
@@ -2605,9 +2613,10 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 get_rq:
         /*
          * Grab a free request. This is might sleep but can not fail.
+         * Returns with the queue unlocked.
          */
-        spin_unlock_irq(q->queue_lock);
         req = get_request_wait(q, rw, bio);
+
         /*
          * After dropping the lock and possibly sleeping here, our request
          * may now be mergeable after it had proven unmergeable (above).