Diffstat (limited to 'drivers/block/ll_rw_blk.c')
-rw-r--r--	drivers/block/ll_rw_blk.c	193
1 file changed, 55 insertions(+), 138 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index baedac522945..0af73512b9a8 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -263,8 +263,6 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 
 	blk_queue_activity_fn(q, NULL, NULL);
-
-	INIT_LIST_HEAD(&q->drain_list);
 }
 
 EXPORT_SYMBOL(blk_queue_make_request);
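Note: this hunk is one half of the commit's theme. The per-queue drain_list, and with it the whole QUEUE_FLAG_DRAIN machinery removed further down, goes away; instead of draining every outstanding request before an elevator switch, the queue now only accounts for requests that hold io-scheduler private data, via the new rl->elvpriv counter and REQ_ELVPRIV flag introduced below.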
@@ -353,6 +351,8 @@ static void blk_pre_flush_end_io(struct request *flush_rq)
 	struct request *rq = flush_rq->end_io_data;
 	request_queue_t *q = rq->q;
 
+	elv_completed_request(q, flush_rq);
+
 	rq->flags |= REQ_BAR_PREFLUSH;
 
 	if (!flush_rq->errors)
@@ -369,6 +369,8 @@ static void blk_post_flush_end_io(struct request *flush_rq)
 	struct request *rq = flush_rq->end_io_data;
 	request_queue_t *q = rq->q;
 
+	elv_completed_request(q, flush_rq);
+
 	rq->flags |= REQ_BAR_POSTFLUSH;
 
 	q->end_flush_fn(q, flush_rq);
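Note: the barrier flush request (flush_rq) is a per-queue, preallocated request that never passes through get_request(), so its completion must be reported to the elevator explicitly from these end_io hooks. That is only safe for every io scheduler if elv_completed_request() ignores requests it never sorted; a minimal sketch of that gating, assuming a blk_sorted_rq() test on the new REQ_SORTED flag (the elevator.c side is not part of this file's diff):

        /* sketch, not verbatim kernel code */
        void elv_completed_request(request_queue_t *q, struct request *rq)
        {
                elevator_t *e = q->elevator;

                /* only requests the io scheduler has actually seen */
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }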
@@ -408,8 +410,6 @@ struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
 	if (!list_empty(&rq->queuelist))
 		blkdev_dequeue_request(rq);
 
-	elv_deactivate_request(q, rq);
-
 	flush_rq->end_io_data = rq;
 	flush_rq->end_io = blk_pre_flush_end_io;
 
@@ -1040,6 +1040,7 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags);
 static char *rq_flags[] = {
 	"REQ_RW",
 	"REQ_FAILFAST",
+	"REQ_SORTED",
 	"REQ_SOFTBARRIER",
 	"REQ_HARDBARRIER",
 	"REQ_CMD",
@@ -1047,6 +1048,7 @@ static char *rq_flags[] = {
 	"REQ_STARTED",
 	"REQ_DONTPREP",
 	"REQ_QUEUED",
+	"REQ_ELVPRIV",
 	"REQ_PC",
 	"REQ_BLOCK_PC",
 	"REQ_SENSE",
@@ -1637,9 +1639,9 @@ static int blk_init_free_list(request_queue_t *q)
 
 	rl->count[READ] = rl->count[WRITE] = 0;
 	rl->starved[READ] = rl->starved[WRITE] = 0;
+	rl->elvpriv = 0;
 	init_waitqueue_head(&rl->wait[READ]);
 	init_waitqueue_head(&rl->wait[WRITE]);
-	init_waitqueue_head(&rl->drain);
 
 	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 				mempool_free_slab, request_cachep, q->node);
@@ -1652,13 +1654,13 @@ static int blk_init_free_list(request_queue_t *q)
 
 static int __make_request(request_queue_t *, struct bio *);
 
-request_queue_t *blk_alloc_queue(int gfp_mask)
+request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 {
 	return blk_alloc_queue_node(gfp_mask, -1);
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
-request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id)
+request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	request_queue_t *q;
 
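Note: several signatures in this patch (blk_alloc_queue{,_node}, get_request, blk_get_request, blk_rq_map_kern, current_io_context, get_io_context) change their allocation-mask parameter from int or unsigned int to gfp_t. This is part of the tree-wide gfp_t annotation work: gfp_t is a typedef for the allocation-flag word, along the lines of

        typedef unsigned int __nocast gfp_t;

so sparse can flag code that mixes allocation flags with plain integers. Callers are unaffected at runtime; the sketch of the typedef above is from memory and the exact annotation has varied over time.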
@@ -1782,12 +1784,14 @@ EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(request_queue_t *q, struct request *rq)
 {
-	elv_put_request(q, rq);
+	if (rq->flags & REQ_ELVPRIV)
+		elv_put_request(q, rq);
 	mempool_free(rq, q->rq.rq_pool);
 }
 
 static inline struct request *
-blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
+blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
+		  int priv, gfp_t gfp_mask)
 {
 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -1800,11 +1804,15 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
 	 */
 	rq->flags = rw;
 
-	if (!elv_set_request(q, rq, bio, gfp_mask))
-		return rq;
+	if (priv) {
+		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+			mempool_free(rq, q->rq.rq_pool);
+			return NULL;
+		}
+		rq->flags |= REQ_ELVPRIV;
+	}
 
-	mempool_free(rq, q->rq.rq_pool);
-	return NULL;
+	return rq;
 }
 
 /*
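Note: taken together, the two hunks above set up the invariant the rest of the patch relies on: REQ_ELVPRIV is set on a request if and only if elv_set_request() succeeded for it, so blk_free_request() can tell from a single flag test whether an elv_put_request() is owed. In sketch form:

        /* invariant (sketch):
         *   blk_alloc_request(q, rw, bio, 1, mask) != NULL  =>  REQ_ELVPRIV set,
         *       elevator private data attached, elv_put_request() owed on free
         *   blk_alloc_request(q, rw, bio, 0, mask) != NULL  =>  plain request,
         *       the io scheduler never sees it
         *   NULL return  =>  the mempool slot has already been given back
         */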
@@ -1860,22 +1868,18 @@ static void __freed_request(request_queue_t *q, int rw)
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(request_queue_t *q, int rw)
+static void freed_request(request_queue_t *q, int rw, int priv)
 {
 	struct request_list *rl = &q->rq;
 
 	rl->count[rw]--;
+	if (priv)
+		rl->elvpriv--;
 
 	__freed_request(q, rw);
 
 	if (unlikely(rl->starved[rw ^ 1]))
 		__freed_request(q, rw ^ 1);
-
-	if (!rl->count[READ] && !rl->count[WRITE]) {
-		smp_mb();
-		if (unlikely(waitqueue_active(&rl->drain)))
-			wake_up(&rl->drain);
-	}
 }
 
 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
@@ -1885,14 +1889,12 @@ static void freed_request(request_queue_t *q, int rw)
  * Returns !NULL on success, with queue_lock *not held*.
  */
 static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-				   int gfp_mask)
+				   gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = current_io_context(GFP_ATOMIC);
-
-	if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
-		goto out;
+	int priv;
 
 	if (rl->count[rw]+1 >= q->nr_requests) {
 		/*
@@ -1937,9 +1939,14 @@ get_rq:
 	rl->starved[rw] = 0;
 	if (rl->count[rw] >= queue_congestion_on_threshold(q))
 		set_queue_congested(q, rw);
+
+	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	if (priv)
+		rl->elvpriv++;
+
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw, bio, gfp_mask);
+	rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
 	if (!rq) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
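Note: this is where the drain-free elevator switch is wired in. While QUEUE_FLAG_ELVSWITCH is set, new requests are allocated with priv == 0 and bypass the io scheduler entirely, so a switch only has to wait for rl->elvpriv, the count of requests the old elevator knows about, to reach zero rather than for a fully idle queue. A hedged sketch of how the elevator core could use this, with a hypothetical helper name (the elevator.c side is not in this diff):

        static void example_wait_for_old_elevator(request_queue_t *q)
        {
                spin_lock_irq(q->queue_lock);
                set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

                while (q->rq.elvpriv) {         /* old-elevator requests in flight */
                        blk_remove_plug(q);
                        q->request_fn(q);       /* push them out to the driver */
                        spin_unlock_irq(q->queue_lock);
                        msleep(10);
                        spin_lock_irq(q->queue_lock);
                }

                spin_unlock_irq(q->queue_lock);
                /* now safe to detach the old io scheduler and attach the new one */
        }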
@@ -1949,7 +1956,7 @@ get_rq:
 		 * wait queue, but this is pretty rare.
 		 */
 		spin_lock_irq(q->queue_lock);
-		freed_request(q, rw);
+		freed_request(q, rw, priv);
 
 		/*
 		 * in the very unlikely event that allocation failed and no
@@ -2019,7 +2026,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 	return rq;
 }
 
-struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
+struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
 {
 	struct request *rq;
 
@@ -2251,7 +2258,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  * @gfp_mask: memory allocation flags
  */
 int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
-		    unsigned int len, unsigned int gfp_mask)
+		    unsigned int len, gfp_t gfp_mask)
 {
 	struct bio *bio;
 
@@ -2433,13 +2440,15 @@ void disk_round_stats(struct gendisk *disk)
 {
 	unsigned long now = jiffies;
 
-	__disk_stat_add(disk, time_in_queue,
-			disk->in_flight * (now - disk->stamp));
-	disk->stamp = now;
+	if (now == disk->stamp)
+		return;
 
-	if (disk->in_flight)
-		__disk_stat_add(disk, io_ticks, (now - disk->stamp_idle));
-	disk->stamp_idle = now;
+	if (disk->in_flight) {
+		__disk_stat_add(disk, time_in_queue,
+				disk->in_flight * (now - disk->stamp));
+		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
+	}
+	disk->stamp = now;
 }
 
 /*
@@ -2454,6 +2463,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 	if (unlikely(--req->ref_count))
 		return;
 
+	elv_completed_request(q, req);
+
 	req->rq_status = RQ_INACTIVE;
 	req->rl = NULL;
 
@@ -2463,26 +2474,25 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 	 */
 	if (rl) {
 		int rw = rq_data_dir(req);
-
-		elv_completed_request(q, req);
+		int priv = req->flags & REQ_ELVPRIV;
 
 		BUG_ON(!list_empty(&req->queuelist));
 
 		blk_free_request(q, req);
-		freed_request(q, rw);
+		freed_request(q, rw, priv);
 	}
 }
 
 void blk_put_request(struct request *req)
 {
+	unsigned long flags;
+	request_queue_t *q = req->q;
+
 	/*
-	 * if req->rl isn't set, this request didnt originate from the
-	 * block layer, so it's safe to just disregard it
+	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
+	 * following if (q) test.
 	 */
-	if (req->rl) {
-		unsigned long flags;
-		request_queue_t *q = req->q;
-
+	if (q) {
 		spin_lock_irqsave(q->queue_lock, flags);
 		__blk_put_request(q, req);
 		spin_unlock_irqrestore(q->queue_lock, flags);
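Note: elv_completed_request() moves from the rl-only branch of __blk_put_request() up to the common path (previous hunk), so the elevator now hears about every released request, not just those that came from the request_list; the REQ_ELVPRIV bookkeeping keeps that cheap for requests the io scheduler never saw. For callers nothing changes; a request obtained from the block layer is still released the same way, e.g.:

        struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
        if (rq) {
                /* ... fill in and issue the request ... */
                blk_put_request(rq);    /* q is taken from rq->q internally */
        }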
@@ -2797,97 +2807,6 @@ static inline void blk_partition_remap(struct bio *bio)
 	}
 }
 
-void blk_finish_queue_drain(request_queue_t *q)
-{
-	struct request_list *rl = &q->rq;
-	struct request *rq;
-	int requeued = 0;
-
-	spin_lock_irq(q->queue_lock);
-	clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
-
-	while (!list_empty(&q->drain_list)) {
-		rq = list_entry_rq(q->drain_list.next);
-
-		list_del_init(&rq->queuelist);
-		elv_requeue_request(q, rq);
-		requeued++;
-	}
-
-	if (requeued)
-		q->request_fn(q);
-
-	spin_unlock_irq(q->queue_lock);
-
-	wake_up(&rl->wait[0]);
-	wake_up(&rl->wait[1]);
-	wake_up(&rl->drain);
-}
-
-static int wait_drain(request_queue_t *q, struct request_list *rl, int dispatch)
-{
-	int wait = rl->count[READ] + rl->count[WRITE];
-
-	if (dispatch)
-		wait += !list_empty(&q->queue_head);
-
-	return wait;
-}
-
-/*
- * We rely on the fact that only requests allocated through blk_alloc_request()
- * have io scheduler private data structures associated with them. Any other
- * type of request (allocated on stack or through kmalloc()) should not go
- * to the io scheduler core, but be attached to the queue head instead.
- */
-void blk_wait_queue_drained(request_queue_t *q, int wait_dispatch)
-{
-	struct request_list *rl = &q->rq;
-	DEFINE_WAIT(wait);
-
-	spin_lock_irq(q->queue_lock);
-	set_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
-
-	while (wait_drain(q, rl, wait_dispatch)) {
-		prepare_to_wait(&rl->drain, &wait, TASK_UNINTERRUPTIBLE);
-
-		if (wait_drain(q, rl, wait_dispatch)) {
-			__generic_unplug_device(q);
-			spin_unlock_irq(q->queue_lock);
-			io_schedule();
-			spin_lock_irq(q->queue_lock);
-		}
-
-		finish_wait(&rl->drain, &wait);
-	}
-
-	spin_unlock_irq(q->queue_lock);
-}
-
-/*
- * block waiting for the io scheduler being started again.
- */
-static inline void block_wait_queue_running(request_queue_t *q)
-{
-	DEFINE_WAIT(wait);
-
-	while (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
-		struct request_list *rl = &q->rq;
-
-		prepare_to_wait_exclusive(&rl->drain, &wait,
-				TASK_UNINTERRUPTIBLE);
-
-		/*
-		 * re-check the condition. avoids using prepare_to_wait()
-		 * in the fast path (queue is running)
-		 */
-		if (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))
-			io_schedule();
-
-		finish_wait(&rl->drain, &wait);
-	}
-}
-
 static void handle_bad_sector(struct bio *bio)
 {
 	char b[BDEVNAME_SIZE];
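Note: with drain_list no longer initialized in blk_queue_make_request() and QUEUE_FLAG_DRAIN never set anywhere, all four drain helpers above are dead code and can simply be deleted. The next hunk removes their last caller, block_wait_queue_running(), from the submission path, which also drops a flag test from the fast path of generic_make_request().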
@@ -2983,8 +2902,6 @@ end_io:
 		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
 			goto end_io;
 
-		block_wait_queue_running(q);
-
 		/*
 		 * If this device has partitions, remap block n
 		 * of partition p to block n+start(p) of the disk.
@@ -3393,7 +3310,7 @@ void exit_io_context(void)
  * but since the current task itself holds a reference, the context can be
  * used in general code, so long as it stays within `current` context.
  */
-struct io_context *current_io_context(int gfp_flags)
+struct io_context *current_io_context(gfp_t gfp_flags)
 {
 	struct task_struct *tsk = current;
 	struct io_context *ret;
@@ -3424,7 +3341,7 @@ EXPORT_SYMBOL(current_io_context);
  *
  * This is always called in the context of the task which submitted the I/O.
  */
-struct io_context *get_io_context(int gfp_flags)
+struct io_context *get_io_context(gfp_t gfp_flags)
 {
 	struct io_context *ret;
 	ret = current_io_context(gfp_flags);