Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--	block/ll_rw_blk.c	62
1 files changed, 33 insertions, 29 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8b1ae69bc5ac..65c4efc02adf 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -36,6 +36,8 @@
 static void blk_unplug_work(void *data);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void init_request_from_bio(struct request *req, struct bio *bio);
+static int __make_request(request_queue_t *q, struct bio *bio);
 
 /*
  * For the allocated request tables
@@ -1667,8 +1669,6 @@ static int blk_init_free_list(request_queue_t *q)
 	return 0;
 }
 
-static int __make_request(request_queue_t *, struct bio *);
-
 request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 {
 	return blk_alloc_queue_node(gfp_mask, -1);
@@ -2659,6 +2659,36 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
 
 EXPORT_SYMBOL(blk_attempt_remerge);
 
+static void init_request_from_bio(struct request *req, struct bio *bio)
+{
+	req->flags |= REQ_CMD;
+
+	/*
+	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+	 */
+	if (bio_rw_ahead(bio) || bio_failfast(bio))
+		req->flags |= REQ_FAILFAST;
+
+	/*
+	 * REQ_BARRIER implies no merging, but lets make it explicit
+	 */
+	if (unlikely(bio_barrier(bio)))
+		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+
+	req->errors = 0;
+	req->hard_sector = req->sector = bio->bi_sector;
+	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
+	req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
+	req->nr_phys_segments = bio_phys_segments(req->q, bio);
+	req->nr_hw_segments = bio_hw_segments(req->q, bio);
+	req->buffer = bio_data(bio);	/* see ->buffer comment above */
+	req->waiting = NULL;
+	req->bio = req->biotail = bio;
+	req->ioprio = bio_prio(bio);
+	req->rq_disk = bio->bi_bdev->bd_disk;
+	req->start_time = jiffies;
+}
+
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
 	struct request *req;
@@ -2754,33 +2784,7 @@ get_rq:
 	 * We don't worry about that case for efficiency. It won't happen
 	 * often, and the elevators are able to handle it.
 	 */
-
-	req->flags |= REQ_CMD;
-
-	/*
-	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
-	 */
-	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->flags |= REQ_FAILFAST;
-
-	/*
-	 * REQ_BARRIER implies no merging, but lets make it explicit
-	 */
-	if (unlikely(barrier))
-		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
-
-	req->errors = 0;
-	req->hard_sector = req->sector = sector;
-	req->hard_nr_sectors = req->nr_sectors = nr_sectors;
-	req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
-	req->nr_phys_segments = bio_phys_segments(q, bio);
-	req->nr_hw_segments = bio_hw_segments(q, bio);
-	req->buffer = bio_data(bio);	/* see ->buffer comment above */
-	req->waiting = NULL;
-	req->bio = req->biotail = bio;
-	req->ioprio = prio;
-	req->rq_disk = bio->bi_bdev->bd_disk;
-	req->start_time = jiffies;
+	init_request_from_bio(req, bio);
 
 	spin_lock_irq(q->queue_lock);
 	if (elv_queue_empty(q))
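
The hunks above only move code: the request-initialization block formerly inlined in __make_request() becomes the new helper init_request_from_bio(), and the caller shrinks to a single call. For readers outside the kernel tree, the sketch below mirrors that split in plain, standalone C. Everything in it (toy_bio, toy_request, toy_make_request and their fields) is an invented stand-in, not the real block-layer API; only the shape of the refactoring follows the patch.

/*
 * Standalone sketch of the refactoring pattern in the patch above.
 * All names here are hypothetical stand-ins, for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_bio {
	unsigned long	sector;		/* cf. bio->bi_sector */
	unsigned int	nr_sectors;	/* cf. bio_sectors(bio) */
	int		prio;		/* cf. bio_prio(bio) */
};

struct toy_request {
	unsigned long	sector;
	unsigned int	nr_sectors;
	int		prio;
	struct toy_bio	*bio;
};

/* counterpart of init_request_from_bio(): copy per-bio state into the request */
static void toy_init_request_from_bio(struct toy_request *req, struct toy_bio *bio)
{
	req->sector = bio->sector;
	req->nr_sectors = bio->nr_sectors;
	req->prio = bio->prio;
	req->bio = bio;
}

/* counterpart of __make_request(): allocate a request, fill it in, "queue" it */
static int toy_make_request(struct toy_bio *bio)
{
	struct toy_request *req = malloc(sizeof(*req));

	if (!req)
		return -1;

	toy_init_request_from_bio(req, bio);	/* the factored-out step */
	printf("queued: sector %lu, %u sectors, prio %d\n",
	       req->sector, req->nr_sectors, req->prio);
	free(req);
	return 0;
}

int main(void)
{
	struct toy_bio bio = { .sector = 2048, .nr_sectors = 8, .prio = 0 };

	return toy_make_request(&bio);
}

Factoring the initialization out this way lets other request-submission paths in the file reuse the same per-bio setup instead of duplicating it, which is presumably why the patch also adds forward declarations for both functions near the top of the file.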