author     Tejun Heo <htejun@gmail.com>  2006-01-06 03:49:58 -0500
committer  Jens Axboe <axboe@suse.de>   2006-01-06 03:49:58 -0500
commit     52d9e675361261a1eb1716b02222ec6177ec342b
tree       a9ed62b6fe9b6622b7c42249e983136f38f75255 /block/ll_rw_blk.c
parent     8ffdc6550c47f75ca4e6c9f30a2a89063e035cf2
[BLOCK] ll_rw_blk: separate out bio init part from __make_request
Separate out the bio initialization part from __make_request. It
will be used by the following blk_ordered reimplementation.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
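For context, the value of the split is that the request field setup becomes callable from outside __make_request. Below is a minimal sketch of how a later caller might reuse the helper; setup_request_for_bio() is a hypothetical name for illustration, not part of this patch or of the kernel. One real constraint is visible in the patch itself: init_request_from_bio() reads req->q (in the bio_phys_segments()/bio_hw_segments() calls), so the queue pointer must be set before calling it.

/*
 * Hypothetical caller, for illustration only (not in this patch):
 * any path that needs a struct request built from a bio can now use
 * the shared helper instead of open-coding ~25 lines of field setup.
 */
static void setup_request_for_bio(request_queue_t *q, struct request *rq,
                                  struct bio *bio)
{
        /* the helper dereferences req->q for the segment counts */
        rq->q = q;
        /* shared initialization extracted by this patch */
        init_request_from_bio(rq, bio);
}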
Diffstat (limited to 'block/ll_rw_blk.c')
 -rw-r--r--  block/ll_rw_blk.c | 62
 1 file changed, 33 insertions(+), 29 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8b1ae69bc5ac..65c4efc02adf 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -36,6 +36,8 @@
 static void blk_unplug_work(void *data);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void init_request_from_bio(struct request *req, struct bio *bio);
+static int __make_request(request_queue_t *q, struct bio *bio);
 
 /*
  * For the allocated request tables
@@ -1667,8 +1669,6 @@ static int blk_init_free_list(request_queue_t *q)
 	return 0;
 }
 
-static int __make_request(request_queue_t *, struct bio *);
-
 request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 {
 	return blk_alloc_queue_node(gfp_mask, -1);
@@ -2659,6 +2659,36 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
 
 EXPORT_SYMBOL(blk_attempt_remerge);
 
+static void init_request_from_bio(struct request *req, struct bio *bio)
+{
+	req->flags |= REQ_CMD;
+
+	/*
+	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+	 */
+	if (bio_rw_ahead(bio) || bio_failfast(bio))
+		req->flags |= REQ_FAILFAST;
+
+	/*
+	 * REQ_BARRIER implies no merging, but lets make it explicit
+	 */
+	if (unlikely(bio_barrier(bio)))
+		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+
+	req->errors = 0;
+	req->hard_sector = req->sector = bio->bi_sector;
+	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
+	req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
+	req->nr_phys_segments = bio_phys_segments(req->q, bio);
+	req->nr_hw_segments = bio_hw_segments(req->q, bio);
+	req->buffer = bio_data(bio);	/* see ->buffer comment above */
+	req->waiting = NULL;
+	req->bio = req->biotail = bio;
+	req->ioprio = bio_prio(bio);
+	req->rq_disk = bio->bi_bdev->bd_disk;
+	req->start_time = jiffies;
+}
+
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
 	struct request *req;
@@ -2754,33 +2784,7 @@ get_rq:
 	 * We don't worry about that case for efficiency. It won't happen
 	 * often, and the elevators are able to handle it.
 	 */
-
-	req->flags |= REQ_CMD;
-
-	/*
-	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
-	 */
-	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->flags |= REQ_FAILFAST;
-
-	/*
-	 * REQ_BARRIER implies no merging, but lets make it explicit
-	 */
-	if (unlikely(barrier))
-		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
-
-	req->errors = 0;
-	req->hard_sector = req->sector = sector;
-	req->hard_nr_sectors = req->nr_sectors = nr_sectors;
-	req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
-	req->nr_phys_segments = bio_phys_segments(q, bio);
-	req->nr_hw_segments = bio_hw_segments(q, bio);
-	req->buffer = bio_data(bio);	/* see ->buffer comment above */
-	req->waiting = NULL;
-	req->bio = req->biotail = bio;
-	req->ioprio = prio;
-	req->rq_disk = bio->bi_bdev->bd_disk;
-	req->start_time = jiffies;
+	init_request_from_bio(req, bio);
 
 	spin_lock_irq(q->queue_lock);
 	if (elv_queue_empty(q))
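Two observations on the patch as a whole. First, the opening hunk moves __make_request's forward declaration to the top of the file alongside the new helper's declaration; that is what pairs with the second hunk's removal of the mid-file prototype. Second, the block removed from __make_request used locals cached earlier in the function (sector, nr_sectors, cur_nr_sectors, barrier, prio), while init_request_from_bio() rereads the same values through the bio accessors (bio->bi_sector, bio_sectors(), bio_cur_sectors(), bio_barrier(), bio_prio()); the bio is not modified between those points in __make_request, so the refactor is behavior-preserving.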