diff options
| author | Asias He <asias@redhat.com> | 2012-08-02 17:42:03 -0400 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2012-08-02 17:42:03 -0400 |
| commit | 963ab9e5da95c654bb3ab937cc478de4f7088a96 (patch) | |
| tree | 644caf1f513e1f206d4c732044b943c276af9f8d /block | |
| parent | 53362a05ae683e12a20d9ffdf58a88094a0bed9d (diff) | |
block: Introduce __blk_segment_map_sg() helper
Split the mapping code in blk_rq_map_sg() to a helper
__blk_segment_map_sg(), so that other mapping functions, e.g.
blk_bio_map_sg(), can share the code.
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Tejun Heo <tj@kernel.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: virtualization@lists.linux-foundation.org
Suggested-by: Jens Axboe <axboe@kernel.dk>
Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
| -rw-r--r-- | block/blk-merge.c | 80 |
1 file changed, 45 insertions, 35 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c index 160035f54882..576b68e79248 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
| @@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, | |||
| 110 | return 0; | 110 | return 0; |
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | static void | ||
| 114 | __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, | ||
| 115 | struct scatterlist *sglist, struct bio_vec **bvprv, | ||
| 116 | struct scatterlist **sg, int *nsegs, int *cluster) | ||
| 117 | { | ||
| 118 | |||
| 119 | int nbytes = bvec->bv_len; | ||
| 120 | |||
| 121 | if (*bvprv && *cluster) { | ||
| 122 | if ((*sg)->length + nbytes > queue_max_segment_size(q)) | ||
| 123 | goto new_segment; | ||
| 124 | |||
| 125 | if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec)) | ||
| 126 | goto new_segment; | ||
| 127 | if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec)) | ||
| 128 | goto new_segment; | ||
| 129 | |||
| 130 | (*sg)->length += nbytes; | ||
| 131 | } else { | ||
| 132 | new_segment: | ||
| 133 | if (!*sg) | ||
| 134 | *sg = sglist; | ||
| 135 | else { | ||
| 136 | /* | ||
| 137 | * If the driver previously mapped a shorter | ||
| 138 | * list, we could see a termination bit | ||
| 139 | * prematurely unless it fully inits the sg | ||
| 140 | * table on each mapping. We KNOW that there | ||
| 141 | * must be more entries here or the driver | ||
| 142 | * would be buggy, so force clear the | ||
| 143 | * termination bit to avoid doing a full | ||
| 144 | * sg_init_table() in drivers for each command. | ||
| 145 | */ | ||
| 146 | (*sg)->page_link &= ~0x02; | ||
| 147 | *sg = sg_next(*sg); | ||
| 148 | } | ||
| 149 | |||
| 150 | sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); | ||
| 151 | (*nsegs)++; | ||
| 152 | } | ||
| 153 | *bvprv = bvec; | ||
| 154 | } | ||
| 155 | |||
| 113 | /* | 156 | /* |
| 114 | * map a request to scatterlist, return number of sg entries setup. Caller | 157 | * map a request to scatterlist, return number of sg entries setup. Caller |
| 115 | * must make sure sg can hold rq->nr_phys_segments entries | 158 | * must make sure sg can hold rq->nr_phys_segments entries |
| @@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, | |||
| 131 | bvprv = NULL; | 174 | bvprv = NULL; |
| 132 | sg = NULL; | 175 | sg = NULL; |
| 133 | rq_for_each_segment(bvec, rq, iter) { | 176 | rq_for_each_segment(bvec, rq, iter) { |
| 134 | int nbytes = bvec->bv_len; | 177 | __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, |
| 135 | 178 | &nsegs, &cluster); | |
| 136 | if (bvprv && cluster) { | ||
| 137 | if (sg->length + nbytes > queue_max_segment_size(q)) | ||
| 138 | goto new_segment; | ||
| 139 | |||
| 140 | if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) | ||
| 141 | goto new_segment; | ||
| 142 | if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) | ||
| 143 | goto new_segment; | ||
| 144 | |||
| 145 | sg->length += nbytes; | ||
| 146 | } else { | ||
| 147 | new_segment: | ||
| 148 | if (!sg) | ||
| 149 | sg = sglist; | ||
| 150 | else { | ||
| 151 | /* | ||
| 152 | * If the driver previously mapped a shorter | ||
| 153 | * list, we could see a termination bit | ||
| 154 | * prematurely unless it fully inits the sg | ||
| 155 | * table on each mapping. We KNOW that there | ||
| 156 | * must be more entries here or the driver | ||
| 157 | * would be buggy, so force clear the | ||
| 158 | * termination bit to avoid doing a full | ||
| 159 | * sg_init_table() in drivers for each command. | ||
| 160 | */ | ||
| 161 | sg->page_link &= ~0x02; | ||
| 162 | sg = sg_next(sg); | ||
| 163 | } | ||
| 164 | |||
| 165 | sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset); | ||
| 166 | nsegs++; | ||
| 167 | } | ||
| 168 | bvprv = bvec; | ||
| 169 | } /* segments in rq */ | 179 | } /* segments in rq */ |
| 170 | 180 | ||
| 171 | 181 | ||
