-rw-r--r--	block/blk-merge.c            | 94
-rw-r--r--	block/genhd.c                | 16
-rw-r--r--	drivers/block/cciss.c        | 10
-rw-r--r--	drivers/block/xen-blkfront.c | 30
-rw-r--r--	fs/bio.c                     |  2
-rw-r--r--	include/linux/blkdev.h       |  2
6 files changed, 94 insertions, 60 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b92f5b0866b0..a104593e70c3 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -38,72 +38,84 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 	}
 }
 
-void blk_recalc_rq_segments(struct request *rq)
+static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
+					     struct bio *bio,
+					     unsigned int *seg_size_ptr)
 {
-	int nr_phys_segs;
 	unsigned int phys_size;
 	struct bio_vec *bv, *bvprv = NULL;
-	int seg_size;
-	int cluster;
-	struct req_iterator iter;
-	int high, highprv = 1;
-	struct request_queue *q = rq->q;
+	int cluster, i, high, highprv = 1;
+	unsigned int seg_size, nr_phys_segs;
+	struct bio *fbio;
 
-	if (!rq->bio)
-		return;
+	if (!bio)
+		return 0;
 
+	fbio = bio;
 	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	seg_size = 0;
 	phys_size = nr_phys_segs = 0;
-	rq_for_each_segment(bv, rq, iter) {
-		/*
-		 * the trick here is making sure that a high page is never
-		 * considered part of another segment, since that might
-		 * change with the bounce page.
-		 */
-		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
-		if (high || highprv)
-			goto new_segment;
-		if (cluster) {
-			if (seg_size + bv->bv_len > q->max_segment_size)
-				goto new_segment;
-			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
-				goto new_segment;
-			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+	for_each_bio(bio) {
+		bio_for_each_segment(bv, bio, i) {
+			/*
+			 * the trick here is making sure that a high page is
+			 * never considered part of another segment, since that
+			 * might change with the bounce page.
+			 */
+			high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+			if (high || highprv)
 				goto new_segment;
+			if (cluster) {
+				if (seg_size + bv->bv_len > q->max_segment_size)
+					goto new_segment;
+				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+					goto new_segment;
+				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+					goto new_segment;
+
+				seg_size += bv->bv_len;
+				bvprv = bv;
+				continue;
+			}
+new_segment:
+			if (nr_phys_segs == 1 && seg_size >
+			    fbio->bi_seg_front_size)
+				fbio->bi_seg_front_size = seg_size;
 
-			seg_size += bv->bv_len;
+			nr_phys_segs++;
 			bvprv = bv;
-			continue;
+			seg_size = bv->bv_len;
+			highprv = high;
 		}
-new_segment:
-		if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
-			rq->bio->bi_seg_front_size = seg_size;
-
-		nr_phys_segs++;
-		bvprv = bv;
-		seg_size = bv->bv_len;
-		highprv = high;
 	}
 
-	if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+	if (seg_size_ptr)
+		*seg_size_ptr = seg_size;
+
+	return nr_phys_segs;
+}
+
+void blk_recalc_rq_segments(struct request *rq)
+{
+	unsigned int seg_size = 0, phys_segs;
+
+	phys_segs = __blk_recalc_rq_segments(rq->q, rq->bio, &seg_size);
+
+	if (phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
 		rq->bio->bi_seg_front_size = seg_size;
 	if (seg_size > rq->biotail->bi_seg_back_size)
 		rq->biotail->bi_seg_back_size = seg_size;
 
-	rq->nr_phys_segments = nr_phys_segs;
+	rq->nr_phys_segments = phys_segs;
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	struct request rq;
 	struct bio *nxt = bio->bi_next;
-	rq.q = q;
-	rq.bio = rq.biotail = bio;
+
 	bio->bi_next = NULL;
-	blk_recalc_rq_segments(&rq);
+	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, NULL);
 	bio->bi_next = nxt;
-	bio->bi_phys_segments = rq.nr_phys_segments;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
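
The refactor above lets blk_recount_segments() count one bio's segments without faking up a whole struct request on the stack; both entry points now share __blk_recalc_rq_segments(). For context, the cached result is consumed lazily: merge-path callers check BIO_SEG_VALID and only recount when the bio has been modified. The sketch below is illustrative only; bio_segment_count() is a made-up name (the in-tree equivalent of this check, if memory serves, is bio_phys_segments() in fs/bio.c).

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative sketch: return a bio's physical segment count, recounting
 * only when the cached value has been invalidated. */
static unsigned int bio_segment_count(struct request_queue *q, struct bio *bio)
{
	/* BIO_SEG_VALID is cleared whenever the bio's vector changes. */
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);	/* fills bio->bi_phys_segments */

	return bio->bi_phys_segments;
}
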
diff --git a/block/genhd.c b/block/genhd.c
index e1eadcc9546a..a9ec910974c1 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -256,6 +256,22 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
 }
 #endif /* CONFIG_PROC_FS */
 
+/**
+ * register_blkdev - register a new block device
+ *
+ * @major: the requested major device number [1..255]. If @major=0, try to
+ *         allocate any unused major number.
+ * @name: the name of the new block device as a zero terminated string
+ *
+ * The @name must be unique within the system.
+ *
+ * The return value depends on the @major input parameter.
+ *  - if a major device number was requested in range [1..255] then the
+ *    function returns zero on success, or a negative error code
+ *  - if any unused major number was requested with @major=0 parameter
+ *    then the return value is the allocated major number in range
+ *    [1..255] or a negative error code otherwise
+ */
 int register_blkdev(unsigned int major, const char *name)
 {
 	struct blk_major_name **n, *p;
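
The new kerneldoc describes the two calling modes of register_blkdev(). A minimal usage sketch follows; the driver name "mydrv" and the variable names are illustrative, not taken from this patch.

#include <linux/fs.h>
#include <linux/module.h>

static int mydrv_major;	/* holds the dynamically allocated major */

static int __init mydrv_init(void)
{
	/* @major == 0: ask for any unused major; the allocated number
	 * (or a negative error code) is returned. */
	mydrv_major = register_blkdev(0, "mydrv");
	if (mydrv_major < 0)
		return mydrv_major;

	return 0;
}

static void __exit mydrv_exit(void)
{
	unregister_blkdev(mydrv_major, "mydrv");
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");

Passing a fixed, nonzero major instead returns zero on success, exactly as the new comment states.
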
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d2cb67b61176..b5a061114630 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3611,11 +3611,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 		schedule_timeout_uninterruptible(30*HZ);
 
 		/* Now try to get the controller to respond to a no-op */
-		for (i=0; i<12; i++) {
+		for (i=0; i<30; i++) {
 			if (cciss_noop(pdev) == 0)
 				break;
-			else
-				printk("cciss: no-op failed%s\n", (i < 11 ? "; re-trying" : ""));
+
+			schedule_timeout_uninterruptible(HZ);
+		}
+		if (i == 30) {
+			printk(KERN_ERR "cciss: controller seems dead\n");
+			return -EBUSY;
 		}
 	}
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 918ef725de41..b6c8ce254359 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -40,6 +40,7 @@
 #include <linux/hdreg.h>
 #include <linux/cdrom.h>
 #include <linux/module.h>
+#include <linux/scatterlist.h>
 
 #include <xen/xenbus.h>
 #include <xen/grant_table.h>
@@ -82,6 +83,7 @@ struct blkfront_info
 	enum blkif_state connected;
 	int ring_ref;
 	struct blkif_front_ring ring;
+	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	unsigned int evtchn, irq;
 	struct request_queue *rq;
 	struct work_struct work;
@@ -204,12 +206,11 @@ static int blkif_queue_request(struct request *req)
 	struct blkfront_info *info = req->rq_disk->private_data;
 	unsigned long buffer_mfn;
 	struct blkif_request *ring_req;
-	struct req_iterator iter;
-	struct bio_vec *bvec;
 	unsigned long id;
 	unsigned int fsect, lsect;
-	int ref;
+	int i, ref;
 	grant_ref_t gref_head;
+	struct scatterlist *sg;
 
 	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
 		return 1;
@@ -238,12 +239,13 @@ static int blkif_queue_request(struct request *req)
 	if (blk_barrier_rq(req))
 		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
 
-	ring_req->nr_segments = 0;
-	rq_for_each_segment(bvec, req, iter) {
-		BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
-		buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
-		fsect = bvec->bv_offset >> 9;
-		lsect = fsect + (bvec->bv_len >> 9) - 1;
+	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
+	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+	for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
+		buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
+		fsect = sg->offset >> 9;
+		lsect = fsect + (sg->length >> 9) - 1;
 		/* install a grant reference. */
 		ref = gnttab_claim_grant_reference(&gref_head);
 		BUG_ON(ref == -ENOSPC);
@@ -254,16 +256,12 @@ static int blkif_queue_request(struct request *req)
 				buffer_mfn,
 				rq_data_dir(req) );
 
-		info->shadow[id].frame[ring_req->nr_segments] =
-			mfn_to_pfn(buffer_mfn);
-
-		ring_req->seg[ring_req->nr_segments] =
+		info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
+		ring_req->seg[i] =
 			(struct blkif_request_segment) {
 				.gref = ref,
 				.first_sect = fsect,
 				.last_sect = lsect };
-
-		ring_req->nr_segments++;
 	}
 
 	info->ring.req_prod_pvt++;
@@ -622,6 +620,8 @@ static int setup_blkring(struct xenbus_device *dev,
 	SHARED_RING_INIT(sring);
 	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
 
+	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
 	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
 	if (err < 0) {
 		free_page((unsigned long)sring);
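
The switch from rq_for_each_segment() to a pre-allocated scatterlist means the block layer now coalesces adjacent bio_vecs before the driver sees them. The sketch below shows the general pattern, not xen-blkfront code: mydrv_map_request(), sg_table and max_segs are illustrative stand-ins for the driver's info->sg and BLKIF_MAX_SEGMENTS_PER_REQUEST.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Illustrative sketch: map a request into a driver-owned scatterlist table
 * and walk the coalesced segments, as blkif_queue_request() now does. */
static unsigned int mydrv_map_request(struct request_queue *q,
				      struct request *req,
				      struct scatterlist *sg_table,
				      unsigned int max_segs)
{
	struct scatterlist *sg;
	unsigned int nsegs, bytes = 0;
	int i;

	/* sg_table must have been initialized once with sg_init_table(),
	 * as setup_blkring() does above. */
	nsegs = blk_rq_map_sg(q, req, sg_table);
	BUG_ON(nsegs > max_segs);

	for_each_sg(sg_table, sg, nsegs, i) {
		/* sg_page(sg), sg->offset and sg->length describe one
		 * physically contiguous chunk; a real driver programs these
		 * into its hardware or shared ring here. */
		bytes += sg->length;
	}

	return bytes;
}

The BUG_ON never fires as long as the driver also caps the queue's segment limits to the same value (blkfront does this elsewhere via the queue setup helpers, if memory serves).
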
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -302,7 +302,7 @@ void bio_init(struct bio *bio)
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
 	struct bio *bio = NULL;
-	void *p;
+	void *uninitialized_var(p);
 
 	if (bs) {
 		p = mempool_alloc(bs->bio_pool, gfp_mask);
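
The fs/bio.c hunk only quiets a false "may be used uninitialized" warning: p is assigned on every path before use, and uninitialized_var() tells the compiler so without changing behavior. For reference (quoted from memory for kernels of this era, not part of this patch), the GCC flavour of the macro is a plain self-assignment:

/* include/linux/compiler-gcc.h (approximate, for reference only) */
#define uninitialized_var(x) x = x
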
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index dcaa0fd84b02..465d6babc847 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -708,6 +708,8 @@ struct req_iterator {
 };
 
 /* This should not be used directly - use rq_for_each_segment */
+#define for_each_bio(_bio)		\
+	for (; _bio; _bio = _bio->bi_next)
 #define __rq_for_each_bio(_bio, rq)	\
 	if ((rq->bio)) \
 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
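
The new for_each_bio() macro is what lets __blk_recalc_rq_segments() in block/blk-merge.c above walk a chained bio list without wrapping it in a struct request. A minimal sketch of the iteration pattern; bio_chain_bytes() is an illustrative name, not an in-tree helper.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative sketch: sum the bytes across a chain of bios using
 * for_each_bio() together with bio_for_each_segment(). */
static unsigned int bio_chain_bytes(struct bio *bio)
{
	struct bio_vec *bv;
	unsigned int bytes = 0;
	int i;

	for_each_bio(bio) {		/* advances bio along ->bi_next */
		bio_for_each_segment(bv, bio, i)
			bytes += bv->bv_len;
	}

	return bytes;
}
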
