author	Ingo Molnar <mingo@elte.hu>	2009-03-03 20:29:19 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-03-03 20:29:19 -0500
commit	91d75e209bd59695f0708d66964d928d45b3b2f3 (patch)
tree	32cab1359d951e4193bebb181a0f0319824a2b95 /block
parent	9976b39b5031bbf76f715893cf080b6a17683881 (diff)
parent	8b0e5860cb099d7958d13b00ffbc35ad02735700 (diff)
Merge branch 'x86/core' into core/percpu
Diffstat (limited to 'block')
-rw-r--r--	block/blk-merge.c	94
-rw-r--r--	block/genhd.c	16
2 files changed, 69 insertions, 41 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b92f5b0866b0..a104593e70c3 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -38,72 +38,84 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 	}
 }
 
-void blk_recalc_rq_segments(struct request *rq)
+static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
+					     struct bio *bio,
+					     unsigned int *seg_size_ptr)
 {
-	int nr_phys_segs;
 	unsigned int phys_size;
 	struct bio_vec *bv, *bvprv = NULL;
-	int seg_size;
-	int cluster;
-	struct req_iterator iter;
-	int high, highprv = 1;
-	struct request_queue *q = rq->q;
+	int cluster, i, high, highprv = 1;
+	unsigned int seg_size, nr_phys_segs;
+	struct bio *fbio;
 
-	if (!rq->bio)
-		return;
+	if (!bio)
+		return 0;
 
+	fbio = bio;
 	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	seg_size = 0;
 	phys_size = nr_phys_segs = 0;
-	rq_for_each_segment(bv, rq, iter) {
-		/*
-		 * the trick here is making sure that a high page is never
-		 * considered part of another segment, since that might
-		 * change with the bounce page.
-		 */
-		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
-		if (high || highprv)
-			goto new_segment;
-		if (cluster) {
-			if (seg_size + bv->bv_len > q->max_segment_size)
-				goto new_segment;
-			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
-				goto new_segment;
-			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+	for_each_bio(bio) {
+		bio_for_each_segment(bv, bio, i) {
+			/*
+			 * the trick here is making sure that a high page is
+			 * never considered part of another segment, since that
+			 * might change with the bounce page.
+			 */
+			high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+			if (high || highprv)
 				goto new_segment;
+			if (cluster) {
+				if (seg_size + bv->bv_len > q->max_segment_size)
+					goto new_segment;
+				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+					goto new_segment;
+				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+					goto new_segment;
+
+				seg_size += bv->bv_len;
+				bvprv = bv;
+				continue;
+			}
+new_segment:
+			if (nr_phys_segs == 1 && seg_size >
+			    fbio->bi_seg_front_size)
+				fbio->bi_seg_front_size = seg_size;
 
-			seg_size += bv->bv_len;
+			nr_phys_segs++;
 			bvprv = bv;
-			continue;
+			seg_size = bv->bv_len;
+			highprv = high;
 		}
-new_segment:
-		if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
-			rq->bio->bi_seg_front_size = seg_size;
-
-		nr_phys_segs++;
-		bvprv = bv;
-		seg_size = bv->bv_len;
-		highprv = high;
 	}
 
-	if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+	if (seg_size_ptr)
+		*seg_size_ptr = seg_size;
+
+	return nr_phys_segs;
+}
+
+void blk_recalc_rq_segments(struct request *rq)
+{
+	unsigned int seg_size = 0, phys_segs;
+
+	phys_segs = __blk_recalc_rq_segments(rq->q, rq->bio, &seg_size);
+
+	if (phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
 		rq->bio->bi_seg_front_size = seg_size;
 	if (seg_size > rq->biotail->bi_seg_back_size)
 		rq->biotail->bi_seg_back_size = seg_size;
 
-	rq->nr_phys_segments = nr_phys_segs;
+	rq->nr_phys_segments = phys_segs;
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	struct request rq;
 	struct bio *nxt = bio->bi_next;
-	rq.q = q;
-	rq.bio = rq.biotail = bio;
+
 	bio->bi_next = NULL;
-	blk_recalc_rq_segments(&rq);
+	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, NULL);
 	bio->bi_next = nxt;
-	bio->bi_phys_segments = rq.nr_phys_segments;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
diff --git a/block/genhd.c b/block/genhd.c
index e1eadcc9546a..a9ec910974c1 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -256,6 +256,22 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
 }
 #endif /* CONFIG_PROC_FS */
 
+/**
+ * register_blkdev - register a new block device
+ *
+ * @major: the requested major device number [1..255]. If @major=0, try to
+ *         allocate any unused major number.
+ * @name: the name of the new block device as a zero terminated string
+ *
+ * The @name must be unique within the system.
+ *
+ * The return value depends on the @major input parameter.
+ *  - if a major device number was requested in range [1..255] then the
+ *    function returns zero on success, or a negative error code
+ *  - if any unused major number was requested with @major=0 parameter
+ *    then the return value is the allocated major number in range
+ *    [1..255] or a negative error code otherwise
+ */
 int register_blkdev(unsigned int major, const char *name)
 {
 	struct blk_major_name **n, *p;
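
The kernel-doc added above documents both calling conventions of register_blkdev(): a fixed major in [1..255], or @major=0 to have one allocated. As an illustration only (not part of this commit), a minimal module relying on the @major=0 convention might look like the sketch below; the "exampleblk" name and the module boilerplate are hypothetical.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/genhd.h>

static int example_major;

static int __init exampleblk_init(void)
{
	/* Pass 0 so the kernel allocates any unused major in [1..255]. */
	example_major = register_blkdev(0, "exampleblk");
	if (example_major < 0)
		return example_major;	/* negative error code on failure */

	pr_info("exampleblk: allocated major %d\n", example_major);
	return 0;
}

static void __exit exampleblk_exit(void)
{
	unregister_blkdev(example_major, "exampleblk");
}

module_init(exampleblk_init);
module_exit(exampleblk_exit);
MODULE_LICENSE("GPL");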