author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-03-11 02:10:07 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-03-11 02:10:07 -0400
commit     e14eee56c2280953c6e3d24d5dce42bd90836b81 (patch)
tree       21ab792d9ad6fbbab460058f352a0158f995644e /block
parent     d6ee6f7e4c74d9a0fed7544f4d389bde004651d3 (diff)
parent     99adcd9d67aaf04e28f5ae96df280f236bde4b66 (diff)
Merge commit 'origin/master' into next
Diffstat (limited to 'block')
-rw-r--r--  block/blk-merge.c    |  93
-rw-r--r--  block/blk-timeout.c  |   9
-rw-r--r--  block/blktrace.c     |   2
-rw-r--r--  block/bsg.c          |  17
-rw-r--r--  block/genhd.c        |  24
5 files changed, 92 insertions(+), 53 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b92f5b0866b0..5a244f05360f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -38,72 +38,77 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 	}
 }
 
-void blk_recalc_rq_segments(struct request *rq)
+static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
+					     struct bio *bio)
 {
-	int nr_phys_segs;
 	unsigned int phys_size;
 	struct bio_vec *bv, *bvprv = NULL;
-	int seg_size;
-	int cluster;
-	struct req_iterator iter;
-	int high, highprv = 1;
-	struct request_queue *q = rq->q;
+	int cluster, i, high, highprv = 1;
+	unsigned int seg_size, nr_phys_segs;
+	struct bio *fbio, *bbio;
 
-	if (!rq->bio)
-		return;
+	if (!bio)
+		return 0;
 
+	fbio = bio;
 	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	seg_size = 0;
 	phys_size = nr_phys_segs = 0;
-	rq_for_each_segment(bv, rq, iter) {
-		/*
-		 * the trick here is making sure that a high page is never
-		 * considered part of another segment, since that might
-		 * change with the bounce page.
-		 */
-		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
-		if (high || highprv)
-			goto new_segment;
-		if (cluster) {
-			if (seg_size + bv->bv_len > q->max_segment_size)
-				goto new_segment;
-			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
-				goto new_segment;
-			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+	for_each_bio(bio) {
+		bio_for_each_segment(bv, bio, i) {
+			/*
+			 * the trick here is making sure that a high page is
+			 * never considered part of another segment, since that
+			 * might change with the bounce page.
+			 */
+			high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+			if (high || highprv)
 				goto new_segment;
+			if (cluster) {
+				if (seg_size + bv->bv_len > q->max_segment_size)
+					goto new_segment;
+				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+					goto new_segment;
+				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+					goto new_segment;
 
-			seg_size += bv->bv_len;
-			bvprv = bv;
-			continue;
-		}
+				seg_size += bv->bv_len;
+				bvprv = bv;
+				continue;
+			}
 new_segment:
-		if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
-			rq->bio->bi_seg_front_size = seg_size;
+			if (nr_phys_segs == 1 && seg_size >
+			    fbio->bi_seg_front_size)
+				fbio->bi_seg_front_size = seg_size;
 
-		nr_phys_segs++;
-		bvprv = bv;
-		seg_size = bv->bv_len;
-		highprv = high;
+			nr_phys_segs++;
+			bvprv = bv;
+			seg_size = bv->bv_len;
+			highprv = high;
+		}
+		bbio = bio;
 	}
 
-	if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
-		rq->bio->bi_seg_front_size = seg_size;
-	if (seg_size > rq->biotail->bi_seg_back_size)
-		rq->biotail->bi_seg_back_size = seg_size;
+	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
+		fbio->bi_seg_front_size = seg_size;
+	if (seg_size > bbio->bi_seg_back_size)
+		bbio->bi_seg_back_size = seg_size;
+
+	return nr_phys_segs;
+}
 
-	rq->nr_phys_segments = nr_phys_segs;
+void blk_recalc_rq_segments(struct request *rq)
+{
+	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	struct request rq;
 	struct bio *nxt = bio->bi_next;
-	rq.q = q;
-	rq.bio = rq.biotail = bio;
+
 	bio->bi_next = NULL;
-	blk_recalc_rq_segments(&rq);
+	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
 	bio->bi_next = nxt;
-	bio->bi_phys_segments = rq.nr_phys_segments;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
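
The blk-merge.c change is a stack-footprint fix: blk_recount_segments() used to fabricate an entire struct request on the stack purely so it could reuse blk_recalc_rq_segments(), and the new __blk_recalc_rq_segments() walks the bio chain directly instead, leaving both exported functions as thin wrappers. A standalone C sketch of the same refactoring pattern, with hypothetical names (not kernel code):

#include <stdio.h>

struct seg { unsigned int len; struct seg *next; };	/* stands in for a bio segment */
struct req { struct seg *head; unsigned int nr_segs; };

/* worker operates on the list itself, so callers need not own a 'req' */
static unsigned int count_segs(const struct seg *s)
{
	unsigned int n = 0;

	for (; s; s = s->next)
		n++;
	return n;
}

static void recalc_req(struct req *r)
{
	/* thin wrapper: no dummy container object needed anywhere */
	r->nr_segs = count_segs(r->head);
}

int main(void)
{
	struct seg c = { 512, NULL }, b = { 1024, &c }, a = { 4096, &b };
	struct req r = { &a, 0 };

	recalc_req(&r);
	printf("%u segments\n", r.nr_segs);	/* prints "3 segments" */
	return 0;
}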
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a09535377a94..bbbdc4b8ccf2 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -209,12 +209,19 @@ void blk_abort_queue(struct request_queue *q)
 {
 	unsigned long flags;
 	struct request *rq, *tmp;
+	LIST_HEAD(list);
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	elv_abort_queue(q);
 
-	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
+	/*
+	 * Splice entries to local list, to avoid deadlocking if entries
+	 * get readded to the timeout list by error handling
+	 */
+	list_splice_init(&q->timeout_list, &list);
+
+	list_for_each_entry_safe(rq, tmp, &list, timeout_list)
 		blk_abort_request(rq);
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
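
The blk-timeout.c fix is the splice-then-iterate idiom: while holding the queue lock, the timeout list is moved wholesale onto a list local to the function, so any request that error handling re-adds lands on the (now empty) q->timeout_list rather than on the list being walked. A standalone sketch of the idiom using a plain singly linked list, hypothetical names (not kernel code):

#include <stdio.h>

struct node { int id; struct node *next; };

static struct node *pending;		/* stands in for q->timeout_list */

static void abort_one(struct node *n)
{
	printf("aborting %d\n", n->id);
	if (n->id == 2) {		/* simulate error handling re-adding */
		n->next = pending;	/* goes back onto 'pending', not onto */
		pending = n;		/* the private list we are walking    */
	}
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *local, *n, *tmp;

	pending = &a;

	local = pending;		/* splice: steal the whole list... */
	pending = NULL;			/* ...and reinitialise the original */

	for (n = local; n; n = tmp) {	/* safe walk over the private copy */
		tmp = n->next;
		abort_one(n);
	}
	return 0;			/* terminates even though 2 was re-added */
}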
diff --git a/block/blktrace.c b/block/blktrace.c
index 39cc3bfe56e4..7cf9d1ff45a0 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -142,7 +142,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 
 	what |= ddir_act[rw & WRITE];
 	what |= MASK_TC_BIT(rw, BARRIER);
-	what |= MASK_TC_BIT(rw, SYNC);
+	what |= MASK_TC_BIT(rw, SYNCIO);
 	what |= MASK_TC_BIT(rw, AHEAD);
 	what |= MASK_TC_BIT(rw, META);
 	what |= MASK_TC_BIT(rw, DISCARD);
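
The blktrace.c hunk only tracks a flag rename (BIO_RW_SYNC became BIO_RW_SYNCIO); each MASK_TC_BIT() call around it relocates one request-flag bit into the matching trace-category bit position. A standalone sketch of that bit relocation, with made-up bit positions (not the kernel macro):

#include <stdio.h>

enum { RW_SYNCIO_BIT = 1, RW_META_BIT = 4 };	/* request flag bits */
enum { TC_SYNCIO_BIT = 9, TC_META_BIT = 12 };	/* trace category bits */

/* pick one bit out of 'rw' and shift it to its trace-category position */
#define MAP_BIT(rw, from, to) \
	(((rw) & (1u << (from))) << ((to) - (from)))

int main(void)
{
	unsigned int rw = (1u << RW_SYNCIO_BIT) | (1u << RW_META_BIT);
	unsigned int what = 0;

	what |= MAP_BIT(rw, RW_SYNCIO_BIT, TC_SYNCIO_BIT);
	what |= MAP_BIT(rw, RW_META_BIT, TC_META_BIT);
	printf("what = 0x%x\n", what);	/* 0x1200: both bits relocated */
	return 0;
}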
diff --git a/block/bsg.c b/block/bsg.c
index d414bb5607e8..0ce8806dd0c1 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -244,7 +244,8 @@ bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
  * map sg_io_v4 to a request.
  */
 static struct request *
-bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
+	    u8 *sense)
 {
 	struct request_queue *q = bd->queue;
 	struct request *rq, *next_rq = NULL;
@@ -306,6 +307,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
 		if (ret)
 			goto out;
 	}
+
+	rq->sense = sense;
+	rq->sense_len = 0;
+
 	return rq;
 out:
 	if (rq->cmd != rq->__cmd)
@@ -348,9 +353,6 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
 static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
 			    struct bsg_command *bc, struct request *rq)
 {
-	rq->sense = bc->sense;
-	rq->sense_len = 0;
-
 	/*
 	 * add bc command to busy queue and submit rq for io
 	 */
@@ -419,7 +421,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 {
 	int ret = 0;
 
-	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
+	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
 	/*
 	 * fill in all the output members
 	 */
@@ -635,7 +637,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
 	/*
 	 * get a request, fill in the blanks, and add to request queue
 	 */
-	rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm);
+	rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
 	if (IS_ERR(rq)) {
 		ret = PTR_ERR(rq);
 		rq = NULL;
@@ -922,11 +924,12 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		struct request *rq;
 		struct bio *bio, *bidi_bio = NULL;
 		struct sg_io_v4 hdr;
+		u8 sense[SCSI_SENSE_BUFFERSIZE];
 
 		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
 			return -EFAULT;
 
-		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE);
+		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);
 
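
The bsg.c change moves sense-buffer setup from bsg_add_command() into bsg_map_hdr(), which now takes the buffer from its caller: __bsg_write() passes the per-command bc->sense, while bsg_ioctl() supplies an on-stack array, giving the synchronous SG_IO path sense data without needing a struct bsg_command. A standalone sketch of the caller-supplied-buffer pattern, hypothetical names and sizes (not kernel code):

#include <stdio.h>
#include <string.h>

#define SENSE_SIZE 96

struct fake_req {
	unsigned char *sense;		/* points at the caller's storage */
	unsigned int sense_len;
};

static void map_hdr(struct fake_req *rq, unsigned char *sense)
{
	rq->sense = sense;		/* attach, never allocate */
	rq->sense_len = 0;
}

int main(void)
{
	unsigned char sense[SENSE_SIZE];	/* on stack, as in bsg_ioctl() */
	struct fake_req rq;

	memset(sense, 0, sizeof(sense));
	map_hdr(&rq, sense);
	printf("sense attached, %u bytes valid\n", rq.sense_len);
	return 0;
}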
diff --git a/block/genhd.c b/block/genhd.c
index 397960cf26af..a9ec910974c1 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -256,6 +256,22 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
 }
 #endif /* CONFIG_PROC_FS */
 
+/**
+ * register_blkdev - register a new block device
+ *
+ * @major: the requested major device number [1..255]. If @major=0, try to
+ *         allocate any unused major number.
+ * @name: the name of the new block device as a zero terminated string
+ *
+ * The @name must be unique within the system.
+ *
+ * The return value depends on the @major input parameter.
+ *  - if a major device number was requested in range [1..255] then the
+ *    function returns zero on success, or a negative error code
+ *  - if any unused major number was requested with @major=0 parameter
+ *    then the return value is the allocated major number in range
+ *    [1..255] or a negative error code otherwise
+ */
 int register_blkdev(unsigned int major, const char *name)
 {
 	struct blk_major_name **n, *p;
@@ -1087,6 +1103,14 @@ dev_t blk_lookup_devt(const char *name, int partno)
 		if (strcmp(dev_name(dev), name))
 			continue;
 
+		if (partno < disk->minors) {
+			/* We need to return the right devno, even
+			 * if the partition doesn't exist yet.
+			 */
+			devt = MKDEV(MAJOR(dev->devt),
+				     MINOR(dev->devt) + partno);
+			break;
+		}
 		part = disk_get_part(disk, partno);
 		if (part) {
 			devt = part_devt(part);
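
The first genhd.c hunk adds kernel-doc for register_blkdev(); a minimal module-init sketch of the documented calling convention follows ("mydev" is a made-up driver name, error handling trimmed):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int major;

static int __init mydev_init(void)
{
	/* @major=0: ask for any unused major; it is returned on success */
	major = register_blkdev(0, "mydev");
	if (major < 0)
		return major;
	printk(KERN_INFO "mydev: got major %d\n", major);
	return 0;
}

static void __exit mydev_exit(void)
{
	unregister_blkdev(major, "mydev");
}

module_init(mydev_init);
module_exit(mydev_exit);
MODULE_LICENSE("GPL");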