author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-14 20:55:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-14 20:55:15 -0400
commit    355bbd8cb82e60a592f6cd86ce6dbe5677615cf4
tree      23678e50ad4687f1656edc972388ee8014e7b89d /drivers/md
parent    39695224bd84dc4be29abad93a0ec232a16fc519
parent    746cd1e7e4a555ddaee53b19a46e05c9c61eaf09
Merge branch 'for-2.6.32' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.32' of git://git.kernel.dk/linux-2.6-block: (29 commits)
  block: use blkdev_issue_discard in blk_ioctl_discard
  Make DISCARD_BARRIER and DISCARD_NOBARRIER writes instead of reads
  block: don't assume device has a request list backing in nr_requests store
  block: Optimal I/O limit wrapper
  cfq: choose a new next_req when a request is dispatched
  Seperate read and write statistics of in_flight requests
  aoe: end barrier bios with EOPNOTSUPP
  block: trace bio queueing trial only when it occurs
  block: enable rq CPU completion affinity by default
  cfq: fix the log message after dispatched a request
  block: use printk_once
  cciss: memory leak in cciss_init_one()
  splice: update mtime and atime on files
  block: make blk_iopoll_prep_sched() follow normal 0/1 return convention
  cfq-iosched: get rid of must_alloc flag
  block: use interrupts disabled version of raise_softirq_irqoff()
  block: fix comment in blk-iopoll.c
  block: adjust default budget for blk-iopoll
  block: fix long lines in block/blk-iopoll.c
  block: add blk-iopoll, a NAPI like approach for block devices
  ...
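Most of the drivers/md churn below is mechanical: the per-flag bio test
helpers (bio_barrier(), bio_sync(), bio_rw_ahead()) are replaced by a single
bio_rw_flagged() predicate. As a minimal sketch of the shape of that helper,
assuming the 2.6.32-era bi_rw flag layout (paraphrased from
include/linux/bio.h, not part of this diff):

	static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
	{
		/* each BIO_RW_* value names a bit position in bio->bi_rw */
		return (bio->bi_rw & (1 << flag)) != 0;
	}

	/* roughly what the old one-off helpers expanded to: */
	/* bio_barrier(bio)  -> bio_rw_flagged(bio, BIO_RW_BARRIER) */
	/* bio_sync(bio)     -> bio_rw_flagged(bio, BIO_RW_SYNCIO)  */
	/* bio_rw_ahead(bio) -> bio_rw_flagged(bio, BIO_RW_AHEAD)   */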
Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/dm-raid1.c   |  2
-rw-r--r--   drivers/md/dm-stripe.c  |  4
-rw-r--r--   drivers/md/dm.c         | 28
-rw-r--r--   drivers/md/linear.c     |  2
-rw-r--r--   drivers/md/multipath.c  |  4
-rw-r--r--   drivers/md/raid0.c      |  2
-rw-r--r--   drivers/md/raid1.c      | 14
-rw-r--r--   drivers/md/raid10.c     |  6
-rw-r--r--   drivers/md/raid5.c      |  2
9 files changed, 35 insertions, 29 deletions
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 33f179e66bf5..cc9dc79b0784 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1129,7 +1129,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	if (error == -EOPNOTSUPP)
 		goto out;
 
-	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
 		goto out;
 
 	if (unlikely(error)) {
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 3e563d251733..e0efc1adcaff 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -285,7 +285,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 	if (!error)
 		return 0; /* I/O complete */
 
-	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
 		return error;
 
 	if (error == -EOPNOTSUPP)
@@ -336,7 +336,7 @@ static void stripe_io_hints(struct dm_target *ti,
 	unsigned chunk_size = (sc->chunk_mask + 1) << 9;
 
 	blk_limits_io_min(limits, chunk_size);
-	limits->io_opt = chunk_size * sc->stripes;
+	blk_limits_io_opt(limits, chunk_size * sc->stripes);
 }
 
 static struct target_type stripe_target = {
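The stripe_io_hints() hunk above stops poking limits->io_opt directly and
goes through the wrapper added by "block: Optimal I/O limit wrapper" in this
merge, so stacking drivers and DM targets share one entry point. A sketch of
that wrapper as a plain setter, paraphrased from block/blk-settings.c rather
than quoted from this diff:

	/* advertise the device's optimal (e.g. full-stripe) I/O size */
	void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
	{
		limits->io_opt = opt;
	}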
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b4845b14740d..eee28fac210c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -130,7 +130,7 @@ struct mapped_device {
 	/*
 	 * A list of ios that arrived while we were suspended.
 	 */
-	atomic_t pending;
+	atomic_t pending[2];
 	wait_queue_head_t wait;
 	struct work_struct work;
 	struct bio_list deferred;
@@ -453,13 +453,14 @@ static void start_io_acct(struct dm_io *io)
 {
 	struct mapped_device *md = io->md;
 	int cpu;
+	int rw = bio_data_dir(io->bio);
 
 	io->start_time = jiffies;
 
 	cpu = part_stat_lock();
 	part_round_stats(cpu, &dm_disk(md)->part0);
 	part_stat_unlock();
-	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
+	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -479,8 +480,9 @@ static void end_io_acct(struct dm_io *io)
 	 * After this is decremented the bio must not be touched if it is
 	 * a barrier.
 	 */
-	dm_disk(md)->part0.in_flight = pending =
-		atomic_dec_return(&md->pending);
+	dm_disk(md)->part0.in_flight[rw] = pending =
+		atomic_dec_return(&md->pending[rw]);
+	pending += atomic_read(&md->pending[rw^0x1]);
 
 	/* nudge anyone waiting on suspend queue */
 	if (!pending)
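Following "Seperate read and write statistics of in_flight requests", reads
and writes in flight are now counted separately, so the suspend wakeup may
only fire once both directions have drained; the rw^0x1 read above folds the
opposite direction back into the total. A rough sketch of the invariant the
hunk maintains, with illustrative variable names rather than a verbatim copy:

	int rw = bio_data_dir(bio);	/* READ == 0, WRITE == 1 */
	int other = rw ^ 0x1;		/* index of the opposite direction */

	pending  = atomic_dec_return(&md->pending[rw]);
	pending += atomic_read(&md->pending[other]);
	if (!pending)			/* nothing in flight either way */
		wake_up(&md->wait);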
@@ -586,7 +588,7 @@ static void dec_pending(struct dm_io *io, int error)
 			 */
 			spin_lock_irqsave(&md->deferred_lock, flags);
 			if (__noflush_suspending(md)) {
-				if (!bio_barrier(io->bio))
+				if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
 					bio_list_add_head(&md->deferred,
 							  io->bio);
 			} else
@@ -598,7 +600,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;
 
-		if (bio_barrier(bio)) {
+		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -1209,7 +1211,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_barrier(bio))
+		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1321,7 +1323,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_barrier(bio))) {
+	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		up_read(&md->io_lock);
 
 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1344,7 +1346,7 @@ static int dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -1785,7 +1787,8 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->disk)
 		goto bad_disk;
 
-	atomic_set(&md->pending, 0);
+	atomic_set(&md->pending[0], 0);
+	atomic_set(&md->pending[1], 0);
 	init_waitqueue_head(&md->wait);
 	INIT_WORK(&md->work, dm_wq_work);
 	init_waitqueue_head(&md->eventq);
@@ -2088,7 +2091,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 				break;
 			}
 			spin_unlock_irqrestore(q->queue_lock, flags);
-		} else if (!atomic_read(&md->pending))
+		} else if (!atomic_read(&md->pending[0]) &&
+			   !atomic_read(&md->pending[1]))
 			break;
 
 		if (interruptible == TASK_INTERRUPTIBLE &&
@@ -2164,7 +2168,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_barrier(c))
+			if (bio_rw_flagged(c, BIO_RW_BARRIER))
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 5fe39c2a3d2b..ea4842905444 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -288,7 +288,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 	sector_t start_sector;
 	int cpu;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 7140909f6662..89e76819f61f 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -90,7 +90,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
 	if (uptodate)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!bio_rw_ahead(bio)) {
+	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -144,7 +144,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 	const int rw = bio_data_dir(bio);
 	int cpu;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 898e2bdfee47..f845ed98fec9 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -448,7 +448,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
 	const int rw = bio_data_dir(bio);
 	int cpu;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 8726fd7ebce5..ff7ed3335995 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -782,8 +782,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const int do_sync = bio_sync(bio);
-	int cpu, do_barriers;
+	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	int cpu;
+	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;
 
 	/*
@@ -797,7 +798,8 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
 	md_write_start(mddev, bio); /* wait on superblock update early */
 
-	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
+	if (unlikely(!mddev->barriers_work &&
+		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		if (rw == WRITE)
 			md_write_end(mddev);
 		bio_endio(bio, -EOPNOTSUPP);
@@ -925,7 +927,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);
 
-	do_barriers = bio_barrier(bio);
+	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
 	if (do_barriers)
 		set_bit(R1BIO_Barrier, &r1_bio->state);
 
@@ -1600,7 +1602,7 @@ static void raid1d(mddev_t *mddev)
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const int do_sync = bio_sync(r1_bio->master_bio);
+			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1654,7 +1656,7 @@ static void raid1d(mddev_t *mddev)
 				       (unsigned long long)r1_bio->sector);
 				raid_end_bio_io(r1_bio);
 			} else {
-				const int do_sync = bio_sync(r1_bio->master_bio);
+				const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
 				r1_bio->bios[r1_bio->read_disk] =
 					mddev->ro ? IO_BLOCKED : NULL;
 				r1_bio->read_disk = disk;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3d9020cf6f6e..d0a2152e064f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -796,12 +796,12 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
-	const int do_sync = bio_sync(bio);
+	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
 	struct bio_list bl;
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -1610,7 +1610,7 @@ static void raid10d(mddev_t *mddev)
 				raid_end_bio_io(r10_bio);
 				bio_put(bio);
 			} else {
-				const int do_sync = bio_sync(r10_bio->master_bio);
+				const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
 				bio_put(bio);
 				rdev = conf->mirrors[mirror].rdev;
 				if (printk_ratelimit())
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b8a2c5dc67ba..826eb3467357 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3606,7 +3606,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 	const int rw = bio_data_dir(bi);
 	int cpu, remaining;
 
-	if (unlikely(bio_barrier(bi))) {
+	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
 		bio_endio(bi, -EOPNOTSUPP);
 		return 0;
 	}