Diffstat (limited to 'drivers/md/dm.c')
 drivers/md/dm.c | 27 +++++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 23e76fe0d359..724efc63904d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -47,6 +47,7 @@ struct dm_io {
 	atomic_t io_count;
 	struct bio *bio;
 	unsigned long start_time;
+	spinlock_t endio_lock;
 };
 
 /*
@@ -130,7 +131,7 @@ struct mapped_device {
 	/*
	 * A list of ios that arrived while we were suspended.
	 */
-	atomic_t pending;
+	atomic_t pending[2];
 	wait_queue_head_t wait;
 	struct work_struct work;
 	struct bio_list deferred;
@@ -453,13 +454,14 @@ static void start_io_acct(struct dm_io *io)
 {
 	struct mapped_device *md = io->md;
 	int cpu;
+	int rw = bio_data_dir(io->bio);
 
 	io->start_time = jiffies;
 
 	cpu = part_stat_lock();
 	part_round_stats(cpu, &dm_disk(md)->part0);
 	part_stat_unlock();
-	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
+	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
 }
 
 static void end_io_acct(struct dm_io *io)
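
start_io_acct() now keys both counters by direction: bio_data_dir() yields
READ (0) or WRITE (1), so a single index selects md->pending[rw] and the
gendisk's part0.in_flight[rw] together, and one atomic_inc_return() both
counts the I/O and produces the value exported through the disk statistics.
A minimal sketch of the pattern, with a hypothetical helper name:

	/*
	 * Sketch only: one atomic increment per direction, whose return
	 * value doubles as the exported in-flight statistic.
	 */
	static void account_io_start(struct mapped_device *md, struct bio *bio)
	{
		int rw = bio_data_dir(bio);	/* READ = 0, WRITE = 1 */

		dm_disk(md)->part0.in_flight[rw] =
				atomic_inc_return(&md->pending[rw]);
	}
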
@@ -479,8 +481,9 @@ static void end_io_acct(struct dm_io *io)
 	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
-	dm_disk(md)->part0.in_flight = pending =
-		atomic_dec_return(&md->pending);
+	dm_disk(md)->part0.in_flight[rw] = pending =
+		atomic_dec_return(&md->pending[rw]);
+	pending += atomic_read(&md->pending[rw^0x1]);
 
 	/* nudge anyone waiting on suspend queue */
 	if (!pending)
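
end_io_acct() has to decide whether the whole device is idle, not just one
direction, so after dropping this direction's counter it adds the opposite
one back in (rw ^ 0x1 flips READ and WRITE) and nudges the suspend waiter
only when the sum reaches zero. Roughly, under the same assumptions as the
sketch above:

	int pending;
	int rw = bio_data_dir(io->bio);

	/* drop this direction's count; the result feeds the stats too */
	dm_disk(md)->part0.in_flight[rw] = pending =
			atomic_dec_return(&md->pending[rw]);
	/* idle only if the other direction has drained as well */
	pending += atomic_read(&md->pending[rw ^ 0x1]);

	if (!pending)	/* nudge anyone waiting on suspend queue */
		wake_up(&md->wait);
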
@@ -576,8 +579,12 @@ static void dec_pending(struct dm_io *io, int error)
 	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
-	if (error && !(io->error > 0 && __noflush_suspending(md)))
-		io->error = error;
+	if (unlikely(error)) {
+		spin_lock_irqsave(&io->endio_lock, flags);
+		if (!(io->error > 0 && __noflush_suspending(md)))
+			io->error = error;
+		spin_unlock_irqrestore(&io->endio_lock, flags);
+	}
 
 	if (atomic_dec_and_test(&io->io_count)) {
 		if (io->error == DM_ENDIO_REQUEUE) {
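
io->error is shared by every clone of one original bio, and clone
completions can race on different CPUs; the old unlocked check-then-set
could let a plain -EIO overwrite the DM_ENDIO_REQUEUE (> 0) value that a
noflush suspend depends on, so the new per-io endio_lock makes the
read-modify-write atomic. Note the hunk uses flags without showing its
declaration; an unsigned long flags; local in dec_pending() is assumed.
The pattern, sketched:

	unsigned long flags;	/* assumed declared in dec_pending() */

	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		/* push-back (DM_ENDIO_REQUEUE) supersedes ordinary errors */
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

The matching spin_lock_init() lands in __split_and_process_bio() below, so
every dm_io carries an initialized lock before its first completion.
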
@@ -1224,6 +1231,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 	atomic_set(&ci.io->io_count, 1);
 	ci.io->bio = bio;
 	ci.io->md = md;
+	spin_lock_init(&ci.io->endio_lock);
 	ci.sector = bio->bi_sector;
 	ci.sector_count = bio_sectors(bio);
 	if (unlikely(bio_empty_barrier(bio)))
@@ -1785,7 +1793,8 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->disk)
 		goto bad_disk;
 
-	atomic_set(&md->pending, 0);
+	atomic_set(&md->pending[0], 0);
+	atomic_set(&md->pending[1], 0);
 	init_waitqueue_head(&md->wait);
 	INIT_WORK(&md->work, dm_wq_work);
 	init_waitqueue_head(&md->eventq);
@@ -1819,6 +1828,7 @@ static struct mapped_device *alloc_dev(int minor)
 bad_bdev:
 	destroy_workqueue(md->wq);
 bad_thread:
+	del_gendisk(md->disk);
 	put_disk(md->disk);
 bad_disk:
 	blk_cleanup_queue(md->queue);
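
Both bad_thread and bad_bdev are reached only after add_disk() has
registered the disk, so unwinding with put_disk() alone would leak the
registration; del_gendisk() must undo add_disk() before the final
reference is dropped. The pairing assumed here, sketched:

	/*
	 * Setup and teardown mirror each other in reverse:
	 *
	 *	alloc_disk()  <->  put_disk()		reference count
	 *	add_disk()    <->  del_gendisk()	block-layer registration
	 */
	del_gendisk(md->disk);	/* unregister from the block layer */
	put_disk(md->disk);	/* then drop the last reference */
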
@@ -2088,7 +2098,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 			break;
 		}
 		spin_unlock_irqrestore(q->queue_lock, flags);
-	} else if (!atomic_read(&md->pending))
+	} else if (!atomic_read(&md->pending[0]) &&
+		   !atomic_read(&md->pending[1]))
 		break;
 
 	if (interruptible == TASK_INTERRUPTIBLE &&
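
With the counter split, quiescence means both directions have drained, so
dm_wait_for_completion() now tests the two counters together. A rough
sketch of the bio-based arm of the wait loop, with the request-based
branch and waitqueue setup elided ('r' stands in for the return value):

	while (1) {
		set_current_state(interruptible);

		if (!atomic_read(&md->pending[0]) &&
		    !atomic_read(&md->pending[1]))
			break;		/* no reads or writes in flight */

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;	/* caller propagates the signal */
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);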