author	Mikulas Patocka <mpatocka@redhat.com>	2009-04-02 14:55:39 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2009-04-02 14:55:39 -0400
commit	022c261100e15652d720395b17ce76304fb2f97f
tree	cbab417b11242d4561ba45e60b3838f1672013cb /drivers/md/dm.c
parent	401600dfd368305e641d79db16d514f55c084544
dm: merge pushback and deferred bio lists
Merge pushback and deferred lists into one list - use the deferred list
for both deferred and pushed-back bios.

This will be needed for proper support of barrier bios: it is
impossible to support ordering correctly with two lists because the
requests on both lists will get mixed up.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
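To see why ordering cannot be preserved across two lists, here is a
minimal userspace sketch (not kernel code) of the bio_list helpers the
old pushback path relied on - bio_list_add, bio_list_pop and
bio_list_merge_head - with struct item standing in for struct bio.
When bios alternate between the pushback and deferred lists, splicing
the pushback list back at the head reorders them; a single FIFO list
trivially preserves submission order.

/* Simplified re-implementation of the kernel's bio_list operations. */
#include <stdio.h>

struct item {
	int seq;			/* submission order */
	struct item *next;
};

struct list {				/* models struct bio_list: head + tail */
	struct item *head, *tail;
};

/* like bio_list_add(): append at the tail */
static void list_add(struct list *l, struct item *i)
{
	i->next = NULL;
	if (l->tail)
		l->tail->next = i;
	else
		l->head = i;
	l->tail = i;
}

/* like bio_list_pop(): remove from the head */
static struct item *list_pop(struct list *l)
{
	struct item *i = l->head;

	if (i) {
		l->head = i->next;
		if (!l->head)
			l->tail = NULL;
		i->next = NULL;
	}
	return i;
}

/* like bio_list_merge_head(): splice list o in front of list l */
static void list_merge_head(struct list *l, struct list *o)
{
	if (!o->head)
		return;
	if (l->head)
		o->tail->next = l->head;
	else
		l->tail = o->tail;
	l->head = o->head;
	o->head = o->tail = NULL;
}

int main(void)
{
	struct list deferred = { 0 }, pushback = { 0 };
	struct item bios[4] = { { 1 }, { 2 }, { 3 }, { 4 } };
	struct item *c;

	/*
	 * Submission order is 1,2,3,4: bios 1 and 3 are pushed back by
	 * the target, 2 and 4 are deferred, as could happen before this
	 * patch during a noflush suspend.
	 */
	list_add(&pushback, &bios[0]);
	list_add(&deferred, &bios[1]);
	list_add(&pushback, &bios[2]);
	list_add(&deferred, &bios[3]);

	/* what __merge_pushback_list() did on resume */
	list_merge_head(&deferred, &pushback);

	while ((c = list_pop(&deferred)))
		printf("%d ", c->seq);	/* prints "1 3 2 4": order lost */
	printf("\n");
	return 0;
}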
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c	37
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ae21833b270a..f5703727d974 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -102,7 +102,6 @@ union map_info *dm_get_mapinfo(struct bio *bio)
 struct mapped_device {
 	struct rw_semaphore io_lock;
 	struct mutex suspend_lock;
-	spinlock_t pushback_lock;
 	rwlock_t map_lock;
 	atomic_t holders;
 	atomic_t open_count;
@@ -122,7 +121,7 @@ struct mapped_device {
 	wait_queue_head_t wait;
 	struct work_struct work;
 	struct bio_list deferred;
-	struct bio_list pushback;
+	spinlock_t deferred_lock;
 
 	/*
 	 * Processing queue (flush/barriers)
@@ -445,7 +444,9 @@ static int queue_io(struct mapped_device *md, struct bio *bio)
 		return 1;
 	}
 
+	spin_lock_irq(&md->deferred_lock);
 	bio_list_add(&md->deferred, bio);
+	spin_unlock_irq(&md->deferred_lock);
 
 	up_write(&md->io_lock);
 	return 0;	/* deferred successfully */
@@ -529,16 +530,14 @@ static void dec_pending(struct dm_io *io, int error)
 		if (io->error == DM_ENDIO_REQUEUE) {
 			/*
 			 * Target requested pushing back the I/O.
-			 * This must be handled before the sleeper on
-			 * suspend queue merges the pushback list.
 			 */
-			spin_lock_irqsave(&md->pushback_lock, flags);
+			spin_lock_irqsave(&md->deferred_lock, flags);
 			if (__noflush_suspending(md))
-				bio_list_add(&md->pushback, io->bio);
+				bio_list_add(&md->deferred, io->bio);
 			else
 				/* noflush suspend was interrupted. */
 				io->error = -EIO;
-			spin_unlock_irqrestore(&md->pushback_lock, flags);
+			spin_unlock_irqrestore(&md->deferred_lock, flags);
 		}
 
 		end_io_acct(io);
@@ -1096,7 +1095,7 @@ static struct mapped_device *alloc_dev(int minor)
 
 	init_rwsem(&md->io_lock);
 	mutex_init(&md->suspend_lock);
-	spin_lock_init(&md->pushback_lock);
+	spin_lock_init(&md->deferred_lock);
 	rwlock_init(&md->map_lock);
 	atomic_set(&md->holders, 1);
 	atomic_set(&md->open_count, 0);
@@ -1410,25 +1409,21 @@ static void dm_wq_work(struct work_struct *work)
 
 	down_write(&md->io_lock);
 
-	while ((c = bio_list_pop(&md->deferred)))
+next_bio:
+	spin_lock_irq(&md->deferred_lock);
+	c = bio_list_pop(&md->deferred);
+	spin_unlock_irq(&md->deferred_lock);
+
+	if (c) {
 		__split_and_process_bio(md, c);
+		goto next_bio;
+	}
 
 	clear_bit(DMF_BLOCK_IO, &md->flags);
 
 	up_write(&md->io_lock);
 }
 
-static void __merge_pushback_list(struct mapped_device *md)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&md->pushback_lock, flags);
-	clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
-	bio_list_merge_head(&md->deferred, &md->pushback);
-	bio_list_init(&md->pushback);
-	spin_unlock_irqrestore(&md->pushback_lock, flags);
-}
-
 static void dm_queue_flush(struct mapped_device *md)
 {
 	queue_work(md->wq, &md->work);
@@ -1572,7 +1567,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 	remove_wait_queue(&md->wait, &wait);
 
 	if (noflush)
-		__merge_pushback_list(md);
+		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
 	up_write(&md->io_lock);
 
 	/* were we interrupted ? */
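One detail worth noting in the reworked dm_wq_work() loop above:
deferred_lock is taken only around bio_list_pop(), never across
__split_and_process_bio(), which may need to allocate clones and
resubmit I/O and so must not run under a spinlock. The same
pop-then-process idiom in a standalone userspace sketch; a pthread
mutex stands in for the spinlock, and the names struct work, worker()
and process() are illustrative, not taken from the kernel.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	int id;
	struct work *next;
};

static struct work *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Stands in for __split_and_process_bio(): may block, so it must run
 * with the list lock dropped.
 */
static void process(struct work *w)
{
	printf("processing %d\n", w->id);
	free(w);
}

static void worker(void)
{
	struct work *w;

next_item:
	pthread_mutex_lock(&lock);	/* like spin_lock_irq(&md->deferred_lock) */
	w = head;
	if (w)
		head = w->next;
	pthread_mutex_unlock(&lock);

	if (w) {
		process(w);		/* lock is NOT held here */
		goto next_item;
	}
}

int main(void)
{
	/* queue items 1,2,3 (pushed at the head in reverse order) */
	for (int i = 3; i >= 1; i--) {
		struct work *w = malloc(sizeof(*w));
		if (!w)
			return 1;
		w->id = i;
		w->next = head;
		head = w;
	}
	worker();
	return 0;
}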