aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm.c	| 22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 020a9e1993a7..7cac7220937f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -89,12 +89,13 @@ union map_info *dm_get_mapinfo(struct bio *bio)
 /*
  * Bits for the md->flags field.
  */
-#define DMF_BLOCK_IO 0
+#define DMF_BLOCK_IO_FOR_SUSPEND 0
 #define DMF_SUSPENDED 1
 #define DMF_FROZEN 2
 #define DMF_FREEING 3
 #define DMF_DELETING 4
 #define DMF_NOFLUSH_SUSPENDING 5
+#define DMF_QUEUE_IO_TO_THREAD 6
 
 /*
  * Work processed by per-device workqueue.
@@ -439,7 +440,7 @@ static int queue_io(struct mapped_device *md, struct bio *bio)
 {
 	down_write(&md->io_lock);
 
-	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
+	if (!test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) {
 		up_write(&md->io_lock);
 		return 1;
 	}
@@ -950,10 +951,10 @@ static int dm_request(struct request_queue *q, struct bio *bio)
 	part_stat_unlock();
 
 	/*
-	 * If we're suspended we have to queue
-	 * this io for later.
+	 * If we're suspended or the thread is processing barriers
+	 * we have to queue this io for later.
 	 */
-	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
+	while (test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) {
 		up_read(&md->io_lock);
 
 		if (bio_rw(bio) != READA)
@@ -997,7 +998,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 	struct mapped_device *md = congested_data;
 	struct dm_table *map;
 
-	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
+	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
 		map = dm_get_table(md);
 		if (map) {
 			r = dm_table_any_congested(map, bdi_bits);
@@ -1443,7 +1444,8 @@ static void dm_wq_work(struct work_struct *work)
 		spin_unlock_irq(&md->deferred_lock);
 
 		if (!c) {
-			clear_bit(DMF_BLOCK_IO, &md->flags);
+			clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
+			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
 			break;
 		}
 
@@ -1574,10 +1576,12 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 	}
 
 	/*
-	 * First we set the BLOCK_IO flag so no more ios will be mapped.
+	 * First we set the DMF_QUEUE_IO_TO_THREAD flag so no more ios
+	 * will be mapped.
 	 */
 	down_write(&md->io_lock);
-	set_bit(DMF_BLOCK_IO, &md->flags);
+	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
+	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
 
 	up_write(&md->io_lock);
 