aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-11-24 15:53:11 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-11-24 15:53:11 -0500
commit6ffeba9607343f15303a399bc402a538800d89d9 (patch)
treeb06dba2c9f1239daacd4e41e4b6de89e09802d72
parent81b1a832d79749058863cffe2c0ed4ef40f6e6ec (diff)
parent0fcb04d59351f790efb8da18edefd6ab4d9bbf3b (diff)
Merge tag 'dm-4.4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:

 "Two fixes for 4.4-rc1's DM ioctl changes that introduced the
  potential for infinite recursion on ioctl (with DM multipath).

  And four stable fixes:

   - A DM thin-provisioning fix to restore 'error_if_no_space' setting
     when a thin-pool is made writable again (after having been out of
     space).

   - A DM thin-provisioning fix to properly advertise discard support
     for thin volumes that are stacked on a thin-pool whose underlying
     data device doesn't support discards.

   - A DM ioctl fix to allow ctrl-c to break out of an ioctl retry loop
     when DM multipath is configured to 'queue_if_no_path'.

   - A DM crypt fix for a possible hang on dm-crypt device removal"

* tag 'dm-4.4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm thin: fix regression in advertised discard limits
  dm crypt: fix a possible hang due to race condition on exit
  dm mpath: fix infinite recursion in ioctl when no paths and !queue_if_no_path
  dm: do not reuse dm_blk_ioctl block_device input as local variable
  dm: fix ioctl retry termination with signal
  dm thin: restore requested 'error_if_no_space' setting on OODS to WRITE transition
-rw-r--r--drivers/md/dm-crypt.c22
-rw-r--r--drivers/md/dm-mpath.c30
-rw-r--r--drivers/md/dm-thin.c6
-rw-r--r--drivers/md/dm.c7
4 files changed, 36 insertions, 29 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 917d47e290ae..3147c8d09ea8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -112,7 +112,8 @@ struct iv_tcw_private {
112 * and encrypts / decrypts at the same time. 112 * and encrypts / decrypts at the same time.
113 */ 113 */
114enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, 114enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; 115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
116 DM_CRYPT_EXIT_THREAD};
116 117
117/* 118/*
118 * The fields in here must be read only after initialization. 119 * The fields in here must be read only after initialization.
@@ -1203,20 +1204,18 @@ continue_locked:
1203 if (!RB_EMPTY_ROOT(&cc->write_tree)) 1204 if (!RB_EMPTY_ROOT(&cc->write_tree))
1204 goto pop_from_list; 1205 goto pop_from_list;
1205 1206
1207 if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
1208 spin_unlock_irq(&cc->write_thread_wait.lock);
1209 break;
1210 }
1211
1206 __set_current_state(TASK_INTERRUPTIBLE); 1212 __set_current_state(TASK_INTERRUPTIBLE);
1207 __add_wait_queue(&cc->write_thread_wait, &wait); 1213 __add_wait_queue(&cc->write_thread_wait, &wait);
1208 1214
1209 spin_unlock_irq(&cc->write_thread_wait.lock); 1215 spin_unlock_irq(&cc->write_thread_wait.lock);
1210 1216
1211 if (unlikely(kthread_should_stop())) {
1212 set_task_state(current, TASK_RUNNING);
1213 remove_wait_queue(&cc->write_thread_wait, &wait);
1214 break;
1215 }
1216
1217 schedule(); 1217 schedule();
1218 1218
1219 set_task_state(current, TASK_RUNNING);
1220 spin_lock_irq(&cc->write_thread_wait.lock); 1219 spin_lock_irq(&cc->write_thread_wait.lock);
1221 __remove_wait_queue(&cc->write_thread_wait, &wait); 1220 __remove_wait_queue(&cc->write_thread_wait, &wait);
1222 goto continue_locked; 1221 goto continue_locked;
@@ -1531,8 +1530,13 @@ static void crypt_dtr(struct dm_target *ti)
1531 if (!cc) 1530 if (!cc)
1532 return; 1531 return;
1533 1532
1534 if (cc->write_thread) 1533 if (cc->write_thread) {
1534 spin_lock_irq(&cc->write_thread_wait.lock);
1535 set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
1536 wake_up_locked(&cc->write_thread_wait);
1537 spin_unlock_irq(&cc->write_thread_wait.lock);
1535 kthread_stop(cc->write_thread); 1538 kthread_stop(cc->write_thread);
1539 }
1536 1540
1537 if (cc->io_queue) 1541 if (cc->io_queue)
1538 destroy_workqueue(cc->io_queue); 1542 destroy_workqueue(cc->io_queue);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aaa6caa46a9f..cfa29f574c2a 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1537,32 +1537,34 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
1537 struct block_device **bdev, fmode_t *mode) 1537 struct block_device **bdev, fmode_t *mode)
1538{ 1538{
1539 struct multipath *m = ti->private; 1539 struct multipath *m = ti->private;
1540 struct pgpath *pgpath;
1541 unsigned long flags; 1540 unsigned long flags;
1542 int r; 1541 int r;
1543 1542
1544 r = 0;
1545
1546 spin_lock_irqsave(&m->lock, flags); 1543 spin_lock_irqsave(&m->lock, flags);
1547 1544
1548 if (!m->current_pgpath) 1545 if (!m->current_pgpath)
1549 __choose_pgpath(m, 0); 1546 __choose_pgpath(m, 0);
1550 1547
1551 pgpath = m->current_pgpath; 1548 if (m->current_pgpath) {
1552 1549 if (!m->queue_io) {
1553 if (pgpath) { 1550 *bdev = m->current_pgpath->path.dev->bdev;
1554 *bdev = pgpath->path.dev->bdev; 1551 *mode = m->current_pgpath->path.dev->mode;
1555 *mode = pgpath->path.dev->mode; 1552 r = 0;
1553 } else {
1554 /* pg_init has not started or completed */
1555 r = -ENOTCONN;
1556 }
1557 } else {
1558 /* No path is available */
1559 if (m->queue_if_no_path)
1560 r = -ENOTCONN;
1561 else
1562 r = -EIO;
1556 } 1563 }
1557 1564
1558 if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
1559 r = -ENOTCONN;
1560 else if (!*bdev)
1561 r = -EIO;
1562
1563 spin_unlock_irqrestore(&m->lock, flags); 1565 spin_unlock_irqrestore(&m->lock, flags);
1564 1566
1565 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 1567 if (r == -ENOTCONN) {
1566 spin_lock_irqsave(&m->lock, flags); 1568 spin_lock_irqsave(&m->lock, flags);
1567 if (!m->current_pg) { 1569 if (!m->current_pg) {
1568 /* Path status changed, redo selection */ 1570 /* Path status changed, redo selection */
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 3897b90bd462..63903a5a5d9e 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2432,6 +2432,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2432 case PM_WRITE: 2432 case PM_WRITE:
2433 if (old_mode != new_mode) 2433 if (old_mode != new_mode)
2434 notify_of_pool_mode_change(pool, "write"); 2434 notify_of_pool_mode_change(pool, "write");
2435 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
2435 dm_pool_metadata_read_write(pool->pmd); 2436 dm_pool_metadata_read_write(pool->pmd);
2436 pool->process_bio = process_bio; 2437 pool->process_bio = process_bio;
2437 pool->process_discard = process_discard_bio; 2438 pool->process_discard = process_discard_bio;
@@ -4249,10 +4250,9 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4249{ 4250{
4250 struct thin_c *tc = ti->private; 4251 struct thin_c *tc = ti->private;
4251 struct pool *pool = tc->pool; 4252 struct pool *pool = tc->pool;
4252 struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
4253 4253
4254 if (!pool_limits->discard_granularity) 4254 if (!pool->pf.discard_enabled)
4255 return; /* pool's discard support is disabled */ 4255 return;
4256 4256
4257 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; 4257 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
4258 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ 4258 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6e15f3565892..5df40480228b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -591,7 +591,7 @@ retry:
591 591
592out: 592out:
593 dm_put_live_table(md, *srcu_idx); 593 dm_put_live_table(md, *srcu_idx);
594 if (r == -ENOTCONN) { 594 if (r == -ENOTCONN && !fatal_signal_pending(current)) {
595 msleep(10); 595 msleep(10);
596 goto retry; 596 goto retry;
597 } 597 }
@@ -603,9 +603,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
603{ 603{
604 struct mapped_device *md = bdev->bd_disk->private_data; 604 struct mapped_device *md = bdev->bd_disk->private_data;
605 struct dm_target *tgt; 605 struct dm_target *tgt;
606 struct block_device *tgt_bdev = NULL;
606 int srcu_idx, r; 607 int srcu_idx, r;
607 608
608 r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); 609 r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx);
609 if (r < 0) 610 if (r < 0)
610 return r; 611 return r;
611 612
@@ -620,7 +621,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
620 goto out; 621 goto out;
621 } 622 }
622 623
623 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 624 r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg);
624out: 625out:
625 dm_put_live_table(md, srcu_idx); 626 dm_put_live_table(md, srcu_idx);
626 return r; 627 return r;