diff options
author | Dongsheng Yang <dongsheng.yang@easystack.cn> | 2018-03-26 10:22:55 -0400 |
---|---|---|
committer | Ilya Dryomov <idryomov@gmail.com> | 2018-04-16 03:38:40 -0400 |
commit | 34f55d0b3a0a39c95134c0c89173893b846d4c80 (patch) | |
tree | c4421bd5aa961599e55a35a0151f18b528c9f8d6 | |
parent | 2f18d46683cb3047c41229d57cf7c6e2ee48676f (diff) |
rbd: support timeout in rbd_wait_state_locked()
Currently, rbd_wait_state_locked() will wait forever if we
can't get our state locked. Example:
rbd map --exclusive test1 --> /dev/rbd0
rbd map test1 --> /dev/rbd1
dd if=/dev/zero of=/dev/rbd1 bs=1M count=1 --> IO blocked
To avoid this problem, this patch introduces a timeout design
in rbd_wait_state_locked(). Then rbd_wait_state_locked() will
return an error when we reach the timeout.
This patch allows the user to set the lock_timeout in rbd mapping.
Signed-off-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
-rw-r--r-- | drivers/block/rbd.c | 22 |
1 files changed, 21 insertions, 1 deletions
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index f4b1b91e6d4d..d5a51493e8b5 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -732,6 +732,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) | |||
732 | */ | 732 | */ |
733 | enum { | 733 | enum { |
734 | Opt_queue_depth, | 734 | Opt_queue_depth, |
735 | Opt_lock_timeout, | ||
735 | Opt_last_int, | 736 | Opt_last_int, |
736 | /* int args above */ | 737 | /* int args above */ |
737 | Opt_last_string, | 738 | Opt_last_string, |
@@ -745,6 +746,7 @@ enum { | |||
745 | 746 | ||
746 | static match_table_t rbd_opts_tokens = { | 747 | static match_table_t rbd_opts_tokens = { |
747 | {Opt_queue_depth, "queue_depth=%d"}, | 748 | {Opt_queue_depth, "queue_depth=%d"}, |
749 | {Opt_lock_timeout, "lock_timeout=%d"}, | ||
748 | /* int args above */ | 750 | /* int args above */ |
749 | /* string args above */ | 751 | /* string args above */ |
750 | {Opt_read_only, "read_only"}, | 752 | {Opt_read_only, "read_only"}, |
@@ -758,12 +760,14 @@ static match_table_t rbd_opts_tokens = { | |||
758 | 760 | ||
759 | struct rbd_options { | 761 | struct rbd_options { |
760 | int queue_depth; | 762 | int queue_depth; |
763 | unsigned long lock_timeout; | ||
761 | bool read_only; | 764 | bool read_only; |
762 | bool lock_on_read; | 765 | bool lock_on_read; |
763 | bool exclusive; | 766 | bool exclusive; |
764 | }; | 767 | }; |
765 | 768 | ||
766 | #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ | 769 | #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ |
770 | #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */ | ||
767 | #define RBD_READ_ONLY_DEFAULT false | 771 | #define RBD_READ_ONLY_DEFAULT false |
768 | #define RBD_LOCK_ON_READ_DEFAULT false | 772 | #define RBD_LOCK_ON_READ_DEFAULT false |
769 | #define RBD_EXCLUSIVE_DEFAULT false | 773 | #define RBD_EXCLUSIVE_DEFAULT false |
@@ -796,6 +800,14 @@ static int parse_rbd_opts_token(char *c, void *private) | |||
796 | } | 800 | } |
797 | rbd_opts->queue_depth = intval; | 801 | rbd_opts->queue_depth = intval; |
798 | break; | 802 | break; |
803 | case Opt_lock_timeout: | ||
804 | /* 0 is "wait forever" (i.e. infinite timeout) */ | ||
805 | if (intval < 0 || intval > INT_MAX / 1000) { | ||
806 | pr_err("lock_timeout out of range\n"); | ||
807 | return -EINVAL; | ||
808 | } | ||
809 | rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000); | ||
810 | break; | ||
799 | case Opt_read_only: | 811 | case Opt_read_only: |
800 | rbd_opts->read_only = true; | 812 | rbd_opts->read_only = true; |
801 | break; | 813 | break; |
@@ -3536,6 +3548,7 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, | |||
3536 | static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire) | 3548 | static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire) |
3537 | { | 3549 | { |
3538 | DEFINE_WAIT(wait); | 3550 | DEFINE_WAIT(wait); |
3551 | unsigned long timeout; | ||
3539 | int ret = 0; | 3552 | int ret = 0; |
3540 | 3553 | ||
3541 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) | 3554 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) |
@@ -3559,12 +3572,18 @@ static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire) | |||
3559 | prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, | 3572 | prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, |
3560 | TASK_UNINTERRUPTIBLE); | 3573 | TASK_UNINTERRUPTIBLE); |
3561 | up_read(&rbd_dev->lock_rwsem); | 3574 | up_read(&rbd_dev->lock_rwsem); |
3562 | schedule(); | 3575 | timeout = schedule_timeout(ceph_timeout_jiffies( |
3576 | rbd_dev->opts->lock_timeout)); | ||
3563 | down_read(&rbd_dev->lock_rwsem); | 3577 | down_read(&rbd_dev->lock_rwsem); |
3564 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { | 3578 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { |
3565 | ret = -EBLACKLISTED; | 3579 | ret = -EBLACKLISTED; |
3566 | break; | 3580 | break; |
3567 | } | 3581 | } |
3582 | if (!timeout) { | ||
3583 | rbd_warn(rbd_dev, "timed out waiting for lock"); | ||
3584 | ret = -ETIMEDOUT; | ||
3585 | break; | ||
3586 | } | ||
3568 | } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); | 3587 | } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); |
3569 | 3588 | ||
3570 | finish_wait(&rbd_dev->lock_waitq, &wait); | 3589 | finish_wait(&rbd_dev->lock_waitq, &wait); |
@@ -5186,6 +5205,7 @@ static int rbd_add_parse_args(const char *buf, | |||
5186 | 5205 | ||
5187 | rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; | 5206 | rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; |
5188 | rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; | 5207 | rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; |
5208 | rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT; | ||
5189 | rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; | 5209 | rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; |
5190 | rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; | 5210 | rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; |
5191 | 5211 | ||