author     Mike Christie <mchristi@redhat.com>         2017-11-28 13:40:40 -0500
committer  Nicholas Bellinger <nab@linux-iscsi.org>    2018-01-12 18:07:19 -0500
commit     9103575ae34e9d60d40940bebf47fc9e9652067a
tree       c4b6876c6ec96498a9ab435e8091a07ed49a617b /drivers/target
parent     af1dd7ff46824a94da1d90443bd07db2796bd545
tcmu: make ring buffer timer configurable
This adds a timer, qfull_time_out, that controls how long a
device will wait for ring buffer space to open up before
failing the commands in the queue. It is useful to separate
this timer from cmd_time_out and its default of 30 seconds,
because in HA setups cmd_time_out may be disabled, and 30
seconds is too long to wait when some OSs, like ESX, time
out commands after as little as 8 to 15 seconds.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
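
For illustration (not part of this patch): the new attribute appears next
to cmd_time_out in each TCMU device's configfs attrib directory, so it can
be tuned per device from userspace. A minimal sketch, assuming the standard
LIO configfs mount point; the HBA and device names (user_1, foo) are
hypothetical:

    /*
     * Sketch: set qfull_time_out (in seconds) for a TCMU device.
     * The configfs path assumes the usual LIO layout; user_1/foo
     * is a hypothetical user-backed device.
     */
    #include <stdio.h>

    int main(void)
    {
            const char *attr = "/sys/kernel/config/target/core/"
                               "user_1/foo/attrib/qfull_time_out";
            FILE *f = fopen(attr, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fprintf(f, "10\n");     /* wait 10s for ring space */
            return fclose(f) ? 1 : 0;
    }

Per the add_to_cmdr_queue() logic below, writing 0 makes commands fail
immediately with -ETIMEDOUT when the ring is full instead of being queued.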
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/target_core_user.c  149
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 52fc1d440d23..c6a0c3198ccc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -142,8 +142,12 @@ struct tcmu_dev {
 
         struct idr commands;
 
-        struct timer_list timeout;
+        struct timer_list cmd_timer;
         unsigned int cmd_time_out;
+
+        struct timer_list qfull_timer;
+        int qfull_time_out;
+
         struct list_head timedout_entry;
 
         spinlock_t nl_cmd_lock;
@@ -741,18 +745,14 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
         return command_size;
 }
 
-static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
+static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
+                                struct timer_list *timer)
 {
         struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
-        unsigned long tmo = udev->cmd_time_out;
         int cmd_id;
 
-        /*
-         * If it was on the cmdr queue waiting we do not reset the timer
-         * for requeues and when it is finally sent to userspace.
-         */
         if (tcmu_cmd->cmd_id)
-                return 0;
+                goto setup_timer;
 
         cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
         if (cmd_id < 0) {
@@ -761,23 +761,38 @@ static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
         }
         tcmu_cmd->cmd_id = cmd_id;
 
-        if (!tmo)
-                tmo = TCMU_TIME_OUT;
-
         pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
                  udev->name, tmo / MSEC_PER_SEC);
 
+setup_timer:
+        if (!tmo)
+                return 0;
+
         tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
-        mod_timer(&udev->timeout, tcmu_cmd->deadline);
+        mod_timer(timer, tcmu_cmd->deadline);
         return 0;
 }
 
 static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
 {
         struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+        unsigned int tmo;
         int ret;
 
-        ret = tcmu_setup_cmd_timer(tcmu_cmd);
+        /*
+         * For backwards compat if qfull_time_out is not set use
+         * cmd_time_out and if that's not set use the default time out.
+         */
+        if (!udev->qfull_time_out)
+                return -ETIMEDOUT;
+        else if (udev->qfull_time_out > 0)
+                tmo = udev->qfull_time_out;
+        else if (udev->cmd_time_out)
+                tmo = udev->cmd_time_out;
+        else
+                tmo = TCMU_TIME_OUT;
+
+        ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
         if (ret)
                 return ret;
 
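
The three-way semantics introduced in the hunk above are easy to misread,
so here is a standalone restatement (a sketch only; pick_qfull_tmo() is a
hypothetical helper, not driver code):

    /* qfull_time_out selection, values in msecs:
     *   == 0 -> fail the command now (-ETIMEDOUT in the driver)
     *    > 0 -> use the new qfull timer value
     *    < 0 -> fall back to cmd_time_out, else the 30s default
     */
    static int pick_qfull_tmo(int qfull_time_out, unsigned int cmd_time_out,
                              unsigned int *tmo)
    {
            if (!qfull_time_out)
                    return -1;
            if (qfull_time_out > 0)
                    *tmo = qfull_time_out;
            else if (cmd_time_out)
                    *tmo = cmd_time_out;
            else
                    *tmo = 30 * 1000;       /* TCMU_TIME_OUT */
            return 0;
    }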
@@ -901,7 +916,8 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
         }
         entry->req.iov_bidi_cnt = iov_cnt;
 
-        ret = tcmu_setup_cmd_timer(tcmu_cmd);
+        ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
+                                   &udev->cmd_timer);
         if (ret) {
                 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
                 mutex_unlock(&udev->cmdr_lock);
@@ -1049,14 +1065,19 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
                 handled++;
         }
 
-        if (mb->cmd_tail == mb->cmd_head && list_empty(&udev->cmdr_queue)) {
-                del_timer(&udev->timeout);
-                /*
-                 * not more pending or waiting commands so try to reclaim
-                 * blocks if needed.
-                 */
-                if (atomic_read(&global_db_count) > TCMU_GLOBAL_MAX_BLOCKS)
-                        schedule_delayed_work(&tcmu_unmap_work, 0);
+        if (mb->cmd_tail == mb->cmd_head) {
+                /* no more pending commands */
+                del_timer(&udev->cmd_timer);
+
+                if (list_empty(&udev->cmdr_queue)) {
+                        /*
+                         * no more pending or waiting commands so try to
+                         * reclaim blocks if needed.
+                         */
+                        if (atomic_read(&global_db_count) >
+                            TCMU_GLOBAL_MAX_BLOCKS)
+                                schedule_delayed_work(&tcmu_unmap_work, 0);
+                }
         }
 
         return handled;
@@ -1077,13 +1098,15 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
                 return 0;
 
         is_running = list_empty(&cmd->cmdr_queue_entry);
-        pr_debug("Timing out cmd %u on dev %s that is %s.\n",
-                 id, udev->name, is_running ? "inflight" : "queued");
-
-        se_cmd = cmd->se_cmd;
-        cmd->se_cmd = NULL;
 
         if (is_running) {
+                /*
+                 * If cmd_time_out is disabled but qfull is set deadline
+                 * will only reflect the qfull timeout. Ignore it.
+                 */
+                if (!udev->cmd_time_out)
+                        return 0;
+
                 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
                 /*
                  * target_complete_cmd will translate this to LUN COMM FAILURE
@@ -1096,16 +1119,18 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
                 tcmu_free_cmd(cmd);
                 scsi_status = SAM_STAT_TASK_SET_FULL;
         }
+
+        pr_debug("Timing out cmd %u on dev %s that is %s.\n",
+                 id, udev->name, is_running ? "inflight" : "queued");
+
+        se_cmd = cmd->se_cmd;
+        cmd->se_cmd = NULL;
         target_complete_cmd(se_cmd, scsi_status);
         return 0;
 }
 
-static void tcmu_device_timedout(struct timer_list *t)
+static void tcmu_device_timedout(struct tcmu_dev *udev)
 {
-        struct tcmu_dev *udev = from_timer(udev, t, timeout);
-
-        pr_debug("%s cmd timeout has expired\n", udev->name);
-
         spin_lock(&timed_out_udevs_lock);
         if (list_empty(&udev->timedout_entry))
                 list_add_tail(&udev->timedout_entry, &timed_out_udevs);
@@ -1114,6 +1139,22 @@ static void tcmu_device_timedout(struct timer_list *t)
         schedule_delayed_work(&tcmu_unmap_work, 0);
 }
 
+static void tcmu_cmd_timedout(struct timer_list *t)
+{
+        struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
+
+        pr_debug("%s cmd timeout has expired\n", udev->name);
+        tcmu_device_timedout(udev);
+}
+
+static void tcmu_qfull_timedout(struct timer_list *t)
+{
+        struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
+
+        pr_debug("%s qfull timeout has expired\n", udev->name);
+        tcmu_device_timedout(udev);
+}
+
 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
 {
         struct tcmu_hba *tcmu_hba;
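
Both new callbacks recover the containing tcmu_dev with from_timer(), part
of the timer_list-based callback API this driver was recently converted to;
for reference, it is a container_of() wrapper (include/linux/timer.h):

    /* Recover the structure embedding the timer_list member that fired. */
    #define from_timer(var, callback_timer, timer_fieldname) \
            container_of(callback_timer, typeof(*var), timer_fieldname)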
@@ -1151,6 +1192,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 
         udev->hba = hba;
         udev->cmd_time_out = TCMU_TIME_OUT;
+        udev->qfull_time_out = -1;
 
         mutex_init(&udev->cmdr_lock);
 
@@ -1158,7 +1200,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
         INIT_LIST_HEAD(&udev->cmdr_queue);
         idr_init(&udev->commands);
 
-        timer_setup(&udev->timeout, tcmu_device_timedout, 0);
+        timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
+        timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
 
         init_waitqueue_head(&udev->nl_cmd_wq);
         spin_lock_init(&udev->nl_cmd_lock);
@@ -1213,6 +1256,8 @@ static bool run_cmdr_queue(struct tcmu_dev *udev)
                         goto done;
                 }
         }
+        if (list_empty(&udev->cmdr_queue))
+                del_timer(&udev->qfull_timer);
 done:
         return drained;
 }
@@ -1712,7 +1757,8 @@ static void tcmu_destroy_device(struct se_device *dev)
 {
         struct tcmu_dev *udev = TCMU_DEV(dev);
 
-        del_timer_sync(&udev->timeout);
+        del_timer_sync(&udev->cmd_timer);
+        del_timer_sync(&udev->qfull_timer);
 
         mutex_lock(&root_udev_mutex);
         list_del(&udev->node);
@@ -1893,6 +1939,40 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
 }
 CONFIGFS_ATTR(tcmu_, cmd_time_out);
 
+static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
+{
+        struct se_dev_attrib *da = container_of(to_config_group(item),
+                                        struct se_dev_attrib, da_group);
+        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+        return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
+                        udev->qfull_time_out :
+                        udev->qfull_time_out / MSEC_PER_SEC);
+}
+
+static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
+                                         const char *page, size_t count)
+{
+        struct se_dev_attrib *da = container_of(to_config_group(item),
+                                        struct se_dev_attrib, da_group);
+        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+        s32 val;
+        int ret;
+
+        ret = kstrtos32(page, 0, &val);
+        if (ret < 0)
+                return ret;
+
+        if (val >= 0) {
+                udev->qfull_time_out = val * MSEC_PER_SEC;
+        } else {
+                printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
+                return -EINVAL;
+        }
+        return count;
+}
+CONFIGFS_ATTR(tcmu_, qfull_time_out);
+
 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
 {
         struct se_dev_attrib *da = container_of(to_config_group(item),
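
Units deserve a note here: qfull_time_out is kept in milliseconds
internally, the store method scales its input from seconds, and the show
method prints seconds for positive values while passing the 0 and -1
sentinels through unscaled. (The store method accepts only values >= 0, so
the -1 default cannot be re-set through configfs in this version.) A quick
sketch of the show-side scaling, with hypothetical stored values:

    #include <stdio.h>

    #define MSEC_PER_SEC 1000L

    int main(void)
    {
            int vals[] = { -1, 0, 10000 };  /* default, fail-now, 10s */

            for (int i = 0; i < 3; i++) {
                    /* same expression as tcmu_qfull_time_out_show() */
                    long v = vals[i] <= 0 ? vals[i] : vals[i] / MSEC_PER_SEC;
                    printf("%ld\n", v);     /* prints -1, 0, 10 */
            }
            return 0;
    }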
@@ -2038,6 +2118,7 @@ CONFIGFS_ATTR(tcmu_, emulate_write_cache);
 
 static struct configfs_attribute *tcmu_attrib_attrs[] = {
         &tcmu_attr_cmd_time_out,
+        &tcmu_attr_qfull_time_out,
         &tcmu_attr_dev_config,
         &tcmu_attr_dev_size,
         &tcmu_attr_emulate_write_cache,