author		Mike Christie <mchristi@redhat.com>	2017-11-28 13:40:31 -0500
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2018-01-12 18:07:13 -0500
commit		488ebe4c355fdead39dbb3f6a51329c16cbfcc60 (patch)
tree		b23d76cc19a8e4fd937ecf94ed517a8571cd17bc /drivers/target
parent		9972cebb59a653cca735178a70c8ab09a5f4de1a (diff)
tcmu: move expired command completion to unmap thread
This moves the expired command completion handling to the unmap
workqueue, so the next patch can use a mutex in tcmu_check_expired_cmd.

Note: tcmu_device_timedout's use of spin_lock_irq was not needed. The
commands_lock is shared between thread context (tcmu_queue_cmd_ring and
tcmu_irqcontrol; despite its name, tcmu_irqcontrol does not run in irq
context) and timer/bh context. In timer/bh context bhs are already
disabled, so only the _bh lock calls are needed from the thread context
callers.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
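As an illustration of the locking note above, here is a minimal sketch
(not part of this patch; shared_lock, example_timer_fn and
example_thread_path are hypothetical names):

/*
 * A lock shared between thread context and timer/bh context must be
 * taken with the _bh variants from thread context, so a softirq cannot
 * preempt the holder on the same CPU and deadlock on the lock. In
 * timer/bh context bhs are already disabled, so the plain calls suffice.
 */
#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(shared_lock);

/* Timer (bh) context: bhs are already disabled here. */
static void example_timer_fn(struct timer_list *t)
{
	spin_lock(&shared_lock);
	/* ... touch state shared with thread context ... */
	spin_unlock(&shared_lock);
}

/* Thread context: disable bhs while holding the lock. */
static void example_thread_path(void)
{
	spin_lock_bh(&shared_lock);
	/* ... touch state shared with timer/bh context ... */
	spin_unlock_bh(&shared_lock);
}

This is also why spin_lock_irq/spin_lock_irqsave were overkill for
commands_lock: nothing takes it from hard irq context.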
Diffstat (limited to 'drivers/target')
-rw-r--r--	drivers/target/target_core_user.c | 48
1 file changed, 39 insertions(+), 9 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index a9f5c52e8b1d..2ccc8e61449b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -143,6 +143,7 @@ struct tcmu_dev {
 
 	struct timer_list timeout;
 	unsigned int cmd_time_out;
+	struct list_head timedout_entry;
 
 	spinlock_t nl_cmd_lock;
 	struct tcmu_nl_cmd curr_nl_cmd;
@@ -179,6 +180,9 @@ struct tcmu_cmd {
 static DEFINE_MUTEX(root_udev_mutex);
 static LIST_HEAD(root_udev);
 
+static DEFINE_SPINLOCK(timed_out_udevs_lock);
+static LIST_HEAD(timed_out_udevs);
+
 static atomic_t global_db_count = ATOMIC_INIT(0);
 static struct work_struct tcmu_unmap_work;
 
@@ -1057,18 +1061,15 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
 static void tcmu_device_timedout(struct timer_list *t)
 {
 	struct tcmu_dev *udev = from_timer(udev, t, timeout);
-	unsigned long flags;
 
-	spin_lock_irqsave(&udev->commands_lock, flags);
-	idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
-	spin_unlock_irqrestore(&udev->commands_lock, flags);
+	pr_debug("%s cmd timeout has expired\n", udev->name);
 
-	schedule_work(&tcmu_unmap_work);
+	spin_lock(&timed_out_udevs_lock);
+	if (list_empty(&udev->timedout_entry))
+		list_add_tail(&udev->timedout_entry, &timed_out_udevs);
+	spin_unlock(&timed_out_udevs_lock);
 
-	/*
-	 * We don't need to wakeup threads on wait_cmdr since they have their
-	 * own timeout.
-	 */
+	schedule_work(&tcmu_unmap_work);
 }
 
 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
@@ -1112,6 +1113,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	init_waitqueue_head(&udev->wait_cmdr);
 	mutex_init(&udev->cmdr_lock);
 
+	INIT_LIST_HEAD(&udev->timedout_entry);
 	idr_init(&udev->commands);
 	spin_lock_init(&udev->commands_lock);
 
@@ -1325,6 +1327,11 @@ static void tcmu_dev_kref_release(struct kref *kref)
 	vfree(udev->mb_addr);
 	udev->mb_addr = NULL;
 
+	spin_lock_bh(&timed_out_udevs_lock);
+	if (!list_empty(&udev->timedout_entry))
+		list_del(&udev->timedout_entry);
+	spin_unlock_bh(&timed_out_udevs_lock);
+
 	/* Upper layer should drain all requests before calling this */
 	spin_lock_irq(&udev->commands_lock);
 	idr_for_each_entry(&udev->commands, cmd, i) {
@@ -2041,8 +2048,31 @@ static void run_cmdr_queues(void)
 	mutex_unlock(&root_udev_mutex);
 }
 
+static void check_timedout_devices(void)
+{
+	struct tcmu_dev *udev, *tmp_dev;
+	LIST_HEAD(devs);
+
+	spin_lock_bh(&timed_out_udevs_lock);
+	list_splice_init(&timed_out_udevs, &devs);
+
+	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
+		list_del_init(&udev->timedout_entry);
+		spin_unlock_bh(&timed_out_udevs_lock);
+
+		spin_lock(&udev->commands_lock);
+		idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
+		spin_unlock(&udev->commands_lock);
+
+		spin_lock_bh(&timed_out_udevs_lock);
+	}
+
+	spin_unlock_bh(&timed_out_udevs_lock);
+}
+
 static void tcmu_unmap_work_fn(struct work_struct *work)
 {
+	check_timedout_devices();
 	find_free_blocks();
 	run_cmdr_queues();
 }