summaryrefslogtreecommitdiffstats
path: root/drivers/target
diff options
context:
space:
mode:
authorMike Christie <mchristi@redhat.com>2017-11-28 13:40:30 -0500
committerNicholas Bellinger <nab@linux-iscsi.org>2018-01-12 18:07:12 -0500
commit9972cebb59a653cca735178a70c8ab09a5f4de1a (patch)
treeb112dbd69e692dbffe3f2acecc7c888fbfbce7ca /drivers/target
parent89ec9cfd3b644fbc36047e36776509130d2fc1ec (diff)
tcmu: fix unmap thread race
If the unmap thread has already run find_free_blocks but not yet run prepare_to_wait when a wake_up(&unmap_wait) call is done, the unmap thread is going to miss the wake call. Instead of adding checks for whether new waiters were added, this patch has us use a work queue, which will run us again in this type of case. Signed-off-by: Mike Christie <mchristi@redhat.com> Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target')
-rw-r--r--drivers/target/target_core_user.c43
1 file changed, 10 insertions, 33 deletions
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index cab6c72eb012..a9f5c52e8b1d 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -32,7 +32,7 @@
32#include <linux/highmem.h> 32#include <linux/highmem.h>
33#include <linux/configfs.h> 33#include <linux/configfs.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35#include <linux/kthread.h> 35#include <linux/workqueue.h>
36#include <net/genetlink.h> 36#include <net/genetlink.h>
37#include <scsi/scsi_common.h> 37#include <scsi/scsi_common.h>
38#include <scsi/scsi_proto.h> 38#include <scsi/scsi_proto.h>
@@ -176,12 +176,11 @@ struct tcmu_cmd {
176 unsigned long flags; 176 unsigned long flags;
177}; 177};
178 178
179static struct task_struct *unmap_thread;
180static wait_queue_head_t unmap_wait;
181static DEFINE_MUTEX(root_udev_mutex); 179static DEFINE_MUTEX(root_udev_mutex);
182static LIST_HEAD(root_udev); 180static LIST_HEAD(root_udev);
183 181
184static atomic_t global_db_count = ATOMIC_INIT(0); 182static atomic_t global_db_count = ATOMIC_INIT(0);
183static struct work_struct tcmu_unmap_work;
185 184
186static struct kmem_cache *tcmu_cmd_cache; 185static struct kmem_cache *tcmu_cmd_cache;
187 186
@@ -389,8 +388,7 @@ static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
389 388
390err: 389err:
391 udev->waiting_global = true; 390 udev->waiting_global = true;
392 /* Try to wake up the unmap thread */ 391 schedule_work(&tcmu_unmap_work);
393 wake_up(&unmap_wait);
394 return false; 392 return false;
395} 393}
396 394
@@ -1065,8 +1063,7 @@ static void tcmu_device_timedout(struct timer_list *t)
1065 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); 1063 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
1066 spin_unlock_irqrestore(&udev->commands_lock, flags); 1064 spin_unlock_irqrestore(&udev->commands_lock, flags);
1067 1065
1068 /* Try to wake up the ummap thread */ 1066 schedule_work(&tcmu_unmap_work);
1069 wake_up(&unmap_wait);
1070 1067
1071 /* 1068 /*
1072 * We don't need to wakeup threads on wait_cmdr since they have their 1069 * We don't need to wakeup threads on wait_cmdr since they have their
@@ -2044,23 +2041,10 @@ static void run_cmdr_queues(void)
2044 mutex_unlock(&root_udev_mutex); 2041 mutex_unlock(&root_udev_mutex);
2045} 2042}
2046 2043
2047static int unmap_thread_fn(void *data) 2044static void tcmu_unmap_work_fn(struct work_struct *work)
2048{ 2045{
2049 while (!kthread_should_stop()) { 2046 find_free_blocks();
2050 DEFINE_WAIT(__wait); 2047 run_cmdr_queues();
2051
2052 prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
2053 schedule();
2054 finish_wait(&unmap_wait, &__wait);
2055
2056 if (kthread_should_stop())
2057 break;
2058
2059 find_free_blocks();
2060 run_cmdr_queues();
2061 }
2062
2063 return 0;
2064} 2048}
2065 2049
2066static int __init tcmu_module_init(void) 2050static int __init tcmu_module_init(void)
@@ -2069,6 +2053,8 @@ static int __init tcmu_module_init(void)
2069 2053
2070 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 2054 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
2071 2055
2056 INIT_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
2057
2072 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", 2058 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
2073 sizeof(struct tcmu_cmd), 2059 sizeof(struct tcmu_cmd),
2074 __alignof__(struct tcmu_cmd), 2060 __alignof__(struct tcmu_cmd),
@@ -2114,17 +2100,8 @@ static int __init tcmu_module_init(void)
2114 if (ret) 2100 if (ret)
2115 goto out_attrs; 2101 goto out_attrs;
2116 2102
2117 init_waitqueue_head(&unmap_wait);
2118 unmap_thread = kthread_run(unmap_thread_fn, NULL, "tcmu_unmap");
2119 if (IS_ERR(unmap_thread)) {
2120 ret = PTR_ERR(unmap_thread);
2121 goto out_unreg_transport;
2122 }
2123
2124 return 0; 2103 return 0;
2125 2104
2126out_unreg_transport:
2127 target_backend_unregister(&tcmu_ops);
2128out_attrs: 2105out_attrs:
2129 kfree(tcmu_attrs); 2106 kfree(tcmu_attrs);
2130out_unreg_genl: 2107out_unreg_genl:
@@ -2139,7 +2116,7 @@ out_free_cache:
2139 2116
2140static void __exit tcmu_module_exit(void) 2117static void __exit tcmu_module_exit(void)
2141{ 2118{
2142 kthread_stop(unmap_thread); 2119 cancel_work_sync(&tcmu_unmap_work);
2143 target_backend_unregister(&tcmu_ops); 2120 target_backend_unregister(&tcmu_ops);
2144 kfree(tcmu_attrs); 2121 kfree(tcmu_attrs);
2145 genl_unregister_family(&tcmu_genl_family); 2122 genl_unregister_family(&tcmu_genl_family);