author		Mike Christie <mchristi@redhat.com>	2017-11-28 13:40:29 -0500
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2018-01-12 18:07:11 -0500
commit		89ec9cfd3b644fbc36047e36776509130d2fc1ec (patch)
tree		deab530efef0477efc8010d9bc032cc2295ddcf9 /drivers/target
parent		bf99ec13327bb5b0f6475aea8735c0ca34cc2a26 (diff)
tcmu: split unmap_thread_fn
Separate unmap_thread_fn to make it easier to read.

Note: this patch does not fix the bug where we might miss a wake up
call. The next patch will fix that. This patch only separates the code
into functions.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
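The wakeup bug the message refers to is the classic lost-wakeup window in this wait pattern: wake_up(&unmap_wait) only wakes tasks that are already queued on the waitqueue, and unmap_thread_fn() does not re-check any work-pending condition after prepare_to_wait(), so a wakeup that arrives while the thread is still busy processing is silently dropped. A hypothetical timeline, for illustration only (not part of this patch):

	/*
	 *   unmap thread                          waker
	 *   ------------                          -----
	 *   ...processing devices...
	 *                                         wake_up(&unmap_wait);
	 *                                         (no sleeper queued: no-op)
	 *   prepare_to_wait(&unmap_wait, ...);
	 *   schedule();   <-- sleeps until the *next* wakeup; this one is lost
	 */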
Diffstat (limited to 'drivers/target')
-rw-r--r--	drivers/target/target_core_user.c	120
1 file changed, 70 insertions(+), 50 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index d9fd91ee8282..cab6c72eb012 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1973,71 +1973,91 @@ static struct target_backend_ops tcmu_ops = {
 	.tb_dev_attrib_attrs	= NULL,
 };
 
-static int unmap_thread_fn(void *data)
+
+static void find_free_blocks(void)
 {
 	struct tcmu_dev *udev;
 	loff_t off;
 	uint32_t start, end, block;
 
-	while (!kthread_should_stop()) {
-		DEFINE_WAIT(__wait);
-
-		prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
-		schedule();
-		finish_wait(&unmap_wait, &__wait);
+	mutex_lock(&root_udev_mutex);
+	list_for_each_entry(udev, &root_udev, node) {
+		mutex_lock(&udev->cmdr_lock);
 
-		if (kthread_should_stop())
-			break;
+		/* Try to complete the finished commands first */
+		tcmu_handle_completions(udev);
 
-		mutex_lock(&root_udev_mutex);
-		list_for_each_entry(udev, &root_udev, node) {
-			mutex_lock(&udev->cmdr_lock);
+		/* Skip the udevs waiting the global pool or in idle */
+		if (udev->waiting_global || !udev->dbi_thresh) {
+			mutex_unlock(&udev->cmdr_lock);
+			continue;
+		}
 
-			/* Try to complete the finished commands first */
-			tcmu_handle_completions(udev);
+		end = udev->dbi_max + 1;
+		block = find_last_bit(udev->data_bitmap, end);
+		if (block == udev->dbi_max) {
+			/*
+			 * The last bit is dbi_max, so there is
+			 * no need to shrink any blocks.
+			 */
+			mutex_unlock(&udev->cmdr_lock);
+			continue;
+		} else if (block == end) {
+			/* The current udev will goto idle state */
+			udev->dbi_thresh = start = 0;
+			udev->dbi_max = 0;
+		} else {
+			udev->dbi_thresh = start = block + 1;
+			udev->dbi_max = block;
+		}
 
-			/* Skip the udevs waiting the global pool or in idle */
-			if (udev->waiting_global || !udev->dbi_thresh) {
-				mutex_unlock(&udev->cmdr_lock);
-				continue;
-			}
+		/* Here will truncate the data area from off */
+		off = udev->data_off + start * DATA_BLOCK_SIZE;
+		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
 
-			end = udev->dbi_max + 1;
-			block = find_last_bit(udev->data_bitmap, end);
-			if (block == udev->dbi_max) {
-				/*
-				 * The last bit is dbi_max, so there is
-				 * no need to shrink any blocks.
-				 */
-				mutex_unlock(&udev->cmdr_lock);
-				continue;
-			} else if (block == end) {
-				/* The current udev will goto idle state */
-				udev->dbi_thresh = start = 0;
-				udev->dbi_max = 0;
-			} else {
-				udev->dbi_thresh = start = block + 1;
-				udev->dbi_max = block;
-			}
+		/* Release the block pages */
+		tcmu_blocks_release(&udev->data_blocks, start, end);
+		mutex_unlock(&udev->cmdr_lock);
+	}
+	mutex_unlock(&root_udev_mutex);
+}
 
-			/* Here will truncate the data area from off */
-			off = udev->data_off + start * DATA_BLOCK_SIZE;
-			unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
+static void run_cmdr_queues(void)
+{
+	struct tcmu_dev *udev;
 
-			/* Release the block pages */
-			tcmu_blocks_release(&udev->data_blocks, start, end);
+	/*
+	 * Try to wake up the udevs who are waiting
+	 * for the global data block pool.
+	 */
+	mutex_lock(&root_udev_mutex);
+	list_for_each_entry(udev, &root_udev, node) {
+		mutex_lock(&udev->cmdr_lock);
+		if (!udev->waiting_global) {
 			mutex_unlock(&udev->cmdr_lock);
+			break;
 		}
+		mutex_unlock(&udev->cmdr_lock);
 
-		/*
-		 * Try to wake up the udevs who are waiting
-		 * for the global data pool.
-		 */
-		list_for_each_entry(udev, &root_udev, node) {
-			if (udev->waiting_global)
-				wake_up(&udev->wait_cmdr);
-		}
-		mutex_unlock(&root_udev_mutex);
+		wake_up(&udev->wait_cmdr);
+	}
+	mutex_unlock(&root_udev_mutex);
+}
+
+static int unmap_thread_fn(void *data)
+{
+	while (!kthread_should_stop()) {
+		DEFINE_WAIT(__wait);
+
+		prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
+		schedule();
+		finish_wait(&unmap_wait, &__wait);
+
+		if (kthread_should_stop())
+			break;
+
+		find_free_blocks();
+		run_cmdr_queues();
 	}
 
 	return 0;
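The commit message defers the wakeup fix to the next patch. A conventional way to close such a window is to record pending work before calling wake_up() and to re-check it after prepare_to_wait(), so the thread skips the sleep instead of losing the notification. A minimal sketch with a hypothetical unmap_pending flag (illustration only, not necessarily the approach the follow-up patch takes):

static atomic_t unmap_pending = ATOMIC_INIT(0);	/* hypothetical, for illustration */

static void wake_unmap_thread(void)
{
	/* Publish the work before waking, so a busy thread still sees it. */
	atomic_set(&unmap_pending, 1);
	wake_up(&unmap_wait);
}

static int unmap_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		DEFINE_WAIT(__wait);

		prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
		/*
		 * Re-check after queuing: a wakeup that fired while the
		 * thread was busy is still visible here and skips the sleep.
		 */
		if (!atomic_xchg(&unmap_pending, 0) && !kthread_should_stop())
			schedule();
		finish_wait(&unmap_wait, &__wait);

		if (kthread_should_stop())
			break;

		find_free_blocks();
		run_cmdr_queues();
	}

	return 0;
}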