author     Nicholas Bellinger <nab@linux-iscsi.org>    2011-11-30 21:18:33 -0500
committer  Nicholas Bellinger <nab@linux-iscsi.org>    2011-12-14 06:48:46 -0500
commit     4d2300ccffd22d1d0213b6a8e4d685eb6ca069c0
tree       a76817aa4aaba5a6ffb05b3e6a0ea9ca654a0519
parent     65586d51e0986be574118286c3d0007e903a2add
target: Remove extra se_device->execute_task_lock access in fast path
This patch makes __transport_execute_tasks() perform the addition of tasks
to dev->execute_task_list via __transport_add_tasks_from_cmd() while holding
dev->execute_task_lock during normal I/O fast path submission.

It effectively removes the unnecessary re-acquire of dev->execute_task_lock
during transport_execute_tasks() -> transport_add_tasks_from_cmd() ahead of
calling __transport_execute_tasks() to queue tasks for the passed *se_cmd
descriptor.

(v2: Re-add goto check_depth usage for multi-task submission for now..)

Cc: Christoph Hellwig <hch@lst.de>
Cc: Roland Dreier <roland@purestorage.com>
Cc: Joern Engel <joern@logfs.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r--   drivers/target/target_core_transport.c   40
-rw-r--r--   include/target/target_core_base.h          1
2 files changed, 24 insertions, 17 deletions
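As an orientation aid before the diff, here is a minimal, self-contained sketch of the locking pattern the message describes. It is not the in-tree target code: dev_ctx, cmd_ctx, and the list_splice_tail_init()-based queueing are simplified, hypothetical stand-ins for struct se_device, struct se_cmd, and the per-task __transport_add_task_to_execute_queue() walk. The point it illustrates is that the old path took dev->execute_task_lock once to queue a command's tasks and then immediately again to dispatch them, while the new path queues the incoming command's tasks inside the dispatch loop's own critical section.

/* Sketch only -- hypothetical, trimmed stand-ins for the target core types. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct dev_ctx {
	spinlock_t execute_task_lock;
	struct list_head execute_task_list;
};

struct cmd_ctx {
	struct list_head t_task_list;
};

/* Old pattern: the lock is taken here to queue the command's tasks ... */
static void queue_tasks_old(struct dev_ctx *dev, struct cmd_ctx *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_splice_tail_init(&cmd->t_task_list, &dev->execute_task_list);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/* ... and then taken again right away by the dispatch loop. */
static void dispatch_old(struct dev_ctx *dev, struct cmd_ctx *cmd)
{
	queue_tasks_old(dev, cmd);

	spin_lock_irq(&dev->execute_task_lock);
	/* pop tasks from dev->execute_task_list and issue them here */
	spin_unlock_irq(&dev->execute_task_lock);
}

/*
 * New pattern: the caller hands the new command to the dispatch routine,
 * which queues its tasks under the single lock acquisition it already needs.
 */
static void dispatch_new(struct dev_ctx *dev, struct cmd_ctx *new_cmd)
{
	spin_lock_irq(&dev->execute_task_lock);
	if (new_cmd)
		list_splice_tail_init(&new_cmd->t_task_list,
				      &dev->execute_task_list);
	/* pop tasks from dev->execute_task_list and issue them here */
	spin_unlock_irq(&dev->execute_task_lock);
}

This mirrors the shape of the change below: transport_add_tasks_from_cmd() keeps a locked wrapper for remaining callers, while __transport_execute_tasks() gains a struct se_cmd *new_cmd argument, queues it via __transport_add_tasks_from_cmd() under the lock it takes at check_depth, and clears new_cmd to NULL before looping.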
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7c2def7e2593..46e25118ab25 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -68,7 +68,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
 static int transport_generic_write_pending(struct se_cmd *);
 static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev);
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
 		struct se_device *dev);
@@ -851,13 +851,11 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
-static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_task *task, *task_prev = NULL;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
 		if (!list_empty(&task->t_execute_list))
 			continue;
@@ -868,6 +866,15 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
 		__transport_add_task_to_execute_queue(task, task_prev, dev);
 		task_prev = task;
 	}
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+	unsigned long flags;
+	struct se_device *dev = cmd->se_dev;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	__transport_add_tasks_from_cmd(cmd);
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
@@ -2075,19 +2082,16 @@ static int transport_execute_tasks(struct se_cmd *cmd)
 		if (!add_tasks)
 			goto execute_tasks;
 		/*
-		 * This calls transport_add_tasks_from_cmd() to handle
-		 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
-		 * (if enabled) in __transport_add_task_to_execute_queue() and
-		 * transport_add_task_check_sam_attr().
+		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
+		 * adds associated se_tasks while holding dev->execute_task_lock
+		 * before I/O dispath to avoid a double spinlock access.
 		 */
-		transport_add_tasks_from_cmd(cmd);
+		__transport_execute_tasks(se_dev, cmd);
+		return 0;
 	}
-	/*
-	 * Kick the execution queue for the cmd associated struct se_device
-	 * storage object.
-	 */
+
 execute_tasks:
-	__transport_execute_tasks(se_dev);
+	__transport_execute_tasks(se_dev, NULL);
 	return 0;
 }
 
@@ -2097,7 +2101,7 @@ execute_tasks:
  *
  * Called from transport_processing_thread()
  */
-static int __transport_execute_tasks(struct se_device *dev)
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
 {
 	int error;
 	struct se_cmd *cmd = NULL;
@@ -2106,6 +2110,9 @@ static int __transport_execute_tasks(struct se_device *dev)
 
 check_depth:
 	spin_lock_irq(&dev->execute_task_lock);
+	if (new_cmd != NULL)
+		__transport_add_tasks_from_cmd(new_cmd);
+
 	if (list_empty(&dev->execute_task_list)) {
 		spin_unlock_irq(&dev->execute_task_lock);
 		return 0;
@@ -2139,6 +2146,7 @@ check_depth:
 		transport_generic_request_failure(cmd);
 	}
 
+	new_cmd = NULL;
 	goto check_depth;
 
 	return 0;
@@ -4647,7 +4655,7 @@ static int transport_processing_thread(void *param)
 			goto out;
 
 get_cmd:
-	__transport_execute_tasks(dev);
+	__transport_execute_tasks(dev, NULL);
 
 	cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
 	if (!cmd)
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index cd4caf3a598f..a74cb2be74de 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -781,7 +781,6 @@ struct se_device {
 	u32 dev_port_count;
 	/* See transport_device_status_table */
 	u32 dev_status;
-	u32 dev_tcq_window_closed;
 	/* Physical device queue depth */
 	u32 queue_depth;
 	/* Used for SPC-2 reservations enforce of ISIDs */