aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/target
diff options
context:
space:
mode:
authorNicholas Bellinger <nab@linux-iscsi.org>2011-11-30 04:25:21 -0500
committerNicholas Bellinger <nab@linux-iscsi.org>2011-12-14 06:42:13 -0500
commit65586d51e0986be574118286c3d0007e903a2add (patch)
tree0684f76d53e666ffe2a2c77f8e3947d263fa1b75 /drivers/target
parent40be67f4c588fe2f3e2dbd60ae1f470abc5b6ad8 (diff)
target: Drop se_device TCQ queue_depth usage from I/O path
Historically, pSCSI devices have been the ones that required target-core to enforce a per se_device->depth_left. This patch changes target-core to no longer (by default) enforce a per se_device->depth_left or sleep in transport_tcq_window_closed() when we run out of queue slots for all backend export cases. Cc: Christoph Hellwig <hch@lst.de> Cc: Roland Dreier <roland@purestorage.com> Cc: Joern Engel <joern@logfs.org> Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target')
-rw-r--r--drivers/target/target_core_device.c7
-rw-r--r--drivers/target/target_core_pscsi.c1
-rw-r--r--drivers/target/target_core_transport.c39
3 files changed, 2 insertions, 45 deletions
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 1f74de25a92f..0c5992f0d946 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1132,8 +1132,6 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1132 */ 1132 */
1133int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) 1133int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1134{ 1134{
1135 u32 orig_queue_depth = dev->queue_depth;
1136
1137 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1135 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1138 pr_err("dev[%p]: Unable to change SE Device TCQ while" 1136 pr_err("dev[%p]: Unable to change SE Device TCQ while"
1139 " dev_export_obj: %d count exists\n", dev, 1137 " dev_export_obj: %d count exists\n", dev,
@@ -1167,11 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1167 } 1165 }
1168 1166
1169 dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; 1167 dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1170 if (queue_depth > orig_queue_depth)
1171 atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
1172 else if (queue_depth < orig_queue_depth)
1173 atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
1174
1175 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", 1168 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1176 dev, queue_depth); 1169 dev, queue_depth);
1177 return 0; 1170 return 0;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index b73a399cdd54..d35467d42e12 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -350,7 +350,6 @@ static struct se_device *pscsi_add_device_to_list(
350 * scsi_device_put() and the pdv->pdv_sd cleared. 350 * scsi_device_put() and the pdv->pdv_sd cleared.
351 */ 351 */
352 pdv->pdv_sd = sd; 352 pdv->pdv_sd = sd;
353
354 dev = transport_add_device_to_core_hba(hba, &pscsi_template, 353 dev = transport_add_device_to_core_hba(hba, &pscsi_template,
355 se_dev, dev_flags, pdv, 354 se_dev, dev_flags, pdv,
356 &dev_limits, NULL, NULL); 355 &dev_limits, NULL, NULL);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 1cc7e920ab0b..7c2def7e2593 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -691,12 +691,6 @@ void transport_complete_task(struct se_task *task, int success)
691 struct se_cmd *cmd = task->task_se_cmd; 691 struct se_cmd *cmd = task->task_se_cmd;
692 struct se_device *dev = cmd->se_dev; 692 struct se_device *dev = cmd->se_dev;
693 unsigned long flags; 693 unsigned long flags;
694#if 0
695 pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
696 cmd->t_task_cdb[0], dev);
697#endif
698 if (dev)
699 atomic_inc(&dev->depth_left);
700 694
701 spin_lock_irqsave(&cmd->t_state_lock, flags); 695 spin_lock_irqsave(&cmd->t_state_lock, flags);
702 task->task_flags &= ~TF_ACTIVE; 696 task->task_flags &= ~TF_ACTIVE;
@@ -971,9 +965,8 @@ void transport_dump_dev_state(
971 break; 965 break;
972 } 966 }
973 967
974 *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d", 968 *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
975 atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), 969 atomic_read(&dev->execute_tasks), dev->queue_depth);
976 dev->queue_depth);
977 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", 970 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
978 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); 971 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
979 *bl += sprintf(b + *bl, " "); 972 *bl += sprintf(b + *bl, " ");
@@ -1328,9 +1321,6 @@ struct se_device *transport_add_device_to_core_hba(
1328 spin_lock_init(&dev->se_port_lock); 1321 spin_lock_init(&dev->se_port_lock);
1329 spin_lock_init(&dev->se_tmr_lock); 1322 spin_lock_init(&dev->se_tmr_lock);
1330 spin_lock_init(&dev->qf_cmd_lock); 1323 spin_lock_init(&dev->qf_cmd_lock);
1331
1332 dev->queue_depth = dev_limits->queue_depth;
1333 atomic_set(&dev->depth_left, dev->queue_depth);
1334 atomic_set(&dev->dev_ordered_id, 0); 1324 atomic_set(&dev->dev_ordered_id, 0);
1335 1325
1336 se_dev_set_default_attribs(dev, dev_limits); 1326 se_dev_set_default_attribs(dev, dev_limits);
@@ -1982,18 +1972,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
1982 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 1972 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1983} 1973}
1984 1974
1985static inline int transport_tcq_window_closed(struct se_device *dev)
1986{
1987 if (dev->dev_tcq_window_closed++ <
1988 PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
1989 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
1990 } else
1991 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
1992
1993 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
1994 return 0;
1995}
1996
1997/* 1975/*
1998 * Called from Fabric Module context from transport_execute_tasks() 1976 * Called from Fabric Module context from transport_execute_tasks()
1999 * 1977 *
@@ -2126,16 +2104,7 @@ static int __transport_execute_tasks(struct se_device *dev)
2126 struct se_task *task = NULL; 2104 struct se_task *task = NULL;
2127 unsigned long flags; 2105 unsigned long flags;
2128 2106
2129 /*
2130 * Check if there is enough room in the device and HBA queue to send
2131 * struct se_tasks to the selected transport.
2132 */
2133check_depth: 2107check_depth:
2134 if (!atomic_read(&dev->depth_left))
2135 return transport_tcq_window_closed(dev);
2136
2137 dev->dev_tcq_window_closed = 0;
2138
2139 spin_lock_irq(&dev->execute_task_lock); 2108 spin_lock_irq(&dev->execute_task_lock);
2140 if (list_empty(&dev->execute_task_list)) { 2109 if (list_empty(&dev->execute_task_list)) {
2141 spin_unlock_irq(&dev->execute_task_lock); 2110 spin_unlock_irq(&dev->execute_task_lock);
@@ -2146,10 +2115,7 @@ check_depth:
2146 __transport_remove_task_from_execute_queue(task, dev); 2115 __transport_remove_task_from_execute_queue(task, dev);
2147 spin_unlock_irq(&dev->execute_task_lock); 2116 spin_unlock_irq(&dev->execute_task_lock);
2148 2117
2149 atomic_dec(&dev->depth_left);
2150
2151 cmd = task->task_se_cmd; 2118 cmd = task->task_se_cmd;
2152
2153 spin_lock_irqsave(&cmd->t_state_lock, flags); 2119 spin_lock_irqsave(&cmd->t_state_lock, flags);
2154 task->task_flags |= (TF_ACTIVE | TF_SENT); 2120 task->task_flags |= (TF_ACTIVE | TF_SENT);
2155 atomic_inc(&cmd->t_task_cdbs_sent); 2121 atomic_inc(&cmd->t_task_cdbs_sent);
@@ -2170,7 +2136,6 @@ check_depth:
2170 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2136 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2171 atomic_set(&cmd->t_transport_sent, 0); 2137 atomic_set(&cmd->t_transport_sent, 0);
2172 transport_stop_tasks_for_cmd(cmd); 2138 transport_stop_tasks_for_cmd(cmd);
2173 atomic_inc(&dev->depth_left);
2174 transport_generic_request_failure(cmd); 2139 transport_generic_request_failure(cmd);
2175 } 2140 }
2176 2141