author	Christoph Hellwig <hch@infradead.org>	2012-04-24 00:25:03 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2012-05-06 18:11:25 -0400
commit	4101f0a89d4eb13f04cb0344d59a335b862ca5f9 (patch)
tree	9304d857f3e256c62d5e81559c26dd260ad053bf
parent	6bb35e009b656b36f7985057822c5fbf53ea75b7 (diff)
target: always allocate a single task
Simplify transport_generic_new_cmd to allocate only a single task. For normal unidirectional commands nothing changes, except that the code is a lot simpler now. Any BIDI support that used to work will stop working, for the next few patches at least.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r--	drivers/target/target_core_transport.c	157
1 file changed, 37 insertions(+), 120 deletions(-)
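To make the shape of the change easier to see before reading the hunks: the old path split a command into data/control/BIDI tasks and summed their counts, while the new path allocates exactly one se_task and hard-codes the task count to 1. Below is a minimal userspace sketch of that after-state control flow; it is not kernel code, and every name in it (struct task, struct cmd, new_cmd_single_task) is an illustrative stand-in rather than an identifier from the patch.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel's se_task / se_cmd pairing. */
struct task {
	struct task *next;      /* stand-in for the list_head linkage */
};

struct cmd {
	struct task *task_list; /* stand-in for cmd->t_task_list */
	int task_list_num;      /* stand-in for cmd->t_task_list_num */
};

/*
 * After-state sketch: one allocation, one list insertion, and the task
 * count fixed at 1 (previously task_cdbs + task_cdbs_bidi).
 */
static int new_cmd_single_task(struct cmd *cmd)
{
	struct task *task = calloc(1, sizeof(*task));

	if (!task) {
		fprintf(stderr, "Unable to allocate task\n");
		return -1;
	}
	task->next = cmd->task_list;
	cmd->task_list = task;
	cmd->task_list_num = 1;
	return 0;
}

int main(void)
{
	struct cmd cmd = { NULL, 0 };

	if (new_cmd_single_task(&cmd) < 0)
		return 1;
	printf("tasks allocated: %d\n", cmd.task_list_num);
	free(cmd.task_list);
	return 0;
}

As the hunks below show, the real patch also keeps the zero-length control CDB workaround and the sector-count sanity checks inline in transport_generic_new_cmd instead of in the deleted helpers.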
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 087bbea46cdc..5267198688fe 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1474,29 +1474,6 @@ static inline void transport_generic_prepare_cdb(
 	}
 }
 
-static struct se_task *
-transport_generic_get_task(struct se_cmd *cmd,
-		enum dma_data_direction data_direction)
-{
-	struct se_task *task;
-	struct se_device *dev = cmd->se_dev;
-
-	task = dev->transport->alloc_task(cmd->t_task_cdb);
-	if (!task) {
-		pr_err("Unable to allocate struct se_task\n");
-		return NULL;
-	}
-
-	INIT_LIST_HEAD(&task->t_list);
-	INIT_LIST_HEAD(&task->t_execute_list);
-	INIT_LIST_HEAD(&task->t_state_list);
-	init_completion(&task->task_stop_comp);
-	task->task_se_cmd = cmd;
-	task->task_data_direction = data_direction;
-
-	return task;
-}
-
 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
 
 /*
@@ -3705,68 +3682,6 @@ out:
 }
 
 /*
- * Break up cmd into chunks transport can handle
- */
-static int
-transport_allocate_data_tasks(struct se_cmd *cmd,
-		enum dma_data_direction data_direction,
-		struct scatterlist *cmd_sg, unsigned int sgl_nents)
-{
-	struct se_device *dev = cmd->se_dev;
-	struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
-	sector_t sectors;
-	struct se_task *task;
-	unsigned long flags;
-
-	if (transport_cmd_get_valid_sectors(cmd) < 0)
-		return -EINVAL;
-
-	sectors = DIV_ROUND_UP(cmd->data_length, attr->block_size);
-
-	BUG_ON(cmd->data_length % attr->block_size);
-	BUG_ON(sectors > attr->max_sectors);
-
-	task = transport_generic_get_task(cmd, data_direction);
-	if (!task)
-		return -ENOMEM;
-
-	task->task_sg = cmd_sg;
-	task->task_sg_nents = sgl_nents;
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	list_add_tail(&task->t_list, &cmd->t_task_list);
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	return 1;
-}
-
-static int
-transport_allocate_control_task(struct se_cmd *cmd)
-{
-	struct se_task *task;
-	unsigned long flags;
-
-	/* Workaround for handling zero-length control CDBs */
-	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
-	    !cmd->data_length)
-		return 0;
-
-	task = transport_generic_get_task(cmd, cmd->data_direction);
-	if (!task)
-		return -ENOMEM;
-
-	task->task_sg = cmd->t_data_sg;
-	task->task_sg_nents = cmd->t_data_nents;
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	list_add_tail(&task->t_list, &cmd->t_task_list);
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	/* Success! Return number of tasks allocated */
-	return 1;
-}
-
-/*
  * Allocate any required resources to execute the command. For writes we
  * might not have the payload yet, so notify the fabric via a call to
  * ->write_pending instead. Otherwise place it on the execution queue.
@@ -3774,8 +3689,8 @@ transport_allocate_control_task(struct se_cmd *cmd)
 int transport_generic_new_cmd(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	int task_cdbs, task_cdbs_bidi = 0;
-	int set_counts = 1;
+	struct se_task *task;
+	unsigned long flags;
 	int ret = 0;
 
 	/*
@@ -3790,35 +3705,9 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 		goto out_fail;
 	}
 
-	/*
-	 * For BIDI command set up the read tasks first.
-	 */
-	if (cmd->t_bidi_data_sg &&
-	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
-		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
-
-		task_cdbs_bidi = transport_allocate_data_tasks(cmd,
-				DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
-				cmd->t_bidi_data_nents);
-		if (task_cdbs_bidi <= 0)
-			goto out_fail;
-
-		atomic_inc(&cmd->t_fe_count);
-		atomic_inc(&cmd->t_se_count);
-		set_counts = 0;
-	}
-
-	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
-		task_cdbs = transport_allocate_data_tasks(cmd,
-				cmd->data_direction, cmd->t_data_sg,
-				cmd->t_data_nents);
-	} else {
-		task_cdbs = transport_allocate_control_task(cmd);
-	}
-
-	if (task_cdbs < 0)
-		goto out_fail;
-	else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+	/* Workaround for handling zero-length control CDBs */
+	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
+	    !cmd->data_length) {
 		spin_lock_irq(&cmd->t_state_lock);
 		cmd->t_state = TRANSPORT_COMPLETE;
 		cmd->transport_state |= CMD_T_ACTIVE;
@@ -3836,12 +3725,40 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 		return 0;
 	}
 
-	if (set_counts) {
-		atomic_inc(&cmd->t_fe_count);
-		atomic_inc(&cmd->t_se_count);
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+		struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
+
+		if (transport_cmd_get_valid_sectors(cmd) < 0)
+			return -EINVAL;
+
+		BUG_ON(cmd->data_length % attr->block_size);
+		BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) >
+			attr->max_sectors);
+	}
+
+	task = dev->transport->alloc_task(cmd->t_task_cdb);
+	if (!task) {
+		pr_err("Unable to allocate struct se_task\n");
+		goto out_fail;
 	}
 
-	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
+	INIT_LIST_HEAD(&task->t_list);
+	INIT_LIST_HEAD(&task->t_execute_list);
+	INIT_LIST_HEAD(&task->t_state_list);
+	init_completion(&task->task_stop_comp);
+	task->task_se_cmd = cmd;
+	task->task_data_direction = cmd->data_direction;
+	task->task_sg = cmd->t_data_sg;
+	task->task_sg_nents = cmd->t_data_nents;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	list_add_tail(&task->t_list, &cmd->t_task_list);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	atomic_inc(&cmd->t_fe_count);
+	atomic_inc(&cmd->t_se_count);
+
+	cmd->t_task_list_num = 1;
 	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
 	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
 