-rw-r--r--  drivers/target/target_core_transport.c  |  81
1 file changed, 32 insertions(+), 49 deletions(-)
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2fb6a259442..8c8e62e2687 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3808,29 +3808,34 @@ EXPORT_SYMBOL(transport_do_task_sg_chain);
 /*
  * Break up cmd into chunks transport can handle
  */
-static int transport_allocate_data_tasks(
-	struct se_cmd *cmd,
-	unsigned long long lba,
+static int
+transport_allocate_data_tasks(struct se_cmd *cmd,
 	enum dma_data_direction data_direction,
-	struct scatterlist *sgl,
-	unsigned int sgl_nents)
+	struct scatterlist *cmd_sg, unsigned int sgl_nents)
 {
-	struct se_task *task;
 	struct se_device *dev = cmd->se_dev;
-	unsigned long flags;
 	int task_count, i;
-	sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
-	u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
-	struct scatterlist *sg;
-	struct scatterlist *cmd_sg;
+	unsigned long long lba;
+	sector_t sectors, dev_max_sectors;
+	u32 sector_size;
+
+	if (transport_cmd_get_valid_sectors(cmd) < 0)
+		return -EINVAL;
+
+	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
+	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
 
 	WARN_ON(cmd->data_length % sector_size);
+
+	lba = cmd->t_task_lba;
 	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
 	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
 
-	cmd_sg = sgl;
 	for (i = 0; i < task_count; i++) {
+		struct se_task *task;
 		unsigned int task_size, task_sg_nents_padded;
+		struct scatterlist *sg;
+		unsigned long flags;
 		int count;
 
 		task = transport_generic_get_task(cmd, data_direction);
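The chunking arithmetic is unchanged by the rewrite: the byte length is rounded up to whole sectors, and the sector count is then split across tasks of at most max_sectors each. Below is a minimal user-space sketch of that calculation, with made-up example values (a 1 MiB command, 512-byte blocks, max_sectors of 1024) and plain integer math standing in for the kernel's DIV_ROUND_UP()/DIV_ROUND_UP_SECTOR_T() macros:

#include <stdio.h>

/* Plain-C stand-in for the kernel's round-up division helpers. */
static unsigned long long div_round_up(unsigned long long n, unsigned long long d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned long long data_length = 1024 * 1024;	/* example: 1 MiB command */
	unsigned long long sector_size = 512;		/* example: dev block_size */
	unsigned long long dev_max_sectors = 1024;	/* example: per-task limit */

	/* Same two steps as transport_allocate_data_tasks() above. */
	unsigned long long sectors = div_round_up(data_length, sector_size);
	unsigned long long task_count = div_round_up(sectors, dev_max_sectors);

	/* Prints: sectors=2048 task_count=2 */
	printf("sectors=%llu task_count=%llu\n", sectors, task_count);
	return 0;
}

With those example numbers the command covers 2048 sectors and is carried by two se_task chunks; the loop in the hunk above builds one task per chunk.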
@@ -3921,25 +3926,6 @@ transport_allocate_control_task(struct se_cmd *cmd)
 	return 1;
 }
 
-static u32 transport_allocate_tasks(
-	struct se_cmd *cmd,
-	unsigned long long lba,
-	enum dma_data_direction data_direction,
-	struct scatterlist *sgl,
-	unsigned int sgl_nents)
-{
-	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
-		if (transport_cmd_get_valid_sectors(cmd) < 0)
-			return -EINVAL;
-
-		return transport_allocate_data_tasks(cmd, lba, data_direction,
-			sgl, sgl_nents);
-	} else
-		return transport_allocate_control_task(cmd);
-
-}
-
-
 /*
  * Allocate any required ressources to execute the command, and either place
  * it on the execution queue if possible. For writes we might not have the
@@ -3965,17 +3951,14 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 	}
 
 	/*
-	 * Setup any BIDI READ tasks and memory from
-	 * cmd->t_mem_bidi_list so the READ struct se_tasks
-	 * are queued first for the non pSCSI passthrough case.
+	 * For BIDI command set up the read tasks first.
 	 */
 	if (cmd->t_bidi_data_sg &&
-	    (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
-		ret = transport_allocate_tasks(cmd,
-			cmd->t_task_lba,
-			DMA_FROM_DEVICE,
-			cmd->t_bidi_data_sg,
-			cmd->t_bidi_data_nents);
+	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
+
+		ret = transport_allocate_data_tasks(cmd, DMA_FROM_DEVICE,
+				cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
 		if (ret <= 0)
 			goto out_fail;
 
@@ -3983,15 +3966,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 		atomic_inc(&cmd->t_se_count);
 		set_counts = 0;
 	}
-	/*
-	 * Setup the tasks and memory from cmd->t_mem_list
-	 * Note for BIDI transfers this will contain the WRITE payload
-	 */
-	task_cdbs = transport_allocate_tasks(cmd,
-		cmd->t_task_lba,
-		cmd->data_direction,
-		cmd->t_data_sg,
-		cmd->t_data_nents);
+
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+		task_cdbs = transport_allocate_data_tasks(cmd,
+			cmd->data_direction, cmd->t_data_sg,
+			cmd->t_data_nents);
+	} else {
+		task_cdbs = transport_allocate_control_task(cmd);
+	}
+
 	if (task_cdbs <= 0)
 		goto out_fail;
 
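With the wrapper gone, the last two hunks leave transport_generic_new_cmd() making the dispatch decisions itself: for a BIDI command the read-direction tasks are allocated first from t_bidi_data_sg, then the command's own payload gets either data tasks or a single control task, and any return value <= 0 is treated as failure. The following is a rough user-space model of that control flow only; struct fake_cmd and the stub allocators are invented stand-ins, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for struct se_cmd; only the fields the flow needs. */
struct fake_cmd {
	bool is_data_cdb;	/* stands in for SCF_SCSI_DATA_SG_IO_CDB */
	bool has_bidi_sg;	/* stands in for cmd->t_bidi_data_sg != NULL */
};

/* Stub allocators: positive task count on success, <= 0 on failure. */
static int allocate_data_tasks(struct fake_cmd *cmd)   { (void)cmd; return 2; }
static int allocate_control_task(struct fake_cmd *cmd) { (void)cmd; return 1; }

static int new_cmd(struct fake_cmd *cmd)
{
	int task_cdbs;

	if (cmd->has_bidi_sg) {
		/* Mirrors the BUG_ON(): BIDI implies a data CDB. */
		if (!cmd->is_data_cdb)
			return -1;
		/* Read-direction tasks are set up first. */
		if (allocate_data_tasks(cmd) <= 0)
			return -1;
	}

	/* The dispatch previously hidden inside transport_allocate_tasks(). */
	if (cmd->is_data_cdb)
		task_cdbs = allocate_data_tasks(cmd);
	else
		task_cdbs = allocate_control_task(cmd);

	return task_cdbs <= 0 ? -1 : 0;
}

int main(void)
{
	struct fake_cmd cmd = { .is_data_cdb = true, .has_bidi_sg = true };

	printf("new_cmd() -> %d\n", new_cmd(&cmd));
	return 0;
}

The real function also handles reference counting and the write-pending path for writes; the sketch only traces the allocation decisions this patch touches.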