diff options
author | Nicholas Bellinger <nab@linux-iscsi.org> | 2011-08-13 08:30:31 -0400 |
---|---|---|
committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2011-08-22 15:26:28 -0400 |
commit | c3c74c7a33d837be391ab61aaae39bb21f16736a (patch) | |
tree | 376a28bf46562dbdb0aaa545a0c2c302717e9444 /drivers/target | |
parent | 525a48a21da259d00d6ebc5b60563b5bcf022c26 (diff) |
target: Fix task SGL chaining breakage with transport_allocate_data_tasks
This patch fixes two bugs associated with transport_do_task_sg_chain()
operation where transport_allocate_data_tasks() was incorrectly setting
task_padded_sg for all tasks, and causing bogus task->task_sg_nents
assignments + OOPsen with fabrics depending upon this code. The first bit
here adds a task_sg_nents_padded check in transport_allocate_data_tasks()
to include an extra SGL vector when necessary for tasks that expect to
be linked using sg_chain().
The second change involves making transport_do_task_sg_chain() properly
account for the extra SGL vector when task->task_padded_sg is set for
the non-trailing ->task_sg or single ->task_sg allocations. Note this
patch also removes the BUG_ON(!task->task_padded_sg) check within
transport_do_task_sg_chain() as we expect this to happen normally
with the updated logic in transport_allocate_data_tasks(), along with
being bogus for CONTROL_SG_IO_CDB type payloads.
So far this bugfix has been tested with tcm_qla2xxx and iblock backends
in (task_count > 1) and (task_count == 1) operation.
Reported-by: Kiran Patil <kiran.patil@intel.com>
Cc: Kiran Patil <kiran.patil@intel.com>
Cc: Andy Grover <agrover@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target')
-rw-r--r-- | drivers/target/target_core_transport.c | 34 |
1 files changed, 21 insertions, 13 deletions
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index efac6a9a7438..9cc49d1b5b1f 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -4044,8 +4044,6 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) | |||
4044 | if (!task->task_sg) | 4044 | if (!task->task_sg) |
4045 | continue; | 4045 | continue; |
4046 | 4046 | ||
4047 | BUG_ON(!task->task_padded_sg); | ||
4048 | |||
4049 | if (!sg_first) { | 4047 | if (!sg_first) { |
4050 | sg_first = task->task_sg; | 4048 | sg_first = task->task_sg; |
4051 | chained_nents = task->task_sg_nents; | 4049 | chained_nents = task->task_sg_nents; |
@@ -4053,9 +4051,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) | |||
4053 | sg_chain(sg_prev, sg_prev_nents, task->task_sg); | 4051 | sg_chain(sg_prev, sg_prev_nents, task->task_sg); |
4054 | chained_nents += task->task_sg_nents; | 4052 | chained_nents += task->task_sg_nents; |
4055 | } | 4053 | } |
4054 | /* | ||
4055 | * For the padded tasks, use the extra SGL vector allocated | ||
4056 | * in transport_allocate_data_tasks() for the sg_prev_nents | ||
4057 | * offset into sg_chain() above.. The last task of a | ||
4058 | * multi-task list, or a single task will not have | ||
4059 | * task->task_sg_padded set.. | ||
4060 | */ | ||
4061 | if (task->task_padded_sg) | ||
4062 | sg_prev_nents = (task->task_sg_nents + 1); | ||
4063 | else | ||
4064 | sg_prev_nents = task->task_sg_nents; | ||
4056 | 4065 | ||
4057 | sg_prev = task->task_sg; | 4066 | sg_prev = task->task_sg; |
4058 | sg_prev_nents = task->task_sg_nents; | ||
4059 | } | 4067 | } |
4060 | /* | 4068 | /* |
4061 | * Setup the starting pointer and total t_tasks_sg_linked_no including | 4069 | * Setup the starting pointer and total t_tasks_sg_linked_no including |
@@ -4107,7 +4115,7 @@ static int transport_allocate_data_tasks( | |||
4107 | 4115 | ||
4108 | cmd_sg = sgl; | 4116 | cmd_sg = sgl; |
4109 | for (i = 0; i < task_count; i++) { | 4117 | for (i = 0; i < task_count; i++) { |
4110 | unsigned int task_size; | 4118 | unsigned int task_size, task_sg_nents_padded; |
4111 | int count; | 4119 | int count; |
4112 | 4120 | ||
4113 | task = transport_generic_get_task(cmd, data_direction); | 4121 | task = transport_generic_get_task(cmd, data_direction); |
@@ -4135,24 +4143,24 @@ static int transport_allocate_data_tasks( | |||
4135 | * Check if the fabric module driver is requesting that all | 4143 | * Check if the fabric module driver is requesting that all |
4136 | * struct se_task->task_sg[] be chained together.. If so, | 4144 | * struct se_task->task_sg[] be chained together.. If so, |
4137 | * then allocate an extra padding SG entry for linking and | 4145 | * then allocate an extra padding SG entry for linking and |
4138 | * marking the end of the chained SGL. | 4146 | * marking the end of the chained SGL for every task except |
4139 | * Possibly over-allocate task sgl size by using cmd sgl size. | 4147 | * the last one for (task_count > 1) operation, or skipping |
4140 | * It's so much easier and only a waste when task_count > 1. | 4148 | * the extra padding for the (task_count == 1) case. |
4141 | * That is extremely rare. | ||
4142 | */ | 4149 | */ |
4143 | if (cmd->se_tfo->task_sg_chaining) { | 4150 | if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { |
4144 | task->task_sg_nents++; | 4151 | task_sg_nents_padded = (task->task_sg_nents + 1); |
4145 | task->task_padded_sg = 1; | 4152 | task->task_padded_sg = 1; |
4146 | } | 4153 | } else |
4154 | task_sg_nents_padded = task->task_sg_nents; | ||
4147 | 4155 | ||
4148 | task->task_sg = kmalloc(sizeof(struct scatterlist) * | 4156 | task->task_sg = kmalloc(sizeof(struct scatterlist) * |
4149 | task->task_sg_nents, GFP_KERNEL); | 4157 | task_sg_nents_padded, GFP_KERNEL); |
4150 | if (!task->task_sg) { | 4158 | if (!task->task_sg) { |
4151 | cmd->se_dev->transport->free_task(task); | 4159 | cmd->se_dev->transport->free_task(task); |
4152 | return -ENOMEM; | 4160 | return -ENOMEM; |
4153 | } | 4161 | } |
4154 | 4162 | ||
4155 | sg_init_table(task->task_sg, task->task_sg_nents); | 4163 | sg_init_table(task->task_sg, task_sg_nents_padded); |
4156 | 4164 | ||
4157 | task_size = task->task_size; | 4165 | task_size = task->task_size; |
4158 | 4166 | ||