author		Akinobu Mita <akinobu.mita@gmail.com>	2015-04-11 00:17:31 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2015-04-14 15:37:42 -0400
commit		bfd9a53e0110442eeef670227907bdd14def94e1 (patch)
tree		b725f4a0aa3f27bd7497515a21bba4cd7e002de9 /drivers/target
parent		054922bb3549abbea9ed2c1a78a1e331343cc05e (diff)
target/rd: Don't pass incomplete scatterlist entries to sbc_dif_verify_*
The scatterlist for protection information which is passed to
sbc_dif_verify_read() or sbc_dif_verify_write() requires that
neighboring scatterlist entries are contiguous or chained, so that they
can be iterated by sg_next().

However, the protection information for RD-MCP backends can be located
in multiple scatterlist arrays when the ramdisk space is too large. So
if a read/write request straddles this boundary, sbc_dif_verify_read()
or sbc_dif_verify_write() can't iterate all scatterlist entries.

This problem is fixed by chaining the protection information
scatterlists at creation time. For architectures which don't support
sg chaining (i.e. !CONFIG_ARCH_HAS_SG_CHAIN), fix it by allocating a
temporary contiguous scatterlist when needed.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Nicholas Bellinger <nab@linux-iscsi.org>
Cc: Sagi Grimberg <sagig@dev.mellanox.co.il>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: target-devel@vger.kernel.org
Cc: linux-scsi@vger.kernel.org
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
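As background for the first two hunks below, a minimal sketch of the sg chaining mechanism being relied on (not part of the patch; the function name, segment count and buffer sizes are invented for illustration): the first scatterlist array reserves one extra slot, sg_chain() turns that slot into a link to the second array, and sg_next()/for_each_sg() then walk across both arrays transparently, which is exactly what sbc_dif_verify_read()/sbc_dif_verify_write() require.

#include <linux/scatterlist.h>
#include <linux/slab.h>

#define SEG_ENTRIES	4
#define BUF_LEN		512

/*
 * Illustration only (allocation-failure handling and freeing omitted):
 * chain two separately allocated scatterlist arrays so that all data
 * entries can be walked with sg_next()/for_each_sg().  Requires
 * CONFIG_ARCH_HAS_SG_CHAIN.
 */
static int sg_chain_example(void)
{
	struct scatterlist *seg1, *seg2, *sg;
	int i, count = 0;

	/* The first array reserves one extra slot for the chain entry. */
	seg1 = kcalloc(SEG_ENTRIES + 1, sizeof(*seg1), GFP_KERNEL);
	seg2 = kcalloc(SEG_ENTRIES, sizeof(*seg2), GFP_KERNEL);
	if (!seg1 || !seg2)
		return -ENOMEM;

	sg_init_table(seg1, SEG_ENTRIES + 1);
	sg_init_table(seg2, SEG_ENTRIES);

	for (i = 0; i < SEG_ENTRIES; i++) {
		sg_set_buf(&seg1[i], kzalloc(BUF_LEN, GFP_KERNEL), BUF_LEN);
		sg_set_buf(&seg2[i], kzalloc(BUF_LEN, GFP_KERNEL), BUF_LEN);
	}

	/* The reserved last slot of seg1 becomes a link to seg2. */
	sg_chain(seg1, SEG_ENTRIES + 1, seg2);

	/* sg_next() now crosses the array boundary transparently. */
	for_each_sg(seg1, sg, 2 * SEG_ENTRIES, i)
		count++;

	return count;	/* 2 * SEG_ENTRIES */
}

On !CONFIG_ARCH_HAS_SG_CHAIN architectures the chain link cannot be used, which is why the patch instead copies the affected protection entries into one temporarily allocated contiguous array in rd_do_prot_rw(), as shown in the last hunk.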
Diffstat (limited to 'drivers/target')
-rw-r--r--	drivers/target/target_core_rd.c	67
1 file changed, 64 insertions(+), 3 deletions(-)
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index ccf62a88f017..a263bf5fab8d 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -139,10 +139,22 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
 	unsigned char *p;
 
 	while (total_sg_needed) {
+		unsigned int chain_entry = 0;
+
 		sg_per_table = (total_sg_needed > max_sg_per_table) ?
 				max_sg_per_table : total_sg_needed;
 
-		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
+#ifdef CONFIG_ARCH_HAS_SG_CHAIN
+
+		/*
+		 * Reserve extra element for chain entry
+		 */
+		if (sg_per_table < total_sg_needed)
+			chain_entry = 1;
+
+#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
+
+		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
 				GFP_KERNEL);
 		if (!sg) {
 			pr_err("Unable to allocate scatterlist array"
@@ -150,7 +162,16 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
 			return -ENOMEM;
 		}
 
-		sg_init_table(sg, sg_per_table);
+		sg_init_table(sg, sg_per_table + chain_entry);
+
+#ifdef CONFIG_ARCH_HAS_SG_CHAIN
+
+		if (i > 0) {
+			sg_chain(sg_table[i - 1].sg_table,
+				 max_sg_per_table + 1, sg);
+		}
+
+#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
 
 		sg_table[i].sg_table = sg;
 		sg_table[i].rd_sg_count = sg_per_table;
@@ -390,11 +411,13 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
 	struct se_device *se_dev = cmd->se_dev;
 	struct rd_dev *dev = RD_DEV(se_dev);
 	struct rd_dev_sg_table *prot_table;
+	bool need_to_release = false;
 	struct scatterlist *prot_sg;
 	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
 	u32 prot_offset, prot_page;
+	u32 prot_npages __maybe_unused;
 	u64 tmp;
-	sense_reason_t rc;
+	sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
 	tmp = cmd->t_task_lba * se_dev->prot_length;
 	prot_offset = do_div(tmp, PAGE_SIZE);
@@ -407,7 +430,45 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
 	prot_sg = &prot_table->sg_table[prot_page -
 					prot_table->page_start_offset];
 
+#ifndef CONFIG_ARCH_HAS_SG_CHAIN
+
+	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
+				   PAGE_SIZE);
+
+	/*
+	 * Allocate temporarily contiguous scatterlist entries if prot pages
+	 * straddle multiple scatterlist tables.
+	 */
+	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
+		int i;
+
+		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
+		if (!prot_sg)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+		need_to_release = true;
+		sg_init_table(prot_sg, prot_npages);
+
+		for (i = 0; i < prot_npages; i++) {
+			if (prot_page + i > prot_table->page_end_offset) {
+				prot_table = rd_get_prot_table(dev,
+							       prot_page + i);
+				if (!prot_table) {
+					kfree(prot_sg);
+					return rc;
+				}
+				sg_unmark_end(&prot_sg[i - 1]);
+			}
+			prot_sg[i] = prot_table->sg_table[prot_page + i -
+						prot_table->page_start_offset];
+		}
+	}
+
+#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
+
 	rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset);
+	if (need_to_release)
+		kfree(prot_sg);
 
 	return rc;
 }