author     Roland Dreier <roland@purestorage.com>        2012-02-13 19:18:14 -0500
committer  Nicholas Bellinger <nab@linux-iscsi.org>      2012-02-25 17:37:49 -0500
commit     d95b82461c56a6ff8ff248b101049a69ebb20278 (patch)
tree       0a0c8ad9d37c529c859ddcdfc7123b80bd195b0c /drivers
parent     3d28934aaae5e924afedf0f5cb42e1316514da6b (diff)
target: Fix up handling of short INQUIRY buffers
If the initiator sends us an INQUIRY command with an allocation length
that's shorter than what we want to return, we're simply supposed to
truncate our response and return what the initiator gave us space for,
without signaling any error.  Current target code has various tests that
don't fill out the full response if the buffer is too short and sometimes
return errors incorrectly.

Fix this up by allocating a bounce buffer for INQUIRY responses if we need
to, i.e. if we have cmd->data_length too small as well as
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC set in cmd->se_cmd_flags -- for most
fabrics, we always allocate at least a full page, but for tcm_loop we may
have a small buffer coming directly from the SCSI stack.

This lets us delete a lot of cmd->data_length checking, and also makes our
INQUIRY handling correct per SPC in a lot more cases.

Signed-off-by: Roland Dreier <roland@purestorage.com>
Cc: stable@vger.kernel.org
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
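To restate the rule the commit message relies on outside the kernel context, here is a minimal, self-contained C sketch (not code from this patch; the buffer layout is simplified and the helper names build_std_inquiry/emulate_inquiry are hypothetical): the emulation always builds the full standard INQUIRY payload in a private scratch buffer and then copies back at most ALLOCATION LENGTH bytes, never failing the command just because the initiator's buffer is short.

#include <stdio.h>
#include <string.h>

#define INQ_STD_LEN 36 /* full standard INQUIRY response per SPC */

/* Build a complete 36-byte standard INQUIRY payload in buf. */
static void build_std_inquiry(unsigned char *buf)
{
	memset(buf, 0, INQ_STD_LEN);
	buf[2] = 0x05;                            /* claims SPC-3 */
	buf[4] = 31;                              /* ADDITIONAL LENGTH (n - 4) */
	buf[7] = 0x32;                            /* Sync=1, CmdQue=1 */
	memcpy(&buf[8],  "LIO-ORG ", 8);          /* T10 VENDOR IDENTIFICATION */
	memcpy(&buf[16], "EXAMPLE DISK    ", 16); /* PRODUCT IDENTIFICATION */
	memcpy(&buf[32], "0001", 4);              /* PRODUCT REVISION LEVEL */
}

/*
 * Always build the full response in a scratch buffer, then copy back only
 * min(allocation length, response length) bytes -- a short allocation
 * length truncates the data, it never produces an error.
 */
static size_t emulate_inquiry(unsigned char *out, size_t alloc_len)
{
	unsigned char scratch[INQ_STD_LEN];
	size_t copy_len = alloc_len < INQ_STD_LEN ? alloc_len : INQ_STD_LEN;

	build_std_inquiry(scratch);
	memcpy(out, scratch, copy_len);
	return copy_len;
}

int main(void)
{
	unsigned char out[INQ_STD_LEN];
	size_t n = emulate_inquiry(out, 6); /* initiator only gave us 6 bytes */

	printf("returned %zu bytes, peripheral qualifier/type byte = 0x%02x\n",
	       n, (unsigned)out[0]);
	return 0;
}

The patch below applies the same idea inside the target core: when the mapped data buffer may be smaller than SE_INQUIRY_BUF, it substitutes a kzalloc'd bounce buffer, runs the unchanged emulation handlers against it, and copies cmd->data_length bytes back before freeing it.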
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/target/target_core_cdb.c  150
1 file changed, 36 insertions, 114 deletions
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index f3d71fa88a28..ef8034b60d8d 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -66,24 +66,11 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf)
 }
 
 static int
-target_emulate_inquiry_std(struct se_cmd *cmd)
+target_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_dev;
 	struct se_portal_group *tpg = lun->lun_sep->sep_tpg;
-	unsigned char *buf;
-
-	/*
-	 * Make sure we at least have 6 bytes of INQUIRY response
-	 * payload going back for EVPD=0
-	 */
-	if (cmd->data_length < 6) {
-		pr_err("SCSI Inquiry payload length: %u"
-			" too small for EVPD=0\n", cmd->data_length);
-		return -EINVAL;
-	}
-
-	buf = transport_kmap_data_sg(cmd);
 
 	if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
 		buf[0] = 0x3f; /* Not connected */
@@ -112,29 +99,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 	if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
 		target_fill_alua_data(lun->lun_sep, buf);
 
-	if (cmd->data_length < 8) {
-		buf[4] = 1; /* Set additional length to 1 */
-		goto out;
-	}
-
 	buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
 
-	/*
-	 * Do not include vendor, product, reversion info in INQUIRY
-	 * response payload for cdbs with a small allocation length.
-	 */
-	if (cmd->data_length < 36) {
-		buf[4] = 3; /* Set additional length to 3 */
-		goto out;
-	}
-
 	snprintf(&buf[8], 8, "LIO-ORG");
 	snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
 	snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
 	buf[4] = 31; /* Set additional length to 31 */
 
-out:
-	transport_kunmap_data_sg(cmd);
 	return 0;
 }
 
@@ -152,12 +123,6 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 	unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
 	unit_serial_len++; /* For NULL Terminator */
 
-	if (((len + 4) + unit_serial_len) > cmd->data_length) {
-		len += unit_serial_len;
-		buf[2] = ((len >> 8) & 0xff);
-		buf[3] = (len & 0xff);
-		return 0;
-	}
 	len += sprintf(&buf[4], "%s",
 			dev->se_sub_dev->t10_wwn.unit_serial);
 	len++; /* Extra Byte for NULL Terminator */
@@ -229,9 +194,6 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 	if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
 		goto check_t10_vend_desc;
 
-	if (off + 20 > cmd->data_length)
-		goto check_t10_vend_desc;
-
 	/* CODE SET == Binary */
 	buf[off++] = 0x1;
 
@@ -283,12 +245,6 @@ check_t10_vend_desc:
 			strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
 		unit_serial_len++; /* For NULL Terminator */
 
-		if ((len + (id_len + 4) +
-			(prod_len + unit_serial_len)) >
-				cmd->data_length) {
-			len += (prod_len + unit_serial_len);
-			goto check_port;
-		}
 		id_len += sprintf(&buf[off+12], "%s:%s", prod,
 				&dev->se_sub_dev->t10_wwn.unit_serial[0]);
 	}
@@ -306,7 +262,6 @@ check_t10_vend_desc:
 	/*
 	 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
 	 */
-check_port:
 	port = lun->lun_sep;
 	if (port) {
 		struct t10_alua_lu_gp *lu_gp;
@@ -323,10 +278,6 @@ check_port:
 		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
 		 * section 7.5.1 Table 362
 		 */
-		if (((len + 4) + 8) > cmd->data_length) {
-			len += 8;
-			goto check_tpgi;
-		}
 		buf[off] =
 			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
 		buf[off++] |= 0x1; /* CODE SET == Binary */
@@ -350,15 +301,10 @@ check_port:
 		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
 		 * section 7.5.1 Table 362
 		 */
-check_tpgi:
 		if (dev->se_sub_dev->t10_alua.alua_type !=
 				SPC3_ALUA_EMULATED)
 			goto check_scsi_name;
 
-		if (((len + 4) + 8) > cmd->data_length) {
-			len += 8;
-			goto check_lu_gp;
-		}
 		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
 		if (!tg_pt_gp_mem)
 			goto check_lu_gp;
@@ -391,10 +337,6 @@ check_tpgi:
 		 * section 7.7.3.8
 		 */
 check_lu_gp:
-		if (((len + 4) + 8) > cmd->data_length) {
-			len += 8;
-			goto check_scsi_name;
-		}
 		lu_gp_mem = dev->dev_alua_lu_gp_mem;
 		if (!lu_gp_mem)
 			goto check_scsi_name;
@@ -435,10 +377,6 @@ check_scsi_name:
 		/* Header size + Designation descriptor */
 		scsi_name_len += 4;
 
-		if (((len + 4) + scsi_name_len) > cmd->data_length) {
-			len += scsi_name_len;
-			goto set_len;
-		}
 		buf[off] =
 			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
 		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
@@ -474,7 +412,6 @@ check_scsi_name:
 		/* Header size + Designation descriptor */
 		len += (scsi_name_len + 4);
 	}
-set_len:
 	buf[2] = ((len >> 8) & 0xff);
 	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
 	return 0;
@@ -484,9 +421,6 @@ set_len:
 static int
 target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 {
-	if (cmd->data_length < 60)
-		return 0;
-
 	buf[3] = 0x3c;
 	/* Set HEADSUP, ORDSUP, SIMPSUP */
 	buf[5] = 0x07;
@@ -512,20 +446,6 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
 		have_tp = 1;
 
-	if (cmd->data_length < (0x10 + 4)) {
-		pr_debug("Received data_length: %u"
-			" too small for EVPD 0xb0\n",
-			cmd->data_length);
-		return -EINVAL;
-	}
-
-	if (have_tp && cmd->data_length < (0x3c + 4)) {
-		pr_debug("Received data_length: %u"
-			" too small for TPE=1 EVPD 0xb0\n",
-			cmd->data_length);
-		have_tp = 0;
-	}
-
 	buf[0] = dev->transport->get_device_type(dev);
 	buf[3] = have_tp ? 0x3c : 0x10;
 
@@ -548,10 +468,9 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
 
 	/*
-	 * Exit now if we don't support TP or the initiator sent a too
-	 * short buffer.
+	 * Exit now if we don't support TP.
 	 */
-	if (!have_tp || cmd->data_length < (0x3c + 4))
+	if (!have_tp)
 		return 0;
 
 	/*
@@ -589,10 +508,7 @@ target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
 
 	buf[0] = dev->transport->get_device_type(dev);
 	buf[3] = 0x3c;
-
-	if (cmd->data_length >= 5 &&
-	    dev->se_sub_dev->se_dev_attrib.is_nonrot)
-		buf[5] = 1;
+	buf[5] = dev->se_sub_dev->se_dev_attrib.is_nonrot ? 1 : 0;
 
 	return 0;
 }
@@ -671,8 +587,6 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
 {
 	int p;
 
-	if (cmd->data_length < 8)
-		return 0;
 	/*
 	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
 	 * Registered Extended LUN WWN has been set via ConfigFS
@@ -681,8 +595,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
 	if (cmd->se_dev->se_sub_dev->su_dev_flags &
 			SDF_EMULATED_VPD_UNIT_SERIAL) {
 		buf[3] = ARRAY_SIZE(evpd_handlers);
-		for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers),
-				cmd->data_length - 4); ++p)
+		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
 			buf[p + 4] = evpd_handlers[p].page;
 	}
 
@@ -693,45 +606,50 @@ int target_emulate_inquiry(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf;
+	unsigned char *buf, *map_buf;
 	unsigned char *cdb = cmd->t_task_cdb;
 	int p, ret;
 
+	map_buf = transport_kmap_data_sg(cmd);
+	/*
+	 * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
+	 * know we actually allocated a full page.  Otherwise, if the
+	 * data buffer is too small, allocate a temporary buffer so we
+	 * don't have to worry about overruns in all our INQUIRY
+	 * emulation handling.
+	 */
+	if (cmd->data_length < SE_INQUIRY_BUF &&
+	    (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+		buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
+		if (!buf) {
+			transport_kunmap_data_sg(cmd);
+			cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			return -ENOMEM;
+		}
+	} else {
+		buf = map_buf;
+	}
+
 	if (!(cdb[1] & 0x1)) {
 		if (cdb[2]) {
 			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
 				cdb[2]);
 			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 
-		ret = target_emulate_inquiry_std(cmd);
+		ret = target_emulate_inquiry_std(cmd, buf);
 		goto out;
 	}
 
-	/*
-	 * Make sure we at least have 4 bytes of INQUIRY response
-	 * payload for 0x00 going back for EVPD=1. Note that 0x80
-	 * and 0x83 will check for enough payload data length and
-	 * jump to set_len: label when there is not enough inquiry EVPD
-	 * payload length left for the next outgoing EVPD metadata
-	 */
-	if (cmd->data_length < 4) {
-		pr_err("SCSI Inquiry payload length: %u"
-			" too small for EVPD=1\n", cmd->data_length);
-		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-		return -EINVAL;
-	}
-
-	buf = transport_kmap_data_sg(cmd);
-
 	buf[0] = dev->transport->get_device_type(dev);
 
 	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
 		if (cdb[2] == evpd_handlers[p].page) {
 			buf[1] = cdb[2];
 			ret = evpd_handlers[p].emulate(cmd, buf);
-			goto out_unmap;
+			goto out;
 		}
 	}
 
@@ -739,9 +657,13 @@ int target_emulate_inquiry(struct se_task *task)
 	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
 	ret = -EINVAL;
 
-out_unmap:
-	transport_kunmap_data_sg(cmd);
 out:
+	if (buf != map_buf) {
+		memcpy(map_buf, buf, cmd->data_length);
+		kfree(buf);
+	}
+	transport_kunmap_data_sg(cmd);
+
 	if (!ret) {
 		task->task_scsi_status = GOOD;
 		transport_complete_task(task, 1);