aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-06-27 12:42:16 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-06-27 12:42:16 -0400
commitdebd52a0506147c0ce8370ee82f5a3b79efa2f26 (patch)
tree319ed9fcc08d47bf38a5d542e4fa2f0ce538c98c /drivers
parentc92067ae06cb71561628d9f4b24b56c1813c54e0 (diff)
parent52ab9768f723823a71dc659f0fad803a90f80236 (diff)
Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI fixes from James Bottomley: "Three small bug fixes (barrier elimination, memory leak on unload, spinlock recursion) and a technical enhancement left over from the merge window: the TCMU read length support is required for tape device reads when the length of the read is greater than the tape block size" * tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: scsi: scsi_debug: Fix memory leak on module unload scsi: qla2xxx: Spinlock recursion in qla_target scsi: ipr: Eliminate duplicate barriers scsi: target: tcmu: add read length support
Diffstat (limited to 'drivers')
-rw-r--r--drivers/scsi/ipr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c7
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/target/target_core_user.c44
4 files changed, 40 insertions, 15 deletions
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0a9b8b387bd2..02d65dce74e5 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -760,7 +760,6 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
760 ioa_cfg->hrrq[i].allow_interrupts = 0; 760 ioa_cfg->hrrq[i].allow_interrupts = 0;
761 spin_unlock(&ioa_cfg->hrrq[i]._lock); 761 spin_unlock(&ioa_cfg->hrrq[i]._lock);
762 } 762 }
763 wmb();
764 763
765 /* Set interrupt mask to stop all new interrupts */ 764 /* Set interrupt mask to stop all new interrupts */
766 if (ioa_cfg->sis64) 765 if (ioa_cfg->sis64)
@@ -8403,7 +8402,6 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8403 ioa_cfg->hrrq[i].allow_interrupts = 1; 8402 ioa_cfg->hrrq[i].allow_interrupts = 1;
8404 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8403 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8405 } 8404 }
8406 wmb();
8407 if (ioa_cfg->sis64) { 8405 if (ioa_cfg->sis64) {
8408 /* Set the adapter to the correct endian mode. */ 8406 /* Set the adapter to the correct endian mode. */
8409 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 8407 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 0fea2e2326be..1027b0cb7fa3 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1224,7 +1224,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
1224void qlt_schedule_sess_for_deletion(struct fc_port *sess) 1224void qlt_schedule_sess_for_deletion(struct fc_port *sess)
1225{ 1225{
1226 struct qla_tgt *tgt = sess->tgt; 1226 struct qla_tgt *tgt = sess->tgt;
1227 struct qla_hw_data *ha = sess->vha->hw;
1228 unsigned long flags; 1227 unsigned long flags;
1229 1228
1230 if (sess->disc_state == DSC_DELETE_PEND) 1229 if (sess->disc_state == DSC_DELETE_PEND)
@@ -1241,16 +1240,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
1241 return; 1240 return;
1242 } 1241 }
1243 1242
1244 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1245 if (sess->deleted == QLA_SESS_DELETED) 1243 if (sess->deleted == QLA_SESS_DELETED)
1246 sess->logout_on_delete = 0; 1244 sess->logout_on_delete = 0;
1247 1245
1246 spin_lock_irqsave(&sess->vha->work_lock, flags);
1248 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 1247 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1249 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1248 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1250 return; 1249 return;
1251 } 1250 }
1252 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; 1251 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
1253 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1252 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1254 1253
1255 sess->disc_state = DSC_DELETE_PEND; 1254 sess->disc_state = DSC_DELETE_PEND;
1256 1255
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 24d7496cd9e2..364e71861bfd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5507,9 +5507,9 @@ static void __exit scsi_debug_exit(void)
5507 int k = sdebug_add_host; 5507 int k = sdebug_add_host;
5508 5508
5509 stop_all_queued(); 5509 stop_all_queued();
5510 free_all_queued();
5511 for (; k; k--) 5510 for (; k; k--)
5512 sdebug_remove_adapter(); 5511 sdebug_remove_adapter();
5512 free_all_queued();
5513 driver_unregister(&sdebug_driverfs_driver); 5513 driver_unregister(&sdebug_driverfs_driver);
5514 bus_unregister(&pseudo_lld_bus); 5514 bus_unregister(&pseudo_lld_bus);
5515 root_device_unregister(pseudo_primary); 5515 root_device_unregister(pseudo_primary);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 7f96dfa32b9c..d8dc3d22051f 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -656,7 +656,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
656} 656}
657 657
658static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 658static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
659 bool bidi) 659 bool bidi, uint32_t read_len)
660{ 660{
661 struct se_cmd *se_cmd = cmd->se_cmd; 661 struct se_cmd *se_cmd = cmd->se_cmd;
662 int i, dbi; 662 int i, dbi;
@@ -689,7 +689,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
689 for_each_sg(data_sg, sg, data_nents, i) { 689 for_each_sg(data_sg, sg, data_nents, i) {
690 int sg_remaining = sg->length; 690 int sg_remaining = sg->length;
691 to = kmap_atomic(sg_page(sg)) + sg->offset; 691 to = kmap_atomic(sg_page(sg)) + sg->offset;
692 while (sg_remaining > 0) { 692 while (sg_remaining > 0 && read_len > 0) {
693 if (block_remaining == 0) { 693 if (block_remaining == 0) {
694 if (from) 694 if (from)
695 kunmap_atomic(from); 695 kunmap_atomic(from);
@@ -701,6 +701,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
701 } 701 }
702 copy_bytes = min_t(size_t, sg_remaining, 702 copy_bytes = min_t(size_t, sg_remaining,
703 block_remaining); 703 block_remaining);
704 if (read_len < copy_bytes)
705 copy_bytes = read_len;
704 offset = DATA_BLOCK_SIZE - block_remaining; 706 offset = DATA_BLOCK_SIZE - block_remaining;
705 tcmu_flush_dcache_range(from, copy_bytes); 707 tcmu_flush_dcache_range(from, copy_bytes);
706 memcpy(to + sg->length - sg_remaining, from + offset, 708 memcpy(to + sg->length - sg_remaining, from + offset,
@@ -708,8 +710,11 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
708 710
709 sg_remaining -= copy_bytes; 711 sg_remaining -= copy_bytes;
710 block_remaining -= copy_bytes; 712 block_remaining -= copy_bytes;
713 read_len -= copy_bytes;
711 } 714 }
712 kunmap_atomic(to - sg->offset); 715 kunmap_atomic(to - sg->offset);
716 if (read_len == 0)
717 break;
713 } 718 }
714 if (from) 719 if (from)
715 kunmap_atomic(from); 720 kunmap_atomic(from);
@@ -1042,6 +1047,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1042{ 1047{
1043 struct se_cmd *se_cmd = cmd->se_cmd; 1048 struct se_cmd *se_cmd = cmd->se_cmd;
1044 struct tcmu_dev *udev = cmd->tcmu_dev; 1049 struct tcmu_dev *udev = cmd->tcmu_dev;
1050 bool read_len_valid = false;
1051 uint32_t read_len = se_cmd->data_length;
1045 1052
1046 /* 1053 /*
1047 * cmd has been completed already from timeout, just reclaim 1054 * cmd has been completed already from timeout, just reclaim
@@ -1056,13 +1063,28 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1056 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", 1063 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
1057 cmd->se_cmd); 1064 cmd->se_cmd);
1058 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; 1065 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
1059 } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { 1066 goto done;
1067 }
1068
1069 if (se_cmd->data_direction == DMA_FROM_DEVICE &&
1070 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
1071 read_len_valid = true;
1072 if (entry->rsp.read_len < read_len)
1073 read_len = entry->rsp.read_len;
1074 }
1075
1076 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
1060 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer); 1077 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
1061 } else if (se_cmd->se_cmd_flags & SCF_BIDI) { 1078 if (!read_len_valid )
1079 goto done;
1080 else
1081 se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
1082 }
1083 if (se_cmd->se_cmd_flags & SCF_BIDI) {
1062 /* Get Data-In buffer before clean up */ 1084 /* Get Data-In buffer before clean up */
1063 gather_data_area(udev, cmd, true); 1085 gather_data_area(udev, cmd, true, read_len);
1064 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 1086 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
1065 gather_data_area(udev, cmd, false); 1087 gather_data_area(udev, cmd, false, read_len);
1066 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 1088 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
1067 /* TODO: */ 1089 /* TODO: */
1068 } else if (se_cmd->data_direction != DMA_NONE) { 1090 } else if (se_cmd->data_direction != DMA_NONE) {
@@ -1070,7 +1092,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1070 se_cmd->data_direction); 1092 se_cmd->data_direction);
1071 } 1093 }
1072 1094
1073 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); 1095done:
1096 if (read_len_valid) {
1097 pr_debug("read_len = %d\n", read_len);
1098 target_complete_cmd_with_length(cmd->se_cmd,
1099 entry->rsp.scsi_status, read_len);
1100 } else
1101 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
1074 1102
1075out: 1103out:
1076 cmd->se_cmd = NULL; 1104 cmd->se_cmd = NULL;
@@ -1740,7 +1768,7 @@ static int tcmu_configure_device(struct se_device *dev)
1740 /* Initialise the mailbox of the ring buffer */ 1768 /* Initialise the mailbox of the ring buffer */
1741 mb = udev->mb_addr; 1769 mb = udev->mb_addr;
1742 mb->version = TCMU_MAILBOX_VERSION; 1770 mb->version = TCMU_MAILBOX_VERSION;
1743 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC; 1771 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
1744 mb->cmdr_off = CMDR_OFF; 1772 mb->cmdr_off = CMDR_OFF;
1745 mb->cmdr_size = udev->cmdr_size; 1773 mb->cmdr_size = udev->cmdr_size;
1746 1774