author	Christoph Hellwig <hch@lst.de>	2015-10-03 13:16:07 -0400
committer	James Bottomley <JBottomley@Odin.com>	2015-10-07 13:24:48 -0400
commit	15e3d5a285ab9283136dba34bbf72886d9146706 (patch)
tree	8c94bda0e97d809ed69bfe5e51a1a064d5e27a9d
parent	1378889c563a2938d231203ed36c041af183b798 (diff)
3w-9xxx: don't unmap bounce buffered commands
The 3w controllers don't DMA map small single SGL entry commands but
instead bounce buffer them.  Add a helper to identify these commands and
don't call scsi_dma_unmap for them.

Based on an earlier patch from James Bottomley.

Fixes: 118c85 ("3w-9xxx: fix command completion race")
Reported-by: Tóth Attila <atoth@atoth.sote.hu>
Tested-by: Tóth Attila <atoth@atoth.sote.hu>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Adam Radford <aradford@gmail.com>
Signed-off-by: James Bottomley <JBottomley@Odin.com>
-rw-r--r--	drivers/scsi/3w-9xxx.c	28
1 file changed, 21 insertions(+), 7 deletions(-)
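For reference, here is a minimal stand-alone sketch (illustration only, not part of the patch) of the decision the new twa_command_mapped() helper encodes: a command is bounce buffered, and therefore must not be passed to scsi_dma_unmap(), exactly when it has a single SG entry shorter than TW_MIN_SGL_LENGTH; the value 512 used below is an assumption for the example, not taken from this diff.

/* Illustration of the twa_command_mapped() predicate from the diff below. */
#include <stdbool.h>
#include <stdio.h>

#define TW_MIN_SGL_LENGTH 512	/* assumed value, for illustration only */

static bool command_mapped(int sg_count, unsigned int bufflen)
{
	/* Single short SG entry -> bounce buffered (not mapped); all else,
	 * including zero-length transfers, is treated as a mapped SGL. */
	return sg_count != 1 || bufflen >= TW_MIN_SGL_LENGTH;
}

int main(void)
{
	printf("1 entry, 128 bytes  -> mapped=%d\n", command_mapped(1, 128));
	printf("1 entry, 4096 bytes -> mapped=%d\n", command_mapped(1, 4096));
	printf("4 entries, 128 bytes -> mapped=%d\n", command_mapped(4, 128));
	printf("0 entries, 0 bytes   -> mapped=%d\n", command_mapped(0, 0));
	return 0;
}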
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index add419d6ff34..a56a7b243e91 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -212,6 +212,17 @@ static const struct file_operations twa_fops = {
 	.llseek		= noop_llseek,
 };
 
+/*
+ * The controllers use an inline buffer instead of a mapped SGL for small,
+ * single entry buffers.  Note that we treat a zero-length transfer like
+ * a mapped SGL.
+ */
+static bool twa_command_mapped(struct scsi_cmnd *cmd)
+{
+	return scsi_sg_count(cmd) != 1 ||
+		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
+}
+
 /* This function will complete an aen request from the isr */
 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
 {
@@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
 			}
 
 			/* Now complete the io */
-			scsi_dma_unmap(cmd);
+			if (twa_command_mapped(cmd))
+				scsi_dma_unmap(cmd);
 			cmd->scsi_done(cmd);
 			tw_dev->state[request_id] = TW_S_COMPLETED;
 			twa_free_request_id(tw_dev, request_id);
@@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
 				struct scsi_cmnd *cmd = tw_dev->srb[i];
 
 				cmd->result = (DID_RESET << 16);
-				scsi_dma_unmap(cmd);
+				if (twa_command_mapped(cmd))
+					scsi_dma_unmap(cmd);
 				cmd->scsi_done(cmd);
 			}
 		}
@@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
 	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
 	switch (retval) {
 	case SCSI_MLQUEUE_HOST_BUSY:
-		scsi_dma_unmap(SCpnt);
+		if (twa_command_mapped(SCpnt))
+			scsi_dma_unmap(SCpnt);
 		twa_free_request_id(tw_dev, request_id);
 		break;
 	case 1:
 		SCpnt->result = (DID_ERROR << 16);
-		scsi_dma_unmap(SCpnt);
+		if (twa_command_mapped(SCpnt))
+			scsi_dma_unmap(SCpnt);
 		done(SCpnt);
 		tw_dev->state[request_id] = TW_S_COMPLETED;
 		twa_free_request_id(tw_dev, request_id);
@@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
 	/* Map sglist from scsi layer to cmd packet */
 
 	if (scsi_sg_count(srb)) {
-		if ((scsi_sg_count(srb) == 1) &&
-		    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+		if (!twa_command_mapped(srb)) {
 			if (srb->sc_data_direction == DMA_TO_DEVICE ||
 			    srb->sc_data_direction == DMA_BIDIRECTIONAL)
 				scsi_sg_copy_to_buffer(srb,
@@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
 {
 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
 
-	if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+	if (!twa_command_mapped(cmd) &&
 	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
 	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
 		if (scsi_sg_count(cmd) == 1) {