author     Brian King <brking@linux.vnet.ibm.com>    2008-10-29 09:46:41 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>    2008-12-29 12:24:13 -0500
commit     ad8dcffaf9bc1d7eb86dabf591e95f4ffb86cf1b
tree       db8928e93f09708714f3bed2dd2a2a244721b6b5
parent     50119dad2a6c2674f35d81e708822b40f65f40cb
[SCSI] ibmvfc: Error handling fixes
Due to an ambiguity in the VIOS VFC interface specification, abort/cancel
handling is not done correctly and can result in double completion of
commands. In order to cancel all outstanding commands to a device, a cancel
must be sent, followed by an abort task set. After the responses are received
for these commands, there may still be commands outstanding, in the process
of getting flushed back, in which case we need to wait for them. This patch
removes the assumption that if the abort and the cancel both complete
successfully, the device queue has been flushed; instead it waits for all the
responses to come back.

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
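For reference, the abort path after this change condenses to the sketch below
(paraphrased from the full patch that follows; locking, the ENTER/LEAVE trace
macros, and the analogous device reset and target reset handlers are omitted):

    static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
    {
            struct scsi_device *sdev = cmd->device;
            struct ibmvfc_host *vhost = shost_priv(sdev->host);
            int cancel_rc, abort_rc;
            int rc = FAILED;

            ibmvfc_wait_while_resetting(vhost);

            /* Issue the cancel, then the abort task set, as before. */
            cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
            abort_rc = ibmvfc_abort_task_set(sdev);

            /*
             * Even when both succeed, responses may still be in flight;
             * wait for every sent event that matches this LUN instead of
             * assuming the device queue has already been flushed.
             */
            if (!cancel_rc && !abort_rc)
                    rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);

            return rc;
    }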
Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvfc.c')
-rw-r--r--   drivers/scsi/ibmvscsi/ibmvfc.c   207
1 file changed, 147 insertions(+), 60 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ae65fa60eeb4..af69c0738edb 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -758,6 +758,9 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
                 cmnd->scsi_done(cmnd);
         }
 
+        if (evt->eh_comp)
+                complete(evt->eh_comp);
+
         ibmvfc_free_event(evt);
 }
 
@@ -1245,6 +1248,7 @@ static void ibmvfc_init_event(struct ibmvfc_event *evt,
         evt->sync_iu = NULL;
         evt->crq.format = format;
         evt->done = done;
+        evt->eh_comp = NULL;
 }
 
 /**
@@ -1485,6 +1489,9 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
                 cmnd->scsi_done(cmnd);
         }
 
+        if (evt->eh_comp)
+                complete(evt->eh_comp);
+
         ibmvfc_free_event(evt);
 }
 
@@ -1785,7 +1792,8 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 {
         struct ibmvfc_host *vhost = shost_priv(sdev->host);
-        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+        struct scsi_target *starget = scsi_target(sdev);
+        struct fc_rport *rport = starget_to_rport(starget);
         struct ibmvfc_tmf *tmf;
         struct ibmvfc_event *evt, *found_evt;
         union ibmvfc_iu rsp;
@@ -1823,7 +1831,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
         int_to_scsilun(sdev->lun, &tmf->lun);
         tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
         tmf->cancel_key = (unsigned long)sdev->hostdata;
-        tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata);
+        tmf->my_cancel_key = (unsigned long)starget->hostdata;
 
         evt->sync_iu = &rsp;
         init_completion(&evt->comp);
@@ -1855,6 +1863,91 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 }
 
 /**
+ * ibmvfc_match_target - Match function for specified target
+ * @evt: ibmvfc event struct
+ * @device: device to match (starget)
+ *
+ * Returns:
+ * 1 if event matches starget / 0 if event does not match starget
+ **/
+static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
+{
+        if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
+                return 1;
+        return 0;
+}
+
+/**
+ * ibmvfc_match_lun - Match function for specified LUN
+ * @evt: ibmvfc event struct
+ * @device: device to match (sdev)
+ *
+ * Returns:
+ * 1 if event matches sdev / 0 if event does not match sdev
+ **/
+static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
+{
+        if (evt->cmnd && evt->cmnd->device == device)
+                return 1;
+        return 0;
+}
+
+/**
+ * ibmvfc_wait_for_ops - Wait for ops to complete
+ * @vhost: ibmvfc host struct
+ * @device: device to match (starget or sdev)
+ * @match: match function
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
+                               int (*match) (struct ibmvfc_event *, void *))
+{
+        struct ibmvfc_event *evt;
+        DECLARE_COMPLETION_ONSTACK(comp);
+        int wait;
+        unsigned long flags;
+        signed long timeout = init_timeout * HZ;
+
+        ENTER;
+        do {
+                wait = 0;
+                spin_lock_irqsave(vhost->host->host_lock, flags);
+                list_for_each_entry(evt, &vhost->sent, queue) {
+                        if (match(evt, device)) {
+                                evt->eh_comp = &comp;
+                                wait++;
+                        }
+                }
+                spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+                if (wait) {
+                        timeout = wait_for_completion_timeout(&comp, timeout);
+
+                        if (!timeout) {
+                                wait = 0;
+                                spin_lock_irqsave(vhost->host->host_lock, flags);
+                                list_for_each_entry(evt, &vhost->sent, queue) {
+                                        if (match(evt, device)) {
+                                                evt->eh_comp = NULL;
+                                                wait++;
+                                        }
+                                }
+                                spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                                if (wait)
+                                        dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
+                                LEAVE;
+                                return wait ? FAILED : SUCCESS;
+                        }
+                }
+        } while (wait);
+
+        LEAVE;
+        return SUCCESS;
+}
+
+/**
  * ibmvfc_eh_abort_handler - Abort a command
  * @cmd: scsi command to abort
  *
@@ -1863,29 +1956,21 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
  **/
 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
 {
-        struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
-        struct ibmvfc_event *evt, *pos;
+        struct scsi_device *sdev = cmd->device;
+        struct ibmvfc_host *vhost = shost_priv(sdev->host);
         int cancel_rc, abort_rc;
-        unsigned long flags;
+        int rc = FAILED;
 
         ENTER;
         ibmvfc_wait_while_resetting(vhost);
-        cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET);
-        abort_rc = ibmvfc_abort_task_set(cmd->device);
+        cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+        abort_rc = ibmvfc_abort_task_set(sdev);
 
-        if (!cancel_rc && !abort_rc) {
-                spin_lock_irqsave(vhost->host->host_lock, flags);
-                list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
-                        if (evt->cmnd && evt->cmnd->device == cmd->device)
-                                ibmvfc_fail_request(evt, DID_ABORT);
-                }
-                spin_unlock_irqrestore(vhost->host->host_lock, flags);
-                LEAVE;
-                return SUCCESS;
-        }
+        if (!cancel_rc && !abort_rc)
+                rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
         LEAVE;
-        return FAILED;
+        return rc;
 }
 
 /**
@@ -1897,29 +1982,21 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
  **/
 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
-        struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
-        struct ibmvfc_event *evt, *pos;
+        struct scsi_device *sdev = cmd->device;
+        struct ibmvfc_host *vhost = shost_priv(sdev->host);
         int cancel_rc, reset_rc;
-        unsigned long flags;
+        int rc = FAILED;
 
         ENTER;
         ibmvfc_wait_while_resetting(vhost);
-        cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET);
-        reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN");
+        cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
+        reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
 
-        if (!cancel_rc && !reset_rc) {
-                spin_lock_irqsave(vhost->host->host_lock, flags);
-                list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
-                        if (evt->cmnd && evt->cmnd->device == cmd->device)
-                                ibmvfc_fail_request(evt, DID_ABORT);
-                }
-                spin_unlock_irqrestore(vhost->host->host_lock, flags);
-                LEAVE;
-                return SUCCESS;
-        }
+        if (!cancel_rc && !reset_rc)
+                rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
         LEAVE;
-        return FAILED;
+        return rc;
 }
 
 /**
@@ -1955,31 +2032,23 @@ static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
  **/
 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
 {
-        struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
-        struct scsi_target *starget = scsi_target(cmd->device);
-        struct ibmvfc_event *evt, *pos;
+        struct scsi_device *sdev = cmd->device;
+        struct ibmvfc_host *vhost = shost_priv(sdev->host);
+        struct scsi_target *starget = scsi_target(sdev);
         int reset_rc;
+        int rc = FAILED;
         unsigned long cancel_rc = 0;
-        unsigned long flags;
 
         ENTER;
         ibmvfc_wait_while_resetting(vhost);
         starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
-        reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target");
+        reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
 
-        if (!cancel_rc && !reset_rc) {
-                spin_lock_irqsave(vhost->host->host_lock, flags);
-                list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
-                        if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
-                                ibmvfc_fail_request(evt, DID_ABORT);
-                }
-                spin_unlock_irqrestore(vhost->host->host_lock, flags);
-                LEAVE;
-                return SUCCESS;
-        }
+        if (!cancel_rc && !reset_rc)
+                rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
 
         LEAVE;
-        return FAILED;
+        return rc;
 }
 
 /**
@@ -2009,23 +2078,18 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
         struct scsi_target *starget = to_scsi_target(&rport->dev);
         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
         struct ibmvfc_host *vhost = shost_priv(shost);
-        struct ibmvfc_event *evt, *pos;
         unsigned long cancel_rc = 0;
         unsigned long abort_rc = 0;
-        unsigned long flags;
+        int rc = FAILED;
 
         ENTER;
         starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
         starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
 
-        if (!cancel_rc && !abort_rc) {
-                spin_lock_irqsave(shost->host_lock, flags);
-                list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
-                        if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
-                                ibmvfc_fail_request(evt, DID_ABORT);
-                }
-                spin_unlock_irqrestore(shost->host_lock, flags);
-        } else
+        if (!cancel_rc && !abort_rc)
+                rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
+
+        if (rc == FAILED)
                 ibmvfc_issue_fc_host_lip(shost);
         LEAVE;
 }
@@ -2259,6 +2323,28 @@ static int ibmvfc_slave_alloc(struct scsi_device *sdev)
 }
 
 /**
+ * ibmvfc_target_alloc - Setup the target's task set value
+ * @starget: struct scsi_target
+ *
+ * Set the target's task set value so that error handling works as
+ * expected.
+ *
+ * Returns:
+ * 0 on success / -ENXIO if device does not exist
+ **/
+static int ibmvfc_target_alloc(struct scsi_target *starget)
+{
+        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+        struct ibmvfc_host *vhost = shost_priv(shost);
+        unsigned long flags = 0;
+
+        spin_lock_irqsave(shost->host_lock, flags);
+        starget->hostdata = (void *)(unsigned long)vhost->task_set++;
+        spin_unlock_irqrestore(shost->host_lock, flags);
+        return 0;
+}
+
+/**
  * ibmvfc_slave_configure - Configure the device
  * @sdev: struct scsi_device device to configure
  *
@@ -2537,6 +2623,7 @@ static struct scsi_host_template driver_template = {
         .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
         .slave_alloc = ibmvfc_slave_alloc,
         .slave_configure = ibmvfc_slave_configure,
+        .target_alloc = ibmvfc_target_alloc,
         .scan_finished = ibmvfc_scan_finished,
         .change_queue_depth = ibmvfc_change_queue_depth,
         .change_queue_type = ibmvfc_change_queue_type,