author		Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit		ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree		644b88f8a71896307d71438e9b3af49126ffb22b /drivers/scsi/ibmvscsi
parent		43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent		3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'drivers/scsi/ibmvscsi')
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvfc.c		363
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvfc.h		 12
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvscsi.c	 58
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvscsi.h	  1
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvstgt.c	  1
-rw-r--r--	drivers/scsi/ibmvscsi/iseries_vscsi.c	  6
-rw-r--r--	drivers/scsi/ibmvscsi/rpa_vscsi.c	 14
7 files changed, 407 insertions, 48 deletions
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index bb2c696c006a..c2eea711a5ce 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -28,7 +28,9 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/kthread.h>
+#include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/pm.h>
 #include <linux/stringify.h>
 #include <asm/firmware.h>
 #include <asm/irq.h>
@@ -39,6 +41,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
 #include "ibmvfc.h"

 static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
@@ -558,12 +561,11 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost,
 /**
  * ibmvfc_init_host - Start host initialization
  * @vhost: ibmvfc host struct
- * @relogin: is this a re-login?
  *
  * Return value:
  *      nothing
  **/
-static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
+static void ibmvfc_init_host(struct ibmvfc_host *vhost)
 {
         struct ibmvfc_target *tgt;

@@ -577,10 +579,8 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
         }

         if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
-                if (!relogin) {
-                        memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
-                        vhost->async_crq.cur = 0;
-                }
+                memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
+                vhost->async_crq.cur = 0;

                 list_for_each_entry(tgt, &vhost->targets, queue)
                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
@@ -1678,6 +1678,276 @@ static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
 }

 /**
+ * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
+ * @evt: struct ibmvfc_event
+ *
+ **/
+static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
+{
+        struct ibmvfc_host *vhost = evt->vhost;
+
+        ibmvfc_free_event(evt);
+        vhost->aborting_passthru = 0;
+        dev_info(vhost->dev, "Passthru command cancelled\n");
+}
+
+/**
+ * ibmvfc_bsg_timeout - Handle a BSG timeout
+ * @job: struct fc_bsg_job that timed out
+ *
+ * Returns:
+ *      0 on success / other on failure
+ **/
+static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
+{
+        struct ibmvfc_host *vhost = shost_priv(job->shost);
+        unsigned long port_id = (unsigned long)job->dd_data;
+        struct ibmvfc_event *evt;
+        struct ibmvfc_tmf *tmf;
+        unsigned long flags;
+        int rc;
+
+        ENTER;
+        spin_lock_irqsave(vhost->host->host_lock, flags);
+        if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
+                __ibmvfc_reset_host(vhost);
+                spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                return 0;
+        }
+
+        vhost->aborting_passthru = 1;
+        evt = ibmvfc_get_event(vhost);
+        ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
+
+        tmf = &evt->iu.tmf;
+        memset(tmf, 0, sizeof(*tmf));
+        tmf->common.version = 1;
+        tmf->common.opcode = IBMVFC_TMF_MAD;
+        tmf->common.length = sizeof(*tmf);
+        tmf->scsi_id = port_id;
+        tmf->cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
+        tmf->my_cancel_key = IBMVFC_INTERNAL_CANCEL_KEY;
+        rc = ibmvfc_send_event(evt, vhost, default_timeout);
+
+        if (rc != 0) {
+                vhost->aborting_passthru = 0;
+                dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
+                rc = -EIO;
+        } else
+                dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
+                         port_id);
+
+        spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+        LEAVE;
+        return rc;
+}
+
+/**
+ * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
+ * @vhost: struct ibmvfc_host to send command
+ * @port_id: port ID to send command
+ *
+ * Returns:
+ *      0 on success / other on failure
+ **/
+static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
+{
+        struct ibmvfc_port_login *plogi;
+        struct ibmvfc_target *tgt;
+        struct ibmvfc_event *evt;
+        union ibmvfc_iu rsp_iu;
+        unsigned long flags;
+        int rc = 0, issue_login = 1;
+
+        ENTER;
+        spin_lock_irqsave(vhost->host->host_lock, flags);
+        list_for_each_entry(tgt, &vhost->targets, queue) {
+                if (tgt->scsi_id == port_id) {
+                        issue_login = 0;
+                        break;
+                }
+        }
+
+        if (!issue_login)
+                goto unlock_out;
+        if (unlikely((rc = ibmvfc_host_chkready(vhost))))
+                goto unlock_out;
+
+        evt = ibmvfc_get_event(vhost);
+        ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+        plogi = &evt->iu.plogi;
+        memset(plogi, 0, sizeof(*plogi));
+        plogi->common.version = 1;
+        plogi->common.opcode = IBMVFC_PORT_LOGIN;
+        plogi->common.length = sizeof(*plogi);
+        plogi->scsi_id = port_id;
+        evt->sync_iu = &rsp_iu;
+        init_completion(&evt->comp);
+
+        rc = ibmvfc_send_event(evt, vhost, default_timeout);
+        spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+        if (rc)
+                return -EIO;
+
+        wait_for_completion(&evt->comp);
+
+        if (rsp_iu.plogi.common.status)
+                rc = -EIO;
+
+        spin_lock_irqsave(vhost->host->host_lock, flags);
+        ibmvfc_free_event(evt);
+unlock_out:
+        spin_unlock_irqrestore(vhost->host->host_lock, flags);
+        LEAVE;
+        return rc;
+}
+
+/**
+ * ibmvfc_bsg_request - Handle a BSG request
+ * @job: struct fc_bsg_job to be executed
+ *
+ * Returns:
+ *      0 on success / other on failure
+ **/
+static int ibmvfc_bsg_request(struct fc_bsg_job *job)
+{
+        struct ibmvfc_host *vhost = shost_priv(job->shost);
+        struct fc_rport *rport = job->rport;
+        struct ibmvfc_passthru_mad *mad;
+        struct ibmvfc_event *evt;
+        union ibmvfc_iu rsp_iu;
+        unsigned long flags, port_id = -1;
+        unsigned int code = job->request->msgcode;
+        int rc = 0, req_seg, rsp_seg, issue_login = 0;
+        u32 fc_flags, rsp_len;
+
+        ENTER;
+        job->reply->reply_payload_rcv_len = 0;
+        if (rport)
+                port_id = rport->port_id;
+
+        switch (code) {
+        case FC_BSG_HST_ELS_NOLOGIN:
+                port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
+                        (job->request->rqst_data.h_els.port_id[1] << 8) |
+                        job->request->rqst_data.h_els.port_id[2];
+        case FC_BSG_RPT_ELS:
+                fc_flags = IBMVFC_FC_ELS;
+                break;
+        case FC_BSG_HST_CT:
+                issue_login = 1;
+                port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
+                        (job->request->rqst_data.h_ct.port_id[1] << 8) |
+                        job->request->rqst_data.h_ct.port_id[2];
+        case FC_BSG_RPT_CT:
+                fc_flags = IBMVFC_FC_CT_IU;
+                break;
+        default:
+                return -ENOTSUPP;
+        };
+
+        if (port_id == -1)
+                return -EINVAL;
+        if (!mutex_trylock(&vhost->passthru_mutex))
+                return -EBUSY;
+
+        job->dd_data = (void *)port_id;
+        req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
+                             job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+        if (!req_seg) {
+                mutex_unlock(&vhost->passthru_mutex);
+                return -ENOMEM;
+        }
+
+        rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
+                             job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+        if (!rsp_seg) {
+                dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
+                             job->request_payload.sg_cnt, DMA_TO_DEVICE);
+                mutex_unlock(&vhost->passthru_mutex);
+                return -ENOMEM;
+        }
+
+        if (req_seg > 1 || rsp_seg > 1) {
+                rc = -EINVAL;
+                goto out;
+        }
+
+        if (issue_login)
+                rc = ibmvfc_bsg_plogi(vhost, port_id);
+
+        spin_lock_irqsave(vhost->host->host_lock, flags);
+
+        if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
+            unlikely((rc = ibmvfc_host_chkready(vhost)))) {
+                spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                goto out;
+        }
+
+        evt = ibmvfc_get_event(vhost);
+        ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+        mad = &evt->iu.passthru;
+
+        memset(mad, 0, sizeof(*mad));
+        mad->common.version = 1;
+        mad->common.opcode = IBMVFC_PASSTHRU;
+        mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
+
+        mad->cmd_ioba.va = (u64)evt->crq.ioba +
+                offsetof(struct ibmvfc_passthru_mad, iu);
+        mad->cmd_ioba.len = sizeof(mad->iu);
+
+        mad->iu.cmd_len = job->request_payload.payload_len;
+        mad->iu.rsp_len = job->reply_payload.payload_len;
+        mad->iu.flags = fc_flags;
+        mad->iu.cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
+
+        mad->iu.cmd.va = sg_dma_address(job->request_payload.sg_list);
+        mad->iu.cmd.len = sg_dma_len(job->request_payload.sg_list);
+        mad->iu.rsp.va = sg_dma_address(job->reply_payload.sg_list);
+        mad->iu.rsp.len = sg_dma_len(job->reply_payload.sg_list);
+        mad->iu.scsi_id = port_id;
+        mad->iu.tag = (u64)evt;
+        rsp_len = mad->iu.rsp.len;
+
+        evt->sync_iu = &rsp_iu;
+        init_completion(&evt->comp);
+        rc = ibmvfc_send_event(evt, vhost, 0);
+        spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+        if (rc) {
+                rc = -EIO;
+                goto out;
+        }
+
+        wait_for_completion(&evt->comp);
+
+        if (rsp_iu.passthru.common.status)
+                rc = -EIO;
+        else
+                job->reply->reply_payload_rcv_len = rsp_len;
+
+        spin_lock_irqsave(vhost->host->host_lock, flags);
+        ibmvfc_free_event(evt);
+        spin_unlock_irqrestore(vhost->host->host_lock, flags);
+        job->reply->result = rc;
+        job->job_done(job);
+        rc = 0;
+out:
+        dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
+                     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+        dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
+                     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+        mutex_unlock(&vhost->passthru_mutex);
+        LEAVE;
+        return rc;
+}
+
+/**
  * ibmvfc_reset_device - Reset the device with the specified reset type
  * @sdev: scsi device to reset
  * @type: reset type
@@ -1731,7 +2001,10 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
         sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
         wait_for_completion(&evt->comp);

-        if (rsp_iu.cmd.status) {
+        if (rsp_iu.cmd.status)
+                rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+
+        if (rsp_code) {
                 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
                         rsp_code = fc_rsp->data.info.rsp_code;

@@ -1820,7 +2093,10 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
         sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
         wait_for_completion(&evt->comp);

-        if (rsp_iu.cmd.status) {
+        if (rsp_iu.cmd.status)
+                rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+
+        if (rsp_code) {
                 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
                         rsp_code = fc_rsp->data.info.rsp_code;

@@ -2061,12 +2337,24 @@ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
 }

 /**
- * ibmvfc_dev_cancel_all - Device iterated cancel all function
+ * ibmvfc_dev_cancel_all_abts - Device iterated cancel all function
+ * @sdev: scsi device struct
+ * @data: return code
+ *
+ **/
+static void ibmvfc_dev_cancel_all_abts(struct scsi_device *sdev, void *data)
+{
+        unsigned long *rc = data;
+        *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+}
+
+/**
+ * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
  * @sdev: scsi device struct
  * @data: return code
  *
  **/
-static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data)
+static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
 {
         unsigned long *rc = data;
         *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
@@ -2102,7 +2390,7 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)

         ENTER;
         ibmvfc_wait_while_resetting(vhost);
-        starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
+        starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
         reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");

         if (!cancel_rc && !reset_rc)
@@ -2144,7 +2432,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
         int rc = FAILED;

         ENTER;
-        starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
+        starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_abts);
         starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);

         if (!cancel_rc && !abort_rc)
@@ -2297,13 +2585,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
                 /* Send back a response */
                 rc = ibmvfc_send_crq_init_complete(vhost);
                 if (rc == 0)
-                        ibmvfc_init_host(vhost, 0);
+                        ibmvfc_init_host(vhost);
                 else
                         dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
                 break;
         case IBMVFC_CRQ_INIT_COMPLETE:
                 dev_info(vhost->dev, "Partner initialization complete\n");
-                ibmvfc_init_host(vhost, 0);
+                ibmvfc_init_host(vhost);
                 break;
         default:
                 dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
@@ -2478,12 +2766,17 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
  * ibmvfc_change_queue_depth - Change the device's queue depth
  * @sdev: scsi device struct
  * @qdepth: depth to set
+ * @reason: calling context
  *
  * Return value:
  *      actual depth set
  **/
-static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
+static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth,
+                                     int reason)
 {
+        if (reason != SCSI_QDEPTH_DEFAULT)
+                return -EOPNOTSUPP;
+
         if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
                 qdepth = IBMVFC_MAX_CMDS_PER_LUN;

@@ -3725,7 +4018,7 @@ static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
         case IBMVFC_MAD_SUCCESS:
                 if (list_empty(&vhost->sent) &&
                     vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
-                        ibmvfc_init_host(vhost, 0);
+                        ibmvfc_init_host(vhost);
                         return;
                 }
                 break;
@@ -3903,6 +4196,8 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
                         rport->supported_classes |= FC_COS_CLASS2;
                 if (tgt->service_parms.class3_parms[0] & 0x80000000)
                         rport->supported_classes |= FC_COS_CLASS3;
+                if (rport->rqst_q)
+                        blk_queue_max_segments(rport->rqst_q, 1);
         } else
                 tgt_dbg(tgt, "rport add failed\n");
         spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -4342,6 +4637,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
         init_waitqueue_head(&vhost->work_wait_q);
         init_waitqueue_head(&vhost->init_wait_q);
         INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
+        mutex_init(&vhost->passthru_mutex);

         if ((rc = ibmvfc_alloc_mem(vhost)))
                 goto free_scsi_host;
@@ -4374,6 +4670,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
                 goto remove_shost;
         }

+        if (shost_to_fc_host(shost)->rqst_q)
+                blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
         dev_set_drvdata(dev, vhost);
         spin_lock(&ibmvfc_driver_lock);
         list_add_tail(&vhost->queue, &ibmvfc_head);
@@ -4414,7 +4712,11 @@ static int ibmvfc_remove(struct vio_dev *vdev)

         ENTER;
         ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
+
+        spin_lock_irqsave(vhost->host->host_lock, flags);
         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+        spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
         ibmvfc_wait_while_resetting(vhost);
         ibmvfc_release_crq_queue(vhost);
         kthread_stop(vhost->work_thread);
@@ -4436,6 +4738,27 @@ static int ibmvfc_remove(struct vio_dev *vdev)
 }

 /**
+ * ibmvfc_resume - Resume from suspend
+ * @dev: device struct
+ *
+ * We may have lost an interrupt across suspend/resume, so kick the
+ * interrupt handler
+ *
+ */
+static int ibmvfc_resume(struct device *dev)
+{
+        unsigned long flags;
+        struct ibmvfc_host *vhost = dev_get_drvdata(dev);
+        struct vio_dev *vdev = to_vio_dev(dev);
+
+        spin_lock_irqsave(vhost->host->host_lock, flags);
+        vio_disable_interrupts(vdev);
+        tasklet_schedule(&vhost->tasklet);
+        spin_unlock_irqrestore(vhost->host->host_lock, flags);
+        return 0;
+}
+
+/**
  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
  * @vdev: vio device struct
  *
@@ -4455,6 +4778,10 @@ static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
 };
 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);

+static struct dev_pm_ops ibmvfc_pm_ops = {
+        .resume = ibmvfc_resume
+};
+
 static struct vio_driver ibmvfc_driver = {
         .id_table = ibmvfc_device_table,
         .probe = ibmvfc_probe,
@@ -4463,6 +4790,7 @@ static struct vio_driver ibmvfc_driver = {
         .driver = {
                 .name = IBMVFC_NAME,
                 .owner = THIS_MODULE,
+                .pm = &ibmvfc_pm_ops,
         }
 };

@@ -4498,6 +4826,9 @@ static struct fc_function_template ibmvfc_transport_functions = {

         .get_starget_port_id = ibmvfc_get_starget_port_id,
         .show_starget_port_id = 1,
+
+        .bsg_request = ibmvfc_bsg_request,
+        .bsg_timeout = ibmvfc_bsg_timeout,
 };

 /**
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 007fa1c9ef14..d25106a958d7 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
 #include "viosrp.h"

 #define IBMVFC_NAME	"ibmvfc"
-#define IBMVFC_DRIVER_VERSION		"1.0.6"
-#define IBMVFC_DRIVER_DATE		"(May 28, 2009)"
+#define IBMVFC_DRIVER_VERSION		"1.0.7"
+#define IBMVFC_DRIVER_DATE		"(October 16, 2009)"

 #define IBMVFC_DEFAULT_TIMEOUT	60
 #define IBMVFC_ADISC_CANCEL_TIMEOUT	45
@@ -58,9 +58,10 @@
  * 1 for ERP
  * 1 for initialization
  * 1 for NPIV Logout
+ * 2 for BSG passthru
  * 2 for each discovery thread
  */
-#define IBMVFC_NUM_INTERNAL_REQ	(1 + 1 + 1 + (disc_threads * 2))
+#define IBMVFC_NUM_INTERNAL_REQ	(1 + 1 + 1 + 2 + (disc_threads * 2))

 #define IBMVFC_MAD_SUCCESS		0x00
 #define IBMVFC_MAD_NOT_SUPPORTED	0xF1
@@ -466,7 +467,10 @@ struct ibmvfc_passthru_iu {
         u16 error;
         u32 flags;
 #define IBMVFC_FC_ELS		0x01
+#define IBMVFC_FC_CT_IU		0x02
         u32 cancel_key;
+#define IBMVFC_PASSTHRU_CANCEL_KEY	0x80000000
+#define IBMVFC_INTERNAL_CANCEL_KEY	0x80000001
         u32 reserved;
         struct srp_direct_buf cmd;
         struct srp_direct_buf rsp;
@@ -693,6 +697,7 @@ struct ibmvfc_host {
         int disc_buf_sz;
         int log_level;
         struct ibmvfc_discover_targets_buf *disc_buf;
+        struct mutex passthru_mutex;
         int task_set;
         int init_retries;
         int discovery_threads;
@@ -702,6 +707,7 @@ struct ibmvfc_host {
         int delay_init;
         int scan_complete;
         int logged_in;
+        int aborting_passthru;
         int events_to_log;
 #define IBMVFC_AE_LINKUP	0x0001
 #define IBMVFC_AE_LINKDOWN	0x0002
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d9b0e9d31983..88bad0e81bdd 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -40,7 +40,7 @@
  * (CRQ), which is just a buffer of 16 byte entries in the receiver's
  * Senders cannot access the buffer directly, but send messages by
  * making a hypervisor call and passing in the 16 bytes.  The hypervisor
- * puts the message in the next 16 byte space in round-robbin fashion,
+ * puts the message in the next 16 byte space in round-robin fashion,
  * turns on the high order bit of the message (the valid bit), and
  * generates an interrupt to the receiver (if interrupts are turned on.)
  * The receiver just turns off the valid bit when they have copied out
@@ -70,7 +70,9 @@
 #include <linux/moduleparam.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/pm.h>
 #include <asm/firmware.h>
 #include <asm/vio.h>
 #include <scsi/scsi.h>
@@ -321,16 +323,6 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
         srp_cmd->buf_fmt = fmt;
 }

-static void unmap_sg_list(int num_entries,
-                          struct device *dev,
-                          struct srp_direct_buf *md)
-{
-        int i;
-
-        for (i = 0; i < num_entries; ++i)
-                dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
-}
-
 /**
  * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
  * @cmd: srp_cmd whose additional_data member will be unmapped
@@ -348,24 +340,9 @@ static void unmap_cmd_data(struct srp_cmd *cmd,

         if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
                 return;
-        else if (out_fmt == SRP_DATA_DESC_DIRECT ||
-                 in_fmt == SRP_DATA_DESC_DIRECT) {
-                struct srp_direct_buf *data =
-                        (struct srp_direct_buf *) cmd->add_data;
-                dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
-        } else {
-                struct srp_indirect_buf *indirect =
-                        (struct srp_indirect_buf *) cmd->add_data;
-                int num_mapped = indirect->table_desc.len /
-                        sizeof(struct srp_direct_buf);

-                if (num_mapped <= MAX_INDIRECT_BUFS) {
-                        unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
-                        return;
-                }
-
-                unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
-        }
+        if (evt_struct->cmnd)
+                scsi_dma_unmap(evt_struct->cmnd);
 }

 static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
@@ -1637,12 +1614,17 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
  * ibmvscsi_change_queue_depth - Change the device's queue depth
  * @sdev: scsi device struct
  * @qdepth: depth to set
+ * @reason: calling context
  *
  * Return value:
  *      actual depth set
  **/
-static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
+static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth,
+                                       int reason)
 {
+        if (reason != SCSI_QDEPTH_DEFAULT)
+                return -EOPNOTSUPP;
+
         if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
                 qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;

@@ -1986,6 +1968,19 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
 }

 /**
+ * ibmvscsi_resume: Resume from suspend
+ * @dev: device struct
+ *
+ * We may have lost an interrupt across suspend/resume, so kick the
+ * interrupt handler
+ */
+static int ibmvscsi_resume(struct device *dev)
+{
+        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
+        return ibmvscsi_ops->resume(hostdata);
+}
+
+/**
  * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
  *                        support.
  */
@@ -1995,6 +1990,10 @@ static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
 };
 MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);

+static struct dev_pm_ops ibmvscsi_pm_ops = {
+        .resume = ibmvscsi_resume
+};
+
 static struct vio_driver ibmvscsi_driver = {
         .id_table = ibmvscsi_device_table,
         .probe = ibmvscsi_probe,
@@ -2003,6 +2002,7 @@ static struct vio_driver ibmvscsi_driver = {
         .driver = {
                 .name = "ibmvscsi",
                 .owner = THIS_MODULE,
+                .pm = &ibmvscsi_pm_ops,
         }
 };

diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 76425303def0..9cb7c6a773e1 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -120,6 +120,7 @@ struct ibmvscsi_ops {
                                struct ibmvscsi_host_data *hostdata);
         int (*send_crq)(struct ibmvscsi_host_data *hostdata,
                         u64 word1, u64 word2);
+        int (*resume) (struct ibmvscsi_host_data *hostdata);
 };

 extern struct ibmvscsi_ops iseriesvscsi_ops;
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index d5eaf9727109..e2056d517e99 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -23,6 +23,7 @@
  */
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_srp.h>
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
index 0775fdee5fa8..f4776451a754 100644
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -158,10 +158,16 @@ static int iseriesvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
                             0);
 }

+static int iseriesvscsi_resume(struct ibmvscsi_host_data *hostdata)
+{
+        return 0;
+}
+
 struct ibmvscsi_ops iseriesvscsi_ops = {
         .init_crq_queue = iseriesvscsi_init_crq_queue,
         .release_crq_queue = iseriesvscsi_release_crq_queue,
         .reset_crq_queue = iseriesvscsi_reset_crq_queue,
         .reenable_crq_queue = iseriesvscsi_reenable_crq_queue,
         .send_crq = iseriesvscsi_send_crq,
+        .resume = iseriesvscsi_resume,
 };
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 462a8574dad9..a864ccc0a342 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -32,6 +32,7 @@
 #include <asm/iommu.h>
 #include <asm/hvcall.h>
 #include <linux/dma-mapping.h>
+#include <linux/gfp.h>
 #include <linux/interrupt.h>
 #include "ibmvscsi.h"

@@ -334,10 +335,23 @@ static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
         return rc;
 }

+/**
+ * rpavscsi_resume: - resume after suspend
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ */
+static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
+{
+        vio_disable_interrupts(to_vio_dev(hostdata->dev));
+        tasklet_schedule(&hostdata->srp_task);
+        return 0;
+}
+
 struct ibmvscsi_ops rpavscsi_ops = {
         .init_crq_queue = rpavscsi_init_crq_queue,
         .release_crq_queue = rpavscsi_release_crq_queue,
         .reset_crq_queue = rpavscsi_reset_crq_queue,
         .reenable_crq_queue = rpavscsi_reenable_crq_queue,
         .send_crq = rpavscsi_send_crq,
+        .resume = rpavscsi_resume,
 };