author		Linus Torvalds <torvalds@linux-foundation.org>	2014-12-11 17:27:06 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-11 17:27:06 -0500
commit		70e71ca0af244f48a5dcf56dc435243792e3a495 (patch)
tree		f7d9c4c4d9a857a00043e9bf6aa2d6f533a34778 /drivers/scsi
parent		bae41e45b7400496b9bf0c70c6004419d9987819 (diff)
parent		00c83b01d58068dfeb2e1351cca6fccf2a83fa8f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) New offloading infrastructure and example 'rocker' driver for
    offloading of switching and routing to hardware.  This work was done
    by a large group of dedicated individuals, not limited to: Scott
    Feldman, Jiri Pirko, Thomas Graf, John Fastabend, Jamal Hadi Salim,
    Andy Gospodarek, Florian Fainelli, Roopa Prabhu

 2) Start making the networking operate on IOV iterators instead of
    modifying iov objects in-situ during transfers.  Thanks to Al Viro
    and Herbert Xu.

 3) A set of new netlink interfaces for the TIPC stack, from Richard
    Alpe.

 4) Remove unnecessary looping during ipv6 routing lookups, from Martin
    KaFai Lau.

 5) Add PAUSE frame generation support to gianfar driver, from Matei
    Pavaluca.

 6) Allow for larger reordering levels in TCP, which are easily
    achievable in the real world right now, from Eric Dumazet.

 7) Add a variant of napi_schedule that doesn't need to disable cpu
    interrupts, from Eric Dumazet.

 8) Use a doubly linked list to optimize neigh_parms_release(), from
    Nicolas Dichtel.

 9) Various enhancements to the kernel BPF verifier, and allow eBPF
    programs to actually be attached to sockets.  From Alexei
    Starovoitov.

10) Support TSO/LSO in sunvnet driver, from David L Stevens.

11) Allow controlling ECN usage via routing metrics, from Florian
    Westphal.

12) Remote checksum offload, from Tom Herbert.

13) Add split-header receive, BQL, and xmit_more support to amd-xgbe
    driver, from Thomas Lendacky.

14) Add MPLS support to openvswitch, from Simon Horman.

15) Support wildcard tunnel endpoints in ipv6 tunnels, from Steffen
    Klassert.

16) Do gro flushes on a per-device basis using a timer, from Eric
    Dumazet.  This tries to resolve the conflicting goals between the
    desired handling of bulk vs. RPC-like traffic.

17) Allow userspace to ask for the CPU upon what a packet was
    received/steered, via SO_INCOMING_CPU.  From Eric Dumazet.

18) Limit GSO packets to half the current congestion window, from Eric
    Dumazet.

19) Add a generic helper so that all drivers set their RSS keys in a
    consistent way, from Eric Dumazet.

20) Add xmit_more support to enic driver, from Govindarajulu
    Varadarajan.

21) Add VLAN packet scheduler action, from Jiri Pirko.

22) Support configurable RSS hash functions via ethtool, from Eyal
    Perry.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1820 commits)
  Fix race condition between vxlan_sock_add and vxlan_sock_release
  net/macb: fix compilation warning for print_hex_dump() called with skb->mac_header
  net/mlx4: Add support for A0 steering
  net/mlx4: Refactor QUERY_PORT
  net/mlx4_core: Add explicit error message when rule doesn't meet configuration
  net/mlx4: Add A0 hybrid steering
  net/mlx4: Add mlx4_bitmap zone allocator
  net/mlx4: Add a check if there are too many reserved QPs
  net/mlx4: Change QP allocation scheme
  net/mlx4_core: Use tasklet for user-space CQ completion events
  net/mlx4_core: Mask out host side virtualization features for guests
  net/mlx4_en: Set csum level for encapsulated packets
  be2net: Export tunnel offloads only when a VxLAN tunnel is created
  gianfar: Fix dma check map error when DMA_API_DEBUG is enabled
  cxgb4/csiostor: Don't use MASTER_MUST for fw_hello call
  net: fec: only enable mdio interrupt before phy device link up
  net: fec: clear all interrupt events to support i.MX6SX
  net: fec: reset fep link status in suspend function
  net: sock: fix access via invalid file descriptor
  net: introduce helper macro for_each_cmsghdr
  ...
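A quick illustration of item 17: once a socket is connected, userspace can read back the CPU that the kernel steered its packets to with an ordinary getsockopt() call. This sketch assumes headers that define SO_INCOMING_CPU (the fallback value below is the asm-generic one) and is not part of the merge itself:

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_INCOMING_CPU
#define SO_INCOMING_CPU 49	/* asm-generic value; assumption for older userspace headers */
#endif

/* Print the CPU on which packets for this connected socket were last processed. */
static void print_incoming_cpu(int sockfd)
{
	int cpu = -1;
	socklen_t len = sizeof(cpu);

	if (getsockopt(sockfd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len) == 0)
		printf("socket %d: packets steered to CPU %d\n", sockfd, cpu);
	else
		perror("getsockopt(SO_INCOMING_CPU)");
}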
Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/csiostor/csio_attr.c	|   8
-rw-r--r--	drivers/scsi/csiostor/csio_hw.c		|  58
-rw-r--r--	drivers/scsi/csiostor/csio_hw.h		|   1
-rw-r--r--	drivers/scsi/csiostor/csio_hw_chip.h	|  49
-rw-r--r--	drivers/scsi/csiostor/csio_hw_t4.c	|  15
-rw-r--r--	drivers/scsi/csiostor/csio_hw_t5.c	|  21
-rw-r--r--	drivers/scsi/csiostor/csio_init.c	|  78
-rw-r--r--	drivers/scsi/csiostor/csio_lnode.c	|  20
-rw-r--r--	drivers/scsi/csiostor/csio_mb.c		| 343
-rw-r--r--	drivers/scsi/csiostor/csio_mb.h		|  12
-rw-r--r--	drivers/scsi/csiostor/csio_scsi.c	|  26
-rw-r--r--	drivers/scsi/csiostor/csio_wr.h		|   2
-rw-r--r--	drivers/scsi/cxgbi/cxgb4i/cxgb4i.c	| 113
-rw-r--r--	drivers/scsi/fcoe/fcoe.c		|   6
14 files changed, 338 insertions, 414 deletions
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
index 065a87ace623..2d1c4ebd40f9 100644
--- a/drivers/scsi/csiostor/csio_attr.c
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -451,9 +451,9 @@ csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln)
 
 	/* Process Mbox response of VNP command */
 	rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
-	if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
+	if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
 		csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n",
-			    FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
+			    FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)));
 		ret = -EINVAL;
 		goto out_free;
 	}
@@ -526,9 +526,9 @@ csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
 
 	/* Process Mbox response of VNP command */
 	rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
-	if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
+	if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
 		csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n",
-			    FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
+			    FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)));
 		ret = -EINVAL;
 	}
 
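Almost all of the csiostor changes in this merge are mechanical conversions from the older FOO()/FOO_GET() firmware mailbox macros to the newer cxgb4 convention, in which each field FOO gets _V() (shift a value into the field), _G() (extract the field), and _F (the single-bit flag form) helpers. A minimal sketch of that convention; the shift and mask values below are illustrative assumptions, not copied from t4fw_api.h:

/* Illustrative only -- the real definitions live in the cxgb4 firmware headers. */
#define FW_CMD_RETVAL_S		8		/* assumed field offset */
#define FW_CMD_RETVAL_M		0xffU		/* assumed field mask */
#define FW_CMD_RETVAL_V(x)	((x) << FW_CMD_RETVAL_S)			/* place value into field */
#define FW_CMD_RETVAL_G(x)	(((x) >> FW_CMD_RETVAL_S) & FW_CMD_RETVAL_M)	/* extract field */

#define FW_CMD_REQUEST_S	23		/* assumed bit position */
#define FW_CMD_REQUEST_V(x)	((x) << FW_CMD_REQUEST_S)
#define FW_CMD_REQUEST_F	FW_CMD_REQUEST_V(1U)	/* single-bit flag form */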
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 0eaec4748957..9ab997e18b20 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -47,7 +47,6 @@
 #include "csio_lnode.h"
 #include "csio_rnode.h"
 
-int csio_force_master;
 int csio_dbg_level = 0xFEFF;
 unsigned int csio_port_mask = 0xf;
 
@@ -650,10 +649,10 @@ static void
 csio_hw_print_fw_version(struct csio_hw *hw, char *str)
 {
 	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
-		  FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
-		  FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
-		  FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
-		  FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
+		  FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
+		  FW_HDR_FW_VER_MINOR_G(hw->fwrev),
+		  FW_HDR_FW_VER_MICRO_G(hw->fwrev),
+		  FW_HDR_FW_VER_BUILD_G(hw->fwrev));
 }
 
 /*
@@ -706,9 +705,9 @@ csio_hw_check_fw_version(struct csio_hw *hw)
 	if (ret)
 		return ret;
 
-	major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev);
-	minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
-	micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);
+	major = FW_HDR_FW_VER_MAJOR_G(hw->fwrev);
+	minor = FW_HDR_FW_VER_MINOR_G(hw->fwrev);
+	micro = FW_HDR_FW_VER_MICRO_G(hw->fwrev);
 
 	if (major != FW_VERSION_MAJOR(hw)) {	/* major mismatch - fail */
 		csio_err(hw, "card FW has major version %u, driver wants %u\n",
@@ -889,7 +888,6 @@ csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
 {
 	struct csio_mb *mbp;
 	int rv = 0;
-	enum csio_dev_master master;
 	enum fw_retval retval;
 	uint8_t mpfn;
 	char state_str[16];
@@ -904,11 +902,9 @@ csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
 		goto out;
 	}
 
-	master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY;
-
 retry:
 	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
-		      hw->pfn, master, NULL);
+		      hw->pfn, CSIO_MASTER_MAY, NULL);
 
 	rv = csio_mb_issue(hw, mbp);
 	if (rv) {
@@ -1170,7 +1166,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
 	}
 
 	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
-		      PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1),
+		      PIORSTMODE | PIORST, FW_RESET_CMD_HALT_F,
 		      NULL);
 
 	if (csio_mb_issue(hw, mbp)) {
@@ -1370,13 +1366,13 @@ csio_hw_fw_config_file(struct csio_hw *hw,
 	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
 	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
 	caps_cmd->op_to_write =
-		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
-		      FW_CMD_REQUEST |
-		      FW_CMD_READ);
+		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+		      FW_CMD_REQUEST_F |
+		      FW_CMD_READ_F);
 	caps_cmd->cfvalid_to_len16 =
-		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
-		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
-		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
+		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
+		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
 		      FW_LEN16(*caps_cmd));
 
 	if (csio_mb_issue(hw, mbp)) {
@@ -1407,9 +1403,9 @@ csio_hw_fw_config_file(struct csio_hw *hw,
 	 * And now tell the firmware to use the configuration we just loaded.
 	 */
 	caps_cmd->op_to_write =
-		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
-		      FW_CMD_REQUEST |
-		      FW_CMD_WRITE);
+		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+		      FW_CMD_REQUEST_F |
+		      FW_CMD_WRITE_F);
 	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
 
 	if (csio_mb_issue(hw, mbp)) {
@@ -1678,7 +1674,7 @@ csio_get_fcoe_resinfo(struct csio_hw *hw)
 	}
 
 	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
-	retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
 	if (retval != FW_SUCCESS) {
 		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
 			 retval);
@@ -1723,8 +1719,8 @@ csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
 	 * Find out whether we're dealing with a version of
 	 * the firmware which has configuration file support.
 	 */
-	_param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
-		     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+	_param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
 
 	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
 		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
@@ -1781,8 +1777,8 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
 		goto leave;
 	}
 
-	mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
-	maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
+	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
+	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
 
 	ret = csio_memory_write(hw, mtype, maddr,
 				cf->size + value_to_add, cfg_data);
@@ -1871,8 +1867,8 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
 			goto bye;
 		}
 	} else {
-		mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
-		maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
+		mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
+		maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
 		using_flash = 0;
 	}
 
@@ -1998,13 +1994,13 @@ csio_hw_flash_fw(struct csio_hw *hw)
 
 	hdr = (const struct fw_hdr *)fw->data;
 	fw_ver = ntohl(hdr->fw_ver);
-	if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw))
+	if (FW_HDR_FW_VER_MAJOR_G(fw_ver) != FW_VERSION_MAJOR(hw))
 		return -EINVAL;      /* wrong major version, won't do */
 
 	/*
 	 * If the flash FW is unusable or we found something newer, load it.
 	 */
-	if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
+	if (FW_HDR_FW_VER_MAJOR_G(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
 	    fw_ver > hw->fwrev) {
 		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
 					 /*force=*/false);
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 5db2d85195b1..68248da1b9af 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -110,7 +110,6 @@ struct csio_scsi_cpu_info {
 };
 
 extern int csio_dbg_level;
-extern int csio_force_master;
 extern unsigned int csio_port_mask;
 extern int csio_msi;
 
diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h
index bca0de61ae80..4752fed476df 100644
--- a/drivers/scsi/csiostor/csio_hw_chip.h
+++ b/drivers/scsi/csiostor/csio_hw_chip.h
@@ -36,60 +36,13 @@
 
 #include "csio_defs.h"
 
-/* FCoE device IDs for T4 */
-#define CSIO_DEVID_T440DBG_FCOE 0x4600
-#define CSIO_DEVID_T420CR_FCOE 0x4601
-#define CSIO_DEVID_T422CR_FCOE 0x4602
-#define CSIO_DEVID_T440CR_FCOE 0x4603
-#define CSIO_DEVID_T420BCH_FCOE 0x4604
-#define CSIO_DEVID_T440BCH_FCOE 0x4605
-#define CSIO_DEVID_T440CH_FCOE 0x4606
-#define CSIO_DEVID_T420SO_FCOE 0x4607
-#define CSIO_DEVID_T420CX_FCOE 0x4608
-#define CSIO_DEVID_T420BT_FCOE 0x4609
-#define CSIO_DEVID_T404BT_FCOE 0x460A
-#define CSIO_DEVID_B420_FCOE 0x460B
-#define CSIO_DEVID_B404_FCOE 0x460C
-#define CSIO_DEVID_T480CR_FCOE 0x460D
-#define CSIO_DEVID_T440LPCR_FCOE 0x460E
-#define CSIO_DEVID_AMSTERDAM_T4_FCOE 0x460F
-#define CSIO_DEVID_HUAWEI_T480_FCOE 0x4680
-#define CSIO_DEVID_HUAWEI_T440_FCOE 0x4681
-#define CSIO_DEVID_HUAWEI_STG310_FCOE 0x4682
-#define CSIO_DEVID_ACROMAG_XMC_XAUI 0x4683
-#define CSIO_DEVID_ACROMAG_XMC_SFP_FCOE 0x4684
-#define CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE 0x4685
-#define CSIO_DEVID_HUAWEI_10GT_FCOE 0x4686
-#define CSIO_DEVID_HUAWEI_T440_TOE_FCOE 0x4687
-
-/* FCoE device IDs for T5 */
-#define CSIO_DEVID_T580DBG_FCOE 0x5600
-#define CSIO_DEVID_T520CR_FCOE 0x5601
-#define CSIO_DEVID_T522CR_FCOE 0x5602
-#define CSIO_DEVID_T540CR_FCOE 0x5603
-#define CSIO_DEVID_T520BCH_FCOE 0x5604
-#define CSIO_DEVID_T540BCH_FCOE 0x5605
-#define CSIO_DEVID_T540CH_FCOE 0x5606
-#define CSIO_DEVID_T520SO_FCOE 0x5607
-#define CSIO_DEVID_T520CX_FCOE 0x5608
-#define CSIO_DEVID_T520BT_FCOE 0x5609
-#define CSIO_DEVID_T504BT_FCOE 0x560A
-#define CSIO_DEVID_B520_FCOE 0x560B
-#define CSIO_DEVID_B504_FCOE 0x560C
-#define CSIO_DEVID_T580CR2_FCOE 0x560D
-#define CSIO_DEVID_T540LPCR_FCOE 0x560E
-#define CSIO_DEVID_AMSTERDAM_T5_FCOE 0x560F
-#define CSIO_DEVID_T580LPCR_FCOE 0x5610
-#define CSIO_DEVID_T520LLCR_FCOE 0x5611
-#define CSIO_DEVID_T560CR_FCOE 0x5612
-#define CSIO_DEVID_T580CR_FCOE 0x5613
-
 /* Define MACRO values */
 #define CSIO_HW_T4 0x4000
 #define CSIO_T4_FCOE_ASIC 0x4600
 #define CSIO_HW_T5 0x5000
 #define CSIO_T5_FCOE_ASIC 0x5600
 #define CSIO_HW_CHIP_MASK 0xF000
+
 #define T4_REGMAP_SIZE (160 * 1024)
 #define T5_REGMAP_SIZE (332 * 1024)
 #define FW_FNAME_T4 "cxgb4/t4fw.bin"
diff --git a/drivers/scsi/csiostor/csio_hw_t4.c b/drivers/scsi/csiostor/csio_hw_t4.c
index 89ecbac5478f..95d831857640 100644
--- a/drivers/scsi/csiostor/csio_hw_t4.c
+++ b/drivers/scsi/csiostor/csio_hw_t4.c
@@ -307,12 +307,12 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
 	 * MEM_EDC1 = 1
 	 * MEM_MC = 2 -- T4
 	 */
-	edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
+	edc_size = EDRAM0_SIZE_G(csio_rd_reg32(hw, MA_EDRAM0_BAR_A));
 	if (mtype != MEM_MC1)
 		memoffset = (mtype * (edc_size * 1024 * 1024));
 	else {
-		mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
-							 MA_EXT_MEMORY_BAR));
+		mc_size = EXT_MEM_SIZE_G(csio_rd_reg32(hw,
+						       MA_EXT_MEMORY_BAR_A));
 		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
 	}
 
@@ -383,11 +383,12 @@ static void
 csio_t4_dfs_create_ext_mem(struct csio_hw *hw)
 {
 	u32 size;
-	int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
-	if (i & EXT_MEM_ENABLE) {
-		size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR);
+	int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
+
+	if (i & EXT_MEM_ENABLE_F) {
+		size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR_A);
 		csio_add_debugfs_mem(hw, "mc", MEM_MC,
-				     EXT_MEM_SIZE_GET(size));
+				     EXT_MEM_SIZE_G(size));
 	}
 }
 
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
index 27745c170c24..66e180a58718 100644
--- a/drivers/scsi/csiostor/csio_hw_t5.c
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -298,12 +298,12 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
 	 * MEM_MC0 = 2 -- For T5
 	 * MEM_MC1 = 3 -- For T5
 	 */
-	edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
+	edc_size = EDRAM0_SIZE_G(csio_rd_reg32(hw, MA_EDRAM0_BAR_A));
 	if (mtype != MEM_MC1)
 		memoffset = (mtype * (edc_size * 1024 * 1024));
 	else {
-		mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
-							 MA_EXT_MEMORY_BAR));
+		mc_size = EXT_MEM_SIZE_G(csio_rd_reg32(hw,
+						       MA_EXT_MEMORY_BAR_A));
 		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
 	}
 
@@ -372,16 +372,17 @@ static void
 csio_t5_dfs_create_ext_mem(struct csio_hw *hw)
 {
 	u32 size;
-	int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
-	if (i & EXT_MEM_ENABLE) {
-		size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR);
+	int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
+
+	if (i & EXT_MEM_ENABLE_F) {
+		size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR_A);
 		csio_add_debugfs_mem(hw, "mc0", MEM_MC0,
-				     EXT_MEM_SIZE_GET(size));
+				     EXT_MEM_SIZE_G(size));
 	}
-	if (i & EXT_MEM1_ENABLE) {
-		size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR);
+	if (i & EXT_MEM1_ENABLE_F) {
+		size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR_A);
 		csio_add_debugfs_mem(hw, "mc1", MEM_MC1,
-				     EXT_MEM_SIZE_GET(size));
+				     EXT_MEM_SIZE_G(size));
 	}
 }
 
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 17794add855c..34d20cc3e110 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -128,10 +128,10 @@ static int csio_setup_debugfs(struct csio_hw *hw)
 	if (IS_ERR_OR_NULL(hw->debugfs_root))
 		return -1;
 
-	i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
-	if (i & EDRAM0_ENABLE)
+	i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
+	if (i & EDRAM0_ENABLE_F)
 		csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
-	if (i & EDRAM1_ENABLE)
+	if (i & EDRAM1_ENABLE_F)
 		csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
 
 	hw->chip_ops->chip_dfs_create_ext_mem(hw);
@@ -955,6 +955,10 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	struct csio_hw *hw;
 	struct csio_lnode *ln;
 
+	/* probe only T5 cards */
+	if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)))
+		return -ENODEV;
+
 	rv = csio_pci_init(pdev, &bars);
 	if (rv)
 		goto err;
@@ -974,10 +978,10 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 
 	sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
-		FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
-		FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
-		FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
-		FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
+		FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
+		FW_HDR_FW_VER_MINOR_G(hw->fwrev),
+		FW_HDR_FW_VER_MICRO_G(hw->fwrev),
+		FW_HDR_FW_VER_BUILD_G(hw->fwrev));
 
 	for (i = 0; i < hw->num_pports; i++) {
 		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
@@ -1167,53 +1171,21 @@ static struct pci_error_handlers csio_err_handler = {
 	.resume		= csio_pci_resume,
 };
 
-static const struct pci_device_id csio_pci_tbl[] = {
-	CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0),	/* T4 DEBUG FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0),		/* T420CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0),		/* T422CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0),		/* T440CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0),	/* T420BCH FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0),	/* T440BCH FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0),		/* T440CH FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0),		/* T420SO FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0),		/* T420CX FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0),		/* T420BT FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0),		/* T404BT FCOE */
-	CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0),		/* B420 FCOE */
-	CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0),		/* B404 FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0),		/* T480 CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0),	/* T440 LP-CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T4_FCOE, 0),	/* AMSTERDAM T4 FCOE */
-	CSIO_DEVICE(CSIO_DEVID_HUAWEI_T480_FCOE, 0),	/* HUAWEI T480 FCOE */
-	CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_FCOE, 0),	/* HUAWEI T440 FCOE */
-	CSIO_DEVICE(CSIO_DEVID_HUAWEI_STG310_FCOE, 0),	/* HUAWEI STG FCOE */
-	CSIO_DEVICE(CSIO_DEVID_ACROMAG_XMC_XAUI, 0),	/* ACROMAG XAUI FCOE */
-	CSIO_DEVICE(CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE, 0),/* QUANTA MEZZ FCOE */
-	CSIO_DEVICE(CSIO_DEVID_HUAWEI_10GT_FCOE, 0),	/* HUAWEI 10GT FCOE */
-	CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_TOE_FCOE, 0),/* HUAWEI T4 TOE FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T580DBG_FCOE, 0),	/* T5 DEBUG FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T520CR_FCOE, 0),		/* T520CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T522CR_FCOE, 0),		/* T522CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T540CR_FCOE, 0),		/* T540CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T520BCH_FCOE, 0),	/* T520BCH FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T540BCH_FCOE, 0),	/* T540BCH FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T540CH_FCOE, 0),		/* T540CH FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T520SO_FCOE, 0),		/* T520SO FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T520CX_FCOE, 0),		/* T520CX FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T520BT_FCOE, 0),		/* T520BT FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T504BT_FCOE, 0),		/* T504BT FCOE */
-	CSIO_DEVICE(CSIO_DEVID_B520_FCOE, 0),		/* B520 FCOE */
-	CSIO_DEVICE(CSIO_DEVID_B504_FCOE, 0),		/* B504 FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T580CR2_FCOE, 0),	/* T580 CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T540LPCR_FCOE, 0),	/* T540 LP-CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T5_FCOE, 0),	/* AMSTERDAM T5 FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T580LPCR_FCOE, 0),	/* T580 LP-CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T520LLCR_FCOE, 0),	/* T520 LL-CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T560CR_FCOE, 0),		/* T560 CR FCOE */
-	CSIO_DEVICE(CSIO_DEVID_T580CR_FCOE, 0),		/* T580 CR FCOE */
-	{ 0, 0, 0, 0, 0, 0, 0 }
-};
+/*
+ * Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+	static struct pci_device_id csio_pci_tbl[] = {
+/* Define for iSCSI uses PF5, FCoE uses PF6 */
+#define CH_PCI_DEVICE_ID_FUNCTION	0x5
+#define CH_PCI_DEVICE_ID_FUNCTION2	0x6
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
 
+#include "t4_pci_id_tbl.h"
 
 static struct pci_driver csio_pci_driver = {
 	.name		= KBUILD_MODNAME,
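With the hard-coded CSIO_DEVID_* table gone, the PCI ID list now comes from the shared t4_pci_id_tbl.h header: csio_init.c defines the BEGIN/ENTRY/END macros (and which PCI functions it services) and then includes the header, which expands one common list of Chelsio device IDs into this driver's table. A conceptual sketch of that expansion, with hypothetical entries rather than the header's real contents:

/* Conceptual sketch only -- not the actual contents of t4_pci_id_tbl.h. */
CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
	CH_PCI_ID_TABLE_ENTRY(0x5601),	/* hypothetical T520-CR entry */
	CH_PCI_ID_TABLE_ENTRY(0x5607),	/* hypothetical T520-SO entry */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;

/* ...which, with the macros defined in csio_init.c above, becomes roughly:
 *
 *	static struct pci_device_id csio_pci_tbl[] = {
 *		{ PCI_VDEVICE(CHELSIO, 0x5601), 0 },
 *		{ PCI_VDEVICE(CHELSIO, 0x5607), 0 },
 *		{ 0, }
 *	};
 */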
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index ffe9be04dc39..87f9280d9b43 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -603,7 +603,7 @@ csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
 	enum fw_retval retval;
 	__be32 nport_id;
 
-	retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+	retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
 	if (retval != FW_SUCCESS) {
 		csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
 		mempool_free(mbp, hw->mb_mempool);
@@ -770,7 +770,7 @@ csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
 			(struct fw_fcoe_fcf_cmd *)(mbp->mb);
 	enum fw_retval retval;
 
-	retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
 	if (retval != FW_SUCCESS) {
 		csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
 			    retval);
@@ -1506,7 +1506,7 @@ csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
 		}
 	} else if (cpl_op == CPL_FW6_PLD) {
 		wr = (struct fw_wr_hdr *) (cmd + 4);
-		if (FW_WR_OP_GET(be32_to_cpu(wr->hi))
+		if (FW_WR_OP_G(be32_to_cpu(wr->hi))
 			== FW_RDEV_WR) {
 
 			rdev_wr = (struct fw_rdev_wr *) (cmd + 4);
@@ -1574,17 +1574,17 @@ out_pld:
 			return;
 		} else {
 			csio_warn(hw, "unexpected WR op(0x%x) recv\n",
-				  FW_WR_OP_GET(be32_to_cpu((wr->hi))));
+				  FW_WR_OP_G(be32_to_cpu((wr->hi))));
 			CSIO_INC_STATS(hw, n_cpl_unexp);
 		}
 	} else if (cpl_op == CPL_FW6_MSG) {
 		wr = (struct fw_wr_hdr *) (cmd);
-		if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
+		if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
 			csio_ln_mgmt_wr_handler(hw, wr,
 					sizeof(struct fw_fcoe_els_ct_wr));
 		} else {
 			csio_warn(hw, "unexpected WR op(0x%x) recv\n",
-				  FW_WR_OP_GET(be32_to_cpu((wr->hi))));
+				  FW_WR_OP_G(be32_to_cpu((wr->hi))));
 			CSIO_INC_STATS(hw, n_cpl_unexp);
 		}
 	} else {
@@ -1668,12 +1668,12 @@ csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
 	__be32 port_id;
 
 	wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
-	wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) |
+	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_FCOE_ELS_CT_WR) |
 				     FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));
 
 	wr_len = DIV_ROUND_UP(wr_len, 16);
-	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) |
-				       FW_WR_LEN16(wr_len));
+	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(flow_id) |
+				       FW_WR_LEN16_V(wr_len));
 	wr->els_ct_type = sub_op;
 	wr->ctl_pri = 0;
 	wr->cp_en_class = 0;
@@ -1757,7 +1757,7 @@ csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
 		csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
 	else {
 		/* Program DSGL to dma payload */
-		dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
+		dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
 					ULPTX_MORE | ULPTX_NSGE(1));
 		dsgl.len0 = cpu_to_be32(pld_len);
 		dsgl.addr0 = cpu_to_be64(pld->paddr);
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 15b635142546..08c265c0f353 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -59,7 +59,7 @@ csio_mb_fw_retval(struct csio_mb *mbp)
 
 	hdr = (struct fw_cmd_hdr *)(mbp->mb);
 
-	return FW_CMD_RETVAL_GET(ntohl(hdr->lo));
+	return FW_CMD_RETVAL_G(ntohl(hdr->lo));
 }
 
 /*
@@ -81,17 +81,17 @@ csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
 
 	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
 
-	cmdp->op_to_write = htonl(FW_CMD_OP(FW_HELLO_CMD) |
-				  FW_CMD_REQUEST | FW_CMD_WRITE);
-	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+	cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_HELLO_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 	cmdp->err_to_clearinit = htonl(
-		FW_HELLO_CMD_MASTERDIS(master == CSIO_MASTER_CANT) |
-		FW_HELLO_CMD_MASTERFORCE(master == CSIO_MASTER_MUST) |
-		FW_HELLO_CMD_MBMASTER(master == CSIO_MASTER_MUST ?
-				      m_mbox : FW_HELLO_CMD_MBMASTER_MASK) |
-		FW_HELLO_CMD_MBASYNCNOT(a_mbox) |
-		FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
-		FW_HELLO_CMD_CLEARINIT);
+		FW_HELLO_CMD_MASTERDIS_V(master == CSIO_MASTER_CANT) |
+		FW_HELLO_CMD_MASTERFORCE_V(master == CSIO_MASTER_MUST) |
+		FW_HELLO_CMD_MBMASTER_V(master == CSIO_MASTER_MUST ?
+					m_mbox : FW_HELLO_CMD_MBMASTER_M) |
+		FW_HELLO_CMD_MBASYNCNOT_V(a_mbox) |
+		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
+		FW_HELLO_CMD_CLEARINIT_F);
 
 }
 
@@ -112,17 +112,17 @@ csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
 	struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
 	uint32_t value;
 
-	*retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+	*retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
 
 	if (*retval == FW_SUCCESS) {
 		hw->fwrev = ntohl(rsp->fwrev);
 
 		value = ntohl(rsp->err_to_clearinit);
-		*mpfn = FW_HELLO_CMD_MBMASTER_GET(value);
+		*mpfn = FW_HELLO_CMD_MBMASTER_G(value);
 
-		if (value & FW_HELLO_CMD_INIT)
+		if (value & FW_HELLO_CMD_INIT_F)
 			*state = CSIO_DEV_STATE_INIT;
-		else if (value & FW_HELLO_CMD_ERR)
+		else if (value & FW_HELLO_CMD_ERR_F)
 			*state = CSIO_DEV_STATE_ERR;
 		else
 			*state = CSIO_DEV_STATE_UNINIT;
@@ -144,9 +144,9 @@ csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
 
 	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
 
-	cmdp->op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |
-				  FW_CMD_REQUEST | FW_CMD_WRITE);
-	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+	cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_BYE_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
 }
 
@@ -167,9 +167,9 @@ csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
 
 	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
 
-	cmdp->op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) |
-				  FW_CMD_REQUEST | FW_CMD_WRITE);
-	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+	cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_RESET_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 	cmdp->val = htonl(reset);
 	cmdp->halt_pkd = htonl(halt);
 
@@ -202,12 +202,12 @@ csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
 
 	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
 
-	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) |
-				FW_CMD_REQUEST |
-				(wr ? FW_CMD_WRITE : FW_CMD_READ) |
-				FW_PARAMS_CMD_PFN(pf) |
-				FW_PARAMS_CMD_VFN(vf));
-	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+	cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) |
+				FW_CMD_REQUEST_F |
+				(wr ? FW_CMD_WRITE_F : FW_CMD_READ_F) |
+				FW_PARAMS_CMD_PFN_V(pf) |
+				FW_PARAMS_CMD_VFN_V(vf));
+	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
 	/* Write Params */
 	if (wr) {
@@ -245,7 +245,7 @@ csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,
 	uint32_t i;
 	__be32 *p = &rsp->param[0].val;
 
-	*retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+	*retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
 
 	if (*retval == FW_SUCCESS)
 		for (i = 0; i < nparams; i++, p += 2)
@@ -271,14 +271,14 @@ csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)
 	 * specified PCI-E Configuration Space register.
 	 */
 	ldst_cmd->op_to_addrspace =
-			htonl(FW_CMD_OP(FW_LDST_CMD) |
-			FW_CMD_REQUEST |
-			FW_CMD_READ |
-			FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
+			htonl(FW_CMD_OP_V(FW_LDST_CMD) |
+			FW_CMD_REQUEST_F |
+			FW_CMD_READ_F |
+			FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
 	ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));
-	ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
+	ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
 	ldst_cmd->u.pcie.ctrl_to_fn =
-		(FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn));
+		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn));
 	ldst_cmd->u.pcie.r = (uint8_t)reg;
 }
 
@@ -306,10 +306,10 @@ csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
 
 	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);
 
-	cmdp->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
-				  FW_CMD_REQUEST |
-				  (wr ? FW_CMD_WRITE : FW_CMD_READ));
-	cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+	cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+				  FW_CMD_REQUEST_F |
+				  (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F));
+	cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
 	/* Read config */
 	if (!wr)
@@ -347,25 +347,25 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
 	      void (*cbfn) (struct csio_hw *, struct csio_mb *))
 {
 	struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
-	unsigned int lfc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
+	unsigned int lfc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
 
 	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
 
-	cmdp->op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
-				   FW_CMD_REQUEST |
-				   (wr ? FW_CMD_EXEC : FW_CMD_READ) |
-				   FW_PORT_CMD_PORTID(portid));
+	cmdp->op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
+				   FW_CMD_REQUEST_F |
+				   (wr ? FW_CMD_EXEC_F : FW_CMD_READ_F) |
+				   FW_PORT_CMD_PORTID_V(portid));
 	if (!wr) {
 		cmdp->action_to_len16 = htonl(
-			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
-			FW_CMD_LEN16(sizeof(*cmdp) / 16));
+			FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+			FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 		return;
 	}
 
 	/* Set port */
 	cmdp->action_to_len16 = htonl(
-			FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
-			FW_CMD_LEN16(sizeof(*cmdp) / 16));
+			FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+			FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
 	if (fc & PAUSE_RX)
 		lfc |= FW_PORT_CAP_FC_RX;
@@ -393,7 +393,7 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
 {
 	struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);
 
-	*retval = FW_CMD_RETVAL_GET(ntohl(rsp->action_to_len16));
+	*retval = FW_CMD_RETVAL_G(ntohl(rsp->action_to_len16));
 
 	if (*retval == FW_SUCCESS)
 		*caps = ntohs(rsp->u.info.pcap);
@@ -415,9 +415,9 @@ csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
 
 	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
 
-	cmdp->op_to_write = htonl(FW_CMD_OP(FW_INITIALIZE_CMD) |
-				  FW_CMD_REQUEST | FW_CMD_WRITE);
-	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+	cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_INITIALIZE_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
 }
 
@@ -443,18 +443,18 @@ csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
 
 	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
 
-	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
-				FW_CMD_REQUEST | FW_CMD_EXEC |
-				FW_IQ_CMD_PFN(iq_params->pfn) |
-				FW_IQ_CMD_VFN(iq_params->vfn));
+	cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) |
+				FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				FW_IQ_CMD_PFN_V(iq_params->pfn) |
+				FW_IQ_CMD_VFN_V(iq_params->vfn));
 
-	cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC |
-				FW_CMD_LEN16(sizeof(*cmdp) / 16));
+	cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F |
+				FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
 	cmdp->type_to_iqandstindex = htonl(
-				FW_IQ_CMD_VIID(iq_params->viid) |
-				FW_IQ_CMD_TYPE(iq_params->type) |
-				FW_IQ_CMD_IQASYNCH(iq_params->iqasynch));
+				FW_IQ_CMD_VIID_V(iq_params->viid) |
+				FW_IQ_CMD_TYPE_V(iq_params->type) |
+				FW_IQ_CMD_IQASYNCH_V(iq_params->iqasynch));
 
 	cmdp->fl0size = htons(iq_params->fl0size);
 	cmdp->fl0size = htons(iq_params->fl1size);
@@ -488,8 +488,8 @@ csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
 	struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
 
 	uint32_t iq_start_stop = (iq_params->iq_start) ?
-					FW_IQ_CMD_IQSTART(1) :
-					FW_IQ_CMD_IQSTOP(1);
+					FW_IQ_CMD_IQSTART_F :
+					FW_IQ_CMD_IQSTOP_F;
 
 	/*
 	 * If this IQ write is cascaded with IQ alloc request, do not
@@ -499,51 +499,51 @@ csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
 	if (!cascaded_req)
 		CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
 
-	cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_IQ_CMD) |
-				FW_CMD_REQUEST | FW_CMD_WRITE |
-				FW_IQ_CMD_PFN(iq_params->pfn) |
-				FW_IQ_CMD_VFN(iq_params->vfn));
+	cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_IQ_CMD) |
+				FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				FW_IQ_CMD_PFN_V(iq_params->pfn) |
+				FW_IQ_CMD_VFN_V(iq_params->vfn));
 	cmdp->alloc_to_len16 |= htonl(iq_start_stop |
-				FW_CMD_LEN16(sizeof(*cmdp) / 16));
+				FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 	cmdp->iqid |= htons(iq_params->iqid);
 	cmdp->fl0id |= htons(iq_params->fl0id);
 	cmdp->fl1id |= htons(iq_params->fl1id);
 	cmdp->type_to_iqandstindex |= htonl(
-			FW_IQ_CMD_IQANDST(iq_params->iqandst) |
-			FW_IQ_CMD_IQANUS(iq_params->iqanus) |
-			FW_IQ_CMD_IQANUD(iq_params->iqanud) |
-			FW_IQ_CMD_IQANDSTINDEX(iq_params->iqandstindex));
+			FW_IQ_CMD_IQANDST_V(iq_params->iqandst) |
+			FW_IQ_CMD_IQANUS_V(iq_params->iqanus) |
+			FW_IQ_CMD_IQANUD_V(iq_params->iqanud) |
+			FW_IQ_CMD_IQANDSTINDEX_V(iq_params->iqandstindex));
 	cmdp->iqdroprss_to_iqesize |= htons(
-			FW_IQ_CMD_IQPCIECH(iq_params->iqpciech) |
-			FW_IQ_CMD_IQDCAEN(iq_params->iqdcaen) |
-			FW_IQ_CMD_IQDCACPU(iq_params->iqdcacpu) |
-			FW_IQ_CMD_IQINTCNTTHRESH(iq_params->iqintcntthresh) |
-			FW_IQ_CMD_IQCPRIO(iq_params->iqcprio) |
-			FW_IQ_CMD_IQESIZE(iq_params->iqesize));
+			FW_IQ_CMD_IQPCIECH_V(iq_params->iqpciech) |
+			FW_IQ_CMD_IQDCAEN_V(iq_params->iqdcaen) |
+			FW_IQ_CMD_IQDCACPU_V(iq_params->iqdcacpu) |
+			FW_IQ_CMD_IQINTCNTTHRESH_V(iq_params->iqintcntthresh) |
+			FW_IQ_CMD_IQCPRIO_V(iq_params->iqcprio) |
+			FW_IQ_CMD_IQESIZE_V(iq_params->iqesize));
 
 	cmdp->iqsize |= htons(iq_params->iqsize);
 	cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);
 
 	if (iq_params->type == 0) {
 		cmdp->iqns_to_fl0congen |= htonl(
-			FW_IQ_CMD_IQFLINTIQHSEN(iq_params->iqflintiqhsen)|
-			FW_IQ_CMD_IQFLINTCONGEN(iq_params->iqflintcongen));
+			FW_IQ_CMD_IQFLINTIQHSEN_V(iq_params->iqflintiqhsen)|
+			FW_IQ_CMD_IQFLINTCONGEN_V(iq_params->iqflintcongen));
 	}
 
 	if (iq_params->fl0size && iq_params->fl0addr &&
 	    (iq_params->fl0id != 0xFFFF)) {
 
 		cmdp->iqns_to_fl0congen |= htonl(
-			FW_IQ_CMD_FL0HOSTFCMODE(iq_params->fl0hostfcmode)|
-			FW_IQ_CMD_FL0CPRIO(iq_params->fl0cprio) |
-			FW_IQ_CMD_FL0PADEN(iq_params->fl0paden) |
-			FW_IQ_CMD_FL0PACKEN(iq_params->fl0packen));
+			FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)|
+			FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio) |
+			FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden) |
+			FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen));
 		cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
-			FW_IQ_CMD_FL0DCAEN(iq_params->fl0dcaen) |
-			FW_IQ_CMD_FL0DCACPU(iq_params->fl0dcacpu) |
-			FW_IQ_CMD_FL0FBMIN(iq_params->fl0fbmin) |
-			FW_IQ_CMD_FL0FBMAX(iq_params->fl0fbmax) |
-			FW_IQ_CMD_FL0CIDXFTHRESH(iq_params->fl0cidxfthresh));
+			FW_IQ_CMD_FL0DCAEN_V(iq_params->fl0dcaen) |
+			FW_IQ_CMD_FL0DCACPU_V(iq_params->fl0dcacpu) |
+			FW_IQ_CMD_FL0FBMIN_V(iq_params->fl0fbmin) |
+			FW_IQ_CMD_FL0FBMAX_V(iq_params->fl0fbmax) |
+			FW_IQ_CMD_FL0CIDXFTHRESH_V(iq_params->fl0cidxfthresh));
 		cmdp->fl0size |= htons(iq_params->fl0size);
 		cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);
 	}
@@ -588,7 +588,7 @@ csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
 {
 	struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);
 
-	*ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+	*ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
 	if (*ret_val == FW_SUCCESS) {
 		iq_params->physiqid = ntohs(rsp->physiqid);
 		iq_params->iqid = ntohs(rsp->iqid);
@@ -622,13 +622,13 @@ csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
 
 	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
 
-	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
-				FW_CMD_REQUEST | FW_CMD_EXEC |
-				FW_IQ_CMD_PFN(iq_params->pfn) |
-				FW_IQ_CMD_VFN(iq_params->vfn));
-	cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE |
-				FW_CMD_LEN16(sizeof(*cmdp) / 16));
-	cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iq_params->type));
+	cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) |
+				FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				FW_IQ_CMD_PFN_V(iq_params->pfn) |
+				FW_IQ_CMD_VFN_V(iq_params->vfn));
+	cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F |
+				FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+	cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iq_params->type));
 
 	cmdp->iqid = htons(iq_params->iqid);
 	cmdp->fl0id = htons(iq_params->fl0id);
@@ -657,12 +657,12 @@ csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
 	struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
 
 	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
-	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
-				FW_CMD_REQUEST | FW_CMD_EXEC |
-				FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
-				FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
-	cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
-				FW_CMD_LEN16(sizeof(*cmdp) / 16));
+	cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+				FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
+				FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
+	cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
+				FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
 } /* csio_mb_eq_ofld_alloc */
 
@@ -694,7 +694,8 @@ csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
 	struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
 
 	uint32_t eq_start_stop = (eq_ofld_params->eqstart) ?
-				FW_EQ_OFLD_CMD_EQSTART : FW_EQ_OFLD_CMD_EQSTOP;
+				FW_EQ_OFLD_CMD_EQSTART_F :
+				FW_EQ_OFLD_CMD_EQSTOP_F;
 
 	/*
 	 * If this EQ write is cascaded with EQ alloc request, do not
@@ -704,29 +705,29 @@ csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
 	if (!cascaded_req)
 		CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
 
-	cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
-				FW_CMD_REQUEST | FW_CMD_WRITE |
-				FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
-				FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
+	cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+				FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
+				FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
 	cmdp->alloc_to_len16 |= htonl(eq_start_stop |
-				FW_CMD_LEN16(sizeof(*cmdp) / 16));
+				FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
-	cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
+	cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid));
 
 	cmdp->fetchszm_to_iqid |= htonl(
-		FW_EQ_OFLD_CMD_HOSTFCMODE(eq_ofld_params->hostfcmode) |
-		FW_EQ_OFLD_CMD_CPRIO(eq_ofld_params->cprio) |
-		FW_EQ_OFLD_CMD_PCIECHN(eq_ofld_params->pciechn) |
-		FW_EQ_OFLD_CMD_IQID(eq_ofld_params->iqid));
+		FW_EQ_OFLD_CMD_HOSTFCMODE_V(eq_ofld_params->hostfcmode) |
+		FW_EQ_OFLD_CMD_CPRIO_V(eq_ofld_params->cprio) |
+		FW_EQ_OFLD_CMD_PCIECHN_V(eq_ofld_params->pciechn) |
+		FW_EQ_OFLD_CMD_IQID_V(eq_ofld_params->iqid));
 
 	cmdp->dcaen_to_eqsize |= htonl(
-		FW_EQ_OFLD_CMD_DCAEN(eq_ofld_params->dcaen) |
-		FW_EQ_OFLD_CMD_DCACPU(eq_ofld_params->dcacpu) |
-		FW_EQ_OFLD_CMD_FBMIN(eq_ofld_params->fbmin) |
-		FW_EQ_OFLD_CMD_FBMAX(eq_ofld_params->fbmax) |
-		FW_EQ_OFLD_CMD_CIDXFTHRESHO(eq_ofld_params->cidxfthresho) |
-		FW_EQ_OFLD_CMD_CIDXFTHRESH(eq_ofld_params->cidxfthresh) |
-		FW_EQ_OFLD_CMD_EQSIZE(eq_ofld_params->eqsize));
+		FW_EQ_OFLD_CMD_DCAEN_V(eq_ofld_params->dcaen) |
+		FW_EQ_OFLD_CMD_DCACPU_V(eq_ofld_params->dcacpu) |
+		FW_EQ_OFLD_CMD_FBMIN_V(eq_ofld_params->fbmin) |
+		FW_EQ_OFLD_CMD_FBMAX_V(eq_ofld_params->fbmax) |
+		FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(eq_ofld_params->cidxfthresho) |
+		FW_EQ_OFLD_CMD_CIDXFTHRESH_V(eq_ofld_params->cidxfthresh) |
+		FW_EQ_OFLD_CMD_EQSIZE_V(eq_ofld_params->eqsize));
 
 	cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);
 
@@ -773,12 +774,12 @@ csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
 {
 	struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);
 
-	*ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+	*ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
 
 	if (*ret_val == FW_SUCCESS) {
-		eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_GET(
+		eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_G(
 						ntohl(rsp->eqid_pkd));
-		eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_GET(
+		eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_G(
 						ntohl(rsp->physeqid_pkd));
 	} else
 		eq_ofld_params->eqid = 0;
@@ -807,13 +808,13 @@ csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
 
 	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
 
-	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
-				FW_CMD_REQUEST | FW_CMD_EXEC |
-				FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
-				FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
-	cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE |
-				FW_CMD_LEN16(sizeof(*cmdp) / 16));
-	cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
+	cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+				FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
+				FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
+	cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F |
+				FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+	cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid));
 
 } /* csio_mb_eq_ofld_free */
 
@@ -840,15 +841,15 @@ csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
 	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
 
 	cmdp->op_to_portid = htonl((
-				    FW_CMD_OP(FW_FCOE_LINK_CMD) |
-				    FW_CMD_REQUEST |
-				    FW_CMD_WRITE |
+				    FW_CMD_OP_V(FW_FCOE_LINK_CMD) |
+				    FW_CMD_REQUEST_F |
+				    FW_CMD_WRITE_F |
 				    FW_FCOE_LINK_CMD_PORTID(port_id)));
 	cmdp->sub_opcode_fcfi = htonl(
 				FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) |
 				FW_FCOE_LINK_CMD_FCFI(fcfi));
 	cmdp->lstatus = link_status;
-	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
 } /* csio_write_fcoe_link_cond_init_mb */
 
@@ -873,11 +874,11 @@ csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
 
 	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
 
-	cmdp->op_to_read = htonl((FW_CMD_OP(FW_FCOE_RES_INFO_CMD) |
-				  FW_CMD_REQUEST |
-				  FW_CMD_READ));
+	cmdp->op_to_read = htonl((FW_CMD_OP_V(FW_FCOE_RES_INFO_CMD) |
+				  FW_CMD_REQUEST_F |
+				  FW_CMD_READ_F));
 
-	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
 } /* csio_fcoe_read_res_info_init_mb */
 
@@ -908,13 +909,13 @@ csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
 
 	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
 
-	cmdp->op_to_fcfi = htonl((FW_CMD_OP(FW_FCOE_VNP_CMD) |
-				  FW_CMD_REQUEST |
-				  FW_CMD_EXEC |
+	cmdp->op_to_fcfi = htonl((FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
+				  FW_CMD_REQUEST_F |
+				  FW_CMD_EXEC_F |
 				  FW_FCOE_VNP_CMD_FCFI(fcfi)));
 
 	cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC |
-				     FW_CMD_LEN16(sizeof(*cmdp) / 16));
+				     FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
 	cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
 
@@ -948,11 +949,11 @@ csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
948 (struct fw_fcoe_vnp_cmd *)(mbp->mb); 949 (struct fw_fcoe_vnp_cmd *)(mbp->mb);
949 950
950 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); 951 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
951 cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) | 952 cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
952 FW_CMD_REQUEST | 953 FW_CMD_REQUEST_F |
953 FW_CMD_READ | 954 FW_CMD_READ_F |
954 FW_FCOE_VNP_CMD_FCFI(fcfi)); 955 FW_FCOE_VNP_CMD_FCFI(fcfi));
955 cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); 956 cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
956 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi)); 957 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
957} 958}
958 959
@@ -978,12 +979,12 @@ csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
978 979
979 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); 980 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
980 981
981 cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) | 982 cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
982 FW_CMD_REQUEST | 983 FW_CMD_REQUEST_F |
983 FW_CMD_EXEC | 984 FW_CMD_EXEC_F |
984 FW_FCOE_VNP_CMD_FCFI(fcfi)); 985 FW_FCOE_VNP_CMD_FCFI(fcfi));
985 cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE | 986 cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
986 FW_CMD_LEN16(sizeof(*cmdp) / 16)); 987 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
987 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi)); 988 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
988} 989}
989 990
@@ -1009,11 +1010,11 @@ csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
1009 1010
1010 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); 1011 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
1011 1012
1012 cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_FCF_CMD) | 1013 cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_FCF_CMD) |
1013 FW_CMD_REQUEST | 1014 FW_CMD_REQUEST_F |
1014 FW_CMD_READ | 1015 FW_CMD_READ_F |
1015 FW_FCOE_FCF_CMD_FCFI(fcfi)); 1016 FW_FCOE_FCF_CMD_FCFI(fcfi));
1016 cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); 1017 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
1017 1018
1018} /* csio_fcoe_read_fcf_init_mb */ 1019} /* csio_fcoe_read_fcf_init_mb */
1019 1020
@@ -1029,9 +1030,9 @@ csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
1029 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1); 1030 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
1030 mbp->mb_size = 64; 1031 mbp->mb_size = 64;
1031 1032
1032 cmdp->op_to_flowid = htonl(FW_CMD_OP(FW_FCOE_STATS_CMD) | 1033 cmdp->op_to_flowid = htonl(FW_CMD_OP_V(FW_FCOE_STATS_CMD) |
1033 FW_CMD_REQUEST | FW_CMD_READ); 1034 FW_CMD_REQUEST_F | FW_CMD_READ_F);
1034 cmdp->free_to_len16 = htonl(FW_CMD_LEN16(CSIO_MAX_MB_SIZE/16)); 1035 cmdp->free_to_len16 = htonl(FW_CMD_LEN16_V(CSIO_MAX_MB_SIZE/16));
1035 1036
1036 cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) | 1037 cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) |
1037 FW_FCOE_STATS_CMD_PORT(portparams->portid); 1038 FW_FCOE_STATS_CMD_PORT(portparams->portid);
@@ -1053,7 +1054,7 @@ csio_mb_process_portparams_rsp(struct csio_hw *hw,
1053 uint8_t *src; 1054 uint8_t *src;
1054 uint8_t *dst; 1055 uint8_t *dst;
1055 1056
1056 *retval = FW_CMD_RETVAL_GET(ntohl(rsp->free_to_len16)); 1057 *retval = FW_CMD_RETVAL_G(ntohl(rsp->free_to_len16));
1057 1058
1058 memset(&stats, 0, sizeof(struct fw_fcoe_port_stats)); 1059 memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));
1059 1060
@@ -1125,7 +1126,7 @@ csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
1125{ 1126{
1126 struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd; 1127 struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;
1127 1128
1128 if ((FW_DEBUG_CMD_TYPE_GET(ntohl(dbg->op_type))) == 1) { 1129 if ((FW_DEBUG_CMD_TYPE_G(ntohl(dbg->op_type))) == 1) {
1129 csio_info(hw, "FW print message:\n"); 1130 csio_info(hw, "FW print message:\n");
1130 csio_info(hw, "\tdebug->dprtstridx = %d\n", 1131 csio_info(hw, "\tdebug->dprtstridx = %d\n",
1131 ntohs(dbg->u.prt.dprtstridx)); 1132 ntohs(dbg->u.prt.dprtstridx));
@@ -1305,7 +1306,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
1305 hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg)); 1306 hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
1306 fw_hdr = (struct fw_cmd_hdr *)&hdr; 1307 fw_hdr = (struct fw_cmd_hdr *)&hdr;
1307 1308
1308 switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) { 1309 switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) {
1309 case FW_DEBUG_CMD: 1310 case FW_DEBUG_CMD:
1310 csio_mb_debug_cmd_handler(hw); 1311 csio_mb_debug_cmd_handler(hw);
1311 continue; 1312 continue;
@@ -1406,9 +1407,9 @@ csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
1406 1407
1407 if (opcode == FW_PORT_CMD) { 1408 if (opcode == FW_PORT_CMD) {
1408 pcmd = (struct fw_port_cmd *)cmd; 1409 pcmd = (struct fw_port_cmd *)cmd;
1409 port_id = FW_PORT_CMD_PORTID_GET( 1410 port_id = FW_PORT_CMD_PORTID_G(
1410 ntohl(pcmd->op_to_portid)); 1411 ntohl(pcmd->op_to_portid));
1411 action = FW_PORT_CMD_ACTION_GET( 1412 action = FW_PORT_CMD_ACTION_G(
1412 ntohl(pcmd->action_to_len16)); 1413 ntohl(pcmd->action_to_len16));
1413 if (action != FW_PORT_ACTION_GET_PORT_INFO) { 1414 if (action != FW_PORT_ACTION_GET_PORT_INFO) {
1414 csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n", 1415 csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
@@ -1417,15 +1418,15 @@ csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
1417 } 1418 }
1418 1419
1419 link_status = ntohl(pcmd->u.info.lstatus_to_modtype); 1420 link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
1420 mod_type = FW_PORT_CMD_MODTYPE_GET(link_status); 1421 mod_type = FW_PORT_CMD_MODTYPE_G(link_status);
1421 1422
1422 hw->pport[port_id].link_status = 1423 hw->pport[port_id].link_status =
1423 FW_PORT_CMD_LSTATUS_GET(link_status); 1424 FW_PORT_CMD_LSTATUS_G(link_status);
1424 hw->pport[port_id].link_speed = 1425 hw->pport[port_id].link_speed =
1425 FW_PORT_CMD_LSPEED_GET(link_status); 1426 FW_PORT_CMD_LSPEED_G(link_status);
1426 1427
1427 csio_info(hw, "Port:%x - LINK %s\n", port_id, 1428 csio_info(hw, "Port:%x - LINK %s\n", port_id,
1428 FW_PORT_CMD_LSTATUS_GET(link_status) ? "UP" : "DOWN"); 1429 FW_PORT_CMD_LSTATUS_G(link_status) ? "UP" : "DOWN");
1429 1430
1430 if (mod_type != hw->pport[port_id].mod_type) { 1431 if (mod_type != hw->pport[port_id].mod_type) {
1431 hw->pport[port_id].mod_type = mod_type; 1432 hw->pport[port_id].mod_type = mod_type;
@@ -1498,7 +1499,7 @@ csio_mb_isr_handler(struct csio_hw *hw)
1498 hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg)); 1499 hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
1499 fw_hdr = (struct fw_cmd_hdr *)&hdr; 1500 fw_hdr = (struct fw_cmd_hdr *)&hdr;
1500 1501
1501 switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) { 1502 switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) {
1502 case FW_DEBUG_CMD: 1503 case FW_DEBUG_CMD:
1503 csio_mb_debug_cmd_handler(hw); 1504 csio_mb_debug_cmd_handler(hw);
1504 return -EINVAL; 1505 return -EINVAL;
@@ -1571,11 +1572,11 @@ csio_mb_tmo_handler(struct csio_hw *hw)
1571 fw_hdr = (struct fw_cmd_hdr *)(mbp->mb); 1572 fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);
1572 1573
1573 csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn, 1574 csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
1574 FW_CMD_OP_GET(ntohl(fw_hdr->hi))); 1575 FW_CMD_OP_G(ntohl(fw_hdr->hi)));
1575 1576
1576 mbm->mcurrent = NULL; 1577 mbm->mcurrent = NULL;
1577 CSIO_INC_STATS(mbm, n_tmo); 1578 CSIO_INC_STATS(mbm, n_tmo);
1578 fw_hdr->lo = htonl(FW_CMD_RETVAL(FW_ETIMEDOUT)); 1579 fw_hdr->lo = htonl(FW_CMD_RETVAL_V(FW_ETIMEDOUT));
1579 1580
1580 return mbp; 1581 return mbp;
1581} 1582}
@@ -1624,10 +1625,10 @@ csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
1624 hdr = (struct fw_cmd_hdr *)(mbp->mb); 1625 hdr = (struct fw_cmd_hdr *)(mbp->mb);
1625 1626
1626 csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n", 1627 csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
1627 hw->pfn, FW_CMD_OP_GET(ntohl(hdr->hi))); 1628 hw->pfn, FW_CMD_OP_G(ntohl(hdr->hi)));
1628 1629
1629 CSIO_INC_STATS(mbm, n_cancel); 1630 CSIO_INC_STATS(mbm, n_cancel);
1630 hdr->lo = htonl(FW_CMD_RETVAL(FW_HOSTERROR)); 1631 hdr->lo = htonl(FW_CMD_RETVAL_V(FW_HOSTERROR));
1631 } 1632 }
1632} 1633}
1633 1634
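The whole csio_mb.c hunk above is a mechanical switch to the renamed cxgb4 firmware-interface macros: the _V suffix packs a value into its bit-field, _F is the pre-shifted single-bit flag form, and _G extracts a field back out of a 32-bit word (replacing the old _GET spelling). A minimal standalone sketch of that pattern follows; the DEMO_* names and field positions are invented for illustration and are not the real cxgb4 layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only -- not the real cxgb4 definitions. */
#define DEMO_OP_S     24            /* field position (hypothetical)   */
#define DEMO_OP_M     0xffU         /* field mask after shifting down  */
#define DEMO_OP_V(x)  ((x) << DEMO_OP_S)                 /* pack value */
#define DEMO_OP_G(x)  (((x) >> DEMO_OP_S) & DEMO_OP_M)   /* extract    */

#define DEMO_REQUEST_S    23
#define DEMO_REQUEST_V(x) ((x) << DEMO_REQUEST_S)
#define DEMO_REQUEST_F    DEMO_REQUEST_V(1U)             /* flag form  */

int main(void)
{
    uint32_t hdr = DEMO_OP_V(0x2bU) | DEMO_REQUEST_F;

    printf("op=0x%x request=%u\n",
           DEMO_OP_G(hdr), (hdr & DEMO_REQUEST_F) ? 1 : 0);
    return 0;
}

The rename is behaviour-preserving; the gain is that value, flag and getter forms are distinguishable at the call site without consulting the header.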
diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h
index a84179e54ab9..1bc82d0bc260 100644
--- a/drivers/scsi/csiostor/csio_mb.h
+++ b/drivers/scsi/csiostor/csio_mb.h
@@ -79,14 +79,14 @@ enum csio_dev_state {
79}; 79};
80 80
81#define FW_PARAM_DEV(param) \ 81#define FW_PARAM_DEV(param) \
82 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 82 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
83 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 83 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
84 84
85#define FW_PARAM_PFVF(param) \ 85#define FW_PARAM_PFVF(param) \
86 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 86 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
87 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \ 87 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
88 FW_PARAMS_PARAM_Y(0) | \ 88 FW_PARAMS_PARAM_Y_V(0) | \
89 FW_PARAMS_PARAM_Z(0)) 89 FW_PARAMS_PARAM_Z_V(0))
90 90
91enum { 91enum {
92 PAUSE_RX = 1 << 0, 92 PAUSE_RX = 1 << 0,
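FW_PARAM_DEV() and FW_PARAM_PFVF() compose a firmware parameter identifier out of a mnemonic plus X/Y/Z sub-fields; only the helper macro names change in this hunk. A sketch of that composition, with invented field offsets (the real layout lives in the cxgb4 firmware header):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field positions, for illustration only. */
#define P_MNEM_V(x)  ((uint32_t)(x) << 24)
#define P_X_V(x)     ((uint32_t)(x) << 16)
#define P_Y_V(x)     ((uint32_t)(x) << 8)
#define P_Z_V(x)     ((uint32_t)(x) << 0)

#define P_MNEM_DEV    1
#define P_DEV_PORTVEC 2   /* made-up device parameter index */

/* Same shape as the FW_PARAM_DEV() helper in the hunk above. */
#define PARAM_DEV(param) (P_MNEM_V(P_MNEM_DEV) | P_X_V(param))

int main(void)
{
    printf("encoded param = 0x%08x\n", PARAM_DEV(P_DEV_PORTVEC));
    return 0;
}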
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 4d0b6ce55f20..51ea5dc5f084 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -209,10 +209,10 @@ csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
209 struct csio_dma_buf *dma_buf; 209 struct csio_dma_buf *dma_buf;
210 uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; 210 uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
211 211
212 wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_CMD_WR) | 212 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) |
213 FW_SCSI_CMD_WR_IMMDLEN(imm)); 213 FW_SCSI_CMD_WR_IMMDLEN(imm));
214 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) | 214 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
215 FW_WR_LEN16( 215 FW_WR_LEN16_V(
216 DIV_ROUND_UP(size, 16))); 216 DIV_ROUND_UP(size, 16)));
217 217
218 wr->cookie = (uintptr_t) req; 218 wr->cookie = (uintptr_t) req;
@@ -301,7 +301,7 @@ csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
301 struct csio_dma_buf *dma_buf; 301 struct csio_dma_buf *dma_buf;
302 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); 302 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
303 303
304 sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_MORE | 304 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE |
305 ULPTX_NSGE(req->nsge)); 305 ULPTX_NSGE(req->nsge));
306 /* Now add the data SGLs */ 306 /* Now add the data SGLs */
307 if (likely(!req->dcopy)) { 307 if (likely(!req->dcopy)) {
@@ -370,10 +370,10 @@ csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
370 uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; 370 uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
371 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); 371 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
372 372
373 wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_READ_WR) | 373 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) |
374 FW_SCSI_READ_WR_IMMDLEN(imm)); 374 FW_SCSI_READ_WR_IMMDLEN(imm));
375 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) | 375 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
376 FW_WR_LEN16(DIV_ROUND_UP(size, 16))); 376 FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
377 wr->cookie = (uintptr_t)req; 377 wr->cookie = (uintptr_t)req;
378 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); 378 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
379 wr->tmo_val = (uint8_t)(req->tmo); 379 wr->tmo_val = (uint8_t)(req->tmo);
@@ -423,10 +423,10 @@ csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
423 uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; 423 uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
424 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); 424 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
425 425
426 wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_WRITE_WR) | 426 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) |
427 FW_SCSI_WRITE_WR_IMMDLEN(imm)); 427 FW_SCSI_WRITE_WR_IMMDLEN(imm));
428 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) | 428 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
429 FW_WR_LEN16(DIV_ROUND_UP(size, 16))); 429 FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
430 wr->cookie = (uintptr_t)req; 430 wr->cookie = (uintptr_t)req;
431 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); 431 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
432 wr->tmo_val = (uint8_t)(req->tmo); 432 wr->tmo_val = (uint8_t)(req->tmo);
@@ -653,9 +653,9 @@ csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
653 struct csio_rnode *rn = req->rnode; 653 struct csio_rnode *rn = req->rnode;
654 struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr; 654 struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;
655 655
656 wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_ABRT_CLS_WR)); 656 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR));
657 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) | 657 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
658 FW_WR_LEN16( 658 FW_WR_LEN16_V(
659 DIV_ROUND_UP(size, 16))); 659 DIV_ROUND_UP(size, 16)));
660 660
661 wr->cookie = (uintptr_t) req; 661 wr->cookie = (uintptr_t) req;
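All of the csio_scsi.c work-request builders express the WR length in 16-byte units, hence the FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)) idiom, which survives the rename unchanged apart from the _V suffix. A standalone illustration of that rounding (DEMO_LEN16_V and the field width are assumptions):

#include <stdint.h>
#include <stdio.h>

/* Kernel's DIV_ROUND_UP, reproduced so the sketch is self-contained. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Hypothetical LEN16 field: low 8 bits of the word. */
#define DEMO_LEN16_V(x) ((x) & 0xffU)

int main(void)
{
    uint32_t wr_size = 48 + 12;               /* header + payload, bytes */
    uint32_t len16   = DIV_ROUND_UP(wr_size, 16);

    printf("flowid_len16 low bits = 0x%x (%u * 16 >= %u bytes)\n",
           DEMO_LEN16_V(len16), len16, wr_size);
    return 0;
}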
diff --git a/drivers/scsi/csiostor/csio_wr.h b/drivers/scsi/csiostor/csio_wr.h
index 8d30e7ac1f5e..0c0dd9a658cc 100644
--- a/drivers/scsi/csiostor/csio_wr.h
+++ b/drivers/scsi/csiostor/csio_wr.h
@@ -101,7 +101,7 @@
101 101
102/* WR status is at the same position as retval in a CMD header */ 102/* WR status is at the same position as retval in a CMD header */
103#define csio_wr_status(_wr) \ 103#define csio_wr_status(_wr) \
104 (FW_CMD_RETVAL_GET(ntohl(((struct fw_cmd_hdr *)(_wr))->lo))) 104 (FW_CMD_RETVAL_G(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))
105 105
106struct csio_hw; 106struct csio_hw;
107 107
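As the comment in csio_wr.h notes, the WR status sits at the same position as retval in a CMD header, so csio_wr_status() can reuse the generic FW_CMD_RETVAL_G() getter on the header's lo word. A self-contained sketch of that extract-after-byte-swap pattern, with an invented field position:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* ntohl(), htonl() */

/* Hypothetical retval field: bits 8..15 of the header's lo word. */
#define DEMO_RETVAL_S 8
#define DEMO_RETVAL_M 0xffU
#define DEMO_RETVAL_V(x) ((x) << DEMO_RETVAL_S)
#define DEMO_RETVAL_G(x) (((x) >> DEMO_RETVAL_S) & DEMO_RETVAL_M)

struct demo_cmd_hdr {          /* stand-in for struct fw_cmd_hdr */
    uint32_t hi;
    uint32_t lo;               /* big-endian on the wire */
};

int main(void)
{
    struct demo_cmd_hdr hdr = { 0, htonl(DEMO_RETVAL_V(22U)) };

    /* Same shape as csio_wr_status(): byte-swap, then extract the field. */
    printf("wr status = %u\n", DEMO_RETVAL_G(ntohl(hdr.lo)));
    return 0;
}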
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index e6c3f55d9d36..69fbfc89efb6 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -189,18 +189,18 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
189 unsigned int qid_atid = ((unsigned int)csk->atid) | 189 unsigned int qid_atid = ((unsigned int)csk->atid) |
190 (((unsigned int)csk->rss_qid) << 14); 190 (((unsigned int)csk->rss_qid) << 14);
191 191
192 opt0 = KEEP_ALIVE(1) | 192 opt0 = KEEP_ALIVE_F |
193 WND_SCALE(wscale) | 193 WND_SCALE_V(wscale) |
194 MSS_IDX(csk->mss_idx) | 194 MSS_IDX_V(csk->mss_idx) |
195 L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) | 195 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
196 TX_CHAN(csk->tx_chan) | 196 TX_CHAN_V(csk->tx_chan) |
197 SMAC_SEL(csk->smac_idx) | 197 SMAC_SEL_V(csk->smac_idx) |
198 ULP_MODE(ULP_MODE_ISCSI) | 198 ULP_MODE_V(ULP_MODE_ISCSI) |
199 RCV_BUFSIZ(cxgb4i_rcv_win >> 10); 199 RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
200 opt2 = RX_CHANNEL(0) | 200 opt2 = RX_CHANNEL_V(0) |
201 RSS_QUEUE_VALID | 201 RSS_QUEUE_VALID_F |
202 (1 << 20) | 202 (RX_FC_DISABLE_F) |
203 RSS_QUEUE(csk->rss_qid); 203 RSS_QUEUE_V(csk->rss_qid);
204 204
205 if (is_t4(lldi->adapter_type)) { 205 if (is_t4(lldi->adapter_type)) {
206 struct cpl_act_open_req *req = 206 struct cpl_act_open_req *req =
@@ -217,7 +217,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
217 req->params = cpu_to_be32(cxgb4_select_ntuple( 217 req->params = cpu_to_be32(cxgb4_select_ntuple(
218 csk->cdev->ports[csk->port_id], 218 csk->cdev->ports[csk->port_id],
219 csk->l2t)); 219 csk->l2t));
220 opt2 |= 1 << 22; 220 opt2 |= RX_FC_VALID_F;
221 req->opt2 = cpu_to_be32(opt2); 221 req->opt2 = cpu_to_be32(opt2);
222 222
223 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 223 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -237,7 +237,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
237 req->local_ip = csk->saddr.sin_addr.s_addr; 237 req->local_ip = csk->saddr.sin_addr.s_addr;
238 req->peer_ip = csk->daddr.sin_addr.s_addr; 238 req->peer_ip = csk->daddr.sin_addr.s_addr;
239 req->opt0 = cpu_to_be64(opt0); 239 req->opt0 = cpu_to_be64(opt0);
240 req->params = cpu_to_be64(V_FILTER_TUPLE( 240 req->params = cpu_to_be64(FILTER_TUPLE_V(
241 cxgb4_select_ntuple( 241 cxgb4_select_ntuple(
242 csk->cdev->ports[csk->port_id], 242 csk->cdev->ports[csk->port_id],
243 csk->l2t))); 243 csk->l2t)));
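Besides the renames, the IPv4 send_act_open_req() path stops OR-ing raw bits into opt2: the literals (1 << 20) and (1 << 22) are replaced by the named flags RX_FC_DISABLE_F and RX_FC_VALID_F that the IPv6 path already used. Assuming the substitution is behaviour-preserving, as the hunk implies, the encodings are identical:

#include <stdint.h>
#include <stdio.h>

/* Bit positions taken from the literals the named flags replace above. */
#define DEMO_RX_FC_DISABLE_F (1U << 20)
#define DEMO_RX_FC_VALID_F   (1U << 22)

int main(void)
{
    uint32_t opt2_old = (1U << 20) | (1U << 22);
    uint32_t opt2_new = DEMO_RX_FC_DISABLE_F | DEMO_RX_FC_VALID_F;

    printf("same encoding: %s\n", opt2_old == opt2_new ? "yes" : "no");
    return 0;
}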
@@ -272,19 +272,19 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
272 unsigned int qid_atid = ((unsigned int)csk->atid) | 272 unsigned int qid_atid = ((unsigned int)csk->atid) |
273 (((unsigned int)csk->rss_qid) << 14); 273 (((unsigned int)csk->rss_qid) << 14);
274 274
275 opt0 = KEEP_ALIVE(1) | 275 opt0 = KEEP_ALIVE_F |
276 WND_SCALE(wscale) | 276 WND_SCALE_V(wscale) |
277 MSS_IDX(csk->mss_idx) | 277 MSS_IDX_V(csk->mss_idx) |
278 L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) | 278 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
279 TX_CHAN(csk->tx_chan) | 279 TX_CHAN_V(csk->tx_chan) |
280 SMAC_SEL(csk->smac_idx) | 280 SMAC_SEL_V(csk->smac_idx) |
281 ULP_MODE(ULP_MODE_ISCSI) | 281 ULP_MODE_V(ULP_MODE_ISCSI) |
282 RCV_BUFSIZ(cxgb4i_rcv_win >> 10); 282 RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
283 283
284 opt2 = RX_CHANNEL(0) | 284 opt2 = RX_CHANNEL_V(0) |
285 RSS_QUEUE_VALID | 285 RSS_QUEUE_VALID_F |
286 RX_FC_DISABLE | 286 RX_FC_DISABLE_F |
287 RSS_QUEUE(csk->rss_qid); 287 RSS_QUEUE_V(csk->rss_qid);
288 288
289 if (t4) { 289 if (t4) {
290 struct cpl_act_open_req6 *req = 290 struct cpl_act_open_req6 *req =
@@ -305,7 +305,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
305 305
306 req->opt0 = cpu_to_be64(opt0); 306 req->opt0 = cpu_to_be64(opt0);
307 307
308 opt2 |= RX_FC_VALID; 308 opt2 |= RX_FC_VALID_F;
309 req->opt2 = cpu_to_be32(opt2); 309 req->opt2 = cpu_to_be32(opt2);
310 310
311 req->params = cpu_to_be32(cxgb4_select_ntuple( 311 req->params = cpu_to_be32(cxgb4_select_ntuple(
@@ -328,10 +328,10 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
328 8); 328 8);
329 req->opt0 = cpu_to_be64(opt0); 329 req->opt0 = cpu_to_be64(opt0);
330 330
331 opt2 |= T5_OPT_2_VALID; 331 opt2 |= T5_OPT_2_VALID_F;
332 req->opt2 = cpu_to_be32(opt2); 332 req->opt2 = cpu_to_be32(opt2);
333 333
334 req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple( 334 req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
335 csk->cdev->ports[csk->port_id], 335 csk->cdev->ports[csk->port_id],
336 csk->l2t))); 336 csk->l2t)));
337 } 337 }
@@ -452,7 +452,8 @@ static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
452 INIT_TP_WR(req, csk->tid); 452 INIT_TP_WR(req, csk->tid);
453 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, 453 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
454 csk->tid)); 454 csk->tid));
455 req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1)); 455 req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
456 | RX_FORCE_ACK_F);
456 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 457 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
457 return credits; 458 return credits;
458} 459}
@@ -500,10 +501,10 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
500 skb = alloc_wr(flowclen, 0, GFP_ATOMIC); 501 skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
501 flowc = (struct fw_flowc_wr *)skb->head; 502 flowc = (struct fw_flowc_wr *)skb->head;
502 flowc->op_to_nparams = 503 flowc->op_to_nparams =
503 htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8)); 504 htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(8));
504 flowc->flowid_len16 = 505 flowc->flowid_len16 =
505 htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) | 506 htonl(FW_WR_LEN16_V(DIV_ROUND_UP(72, 16)) |
506 FW_WR_FLOWID(csk->tid)); 507 FW_WR_FLOWID_V(csk->tid));
507 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 508 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
508 flowc->mnemval[0].val = htonl(csk->cdev->pfvf); 509 flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
509 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 510 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
@@ -543,30 +544,31 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
543{ 544{
544 struct fw_ofld_tx_data_wr *req; 545 struct fw_ofld_tx_data_wr *req;
545 unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3; 546 unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
546 unsigned int wr_ulp_mode = 0; 547 unsigned int wr_ulp_mode = 0, val;
547 548
548 req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req)); 549 req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
549 550
550 if (is_ofld_imm(skb)) { 551 if (is_ofld_imm(skb)) {
551 req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) | 552 req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
552 FW_WR_COMPL(1) | 553 FW_WR_COMPL_F |
553 FW_WR_IMMDLEN(dlen)); 554 FW_WR_IMMDLEN_V(dlen));
554 req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) | 555 req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
555 FW_WR_LEN16(credits)); 556 FW_WR_LEN16_V(credits));
556 } else { 557 } else {
557 req->op_to_immdlen = 558 req->op_to_immdlen =
558 cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) | 559 cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
559 FW_WR_COMPL(1) | 560 FW_WR_COMPL_F |
560 FW_WR_IMMDLEN(0)); 561 FW_WR_IMMDLEN_V(0));
561 req->flowid_len16 = 562 req->flowid_len16 =
562 cpu_to_be32(FW_WR_FLOWID(csk->tid) | 563 cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
563 FW_WR_LEN16(credits)); 564 FW_WR_LEN16_V(credits));
564 } 565 }
565 if (submode) 566 if (submode)
566 wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) | 567 wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
567 FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode); 568 FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
569 val = skb_peek(&csk->write_queue) ? 0 : 1;
568 req->tunnel_to_proxy = htonl(wr_ulp_mode | 570 req->tunnel_to_proxy = htonl(wr_ulp_mode |
569 FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1)); 571 FW_OFLD_TX_DATA_WR_SHOVE_V(val));
570 req->plen = htonl(len); 572 req->plen = htonl(len);
571 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) 573 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
572 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); 574 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
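make_tx_data_wr() also pulls the SHOVE computation into a local val before packing it, rather than nesting the skb_peek() ternary inside the macro argument. The intent appears to be to request an immediate push only when the socket's write queue holds nothing else; a toy version of that decision (DEMO_SHOVE_V and its bit position are invented):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical SHOVE field helper, same shape as the _V macros above. */
#define DEMO_SHOVE_V(x) ((uint32_t)(x) << 31)

/* Same decision as the hunk: skb_peek() found something -> 0, else 1. */
static uint32_t demo_shove_bits(int queue_has_more_skbs)
{
    uint32_t val = queue_has_more_skbs ? 0 : 1;
    return DEMO_SHOVE_V(val);
}

int main(void)
{
    printf("empty queue  -> 0x%08x\n", demo_shove_bits(0));
    printf("queue backed -> 0x%08x\n", demo_shove_bits(1));
    return 0;
}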
@@ -1445,16 +1447,16 @@ static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
1445 1447
1446 INIT_ULPTX_WR(req, wr_len, 0, 0); 1448 INIT_ULPTX_WR(req, wr_len, 0, 0);
1447 if (is_t4(lldi->adapter_type)) 1449 if (is_t4(lldi->adapter_type))
1448 req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | 1450 req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
1449 (ULP_MEMIO_ORDER(1))); 1451 (ULP_MEMIO_ORDER_F));
1450 else 1452 else
1451 req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | 1453 req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
1452 (V_T5_ULP_MEMIO_IMM(1))); 1454 (T5_ULP_MEMIO_IMM_F));
1453 req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5)); 1455 req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
1454 req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5)); 1456 req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
1455 req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); 1457 req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
1456 1458
1457 idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM)); 1459 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
1458 idata->len = htonl(dlen); 1460 idata->len = htonl(dlen);
1459} 1461}
1460 1462
@@ -1678,7 +1680,8 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
1678 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr); 1680 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
1679 cdev->itp = &cxgb4i_iscsi_transport; 1681 cdev->itp = &cxgb4i_iscsi_transport;
1680 1682
1681 cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8; 1683 cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
1684 << FW_VIID_PFN_S;
1682 pr_info("cdev 0x%p,%s, pfvf %u.\n", 1685 pr_info("cdev 0x%p,%s, pfvf %u.\n",
1683 cdev, lldi->ports[0]->name, cdev->pfvf); 1686 cdev, lldi->ports[0]->name, cdev->pfvf);
1684 1687
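The pfvf computation in t4_uld_add() now shifts the extracted PF number by the named constant FW_VIID_PFN_S instead of a literal 8. The sketch below assumes a shift of 8 (matching the literal being replaced) and an invented field width:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical VIID layout: PFN at bit 8; width here is an assumption. */
#define DEMO_VIID_PFN_S 8
#define DEMO_VIID_PFN_M 0x7U
#define DEMO_VIID_PFN_G(x) (((x) >> DEMO_VIID_PFN_S) & DEMO_VIID_PFN_M)

int main(void)
{
    uint16_t viid = 0x0512;     /* made-up virtual interface id */

    /* Old: FW_VIID_PFN_GET(viid) << 8; new: ..._G(viid) << ..._S.
     * Both isolate the PF number and put it back at its field offset. */
    unsigned int pfvf = DEMO_VIID_PFN_G(viid) << DEMO_VIID_PFN_S;

    printf("viid=0x%04x -> pfvf=0x%x\n", viid, pfvf);
    return 0;
}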
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 308a016fdaea..cd00a6cdf55b 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1671,10 +1671,8 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1671 fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) { 1671 fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
1672 /* must set skb->dev before calling vlan_put_tag */ 1672 /* must set skb->dev before calling vlan_put_tag */
1673 skb->dev = fcoe->realdev; 1673 skb->dev = fcoe->realdev;
1674 skb = __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 1674 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1675 vlan_dev_vlan_id(fcoe->netdev)); 1675 vlan_dev_vlan_id(fcoe->netdev));
1676 if (!skb)
1677 return -ENOMEM;
1678 } else 1676 } else
1679 skb->dev = fcoe->netdev; 1677 skb->dev = fcoe->netdev;
1680 1678
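The fcoe_xmit() hunk can drop the NULL check because __vlan_hwaccel_put_tag() no longer hands back a (possibly reallocated) skb in this series; the VLAN protocol and TCI are recorded in the skb's metadata for later driver/hardware insertion, so there is no failure path. A toy illustration of that metadata-tagging idea (the struct and names are stand-ins, not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the sk_buff fields involved; the real definitions
 * live in <linux/if_vlan.h> and may differ in detail. */
struct demo_skb {
    uint16_t vlan_proto;   /* e.g. 0x8100 for 802.1Q */
    uint16_t vlan_tci;     /* tag control info for hw-accelerated insertion */
};

/* Hardware-accelerated tagging records the tag in metadata only,
 * so no payload is touched and nothing can fail. */
static void demo_hwaccel_put_tag(struct demo_skb *skb,
                                 uint16_t proto, uint16_t tci)
{
    skb->vlan_proto = proto;
    skb->vlan_tci   = tci;
}

int main(void)
{
    struct demo_skb skb = { 0, 0 };

    demo_hwaccel_put_tag(&skb, 0x8100, 42);
    printf("proto=0x%04x tci=%u\n", skb.vlan_proto, skb.vlan_tci);
    return 0;
}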