-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c     60
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h      5
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c   33
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c      32
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c       1
5 files changed, 90 insertions(+), 41 deletions(-)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index cd213d967529..fead5c65a4f0 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -209,7 +209,6 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 
 	if (base_status != MCC_STATUS_SUCCESS &&
 	    !be_skip_err_log(opcode, base_status, addl_status)) {
-
 		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
 			dev_warn(&adapter->pdev->dev,
 				 "VF is not privileged to issue opcode %d-%d\n",
@@ -317,7 +316,7 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
 					 struct be_mcc_compl *cmp)
 {
 	u8 event_type = 0;
-	struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
+	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;
 
 	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
 			ASYNC_EVENT_TYPE_MASK;
@@ -593,6 +592,7 @@ static int lancer_wait_ready(struct be_adapter *adapter)
 static bool lancer_provisioning_error(struct be_adapter *adapter)
 {
 	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
+
 	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
 		sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
@@ -675,7 +675,6 @@ int be_fw_wait_ready(struct be_adapter *adapter)
 	return -1;
 }
 
-
 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
 {
 	return &wrb->payload.sgl[0];
@@ -922,6 +921,7 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
 	status = be_mbox_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
+
 		eqo->q.id = le16_to_cpu(resp->eq_id);
 		eqo->msix_idx =
 			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
@@ -956,7 +956,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 	if (permanent) {
 		req->permanent = 1;
 	} else {
-		req->if_id = cpu_to_le16((u16) if_handle);
+		req->if_id = cpu_to_le16((u16)if_handle);
 		req->pmac_id = cpu_to_le32(pmac_id);
 		req->permanent = 0;
 	}
@@ -964,6 +964,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
+
 		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
 	}
 
@@ -1000,6 +1001,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
+
 		*pmac_id = le32_to_cpu(resp->pmac_id);
 	}
 
@@ -1032,7 +1034,8 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
+			       wrb, NULL);
 
 	req->hdr.domain = dom;
 	req->if_id = cpu_to_le32(if_id);
@@ -1104,6 +1107,7 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
 	status = be_mbox_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
+
 		cq->id = le16_to_cpu(resp->cq_id);
 		cq->created = true;
 	}
@@ -1116,6 +1120,7 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
 static u32 be_encoded_q_len(int q_len)
 {
 	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+
 	if (len_encoded == 16)
 		len_encoded = 0;
 	return len_encoded;
@@ -1171,6 +1176,7 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
 	status = be_mbox_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+
 		mccq->id = le16_to_cpu(resp->id);
 		mccq->created = true;
 	}
@@ -1214,6 +1220,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
 	status = be_mbox_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+
 		mccq->id = le16_to_cpu(resp->id);
 		mccq->created = true;
 	}
@@ -1272,6 +1279,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
 	status = be_cmd_notify_wait(adapter, &wrb);
 	if (!status) {
 		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
+
 		txq->id = le16_to_cpu(resp->cid);
 		if (ver == 2)
 			txo->db_offset = le32_to_cpu(resp->db_offset);
@@ -1316,6 +1324,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
+
 		rxq->id = le16_to_cpu(resp->id);
 		rxq->created = true;
 		*rss_id = resp->rss_id;
@@ -1429,6 +1438,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
 	status = be_cmd_notify_wait(adapter, &wrb);
 	if (!status) {
 		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
+
 		*if_handle = le32_to_cpu(resp->interface_id);
 
 		/* Hack to retrieve VF's pmac-id on BE3 */
@@ -1512,7 +1522,6 @@ err:
 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
 			       struct be_dma_mem *nonemb_cmd)
 {
-
 	struct be_mcc_wrb *wrb;
 	struct lancer_cmd_req_pport_stats *req;
 	int status = 0;
@@ -1603,6 +1612,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
+
 		if (link_speed) {
 			*link_speed = resp->link_speed ?
 				      le16_to_cpu(resp->link_speed) * 10 :
@@ -1670,6 +1680,7 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
+
 		if (log_size && resp->log_size)
 			*log_size = le32_to_cpu(resp->log_size) -
 					sizeof(u32);
@@ -1699,7 +1710,7 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 					      &get_fat_cmd.dma);
 	if (!get_fat_cmd.va) {
 		dev_err(&adapter->pdev->dev,
-			"Memory allocation failure while retrieving FAT data\n");
+			"Memory allocation failure while reading FAT data\n");
 		return -ENOMEM;
 	}
 
@@ -1729,6 +1740,7 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 		status = be_mcc_notify_wait(adapter);
 		if (!status) {
 			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
+
 			memcpy(buf + offset,
 			       resp->data_buffer,
 			       le32_to_cpu(resp->read_log_length));
@@ -1783,8 +1795,8 @@ err:
 /* set the EQ delay interval of an EQ to specified value
  * Uses async mcc
  */
-int __be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
-			int num)
+static int __be_cmd_modify_eqd(struct be_adapter *adapter,
+			       struct be_set_eqd *set_eqd, int num)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_modify_eq_delay *req;
@@ -1899,8 +1911,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
 					BE_IF_FLAGS_VLAN_PROMISCUOUS |
 					BE_IF_FLAGS_MCAST_PROMISCUOUS);
 	} else if (flags & IFF_ALLMULTI) {
-		req->if_flags_mask = req->if_flags =
-				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+		req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
 	} else if (flags & BE_FLAGS_VLAN_PROMISC) {
 		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
 
@@ -1911,8 +1923,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
 		struct netdev_hw_addr *ha;
 		int i = 0;
 
-		req->if_flags_mask = req->if_flags =
-				cpu_to_le32(BE_IF_FLAGS_MULTICAST);
+		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
+		req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
 
 		/* Reset mcast promisc mode if already set by setting mask
 		 * and not setting flags field
@@ -2010,6 +2022,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
 	if (!status) {
 		struct be_cmd_resp_get_flow_control *resp =
 						embedded_payload(wrb);
+
 		*tx_fc = le16_to_cpu(resp->tx_flow_control);
 		*rx_fc = le16_to_cpu(resp->rx_flow_control);
 	}
@@ -2039,6 +2052,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter)
 	status = be_mbox_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
+
 		adapter->port_num = le32_to_cpu(resp->phys_port);
 		adapter->function_mode = le32_to_cpu(resp->function_mode);
 		adapter->function_caps = le32_to_cpu(resp->function_caps);
@@ -2187,6 +2201,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
 	if (!status) {
 		struct be_cmd_resp_get_beacon_state *resp =
 						embedded_payload(wrb);
+
 		*state = resp->beacon_state;
 	}
 
@@ -2457,7 +2472,7 @@ err_unlock:
 }
 
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-			  u16 optype, int offset)
+			 u16 optype, int offset)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_read_flash_crc *req;
@@ -2628,9 +2643,10 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
 
 	if (!status) {
 		struct be_cmd_resp_ddrdma_test *resp;
+
 		resp = cmd->va;
 		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
 		    resp->snd_err) {
 			status = -1;
 		}
 	}
@@ -2703,6 +2719,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
 	if (!status) {
 		struct be_phy_info *resp_phy_info =
 				cmd.va + sizeof(struct be_cmd_req_hdr);
+
 		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
 		adapter->phy.interface_type =
 			le16_to_cpu(resp_phy_info->interface_type);
@@ -2832,6 +2849,7 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
 	status = be_mbox_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
+
 		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
 				      CAPABILITY_BE3_NATIVE_ERX_API;
 		if (!adapter->be3_native)
@@ -2871,6 +2889,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
 	if (!status) {
 		struct be_cmd_resp_get_fn_privileges *resp =
 						embedded_payload(wrb);
+
 		*privilege = le32_to_cpu(resp->privilege_mask);
 
 		/* In UMC mode FW does not return right privileges.
@@ -3018,7 +3037,6 @@ out:
 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
 			  u8 *mac, u32 if_handle, bool active, u32 domain)
 {
-
 	if (!active)
 		be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
 					 if_handle, domain);
@@ -3202,6 +3220,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
 	if (!status) {
 		struct be_cmd_resp_get_hsw_config *resp =
 						embedded_payload(wrb);
+
 		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
 		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
 				    pvid, &resp->context);
@@ -3261,7 +3280,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 	status = be_mbox_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
-		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
+
+		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
 
 		adapter->wol_cap = resp->wol_settings;
 		if (adapter->wol_cap & BE_WOL_CAP)
@@ -3297,6 +3317,7 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
 			(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
 		for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
 			u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
+
 			for (j = 0; j < num_modes; j++) {
 				if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
 					cfgs->module[i].trace_lvl[j].dbg_lvl =
@@ -3333,6 +3354,7 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
 	if (!status) {
 		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
 						sizeof(struct be_cmd_resp_hdr));
+
 		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
 			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
 				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
@@ -3429,6 +3451,7 @@ int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
+
 		*port_name = resp->port_name[adapter->hba_port_num];
 	} else {
 		*port_name = adapter->hba_port_num + '0';
@@ -4052,6 +4075,7 @@ int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
 	if (!status) {
 		struct be_cmd_resp_get_active_profile *resp =
 						embedded_payload(wrb);
+
 		*profile_id = le16_to_cpu(resp->active_profile_id);
 	}
 
@@ -4104,7 +4128,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
 {
 	struct be_adapter *adapter = netdev_priv(netdev_handle);
 	struct be_mcc_wrb *wrb;
-	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
+	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
 	struct be_cmd_req_hdr *req;
 	struct be_cmd_resp_hdr *resp;
 	int status;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index e86a5ef439dd..eb5085d6794f 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1005,8 +1005,8 @@ struct be_cmd_resp_link_status {
 /* Identifies the type of port attached to NIC */
 struct be_cmd_req_port_type {
 	struct be_cmd_req_hdr hdr;
-	u32 page_num;
-	u32 port;
+	__le32 page_num;
+	__le32 port;
 };
 
 enum {
@@ -1374,6 +1374,7 @@ enum {
 #define BE_SUPPORTED_SPEED_100MBPS	2
 #define BE_SUPPORTED_SPEED_1GBPS	4
 #define BE_SUPPORTED_SPEED_10GBPS	8
+#define BE_SUPPORTED_SPEED_20GBPS	0x10
 #define BE_SUPPORTED_SPEED_40GBPS	0x20
 
 #define BE_AN_EN			0x2
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 43b559570632..e42a791c1835 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -130,6 +130,7 @@ static const struct be_ethtool_stat et_stats[] = {
 	{DRVSTAT_INFO(roce_drops_payload_len)},
 	{DRVSTAT_INFO(roce_drops_crc)}
 };
+
 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
 
 /* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
@@ -152,6 +153,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
  */
 	{DRVSTAT_RX_INFO(rx_drops_no_frags)}
 };
+
 #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
 
 /* Stats related to multi TX queues: get_stats routine assumes compl is the
@@ -200,6 +202,7 @@ static const struct be_ethtool_stat et_tx_stats[] = {
 	/* Pkts dropped in the driver's transmit path */
 	{DRVSTAT_TX_INFO(tx_drv_drops)}
 };
+
 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
 
 static const char et_self_tests[][ETH_GSTRING_LEN] = {
@@ -274,7 +277,7 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 
 	while ((total_read_len < buf_len) && !eof) {
 		chunk_size = min_t(u32, (buf_len - total_read_len),
-					LANCER_READ_FILE_CHUNK);
+				   LANCER_READ_FILE_CHUNK);
 		chunk_size = ALIGN(chunk_size, 4);
 		status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
 						total_read_len, file_name,
@@ -333,7 +336,6 @@ static int be_get_coalesce(struct net_device *netdev,
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct be_aic_obj *aic = &adapter->aic_obj[0];
 
-
 	et->rx_coalesce_usecs = aic->prev_eqd;
 	et->rx_coalesce_usecs_high = aic->max_eqd;
 	et->rx_coalesce_usecs_low = aic->min_eqd;
@@ -534,10 +536,24 @@ static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds)
 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
 			val |= SUPPORTED_10000baseKX4_Full;
 		break;
+	case PHY_TYPE_KR2_20GB:
+		val |= SUPPORTED_Backplane;
+		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+			val |= SUPPORTED_10000baseKR_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_20GBPS)
+			val |= SUPPORTED_20000baseKR2_Full;
+		break;
 	case PHY_TYPE_KR_10GB:
 		val |= SUPPORTED_Backplane |
 		       SUPPORTED_10000baseKR_Full;
 		break;
+	case PHY_TYPE_KR4_40GB:
+		val |= SUPPORTED_Backplane;
+		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+			val |= SUPPORTED_10000baseKR_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_40GBPS)
+			val |= SUPPORTED_40000baseKR4_Full;
+		break;
 	case PHY_TYPE_QSFP:
 		if (if_speeds & BE_SUPPORTED_SPEED_40GBPS) {
 			switch (adapter->phy.cable_type) {
@@ -668,8 +684,10 @@ static void be_get_ringparam(struct net_device *netdev,
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
-	ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
-	ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
+	ring->rx_max_pending = adapter->rx_obj[0].q.len;
+	ring->rx_pending = adapter->rx_obj[0].q.len;
+	ring->tx_max_pending = adapter->tx_obj[0].q.len;
+	ring->tx_pending = adapter->tx_obj[0].q.len;
 }
 
 static void
@@ -961,8 +979,6 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
 			   FW_LOG_LEVEL_DEFAULT :
 			   FW_LOG_LEVEL_FATAL);
 	adapter->msg_enable = level;
-
-	return;
 }
 
 static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
@@ -1181,6 +1197,7 @@ static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
 
 	if (indir) {
 		struct be_rx_obj *rxo;
+
 		for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
 			j = indir[i];
 			rxo = &adapter->rx_obj[j];
@@ -1196,8 +1213,8 @@ static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
 	hkey = adapter->rss_info.rss_hkey;
 
 	rc = be_cmd_rss_config(adapter, rsstable,
-				adapter->rss_info.rss_flags,
-				RSS_INDIR_TABLE_LEN, hkey);
+			       adapter->rss_info.rss_flags,
+			       RSS_INDIR_TABLE_LEN, hkey);
 	if (rc) {
 		adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
 		return -EIO;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index beebab60b6c4..9a18e7930b31 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -86,6 +86,7 @@ static const char * const ue_status_low_desc[] = {
86 "JTAG ", 86 "JTAG ",
87 "MPU_INTPEND " 87 "MPU_INTPEND "
88}; 88};
89
89/* UE Status High CSR */ 90/* UE Status High CSR */
90static const char * const ue_status_hi_desc[] = { 91static const char * const ue_status_hi_desc[] = {
91 "LPCMEMHOST", 92 "LPCMEMHOST",
@@ -122,10 +123,10 @@ static const char * const ue_status_hi_desc[] = {
122 "Unknown" 123 "Unknown"
123}; 124};
124 125
125
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) 126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{ 127{
128 struct be_dma_mem *mem = &q->dma_mem; 128 struct be_dma_mem *mem = &q->dma_mem;
129
129 if (mem->va) { 130 if (mem->va) {
130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, 131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma); 132 mem->dma);
@@ -187,6 +188,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
 {
 	u32 val = 0;
+
 	val |= qid & DB_RQ_RING_ID_MASK;
 	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
 
@@ -198,6 +200,7 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
 			  u16 posted)
 {
 	u32 val = 0;
+
 	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
 	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
 
@@ -209,6 +212,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
 			 bool arm, bool clear_int, u16 num_popped)
 {
 	u32 val = 0;
+
 	val |= qid & DB_EQ_RING_ID_MASK;
 	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
 
@@ -227,6 +231,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
 {
 	u32 val = 0;
+
 	val |= qid & DB_CQ_RING_ID_MASK;
 	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
 			DB_CQ_RING_ID_EXT_MASK_SHIFT);
@@ -488,7 +493,6 @@ static void populate_be_v2_stats(struct be_adapter *adapter)
 
 static void populate_lancer_stats(struct be_adapter *adapter)
 {
-
 	struct be_drv_stats *drvs = &adapter->drv_stats;
 	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
 
@@ -588,6 +592,7 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
 
 	for_all_rx_queues(adapter, rxo, i) {
 		const struct be_rx_stats *rx_stats = rx_stats(rxo);
+
 		do {
 			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
 			pkts = rx_stats(rxo)->rx_pkts;
@@ -602,6 +607,7 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
 
 	for_all_tx_queues(adapter, txo, i) {
 		const struct be_tx_stats *tx_stats = tx_stats(txo);
+
 		do {
 			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
 			pkts = tx_stats(txo)->tx_pkts;
@@ -807,6 +813,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
 
 	if (skb->len > skb->data_len) {
 		int len = skb_headlen(skb);
+
 		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, busaddr))
 			goto dma_err;
@@ -820,6 +827,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
 		busaddr = skb_frag_dma_map(dev, frag, 0,
 					   skb_frag_size(frag), DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, busaddr))
@@ -910,7 +918,7 @@ static bool be_ipv6_exthdr_check(struct sk_buff *skb)
 	if (ip6h->nexthdr != NEXTHDR_TCP &&
 	    ip6h->nexthdr != NEXTHDR_UDP) {
 		struct ipv6_opt_hdr *ehdr =
-			(struct ipv6_opt_hdr *) (skb->data + offset);
+			(struct ipv6_opt_hdr *)(skb->data + offset);
 
 		/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
 		if (ehdr->hdrlen == 0xff)
@@ -974,8 +982,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
 	 * skip HW tagging is not enabled by FW.
 	 */
 	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
-			(adapter->pvid || adapter->qnq_vid) &&
-			!qnq_async_evt_rcvd(adapter)))
+		     (adapter->pvid || adapter->qnq_vid) &&
+		     !qnq_async_evt_rcvd(adapter)))
 		goto tx_drop;
 
 	/* Manual VLAN tag insertion to prevent:
@@ -1416,6 +1424,7 @@ err:
 			max_tx_rate, vf);
 	return be_cmd_status(status);
 }
+
 static int be_set_vf_link_state(struct net_device *netdev, int vf,
 				int link_state)
 {
@@ -1481,7 +1490,6 @@ static void be_eqd_update(struct be_adapter *adapter)
 			tx_pkts = txo->stats.tx_reqs;
 		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
 
-
 		/* Skip, if wrapped around or first calculation */
 		now = jiffies;
 		if (!aic->jiffies || time_before(now, aic->jiffies) ||
@@ -2053,7 +2061,8 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
 		memset(page_info, 0, sizeof(*page_info));
 	}
 	BUG_ON(atomic_read(&rxq->used));
-	rxq->tail = rxq->head = 0;
+	rxq->tail = 0;
+	rxq->head = 0;
 }
 
 static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -3716,8 +3725,6 @@ static void be_netpoll(struct net_device *netdev)
 		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
 		napi_schedule(&eqo->napi);
 	}
-
-	return;
 }
 #endif
 
@@ -4395,7 +4402,6 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
 	return;
 err:
 	be_disable_vxlan_offloads(adapter);
-	return;
 }
 
 static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
@@ -4735,7 +4741,6 @@ static void be_func_recovery_task(struct work_struct *work)
 	be_detect_error(adapter);
 
 	if (adapter->hw_error && lancer_chip(adapter)) {
-
 		rtnl_lock();
 		netif_device_detach(adapter->netdev);
 		rtnl_unlock();
@@ -4772,7 +4777,7 @@ static void be_worker(struct work_struct *work)
 	if (!adapter->stats_cmd_sent) {
 		if (lancer_chip(adapter))
 			lancer_cmd_get_pport_stats(adapter,
-						&adapter->stats_cmd);
+						   &adapter->stats_cmd);
 		else
 			be_cmd_get_stats(adapter, &adapter->stats_cmd);
 	}
@@ -4919,7 +4924,8 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
 
 	INIT_DELAYED_WORK(&adapter->work, be_worker);
 	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
-	adapter->rx_fc = adapter->tx_fc = true;
+	adapter->rx_fc = true;
+	adapter->tx_fc = true;
 
 	status = be_setup(adapter);
 	if (status)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index ef4672dc7357..132866433a25 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -174,6 +174,7 @@ int be_roce_register_driver(struct ocrdma_driver *drv)
 	ocrdma_drv = drv;
 	list_for_each_entry(dev, &be_adapter_list, entry) {
 		struct net_device *netdev;
+
 		_be_roce_dev_add(dev);
 		netdev = dev->netdev;
 		if (netif_running(netdev) && netif_oper_up(netdev))