110 files changed, 9975 insertions(+), 7938 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index b1b929843558..b04b97fe3217 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1365,7 +1365,7 @@ BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M:	Eilon Greenstein <eilong@broadcom.com>
 L:	netdev@vger.kernel.org
 S:	Supported
-F:	drivers/net/bnx2x*
+F:	drivers/net/bnx2x/
 
 BROADCOM TG3 GIGABIT ETHERNET DRIVER
 M:	Matt Carlson <mcarlson@broadcom.com>
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index f05372694233..2ab233ba32c1 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -25,11 +25,6 @@
 #include "net_kern.h"
 #include "net_user.h"
 
-static inline void set_ether_mac(struct net_device *dev, unsigned char *addr)
-{
-	memcpy(dev->dev_addr, addr, ETH_ALEN);
-}
-
 #define DRIVER_NAME "uml-netdev"
 
 static DEFINE_SPINLOCK(opened_lock);
@@ -266,7 +261,7 @@ static int uml_net_set_mac(struct net_device *dev, void *addr)
 	struct sockaddr *hwaddr = addr;
 
 	spin_lock_irq(&lp->lock);
-	set_ether_mac(dev, hwaddr->sa_data);
+	eth_mac_addr(dev, hwaddr->sa_data);
 	spin_unlock_irq(&lp->lock);
 
 	return 0;
@@ -380,7 +375,6 @@ static const struct net_device_ops uml_netdev_ops = {
 	.ndo_tx_timeout = uml_net_tx_timeout,
 	.ndo_set_mac_address = uml_net_set_mac,
 	.ndo_change_mtu = uml_net_change_mtu,
-	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
 };
 
@@ -478,7 +472,7 @@ static void eth_configure(int n, void *init, char *mac,
 	    ((*transport->user->init)(&lp->user, dev) != 0))
 		goto out_unregister;
 
-	set_ether_mac(dev, device->mac);
+	eth_mac_addr(dev, device->mac);
 	dev->mtu = transport->user->mtu;
 	dev->netdev_ops = &uml_netdev_ops;
 	dev->ethtool_ops = &uml_net_ethtool_ops;
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 729a149b6b2b..2f3516b7f118 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -154,7 +154,6 @@ static void which_list(ns_dev * card, struct sk_buff *skb);
 #endif
 static void ns_poll(unsigned long arg);
 static int ns_parse_mac(char *mac, unsigned char *esi);
-static short ns_h2i(char c);
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
 		       unsigned long addr);
 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
@@ -2824,9 +2823,9 @@ static int ns_parse_mac(char *mac, unsigned char *esi)
 		return -1;
 	j = 0;
 	for (i = 0; i < 6; i++) {
-		if ((byte1 = ns_h2i(mac[j++])) < 0)
+		if ((byte1 = hex_to_bin(mac[j++])) < 0)
 			return -1;
-		if ((byte0 = ns_h2i(mac[j++])) < 0)
+		if ((byte0 = hex_to_bin(mac[j++])) < 0)
 			return -1;
 		esi[i] = (unsigned char)(byte1 * 16 + byte0);
 		if (i < 5) {
@@ -2837,16 +2836,6 @@ static int ns_parse_mac(char *mac, unsigned char *esi)
 	return 0;
 }
 
-static short ns_h2i(char c)
-{
-	if (c >= '0' && c <= '9')
-		return (short)(c - '0');
-	if (c >= 'A' && c <= 'F')
-		return (short)(c - 'A' + 10);
-	if (c >= 'a' && c <= 'f')
-		return (short)(c - 'a' + 10);
-	return -1;
-}
 
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
 		       unsigned long addr)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 069a03f717d3..c754d88e5ec9 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1020,10 +1020,16 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
 	ioaddr = pci_iomap(pdev, pci_bar, 0);
 	if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
 		ioaddr = pci_iomap(pdev, 0, 0);
+	if (!ioaddr) {
+		pci_disable_device(pdev);
+		rc = -ENOMEM;
+		goto out;
+	}
 
 	rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
 			   ent->driver_data, unit);
 	if (rc < 0) {
+		pci_iounmap(pdev, ioaddr);
 		pci_disable_device(pdev);
 		goto out;
 	}
@@ -1387,7 +1393,7 @@ static int __devinit vortex_probe1(struct device *gendev,
 		mii_preamble_required++;
 		if (vp->drv_flags & EXTRA_PREAMBLE)
 			mii_preamble_required++;
-		mdio_sync(ioaddr, 32);
+		mdio_sync(vp, 32);
 		mdio_read(dev, 24, MII_BMSR);
 		for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
 			int mii_status, phyx;
@@ -2912,6 +2918,36 @@ static void vortex_get_drvinfo(struct net_device *dev,
 	}
 }
 
+static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	spin_lock_irq(&vp->lock);
+	wol->supported = WAKE_MAGIC;
+
+	wol->wolopts = 0;
+	if (vp->enable_wol)
+		wol->wolopts |= WAKE_MAGIC;
+	spin_unlock_irq(&vp->lock);
+}
+
+static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	spin_lock_irq(&vp->lock);
+	if (wol->wolopts & WAKE_MAGIC)
+		vp->enable_wol = 1;
+	else
+		vp->enable_wol = 0;
+	acpi_set_WOL(dev);
+	spin_unlock_irq(&vp->lock);
+
+	return 0;
+}
+
 static const struct ethtool_ops vortex_ethtool_ops = {
 	.get_drvinfo = vortex_get_drvinfo,
 	.get_strings = vortex_get_strings,
@@ -2923,6 +2959,8 @@ static const struct ethtool_ops vortex_ethtool_ops = {
 	.set_settings = vortex_set_settings,
 	.get_link = ethtool_op_get_link,
 	.nway_reset = vortex_nway_reset,
+	.get_wol = vortex_get_wol,
+	.set_wol = vortex_set_wol,
 };
 
 #ifdef CONFIG_PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ce555819c8fc..56e8c27f77ce 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -84,8 +84,7 @@ obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BNX2) += bnx2.o
 obj-$(CONFIG_CNIC) += cnic.o
-obj-$(CONFIG_BNX2X) += bnx2x.o
-bnx2x-objs := bnx2x_main.o bnx2x_link.o
+obj-$(CONFIG_BNX2X) += bnx2x/
 spidernet-y += spider_net.o spider_net_ethtool.o
 obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o
 obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index f17428caecf1..e06369c36dd4 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -33,7 +33,7 @@
 
 #include "be_hw.h"
 
-#define DRV_VER "2.102.147u"
+#define DRV_VER "2.103.175u"
 #define DRV_NAME "be2net"
 #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
 #define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -220,7 +220,16 @@ struct be_rx_obj {
 	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
 };
 
+struct be_vf_cfg {
+	unsigned char vf_mac_addr[ETH_ALEN];
+	u32 vf_if_handle;
+	u32 vf_pmac_id;
+	u16 vf_vlan_tag;
+	u32 vf_tx_rate;
+};
+
 #define BE_NUM_MSIX_VECTORS 2	/* 1 each for Tx and Rx */
+#define BE_INVALID_PMAC_ID 0xffffffff
 struct be_adapter {
 	struct pci_dev *pdev;
 	struct net_device *netdev;
@@ -276,7 +285,7 @@ struct be_adapter {
 	u32 port_num;
 	bool promiscuous;
 	bool wol;
-	u32 cap;
+	u32 function_mode;
 	u32 rx_fc;	/* Rx flow control */
 	u32 tx_fc;	/* Tx flow control */
 	int link_speed;
@@ -288,8 +297,7 @@ struct be_adapter {
 	struct completion flash_compl;
 
 	bool sriov_enabled;
-	u32 vf_if_handle[BE_MAX_VF];
-	u32 vf_pmac_id[BE_MAX_VF];
+	struct be_vf_cfg vf_cfg[BE_MAX_VF];
 	u8 base_eq_id;
 	u8 is_virtfn;
 };
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 344e062b7f25..6eaf8a3fa5ea 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -76,7 +76,8 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 					sizeof(resp->hw_stats));
 			netdev_stats_update(adapter);
 		}
-	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
+	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
+		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
 		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
 				CQE_STATUS_EXTD_MASK;
 		dev_warn(&adapter->pdev->dev,
@@ -1257,7 +1258,7 @@ err:
 }
 
 /* Uses mbox */
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_query_fw_cfg *req;
@@ -1278,7 +1279,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
 	if (!status) {
 		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
 		*port_num = le32_to_cpu(resp->phys_port);
-		*cap = le32_to_cpu(resp->function_cap);
+		*mode = le32_to_cpu(resp->function_mode);
 	}
 
 	spin_unlock(&adapter->mbox_lock);
@@ -1730,3 +1731,36 @@ err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
+
+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_set_qos *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_SET_QOS);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_SET_QOS, sizeof(*req));
+
+	req->hdr.domain = domain;
+	req->valid_bits = BE_QOS_BITS_NIC;
+	req->max_bps_nic = bps;
+
+	status = be_mcc_notify_wait(adapter);
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 912a0586f060..036531cd200f 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -124,6 +124,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_CQ_CREATE 12
 #define OPCODE_COMMON_EQ_CREATE 13
 #define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_SET_QOS 28
 #define OPCODE_COMMON_SEEPROM_READ 30
 #define OPCODE_COMMON_NTWK_RX_FILTER 34
 #define OPCODE_COMMON_GET_FW_VERSION 35
@@ -748,7 +749,7 @@ struct be_cmd_resp_query_fw_cfg {
 	u32 be_config_number;
 	u32 asic_revision;
 	u32 phys_port;
-	u32 function_cap;
+	u32 function_mode;
 	u32 rsvd[26];
 };
 
@@ -894,6 +895,22 @@ struct be_cmd_resp_get_phy_info {
 	u32 future_use[4];
 };
 
+/*********************** Set QOS ***********************/
+
+#define BE_QOS_BITS_NIC 1
+
+struct be_cmd_req_set_qos {
+	struct be_cmd_req_hdr hdr;
+	u32 valid_bits;
+	u32 max_bps_nic;
+	u32 rsvd[7];
+};
+
+struct be_cmd_resp_set_qos {
+	struct be_cmd_resp_hdr hdr;
+	u32 rsvd;
+};
+
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -974,4 +991,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
 			u8 loopback_type, u8 enable);
 extern int be_cmd_get_phy_info(struct be_adapter *adapter,
 			struct be_dma_mem *cmd);
+extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
 
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index e6ca92334d6d..d5b097d836b9 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -552,11 +552,18 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
  * If the user configures more, place BE in vlan promiscuous mode.
  */
-static int be_vid_config(struct be_adapter *adapter)
+static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
 {
 	u16 vtag[BE_NUM_VLANS_SUPPORTED];
 	u16 ntags = 0, i;
 	int status = 0;
+	u32 if_handle;
+
+	if (vf) {
+		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
+		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
+		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+	}
 
 	if (adapter->vlans_added <= adapter->max_vlans) {
 		/* Construct VLAN Table to give to HW */
@@ -572,6 +579,7 @@ static int be_vid_config(struct be_adapter *adapter)
 		status = be_cmd_vlan_config(adapter, adapter->if_handle,
 					NULL, 0, 1, 1);
 	}
+
 	return status;
 }
 
@@ -592,27 +600,28 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
+	adapter->vlans_added++;
 	if (!be_physfn(adapter))
 		return;
 
 	adapter->vlan_tag[vid] = 1;
-	adapter->vlans_added++;
 	if (adapter->vlans_added <= (adapter->max_vlans + 1))
-		be_vid_config(adapter);
+		be_vid_config(adapter, false, 0);
 }
 
 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
+	adapter->vlans_added--;
+	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
+
 	if (!be_physfn(adapter))
 		return;
 
 	adapter->vlan_tag[vid] = 0;
-	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
-	adapter->vlans_added--;
 	if (adapter->vlans_added <= adapter->max_vlans)
-		be_vid_config(adapter);
+		be_vid_config(adapter, false, 0);
 }
 
 static void be_set_multicast_list(struct net_device *netdev)
@@ -656,14 +665,93 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
 		return -EINVAL;
 
-	status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf],
-			adapter->vf_pmac_id[vf]);
+	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
+		status = be_cmd_pmac_del(adapter,
+				adapter->vf_cfg[vf].vf_if_handle,
+				adapter->vf_cfg[vf].vf_pmac_id);
 
-	status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf],
-			&adapter->vf_pmac_id[vf]);
-	if (!status)
+	status = be_cmd_pmac_add(adapter, mac,
+			adapter->vf_cfg[vf].vf_if_handle,
+			&adapter->vf_cfg[vf].vf_pmac_id);
+
+	if (status)
 		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
 				mac, vf);
+	else
+		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+
+	return status;
+}
+
+static int be_get_vf_config(struct net_device *netdev, int vf,
+			struct ifla_vf_info *vi)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+
+	if (!adapter->sriov_enabled)
+		return -EPERM;
+
+	if (vf >= num_vfs)
+		return -EINVAL;
+
+	vi->vf = vf;
+	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
+	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+	vi->qos = 0;
+	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+
+	return 0;
+}
+
+static int be_set_vf_vlan(struct net_device *netdev,
+			int vf, u16 vlan, u8 qos)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	int status = 0;
+
+	if (!adapter->sriov_enabled)
+		return -EPERM;
+
+	if ((vf >= num_vfs) || (vlan > 4095))
+		return -EINVAL;
+
+	if (vlan) {
+		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+		adapter->vlans_added++;
+	} else {
+		adapter->vf_cfg[vf].vf_vlan_tag = 0;
+		adapter->vlans_added--;
+	}
+
+	status = be_vid_config(adapter, true, vf);
+
+	if (status)
+		dev_info(&adapter->pdev->dev,
+				"VLAN %d config on VF %d failed\n", vlan, vf);
+	return status;
+}
+
+static int be_set_vf_tx_rate(struct net_device *netdev,
+			int vf, int rate)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	int status = 0;
+
+	if (!adapter->sriov_enabled)
+		return -EPERM;
+
+	if ((vf >= num_vfs) || (rate < 0))
+		return -EINVAL;
+
+	if (rate > 10000)
+		rate = 10000;
+
+	adapter->vf_cfg[vf].vf_tx_rate = rate;
+	status = be_cmd_set_qos(adapter, rate / 10, vf);
+
+	if (status)
+		dev_info(&adapter->pdev->dev,
+				"tx rate %d on VF %d failed\n", rate, vf);
 	return status;
 }
 
@@ -875,7 +963,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 
 	/* vlanf could be wrongly set in some cards.
 	 * ignore if vtm is not set */
-	if ((adapter->cap & 0x400) && !vtm)
+	if ((adapter->function_mode & 0x400) && !vtm)
 		vlanf = 0;
 
 	if (unlikely(vlanf)) {
@@ -915,7 +1003,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
 
 	/* vlanf could be wrongly set in some cards.
 	 * ignore if vtm is not set */
-	if ((adapter->cap & 0x400) && !vtm)
+	if ((adapter->function_mode & 0x400) && !vtm)
 		vlanf = 0;
 
 	skb = napi_get_frags(&eq_obj->napi);
@@ -1822,7 +1910,7 @@ static int be_open(struct net_device *netdev)
 	be_link_status_update(adapter, link_up);
 
 	if (be_physfn(adapter)) {
-		status = be_vid_config(adapter);
+		status = be_vid_config(adapter, false, 0);
 		if (status)
 			goto err;
 
@@ -1903,13 +1991,15 @@ static int be_setup(struct be_adapter *adapter)
 			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
 					| BE_IF_FLAGS_BROADCAST;
 			status = be_cmd_if_create(adapter, cap_flags, en_flags,
-					mac, true, &adapter->vf_if_handle[vf],
+					mac, true,
+					&adapter->vf_cfg[vf].vf_if_handle,
 					NULL, vf+1);
 			if (status) {
 				dev_err(&adapter->pdev->dev,
 				"Interface Create failed for VF %d\n", vf);
 				goto if_destroy;
 			}
+			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
 			vf++;
 		}
 	} else if (!be_physfn(adapter)) {
@@ -1943,8 +2033,9 @@ tx_qs_destroy:
 	be_tx_queues_destroy(adapter);
 if_destroy:
 	for (vf = 0; vf < num_vfs; vf++)
-		if (adapter->vf_if_handle[vf])
-			be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]);
+		if (adapter->vf_cfg[vf].vf_if_handle)
+			be_cmd_if_destroy(adapter,
+					adapter->vf_cfg[vf].vf_if_handle);
 	be_cmd_if_destroy(adapter, adapter->if_handle);
 do_none:
 	return status;
@@ -2187,7 +2278,10 @@ static struct net_device_ops be_netdev_ops = {
 	.ndo_vlan_rx_register = be_vlan_register,
 	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
 	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
-	.ndo_set_vf_mac = be_set_vf_mac
+	.ndo_set_vf_mac = be_set_vf_mac,
+	.ndo_set_vf_vlan = be_set_vf_vlan,
+	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
+	.ndo_get_vf_config = be_get_vf_config
 };
 
 static void be_netdev_init(struct net_device *netdev)
@@ -2406,7 +2500,7 @@ static int be_get_config(struct be_adapter *adapter)
 		return status;
 
 	status = be_cmd_query_fw_cfg(adapter,
-			&adapter->port_num, &adapter->cap);
+			&adapter->port_num, &adapter->function_mode);
 	if (status)
 		return status;
 
@@ -2426,7 +2520,7 @@ static int be_get_config(struct be_adapter *adapter)
 		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
 	}
 
-	if (adapter->cap & 0x400)
+	if (adapter->function_mode & 0x400)
 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
 	else
 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
new file mode 100644
index 000000000000..084afce89ae9
--- /dev/null
+++ b/drivers/net/bnx2x/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Broadcom 10-Gigabit ethernet driver
+#
+
+obj-$(CONFIG_BNX2X) += bnx2x.o
+
+bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 8bd23687c530..53af9c93e75c 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,6 +20,10 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
+#define DRV_MODULE_VERSION "1.52.53-3"
+#define DRV_MODULE_RELDATE "2010/18/04"
+#define BNX2X_BC_VER 0x040200
+
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #define BCM_VLAN 1
 #endif
@@ -32,7 +36,7 @@
 
 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 #define BCM_CNIC 1
-#include "cnic_if.h"
+#include "../cnic_if.h"
 #endif
 
 
@@ -45,10 +49,12 @@
 #endif
 
 #include <linux/mdio.h>
+#include <linux/pci.h>
 #include "bnx2x_reg.h"
 #include "bnx2x_fw_defs.h"
 #include "bnx2x_hsi.h"
 #include "bnx2x_link.h"
+#include "bnx2x_stats.h"
 
 /* error/debug prints */
 
@@ -106,6 +112,7 @@ do { \
 		dev_info(&bp->pdev->dev, __fmt, ##__args); \
 } while (0)
 
+void bnx2x_panic_dump(struct bnx2x *bp);
 
 #ifdef BNX2X_STOP_ON_ERROR
 #define bnx2x_panic() do { \
@@ -248,43 +255,6 @@ union db_prod {
 #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
 
 
-struct bnx2x_eth_q_stats {
-	u32 total_bytes_received_hi;
-	u32 total_bytes_received_lo;
-	u32 total_bytes_transmitted_hi;
-	u32 total_bytes_transmitted_lo;
-	u32 total_unicast_packets_received_hi;
-	u32 total_unicast_packets_received_lo;
-	u32 total_multicast_packets_received_hi;
-	u32 total_multicast_packets_received_lo;
-	u32 total_broadcast_packets_received_hi;
-	u32 total_broadcast_packets_received_lo;
-	u32 total_unicast_packets_transmitted_hi;
-	u32 total_unicast_packets_transmitted_lo;
-	u32 total_multicast_packets_transmitted_hi;
-	u32 total_multicast_packets_transmitted_lo;
-	u32 total_broadcast_packets_transmitted_hi;
-	u32 total_broadcast_packets_transmitted_lo;
-	u32 valid_bytes_received_hi;
-	u32 valid_bytes_received_lo;
-
-	u32 error_bytes_received_hi;
-	u32 error_bytes_received_lo;
-	u32 etherstatsoverrsizepkts_hi;
-	u32 etherstatsoverrsizepkts_lo;
-	u32 no_buff_discard_hi;
-	u32 no_buff_discard_lo;
-
-	u32 driver_xoff;
-	u32 rx_err_discard_pkt;
-	u32 rx_skb_alloc_failed;
-	u32 hw_csum_err;
-};
-
-#define BNX2X_NUM_Q_STATS 13
-#define Q_STATS_OFFSET32(stat_name) \
-			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
-
 struct bnx2x_fastpath {
 
 	struct napi_struct napi;
@@ -593,27 +563,6 @@ struct bnx2x_common {
 
 /* port */
 
-struct nig_stats {
-	u32 brb_discard;
-	u32 brb_packet;
-	u32 brb_truncate;
-	u32 flow_ctrl_discard;
-	u32 flow_ctrl_octets;
-	u32 flow_ctrl_packet;
-	u32 mng_discard;
-	u32 mng_octet_inp;
-	u32 mng_octet_out;
-	u32 mng_packet_inp;
-	u32 mng_packet_out;
-	u32 pbf_octets;
-	u32 pbf_packet;
-	u32 safc_inp;
-	u32 egress_mac_pkt0_lo;
-	u32 egress_mac_pkt0_hi;
-	u32 egress_mac_pkt1_lo;
-	u32 egress_mac_pkt1_hi;
-};
-
 struct bnx2x_port {
 	u32 pmf;
 
@@ -641,156 +590,6 @@ struct bnx2x_port {
 /* end of port */
 
 
-enum bnx2x_stats_event {
-	STATS_EVENT_PMF = 0,
-	STATS_EVENT_LINK_UP,
-	STATS_EVENT_UPDATE,
-	STATS_EVENT_STOP,
-	STATS_EVENT_MAX
-};
-
-enum bnx2x_stats_state {
-	STATS_STATE_DISABLED = 0,
-	STATS_STATE_ENABLED,
-	STATS_STATE_MAX
-};
-
-struct bnx2x_eth_stats {
-	u32 total_bytes_received_hi;
-	u32 total_bytes_received_lo;
-	u32 total_bytes_transmitted_hi;
-	u32 total_bytes_transmitted_lo;
-	u32 total_unicast_packets_received_hi;
-	u32 total_unicast_packets_received_lo;
-	u32 total_multicast_packets_received_hi;
-	u32 total_multicast_packets_received_lo;
-	u32 total_broadcast_packets_received_hi;
-	u32 total_broadcast_packets_received_lo;
-	u32 total_unicast_packets_transmitted_hi;
-	u32 total_unicast_packets_transmitted_lo;
-	u32 total_multicast_packets_transmitted_hi;
-	u32 total_multicast_packets_transmitted_lo;
-	u32 total_broadcast_packets_transmitted_hi;
-	u32 total_broadcast_packets_transmitted_lo;
-	u32 valid_bytes_received_hi;
-	u32 valid_bytes_received_lo;
-
-	u32 error_bytes_received_hi;
-	u32 error_bytes_received_lo;
-	u32 etherstatsoverrsizepkts_hi;
-	u32 etherstatsoverrsizepkts_lo;
-	u32 no_buff_discard_hi;
-	u32 no_buff_discard_lo;
-
-	u32 rx_stat_ifhcinbadoctets_hi;
-	u32 rx_stat_ifhcinbadoctets_lo;
-	u32 tx_stat_ifhcoutbadoctets_hi;
-	u32 tx_stat_ifhcoutbadoctets_lo;
-	u32 rx_stat_dot3statsfcserrors_hi;
-	u32 rx_stat_dot3statsfcserrors_lo;
-	u32 rx_stat_dot3statsalignmenterrors_hi;
-	u32 rx_stat_dot3statsalignmenterrors_lo;
-	u32 rx_stat_dot3statscarriersenseerrors_hi;
-	u32 rx_stat_dot3statscarriersenseerrors_lo;
-	u32 rx_stat_falsecarriererrors_hi;
-	u32 rx_stat_falsecarriererrors_lo;
-	u32 rx_stat_etherstatsundersizepkts_hi;
-	u32 rx_stat_etherstatsundersizepkts_lo;
-	u32 rx_stat_dot3statsframestoolong_hi;
-	u32 rx_stat_dot3statsframestoolong_lo;
-	u32 rx_stat_etherstatsfragments_hi;
-	u32 rx_stat_etherstatsfragments_lo;
-	u32 rx_stat_etherstatsjabbers_hi;
-	u32 rx_stat_etherstatsjabbers_lo;
-	u32 rx_stat_maccontrolframesreceived_hi;
-	u32 rx_stat_maccontrolframesreceived_lo;
-	u32 rx_stat_bmac_xpf_hi;
-	u32 rx_stat_bmac_xpf_lo;
-	u32 rx_stat_bmac_xcf_hi;
-	u32 rx_stat_bmac_xcf_lo;
-	u32 rx_stat_xoffstateentered_hi;
-	u32 rx_stat_xoffstateentered_lo;
-	u32 rx_stat_xonpauseframesreceived_hi;
-	u32 rx_stat_xonpauseframesreceived_lo;
-	u32 rx_stat_xoffpauseframesreceived_hi;
-	u32 rx_stat_xoffpauseframesreceived_lo;
-	u32 tx_stat_outxonsent_hi;
-	u32 tx_stat_outxonsent_lo;
-	u32 tx_stat_outxoffsent_hi;
-	u32 tx_stat_outxoffsent_lo;
-	u32 tx_stat_flowcontroldone_hi;
-	u32 tx_stat_flowcontroldone_lo;
-	u32 tx_stat_etherstatscollisions_hi;
-	u32 tx_stat_etherstatscollisions_lo;
-	u32 tx_stat_dot3statssinglecollisionframes_hi;
-	u32 tx_stat_dot3statssinglecollisionframes_lo;
-	u32 tx_stat_dot3statsmultiplecollisionframes_hi;
-	u32 tx_stat_dot3statsmultiplecollisionframes_lo;
-	u32 tx_stat_dot3statsdeferredtransmissions_hi;
-	u32 tx_stat_dot3statsdeferredtransmissions_lo;
-	u32 tx_stat_dot3statsexcessivecollisions_hi;
-	u32 tx_stat_dot3statsexcessivecollisions_lo;
-	u32 tx_stat_dot3statslatecollisions_hi;
-	u32 tx_stat_dot3statslatecollisions_lo;
-	u32 tx_stat_etherstatspkts64octets_hi;
-	u32 tx_stat_etherstatspkts64octets_lo;
-	u32 tx_stat_etherstatspkts65octetsto127octets_hi;
-	u32 tx_stat_etherstatspkts65octetsto127octets_lo;
-	u32 tx_stat_etherstatspkts128octetsto255octets_hi;
-	u32 tx_stat_etherstatspkts128octetsto255octets_lo;
-	u32 tx_stat_etherstatspkts256octetsto511octets_hi;
-	u32 tx_stat_etherstatspkts256octetsto511octets_lo;
-	u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
-	u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
-	u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
-	u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
-	u32 tx_stat_etherstatspktsover1522octets_hi;
-	u32 tx_stat_etherstatspktsover1522octets_lo;
-	u32 tx_stat_bmac_2047_hi;
-	u32 tx_stat_bmac_2047_lo;
-	u32 tx_stat_bmac_4095_hi;
-	u32 tx_stat_bmac_4095_lo;
-	u32 tx_stat_bmac_9216_hi;
-	u32 tx_stat_bmac_9216_lo;
-	u32 tx_stat_bmac_16383_hi;
-	u32 tx_stat_bmac_16383_lo;
-	u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
-	u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
-	u32 tx_stat_bmac_ufl_hi;
-	u32 tx_stat_bmac_ufl_lo;
-
-	u32 pause_frames_received_hi;
-	u32 pause_frames_received_lo;
-	u32 pause_frames_sent_hi;
-	u32 pause_frames_sent_lo;
-
-	u32 etherstatspkts1024octetsto1522octets_hi;
-	u32 etherstatspkts1024octetsto1522octets_lo;
-	u32 etherstatspktsover1522octets_hi;
-	u32 etherstatspktsover1522octets_lo;
-
-	u32 brb_drop_hi;
-	u32 brb_drop_lo;
-	u32 brb_truncate_hi;
-	u32 brb_truncate_lo;
-
-	u32 mac_filter_discard;
-	u32 xxoverflow_discard;
-	u32 brb_truncate_discard;
-	u32 mac_discard;
-
-	u32 driver_xoff;
-	u32 rx_err_discard_pkt;
-	u32 rx_skb_alloc_failed;
-	u32 hw_csum_err;
-
-	u32 nig_timer_max;
-};
-
-#define BNX2X_NUM_STATS 43
-#define STATS_OFFSET32(stat_name) \
-			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
-
 
 #ifdef BCM_CNIC
 #define MAX_CONTEXT 15
@@ -1006,6 +805,8 @@ struct bnx2x {
 
 	int multi_mode;
 	int num_queues;
+	int disable_tpa;
+	int int_mode;
 
 	u32 rx_mode;
 #define BNX2X_RX_MODE_NONE 0
@@ -1062,6 +863,10 @@ struct bnx2x {
 
 	/* used to synchronize stats collecting */
 	int stats_state;
+
+	/* used for synchronization of concurrent threads statistics handling */
+	spinlock_t stats_lock;
+
 	/* used by dmae command loader */
 	struct dmae_command stats_dmae;
 	int executer_idx;
@@ -1130,6 +935,10 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command);
 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
 			       u32 addr, u32 len);
+void bnx2x_calc_fc_adv(struct bnx2x *bp);
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+		  u32 data_hi, u32 data_lo, int common);
+void bnx2x_update_coalesce(struct bnx2x *bp);
 
 static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 			   int wait)
@@ -1371,6 +1180,18 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_VPD_LEN 128
 #define VENDOR_ID_LEN 4
 
+#ifdef BNX2X_MAIN
+#define BNX2X_EXTERN
+#else
+#define BNX2X_EXTERN extern
+#endif
+
+BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */
+
 /* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
 
+extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
+
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
+
 #endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c new file mode 100644 index 000000000000..02bf710629a3 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_cmn.c | |||
@@ -0,0 +1,2252 @@ | |||
1 | /* bnx2x_cmn.c: Broadcom Everest network driver. | ||
2 | * | ||
3 | * Copyright (c) 2007-2010 Broadcom Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> | ||
10 | * Written by: Eliezer Tamir | ||
11 | * Based on code from Michael Chan's bnx2 driver | ||
12 | * UDP CSUM errata workaround by Arik Gendelman | ||
13 | * Slowpath and fastpath rework by Vladislav Zolotarov | ||
14 | * Statistics and Link management by Yitchak Gertner | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | |||
19 | #include <linux/etherdevice.h> | ||
20 | #include <linux/ip.h> | ||
21 | #include <linux/ipv6.h> | ||
22 | #include <net/ip6_checksum.h> | ||
23 | #include "bnx2x_cmn.h" | ||
24 | |||
25 | #ifdef BCM_VLAN | ||
26 | #include <linux/if_vlan.h> | ||
27 | #endif | ||
28 | |||
29 | static int bnx2x_poll(struct napi_struct *napi, int budget); | ||
30 | |||
31 | /* free skb in the packet ring at pos idx | ||
32 | * return idx of last bd freed | ||
33 | */ | ||
34 | static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
35 | u16 idx) | ||
36 | { | ||
37 | struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; | ||
38 | struct eth_tx_start_bd *tx_start_bd; | ||
39 | struct eth_tx_bd *tx_data_bd; | ||
40 | struct sk_buff *skb = tx_buf->skb; | ||
41 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; | ||
42 | int nbd; | ||
43 | |||
44 | /* prefetch skb end pointer to speedup dev_kfree_skb() */ | ||
45 | prefetch(&skb->end); | ||
46 | |||
47 | DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", | ||
48 | idx, tx_buf, skb); | ||
49 | |||
50 | /* unmap first bd */ | ||
51 | DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); | ||
52 | tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd; | ||
53 | dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), | ||
54 | BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE); | ||
55 | |||
56 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; | ||
57 | #ifdef BNX2X_STOP_ON_ERROR | ||
58 | if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { | ||
59 | BNX2X_ERR("BAD nbd!\n"); | ||
60 | bnx2x_panic(); | ||
61 | } | ||
62 | #endif | ||
63 | new_cons = nbd + tx_buf->first_bd; | ||
64 | |||
65 | /* Get the next bd */ | ||
66 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
67 | |||
68 | /* Skip a parse bd... */ | ||
69 | --nbd; | ||
70 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
71 | |||
72 | /* ...and the TSO split header bd since they have no mapping */ | ||
73 | if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { | ||
74 | --nbd; | ||
75 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
76 | } | ||
77 | |||
78 | /* now free frags */ | ||
79 | while (nbd > 0) { | ||
80 | |||
81 | DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); | ||
82 | tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd; | ||
83 | dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), | ||
84 | BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); | ||
85 | if (--nbd) | ||
86 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
87 | } | ||
88 | |||
89 | /* release skb */ | ||
90 | WARN_ON(!skb); | ||
91 | dev_kfree_skb(skb); | ||
92 | tx_buf->first_bd = 0; | ||
93 | tx_buf->skb = NULL; | ||
94 | |||
95 | return new_cons; | ||
96 | } | ||
97 | |||
98 | int bnx2x_tx_int(struct bnx2x_fastpath *fp) | ||
99 | { | ||
100 | struct bnx2x *bp = fp->bp; | ||
101 | struct netdev_queue *txq; | ||
102 | u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons; | ||
103 | |||
104 | #ifdef BNX2X_STOP_ON_ERROR | ||
105 | if (unlikely(bp->panic)) | ||
106 | return -1; | ||
107 | #endif | ||
108 | |||
109 | txq = netdev_get_tx_queue(bp->dev, fp->index); | ||
110 | hw_cons = le16_to_cpu(*fp->tx_cons_sb); | ||
111 | sw_cons = fp->tx_pkt_cons; | ||
112 | |||
113 | while (sw_cons != hw_cons) { | ||
114 | u16 pkt_cons; | ||
115 | |||
116 | pkt_cons = TX_BD(sw_cons); | ||
117 | |||
118 | /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */ | ||
119 | |||
120 | DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n", | ||
121 | hw_cons, sw_cons, pkt_cons); | ||
122 | |||
123 | /* if (NEXT_TX_IDX(sw_cons) != hw_cons) { | ||
124 | rmb(); | ||
125 | prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb); | ||
126 | } | ||
127 | */ | ||
128 | bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); | ||
129 | sw_cons++; | ||
130 | } | ||
131 | |||
132 | fp->tx_pkt_cons = sw_cons; | ||
133 | fp->tx_bd_cons = bd_cons; | ||
134 | |||
135 | /* Need to make the tx_bd_cons update visible to start_xmit() | ||
136 | * before checking for netif_tx_queue_stopped(). Without the | ||
137 | * memory barrier, there is a small possibility that | ||
138 | * start_xmit() will miss it and cause the queue to be stopped | ||
139 | * forever. | ||
140 | */ | ||
141 | smp_mb(); | ||
142 | |||
143 | /* TBD need a thresh? */ | ||
144 | if (unlikely(netif_tx_queue_stopped(txq))) { | ||
145 | /* Taking tx_lock() is needed to prevent reenabling the queue | ||
146 | * while it's empty. This could have happen if rx_action() gets | ||
147 | * suspended in bnx2x_tx_int() after the condition before | ||
148 | * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): | ||
149 | * | ||
150 | * stops the queue->sees fresh tx_bd_cons->releases the queue-> | ||
151 | * sends some packets consuming the whole queue again-> | ||
152 | * stops the queue | ||
153 | */ | ||
154 | |||
155 | __netif_tx_lock(txq, smp_processor_id()); | ||
156 | |||
157 | if ((netif_tx_queue_stopped(txq)) && | ||
158 | (bp->state == BNX2X_STATE_OPEN) && | ||
159 | (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) | ||
160 | netif_tx_wake_queue(txq); | ||
161 | |||
162 | __netif_tx_unlock(txq); | ||
163 | } | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, | ||
168 | u16 idx) | ||
169 | { | ||
170 | u16 last_max = fp->last_max_sge; | ||
171 | |||
172 | if (SUB_S16(idx, last_max) > 0) | ||
173 | fp->last_max_sge = idx; | ||
174 | } | ||
175 | |||
176 | static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, | ||
177 | struct eth_fast_path_rx_cqe *fp_cqe) | ||
178 | { | ||
179 | struct bnx2x *bp = fp->bp; | ||
180 | u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) - | ||
181 | le16_to_cpu(fp_cqe->len_on_bd)) >> | ||
182 | SGE_PAGE_SHIFT; | ||
183 | u16 last_max, last_elem, first_elem; | ||
184 | u16 delta = 0; | ||
185 | u16 i; | ||
186 | |||
187 | if (!sge_len) | ||
188 | return; | ||
189 | |||
190 | /* First mark all used pages */ | ||
191 | for (i = 0; i < sge_len; i++) | ||
192 | SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i]))); | ||
193 | |||
194 | DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", | ||
195 | sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); | ||
196 | |||
197 | /* Here we assume that the last SGE index is the biggest */ | ||
198 | prefetch((void *)(fp->sge_mask)); | ||
199 | bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); | ||
200 | |||
201 | last_max = RX_SGE(fp->last_max_sge); | ||
202 | last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT; | ||
203 | first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT; | ||
204 | |||
205 | /* If ring is not full */ | ||
206 | if (last_elem + 1 != first_elem) | ||
207 | last_elem++; | ||
208 | |||
209 | /* Now update the prod */ | ||
210 | for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) { | ||
211 | if (likely(fp->sge_mask[i])) | ||
212 | break; | ||
213 | |||
214 | fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK; | ||
215 | delta += RX_SGE_MASK_ELEM_SZ; | ||
216 | } | ||
217 | |||
218 | if (delta > 0) { | ||
219 | fp->rx_sge_prod += delta; | ||
220 | /* clear page-end entries */ | ||
221 | bnx2x_clear_sge_mask_next_elems(fp); | ||
222 | } | ||
223 | |||
224 | DP(NETIF_MSG_RX_STATUS, | ||
225 | "fp->last_max_sge = %d fp->rx_sge_prod = %d\n", | ||
226 | fp->last_max_sge, fp->rx_sge_prod); | ||
227 | } | ||
228 | |||
229 | static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | ||
230 | struct sk_buff *skb, u16 cons, u16 prod) | ||
231 | { | ||
232 | struct bnx2x *bp = fp->bp; | ||
233 | struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; | ||
234 | struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; | ||
235 | struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; | ||
236 | dma_addr_t mapping; | ||
237 | |||
238 | /* move empty skb from pool to prod and map it */ | ||
239 | prod_rx_buf->skb = fp->tpa_pool[queue].skb; | ||
240 | mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data, | ||
241 | bp->rx_buf_size, DMA_FROM_DEVICE); | ||
242 | dma_unmap_addr_set(prod_rx_buf, mapping, mapping); | ||
243 | |||
244 | /* move partial skb from cons to pool (don't unmap yet) */ | ||
245 | fp->tpa_pool[queue] = *cons_rx_buf; | ||
246 | |||
247 | /* mark bin state as start - print error if current state != stop */ | ||
248 | if (fp->tpa_state[queue] != BNX2X_TPA_STOP) | ||
249 | BNX2X_ERR("start of bin not in stop [%d]\n", queue); | ||
250 | |||
251 | fp->tpa_state[queue] = BNX2X_TPA_START; | ||
252 | |||
253 | /* point prod_bd to new skb */ | ||
254 | prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
255 | prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
256 | |||
257 | #ifdef BNX2X_STOP_ON_ERROR | ||
258 | fp->tpa_queue_used |= (1 << queue); | ||
259 | #ifdef _ASM_GENERIC_INT_L64_H | ||
260 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", | ||
261 | #else | ||
262 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", | ||
263 | #endif | ||
264 | fp->tpa_queue_used); | ||
265 | #endif | ||
266 | } | ||
267 | |||
268 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
269 | struct sk_buff *skb, | ||
270 | struct eth_fast_path_rx_cqe *fp_cqe, | ||
271 | u16 cqe_idx) | ||
272 | { | ||
273 | struct sw_rx_page *rx_pg, old_rx_pg; | ||
274 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); | ||
275 | u32 i, frag_len, frag_size, pages; | ||
276 | int err; | ||
277 | int j; | ||
278 | |||
279 | frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd; | ||
280 | pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; | ||
281 | |||
282 | /* This is needed in order to enable forwarding support */ | ||
283 | if (frag_size) | ||
284 | skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, | ||
285 | max(frag_size, (u32)len_on_bd)); | ||
286 | |||
287 | #ifdef BNX2X_STOP_ON_ERROR | ||
288 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { | ||
289 | BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", | ||
290 | pages, cqe_idx); | ||
291 | BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n", | ||
292 | fp_cqe->pkt_len, len_on_bd); | ||
293 | bnx2x_panic(); | ||
294 | return -EINVAL; | ||
295 | } | ||
296 | #endif | ||
297 | |||
298 | /* Run through the SGL and compose the fragmented skb */ | ||
299 | for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { | ||
300 | u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j])); | ||
301 | |||
302 | /* FW gives the indices of the SGE as if the ring is an array | ||
303 | (meaning that "next" element will consume 2 indices) */ | ||
304 | frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE)); | ||
305 | rx_pg = &fp->rx_page_ring[sge_idx]; | ||
306 | old_rx_pg = *rx_pg; | ||
307 | |||
308 | /* If we fail to allocate a substitute page, we simply stop | ||
309 | where we are and drop the whole packet */ | ||
310 | err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); | ||
311 | if (unlikely(err)) { | ||
312 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
313 | return err; | ||
314 | } | ||
315 | |||
316 | /* Unmap the page as we r going to pass it to the stack */ | ||
317 | dma_unmap_page(&bp->pdev->dev, | ||
318 | dma_unmap_addr(&old_rx_pg, mapping), | ||
319 | SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); | ||
320 | |||
321 | /* Add one frag and update the appropriate fields in the skb */ | ||
322 | skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); | ||
323 | |||
324 | skb->data_len += frag_len; | ||
325 | skb->truesize += frag_len; | ||
326 | skb->len += frag_len; | ||
327 | |||
328 | frag_size -= frag_len; | ||
329 | } | ||
330 | |||
331 | return 0; | ||
332 | } | ||
333 | |||
334 | static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
335 | u16 queue, int pad, int len, union eth_rx_cqe *cqe, | ||
336 | u16 cqe_idx) | ||
337 | { | ||
338 | struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue]; | ||
339 | struct sk_buff *skb = rx_buf->skb; | ||
340 | /* alloc new skb */ | ||
341 | struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); | ||
342 | |||
343 | /* Unmap skb in the pool anyway, as we are going to change | ||
344 | pool entry status to BNX2X_TPA_STOP even if new skb allocation | ||
345 | fails. */ | ||
346 | dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), | ||
347 | bp->rx_buf_size, DMA_FROM_DEVICE); | ||
348 | |||
349 | if (likely(new_skb)) { | ||
350 | /* fix ip xsum and give it to the stack */ | ||
351 | /* (no need to map the new skb) */ | ||
352 | #ifdef BCM_VLAN | ||
353 | int is_vlan_cqe = | ||
354 | (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & | ||
355 | PARSING_FLAGS_VLAN); | ||
356 | int is_not_hwaccel_vlan_cqe = | ||
357 | (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG))); | ||
358 | #endif | ||
359 | |||
360 | prefetch(skb); | ||
361 | prefetch(((char *)(skb)) + 128); | ||
362 | |||
363 | #ifdef BNX2X_STOP_ON_ERROR | ||
364 | if (pad + len > bp->rx_buf_size) { | ||
365 | BNX2X_ERR("skb_put is about to fail... " | ||
366 | "pad %d len %d rx_buf_size %d\n", | ||
367 | pad, len, bp->rx_buf_size); | ||
368 | bnx2x_panic(); | ||
369 | return; | ||
370 | } | ||
371 | #endif | ||
372 | |||
373 | skb_reserve(skb, pad); | ||
374 | skb_put(skb, len); | ||
375 | |||
376 | skb->protocol = eth_type_trans(skb, bp->dev); | ||
377 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
378 | |||
379 | { | ||
380 | struct iphdr *iph; | ||
381 | |||
382 | iph = (struct iphdr *)skb->data; | ||
383 | #ifdef BCM_VLAN | ||
384 | /* If there is no Rx VLAN offloading - | ||
385 | take the VLAN tag into account */ | ||
386 | if (unlikely(is_not_hwaccel_vlan_cqe)) | ||
387 | iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN); | ||
388 | #endif | ||
389 | iph->check = 0; | ||
390 | iph->check = ip_fast_csum((u8 *)iph, iph->ihl); | ||
391 | } | ||
392 | |||
393 | if (!bnx2x_fill_frag_skb(bp, fp, skb, | ||
394 | &cqe->fast_path_cqe, cqe_idx)) { | ||
395 | #ifdef BCM_VLAN | ||
396 | if ((bp->vlgrp != NULL) && is_vlan_cqe && | ||
397 | (!is_not_hwaccel_vlan_cqe)) | ||
398 | vlan_gro_receive(&fp->napi, bp->vlgrp, | ||
399 | le16_to_cpu(cqe->fast_path_cqe. | ||
400 | vlan_tag), skb); | ||
401 | else | ||
402 | #endif | ||
403 | napi_gro_receive(&fp->napi, skb); | ||
404 | } else { | ||
405 | DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" | ||
406 | " - dropping packet!\n"); | ||
407 | dev_kfree_skb(skb); | ||
408 | } | ||
409 | |||
410 | |||
411 | /* put new skb in bin */ | ||
412 | fp->tpa_pool[queue].skb = new_skb; | ||
413 | |||
414 | } else { | ||
415 | /* else drop the packet and keep the buffer in the bin */ | ||
416 | DP(NETIF_MSG_RX_STATUS, | ||
417 | "Failed to allocate new skb - dropping packet!\n"); | ||
418 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
419 | } | ||
420 | |||
421 | fp->tpa_state[queue] = BNX2X_TPA_STOP; | ||
422 | } | ||
423 | |||
424 | /* Set Toeplitz hash value in the skb using the value from the | ||
425 | * CQE (calculated by HW). | ||
426 | */ | ||
427 | static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe, | ||
428 | struct sk_buff *skb) | ||
429 | { | ||
430 | /* Set Toeplitz hash from CQE */ | ||
431 | if ((bp->dev->features & NETIF_F_RXHASH) && | ||
432 | (cqe->fast_path_cqe.status_flags & | ||
433 | ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) | ||
434 | skb->rxhash = | ||
435 | le32_to_cpu(cqe->fast_path_cqe.rss_hash_result); | ||
436 | } | ||
437 | |||
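
The rxhash set above is the RSS hash the chip computes with the Toeplitz function over the packet's address/port tuple. A plain userspace sketch of that algorithm, assuming an MSB-first key and input layout; the key and tuple values are made up, not the chip's actual configuration:

    #include <stdint.h>
    #include <stdio.h>

    /* Toeplitz hash: for every set bit of the input (MSB first), XOR in
     * the current leftmost 32 bits of the key, then slide the key window
     * one bit. The key must be at least len + 4 bytes long. */
    static uint32_t toeplitz(const uint8_t *key, const uint8_t *in, int len)
    {
        uint32_t hash = 0;
        uint32_t window = (uint32_t)key[0] << 24 | key[1] << 16 |
                          key[2] << 8 | key[3];
        int i, b;

        for (i = 0; i < len; i++)
            for (b = 7; b >= 0; b--) {
                if (in[i] & (1u << b))
                    hash ^= window;
                /* pull the next key bit into the window */
                window = window << 1 | ((key[i + 4] >> b) & 1u);
            }
        return hash;
    }

    int main(void)
    {
        uint8_t key[40] = { 0x6d, 0x5a, 0x56, 0xda };  /* rest zero */
        uint8_t tuple[12] = { 192, 168, 0, 1,          /* src addr */
                              10, 0, 0, 2,             /* dst addr */
                              0x1f, 0x90, 0xc3, 0x50   /* src/dst port */ };

        printf("rxhash = %#010x\n", (unsigned)toeplitz(key, tuple, 12));
        return 0;
    }
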
438 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | ||
439 | { | ||
440 | struct bnx2x *bp = fp->bp; | ||
441 | u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; | ||
442 | u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; | ||
443 | int rx_pkt = 0; | ||
444 | |||
445 | #ifdef BNX2X_STOP_ON_ERROR | ||
446 | if (unlikely(bp->panic)) | ||
447 | return 0; | ||
448 | #endif | ||
449 | |||
450 | /* The CQ "next element" has the same size as a regular element, | ||
451 | which is why simply skipping one slot is OK here */ | ||
452 | hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); | ||
453 | if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | ||
454 | hw_comp_cons++; | ||
455 | |||
456 | bd_cons = fp->rx_bd_cons; | ||
457 | bd_prod = fp->rx_bd_prod; | ||
458 | bd_prod_fw = bd_prod; | ||
459 | sw_comp_cons = fp->rx_comp_cons; | ||
460 | sw_comp_prod = fp->rx_comp_prod; | ||
461 | |||
462 | /* Memory barrier necessary as speculative reads of the rx | ||
463 | * buffer can be ahead of the index in the status block | ||
464 | */ | ||
465 | rmb(); | ||
466 | |||
467 | DP(NETIF_MSG_RX_STATUS, | ||
468 | "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", | ||
469 | fp->index, hw_comp_cons, sw_comp_cons); | ||
470 | |||
471 | while (sw_comp_cons != hw_comp_cons) { | ||
472 | struct sw_rx_bd *rx_buf = NULL; | ||
473 | struct sk_buff *skb; | ||
474 | union eth_rx_cqe *cqe; | ||
475 | u8 cqe_fp_flags; | ||
476 | u16 len, pad; | ||
477 | |||
478 | comp_ring_cons = RCQ_BD(sw_comp_cons); | ||
479 | bd_prod = RX_BD(bd_prod); | ||
480 | bd_cons = RX_BD(bd_cons); | ||
481 | |||
482 | /* Prefetch the page containing the BD descriptor | ||
483 | at the producer's index. It will be needed when a new skb | ||
484 | is allocated */ | ||
485 | prefetch((void *)(PAGE_ALIGN((unsigned long) | ||
486 | (&fp->rx_desc_ring[bd_prod])) - | ||
487 | PAGE_SIZE + 1)); | ||
488 | |||
489 | cqe = &fp->rx_comp_ring[comp_ring_cons]; | ||
490 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; | ||
491 | |||
492 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" | ||
493 | " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), | ||
494 | cqe_fp_flags, cqe->fast_path_cqe.status_flags, | ||
495 | le32_to_cpu(cqe->fast_path_cqe.rss_hash_result), | ||
496 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag), | ||
497 | le16_to_cpu(cqe->fast_path_cqe.pkt_len)); | ||
498 | |||
499 | /* is this a slowpath msg? */ | ||
500 | if (unlikely(CQE_TYPE(cqe_fp_flags))) { | ||
501 | bnx2x_sp_event(fp, cqe); | ||
502 | goto next_cqe; | ||
503 | |||
504 | /* this is an rx packet */ | ||
505 | } else { | ||
506 | rx_buf = &fp->rx_buf_ring[bd_cons]; | ||
507 | skb = rx_buf->skb; | ||
508 | prefetch(skb); | ||
509 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); | ||
510 | pad = cqe->fast_path_cqe.placement_offset; | ||
511 | |||
512 | /* If CQE is marked both TPA_START and TPA_END | ||
513 | it is a non-TPA CQE */ | ||
514 | if ((!fp->disable_tpa) && | ||
515 | (TPA_TYPE(cqe_fp_flags) != | ||
516 | (TPA_TYPE_START | TPA_TYPE_END))) { | ||
517 | u16 queue = cqe->fast_path_cqe.queue_index; | ||
518 | |||
519 | if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) { | ||
520 | DP(NETIF_MSG_RX_STATUS, | ||
521 | "calling tpa_start on queue %d\n", | ||
522 | queue); | ||
523 | |||
524 | bnx2x_tpa_start(fp, queue, skb, | ||
525 | bd_cons, bd_prod); | ||
526 | |||
527 | /* Set Toeplitz hash for an LRO skb */ | ||
528 | bnx2x_set_skb_rxhash(bp, cqe, skb); | ||
529 | |||
530 | goto next_rx; | ||
531 | } | ||
532 | |||
533 | if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) { | ||
534 | DP(NETIF_MSG_RX_STATUS, | ||
535 | "calling tpa_stop on queue %d\n", | ||
536 | queue); | ||
537 | |||
538 | if (!BNX2X_RX_SUM_FIX(cqe)) | ||
539 | BNX2X_ERR("STOP on none TCP " | ||
540 | "data\n"); | ||
541 | |||
542 | /* This is the size of the linear data | ||
543 | on this skb */ | ||
544 | len = le16_to_cpu(cqe->fast_path_cqe. | ||
545 | len_on_bd); | ||
546 | bnx2x_tpa_stop(bp, fp, queue, pad, | ||
547 | len, cqe, comp_ring_cons); | ||
548 | #ifdef BNX2X_STOP_ON_ERROR | ||
549 | if (bp->panic) | ||
550 | return 0; | ||
551 | #endif | ||
552 | |||
553 | bnx2x_update_sge_prod(fp, | ||
554 | &cqe->fast_path_cqe); | ||
555 | goto next_cqe; | ||
556 | } | ||
557 | } | ||
558 | |||
559 | dma_sync_single_for_device(&bp->pdev->dev, | ||
560 | dma_unmap_addr(rx_buf, mapping), | ||
561 | pad + RX_COPY_THRESH, | ||
562 | DMA_FROM_DEVICE); | ||
563 | prefetch(((char *)(skb)) + 128); | ||
564 | |||
565 | /* is this an error packet? */ | ||
566 | if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { | ||
567 | DP(NETIF_MSG_RX_ERR, | ||
568 | "ERROR flags %x rx packet %u\n", | ||
569 | cqe_fp_flags, sw_comp_cons); | ||
570 | fp->eth_q_stats.rx_err_discard_pkt++; | ||
571 | goto reuse_rx; | ||
572 | } | ||
573 | |||
574 | /* Since we don't have a jumbo ring, | ||
575 | * copy small packets if mtu > 1500 | ||
576 | */ | ||
577 | if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && | ||
578 | (len <= RX_COPY_THRESH)) { | ||
579 | struct sk_buff *new_skb; | ||
580 | |||
581 | new_skb = netdev_alloc_skb(bp->dev, | ||
582 | len + pad); | ||
583 | if (new_skb == NULL) { | ||
584 | DP(NETIF_MSG_RX_ERR, | ||
585 | "ERROR packet dropped " | ||
586 | "because of alloc failure\n"); | ||
587 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
588 | goto reuse_rx; | ||
589 | } | ||
590 | |||
591 | /* aligned copy */ | ||
592 | skb_copy_from_linear_data_offset(skb, pad, | ||
593 | new_skb->data + pad, len); | ||
594 | skb_reserve(new_skb, pad); | ||
595 | skb_put(new_skb, len); | ||
596 | |||
597 | bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); | ||
598 | |||
599 | skb = new_skb; | ||
600 | |||
601 | } else | ||
602 | if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { | ||
603 | dma_unmap_single(&bp->pdev->dev, | ||
604 | dma_unmap_addr(rx_buf, mapping), | ||
605 | bp->rx_buf_size, | ||
606 | DMA_FROM_DEVICE); | ||
607 | skb_reserve(skb, pad); | ||
608 | skb_put(skb, len); | ||
609 | |||
610 | } else { | ||
611 | DP(NETIF_MSG_RX_ERR, | ||
612 | "ERROR packet dropped because " | ||
613 | "of alloc failure\n"); | ||
614 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
615 | reuse_rx: | ||
616 | bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); | ||
617 | goto next_rx; | ||
618 | } | ||
619 | |||
620 | skb->protocol = eth_type_trans(skb, bp->dev); | ||
621 | |||
622 | /* Set Toeplitz hash for a non-LRO skb */ | ||
623 | bnx2x_set_skb_rxhash(bp, cqe, skb); | ||
624 | |||
625 | skb->ip_summed = CHECKSUM_NONE; | ||
626 | if (bp->rx_csum) { | ||
627 | if (likely(BNX2X_RX_CSUM_OK(cqe))) | ||
628 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
629 | else | ||
630 | fp->eth_q_stats.hw_csum_err++; | ||
631 | } | ||
632 | } | ||
633 | |||
634 | skb_record_rx_queue(skb, fp->index); | ||
635 | |||
636 | #ifdef BCM_VLAN | ||
637 | if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) && | ||
638 | (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & | ||
639 | PARSING_FLAGS_VLAN)) | ||
640 | vlan_gro_receive(&fp->napi, bp->vlgrp, | ||
641 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb); | ||
642 | else | ||
643 | #endif | ||
644 | napi_gro_receive(&fp->napi, skb); | ||
645 | |||
646 | |||
647 | next_rx: | ||
648 | rx_buf->skb = NULL; | ||
649 | |||
650 | bd_cons = NEXT_RX_IDX(bd_cons); | ||
651 | bd_prod = NEXT_RX_IDX(bd_prod); | ||
652 | bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); | ||
653 | rx_pkt++; | ||
654 | next_cqe: | ||
655 | sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); | ||
656 | sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); | ||
657 | |||
658 | if (rx_pkt == budget) | ||
659 | break; | ||
660 | } /* while */ | ||
661 | |||
662 | fp->rx_bd_cons = bd_cons; | ||
663 | fp->rx_bd_prod = bd_prod_fw; | ||
664 | fp->rx_comp_cons = sw_comp_cons; | ||
665 | fp->rx_comp_prod = sw_comp_prod; | ||
666 | |||
667 | /* Update producers */ | ||
668 | bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, | ||
669 | fp->rx_sge_prod); | ||
670 | |||
671 | fp->rx_pkt += rx_pkt; | ||
672 | fp->rx_calls++; | ||
673 | |||
674 | return rx_pkt; | ||
675 | } | ||
676 | |||
677 | static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | ||
678 | { | ||
679 | struct bnx2x_fastpath *fp = fp_cookie; | ||
680 | struct bnx2x *bp = fp->bp; | ||
681 | |||
682 | /* Return here if interrupt is disabled */ | ||
683 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | ||
684 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); | ||
685 | return IRQ_HANDLED; | ||
686 | } | ||
687 | |||
688 | DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", | ||
689 | fp->index, fp->sb_id); | ||
690 | bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); | ||
691 | |||
692 | #ifdef BNX2X_STOP_ON_ERROR | ||
693 | if (unlikely(bp->panic)) | ||
694 | return IRQ_HANDLED; | ||
695 | #endif | ||
696 | |||
697 | /* Handle Rx and Tx according to MSI-X vector */ | ||
698 | prefetch(fp->rx_cons_sb); | ||
699 | prefetch(fp->tx_cons_sb); | ||
700 | prefetch(&fp->status_blk->u_status_block.status_block_index); | ||
701 | prefetch(&fp->status_blk->c_status_block.status_block_index); | ||
702 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); | ||
703 | |||
704 | return IRQ_HANDLED; | ||
705 | } | ||
706 | |||
707 | |||
708 | /* HW Lock for shared dual port PHYs */ | ||
709 | void bnx2x_acquire_phy_lock(struct bnx2x *bp) | ||
710 | { | ||
711 | mutex_lock(&bp->port.phy_mutex); | ||
712 | |||
713 | if (bp->port.need_hw_lock) | ||
714 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); | ||
715 | } | ||
716 | |||
717 | void bnx2x_release_phy_lock(struct bnx2x *bp) | ||
718 | { | ||
719 | if (bp->port.need_hw_lock) | ||
720 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); | ||
721 | |||
722 | mutex_unlock(&bp->port.phy_mutex); | ||
723 | } | ||
724 | |||
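
The two helpers above are meant to bracket a single PHY transaction. The pattern below is illustrative only - bnx2x_mdio_read() is a hypothetical helper, not a driver function:

    bnx2x_acquire_phy_lock(bp);
    val = bnx2x_mdio_read(bp, phy_addr, reg);   /* hypothetical helper */
    bnx2x_release_phy_lock(bp);

The mutex serializes PHY access between the functions sharing a dual port PHY; when need_hw_lock is set, the HW_LOCK_RESOURCE_MDIO lock additionally arbitrates the MDIO bus with agents outside this driver instance.
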
725 | void bnx2x_link_report(struct bnx2x *bp) | ||
726 | { | ||
727 | if (bp->flags & MF_FUNC_DIS) { | ||
728 | netif_carrier_off(bp->dev); | ||
729 | netdev_err(bp->dev, "NIC Link is Down\n"); | ||
730 | return; | ||
731 | } | ||
732 | |||
733 | if (bp->link_vars.link_up) { | ||
734 | u16 line_speed; | ||
735 | |||
736 | if (bp->state == BNX2X_STATE_OPEN) | ||
737 | netif_carrier_on(bp->dev); | ||
738 | netdev_info(bp->dev, "NIC Link is Up, "); | ||
739 | |||
740 | line_speed = bp->link_vars.line_speed; | ||
741 | if (IS_E1HMF(bp)) { | ||
742 | u16 vn_max_rate; | ||
743 | |||
744 | vn_max_rate = | ||
745 | ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
746 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | ||
747 | if (vn_max_rate < line_speed) | ||
748 | line_speed = vn_max_rate; | ||
749 | } | ||
750 | pr_cont("%d Mbps ", line_speed); | ||
751 | |||
752 | if (bp->link_vars.duplex == DUPLEX_FULL) | ||
753 | pr_cont("full duplex"); | ||
754 | else | ||
755 | pr_cont("half duplex"); | ||
756 | |||
757 | if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) { | ||
758 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) { | ||
759 | pr_cont(", receive "); | ||
760 | if (bp->link_vars.flow_ctrl & | ||
761 | BNX2X_FLOW_CTRL_TX) | ||
762 | pr_cont("& transmit "); | ||
763 | } else { | ||
764 | pr_cont(", transmit "); | ||
765 | } | ||
766 | pr_cont("flow control ON"); | ||
767 | } | ||
768 | pr_cont("\n"); | ||
769 | |||
770 | } else { /* link_down */ | ||
771 | netif_carrier_off(bp->dev); | ||
772 | netdev_err(bp->dev, "NIC Link is Down\n"); | ||
773 | } | ||
774 | } | ||
775 | |||
776 | void bnx2x_init_rx_rings(struct bnx2x *bp) | ||
777 | { | ||
778 | int func = BP_FUNC(bp); | ||
779 | int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
780 | ETH_MAX_AGGREGATION_QUEUES_E1H; | ||
781 | u16 ring_prod, cqe_ring_prod; | ||
782 | int i, j; | ||
783 | |||
784 | bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN; | ||
785 | DP(NETIF_MSG_IFUP, | ||
786 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); | ||
787 | |||
788 | if (bp->flags & TPA_ENABLE_FLAG) { | ||
789 | |||
790 | for_each_queue(bp, j) { | ||
791 | struct bnx2x_fastpath *fp = &bp->fp[j]; | ||
792 | |||
793 | for (i = 0; i < max_agg_queues; i++) { | ||
794 | fp->tpa_pool[i].skb = | ||
795 | netdev_alloc_skb(bp->dev, bp->rx_buf_size); | ||
796 | if (!fp->tpa_pool[i].skb) { | ||
797 | BNX2X_ERR("Failed to allocate TPA " | ||
798 | "skb pool for queue[%d] - " | ||
799 | "disabling TPA on this " | ||
800 | "queue!\n", j); | ||
801 | bnx2x_free_tpa_pool(bp, fp, i); | ||
802 | fp->disable_tpa = 1; | ||
803 | break; | ||
804 | } | ||
805 | dma_unmap_addr_set((struct sw_rx_bd *) | ||
806 | &bp->fp->tpa_pool[i], | ||
807 | mapping, 0); | ||
808 | fp->tpa_state[i] = BNX2X_TPA_STOP; | ||
809 | } | ||
810 | } | ||
811 | } | ||
812 | |||
813 | for_each_queue(bp, j) { | ||
814 | struct bnx2x_fastpath *fp = &bp->fp[j]; | ||
815 | |||
816 | fp->rx_bd_cons = 0; | ||
817 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; | ||
818 | fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX; | ||
819 | |||
820 | /* "next page" elements initialization */ | ||
821 | /* SGE ring */ | ||
822 | for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { | ||
823 | struct eth_rx_sge *sge; | ||
824 | |||
825 | sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; | ||
826 | sge->addr_hi = | ||
827 | cpu_to_le32(U64_HI(fp->rx_sge_mapping + | ||
828 | BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); | ||
829 | sge->addr_lo = | ||
830 | cpu_to_le32(U64_LO(fp->rx_sge_mapping + | ||
831 | BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); | ||
832 | } | ||
833 | |||
834 | bnx2x_init_sge_ring_bit_mask(fp); | ||
835 | |||
836 | /* RX BD ring */ | ||
837 | for (i = 1; i <= NUM_RX_RINGS; i++) { | ||
838 | struct eth_rx_bd *rx_bd; | ||
839 | |||
840 | rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; | ||
841 | rx_bd->addr_hi = | ||
842 | cpu_to_le32(U64_HI(fp->rx_desc_mapping + | ||
843 | BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); | ||
844 | rx_bd->addr_lo = | ||
845 | cpu_to_le32(U64_LO(fp->rx_desc_mapping + | ||
846 | BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); | ||
847 | } | ||
848 | |||
849 | /* CQ ring */ | ||
850 | for (i = 1; i <= NUM_RCQ_RINGS; i++) { | ||
851 | struct eth_rx_cqe_next_page *nextpg; | ||
852 | |||
853 | nextpg = (struct eth_rx_cqe_next_page *) | ||
854 | &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; | ||
855 | nextpg->addr_hi = | ||
856 | cpu_to_le32(U64_HI(fp->rx_comp_mapping + | ||
857 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); | ||
858 | nextpg->addr_lo = | ||
859 | cpu_to_le32(U64_LO(fp->rx_comp_mapping + | ||
860 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); | ||
861 | } | ||
862 | |||
863 | /* Allocate SGEs and initialize the ring elements */ | ||
864 | for (i = 0, ring_prod = 0; | ||
865 | i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { | ||
866 | |||
867 | if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { | ||
868 | BNX2X_ERR("was only able to allocate " | ||
869 | "%d rx sges\n", i); | ||
870 | BNX2X_ERR("disabling TPA for queue[%d]\n", j); | ||
871 | /* Cleanup already allocated elements */ | ||
872 | bnx2x_free_rx_sge_range(bp, fp, ring_prod); | ||
873 | bnx2x_free_tpa_pool(bp, fp, max_agg_queues); | ||
874 | fp->disable_tpa = 1; | ||
875 | ring_prod = 0; | ||
876 | break; | ||
877 | } | ||
878 | ring_prod = NEXT_SGE_IDX(ring_prod); | ||
879 | } | ||
880 | fp->rx_sge_prod = ring_prod; | ||
881 | |||
882 | /* Allocate BDs and initialize BD ring */ | ||
883 | fp->rx_comp_cons = 0; | ||
884 | cqe_ring_prod = ring_prod = 0; | ||
885 | for (i = 0; i < bp->rx_ring_size; i++) { | ||
886 | if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { | ||
887 | BNX2X_ERR("was only able to allocate " | ||
888 | "%d rx skbs on queue[%d]\n", i, j); | ||
889 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
890 | break; | ||
891 | } | ||
892 | ring_prod = NEXT_RX_IDX(ring_prod); | ||
893 | cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); | ||
894 | WARN_ON(ring_prod <= i); | ||
895 | } | ||
896 | |||
897 | fp->rx_bd_prod = ring_prod; | ||
898 | /* must not have more available CQEs than BDs */ | ||
899 | fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, | ||
900 | cqe_ring_prod); | ||
901 | fp->rx_pkt = fp->rx_calls = 0; | ||
902 | |||
903 | /* Warning! | ||
904 | * this will generate an interrupt (to the TSTORM); | ||
905 | * it must only be done after the chip is initialized | ||
906 | */ | ||
907 | bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod, | ||
908 | fp->rx_sge_prod); | ||
909 | if (j != 0) | ||
910 | continue; | ||
911 | |||
912 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
913 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), | ||
914 | U64_LO(fp->rx_comp_mapping)); | ||
915 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
916 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, | ||
917 | U64_HI(fp->rx_comp_mapping)); | ||
918 | } | ||
919 | } | ||
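
The three "next page" loops above place pointers in the tail slots of each descriptor page (at `RX_DESC_CNT * i - 2`, `RCQ_DESC_CNT * i - 1`, and so on), with the modulo wrapping the last page back to the first so the rings are circular. A standalone sketch of that arithmetic for the BD ring, with assumed counts standing in for the driver's BCM_PAGE_SIZE-derived values:

    #include <stdio.h>

    /* Illustrative only: each descriptor page reserves its last two BD
     * slots for a pointer to the next page, and (i % NUM_RX_RINGS)
     * wraps the last page back to the first. */
    #define NUM_RX_RINGS 4
    #define RX_DESC_CNT  512  /* descriptors per page, assumed */

    int main(void)
    {
        int i;

        for (i = 1; i <= NUM_RX_RINGS; i++) {
            int elem = RX_DESC_CNT * i - 2;  /* next-page element on page i-1 */
            int target = i % NUM_RX_RINGS;   /* page it points to */

            printf("page %d: next-page element at BD index %d -> page %d\n",
                   i - 1, elem, target);
        }
        return 0;
    }
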
920 | static void bnx2x_free_tx_skbs(struct bnx2x *bp) | ||
921 | { | ||
922 | int i; | ||
923 | |||
924 | for_each_queue(bp, i) { | ||
925 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
926 | |||
927 | u16 bd_cons = fp->tx_bd_cons; | ||
928 | u16 sw_prod = fp->tx_pkt_prod; | ||
929 | u16 sw_cons = fp->tx_pkt_cons; | ||
930 | |||
931 | while (sw_cons != sw_prod) { | ||
932 | bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons)); | ||
933 | sw_cons++; | ||
934 | } | ||
935 | } | ||
936 | } | ||
937 | |||
938 | static void bnx2x_free_rx_skbs(struct bnx2x *bp) | ||
939 | { | ||
940 | int i, j; | ||
941 | |||
942 | for_each_queue(bp, j) { | ||
943 | struct bnx2x_fastpath *fp = &bp->fp[j]; | ||
944 | |||
945 | for (i = 0; i < NUM_RX_BD; i++) { | ||
946 | struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; | ||
947 | struct sk_buff *skb = rx_buf->skb; | ||
948 | |||
949 | if (skb == NULL) | ||
950 | continue; | ||
951 | |||
952 | dma_unmap_single(&bp->pdev->dev, | ||
953 | dma_unmap_addr(rx_buf, mapping), | ||
954 | bp->rx_buf_size, DMA_FROM_DEVICE); | ||
955 | |||
956 | rx_buf->skb = NULL; | ||
957 | dev_kfree_skb(skb); | ||
958 | } | ||
959 | if (!fp->disable_tpa) | ||
960 | bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? | ||
961 | ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
962 | ETH_MAX_AGGREGATION_QUEUES_E1H); | ||
963 | } | ||
964 | } | ||
965 | |||
966 | void bnx2x_free_skbs(struct bnx2x *bp) | ||
967 | { | ||
968 | bnx2x_free_tx_skbs(bp); | ||
969 | bnx2x_free_rx_skbs(bp); | ||
970 | } | ||
971 | |||
972 | static void bnx2x_free_msix_irqs(struct bnx2x *bp) | ||
973 | { | ||
974 | int i, offset = 1; | ||
975 | |||
976 | free_irq(bp->msix_table[0].vector, bp->dev); | ||
977 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", | ||
978 | bp->msix_table[0].vector); | ||
979 | |||
980 | #ifdef BCM_CNIC | ||
981 | offset++; | ||
982 | #endif | ||
983 | for_each_queue(bp, i) { | ||
984 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " | ||
985 | "state %x\n", i, bp->msix_table[i + offset].vector, | ||
986 | bnx2x_fp(bp, i, state)); | ||
987 | |||
988 | free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]); | ||
989 | } | ||
990 | } | ||
991 | |||
992 | void bnx2x_free_irq(struct bnx2x *bp, bool disable_only) | ||
993 | { | ||
994 | if (bp->flags & USING_MSIX_FLAG) { | ||
995 | if (!disable_only) | ||
996 | bnx2x_free_msix_irqs(bp); | ||
997 | pci_disable_msix(bp->pdev); | ||
998 | bp->flags &= ~USING_MSIX_FLAG; | ||
999 | |||
1000 | } else if (bp->flags & USING_MSI_FLAG) { | ||
1001 | if (!disable_only) | ||
1002 | free_irq(bp->pdev->irq, bp->dev); | ||
1003 | pci_disable_msi(bp->pdev); | ||
1004 | bp->flags &= ~USING_MSI_FLAG; | ||
1005 | |||
1006 | } else if (!disable_only) | ||
1007 | free_irq(bp->pdev->irq, bp->dev); | ||
1008 | } | ||
1009 | |||
1010 | static int bnx2x_enable_msix(struct bnx2x *bp) | ||
1011 | { | ||
1012 | int i, rc, offset = 1; | ||
1013 | int igu_vec = 0; | ||
1014 | |||
1015 | bp->msix_table[0].entry = igu_vec; | ||
1016 | DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec); | ||
1017 | |||
1018 | #ifdef BCM_CNIC | ||
1019 | igu_vec = BP_L_ID(bp) + offset; | ||
1020 | bp->msix_table[1].entry = igu_vec; | ||
1021 | DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec); | ||
1022 | offset++; | ||
1023 | #endif | ||
1024 | for_each_queue(bp, i) { | ||
1025 | igu_vec = BP_L_ID(bp) + offset + i; | ||
1026 | bp->msix_table[i + offset].entry = igu_vec; | ||
1027 | DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " | ||
1028 | "(fastpath #%u)\n", i + offset, igu_vec, i); | ||
1029 | } | ||
1030 | |||
1031 | rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], | ||
1032 | BNX2X_NUM_QUEUES(bp) + offset); | ||
1033 | |||
1034 | /* | ||
1035 | * reconfigure number of tx/rx queues according to available | ||
1036 | * MSI-X vectors | ||
1037 | */ | ||
1038 | if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { | ||
1039 | /* vectors available for FP */ | ||
1040 | int fp_vec = rc - BNX2X_MSIX_VEC_FP_START; | ||
1041 | |||
1042 | DP(NETIF_MSG_IFUP, | ||
1043 | "Trying to use less MSI-X vectors: %d\n", rc); | ||
1044 | |||
1045 | rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); | ||
1046 | |||
1047 | if (rc) { | ||
1048 | DP(NETIF_MSG_IFUP, | ||
1049 | "MSI-X is not attainable rc %d\n", rc); | ||
1050 | return rc; | ||
1051 | } | ||
1052 | |||
1053 | bp->num_queues = min(bp->num_queues, fp_vec); | ||
1054 | |||
1055 | DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n", | ||
1056 | bp->num_queues); | ||
1057 | } else if (rc) { | ||
1058 | DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); | ||
1059 | return rc; | ||
1060 | } | ||
1061 | |||
1062 | bp->flags |= USING_MSIX_FLAG; | ||
1063 | |||
1064 | return 0; | ||
1065 | } | ||
1066 | |||
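
bnx2x_enable_msix() relies on the pci_enable_msix() contract of this kernel era: 0 means success, a positive return is the number of vectors actually available, and a negative value is a hard error. A self-contained sketch of the retry-with-fewer-vectors pattern, with a mocked function standing in for the PCI core:

    #include <stdio.h>

    /* Mock of the old pci_enable_msix() contract: 0 = success,
     * >0 = only that many vectors are available ("try again"). */
    static int mock_enable_msix(int requested, int hw_avail)
    {
        return (requested <= hw_avail) ? 0 : hw_avail;
    }

    static int enable_msix_with_fallback(int wanted, int min_needed, int hw_avail)
    {
        int rc = mock_enable_msix(wanted, hw_avail);

        if (rc >= min_needed) {
            /* enough vectors for a reduced configuration: retry with rc */
            printf("retrying with %d vectors\n", rc);
            rc = mock_enable_msix(rc, hw_avail);
        }
        return rc;  /* 0 on success, non-zero if MSI-X is not attainable */
    }

    int main(void)
    {
        /* want 9 vectors (8 fastpath + 1 slowpath) but only 5 exist */
        return enable_msix_with_fallback(9, 2, 5);
    }
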
1067 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) | ||
1068 | { | ||
1069 | int i, rc, offset = 1; | ||
1070 | |||
1071 | rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, | ||
1072 | bp->dev->name, bp->dev); | ||
1073 | if (rc) { | ||
1074 | BNX2X_ERR("request sp irq failed\n"); | ||
1075 | return -EBUSY; | ||
1076 | } | ||
1077 | |||
1078 | #ifdef BCM_CNIC | ||
1079 | offset++; | ||
1080 | #endif | ||
1081 | for_each_queue(bp, i) { | ||
1082 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
1083 | snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", | ||
1084 | bp->dev->name, i); | ||
1085 | |||
1086 | rc = request_irq(bp->msix_table[i + offset].vector, | ||
1087 | bnx2x_msix_fp_int, 0, fp->name, fp); | ||
1088 | if (rc) { | ||
1089 | BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc); | ||
1090 | bnx2x_free_msix_irqs(bp); | ||
1091 | return -EBUSY; | ||
1092 | } | ||
1093 | |||
1094 | fp->state = BNX2X_FP_STATE_IRQ; | ||
1095 | } | ||
1096 | |||
1097 | i = BNX2X_NUM_QUEUES(bp); | ||
1098 | netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" | ||
1099 | " ... fp[%d] %d\n", | ||
1100 | bp->msix_table[0].vector, | ||
1101 | 0, bp->msix_table[offset].vector, | ||
1102 | i - 1, bp->msix_table[offset + i - 1].vector); | ||
1103 | |||
1104 | return 0; | ||
1105 | } | ||
1106 | |||
1107 | static int bnx2x_enable_msi(struct bnx2x *bp) | ||
1108 | { | ||
1109 | int rc; | ||
1110 | |||
1111 | rc = pci_enable_msi(bp->pdev); | ||
1112 | if (rc) { | ||
1113 | DP(NETIF_MSG_IFUP, "MSI is not attainable\n"); | ||
1114 | return -1; | ||
1115 | } | ||
1116 | bp->flags |= USING_MSI_FLAG; | ||
1117 | |||
1118 | return 0; | ||
1119 | } | ||
1120 | |||
1121 | static int bnx2x_req_irq(struct bnx2x *bp) | ||
1122 | { | ||
1123 | unsigned long flags; | ||
1124 | int rc; | ||
1125 | |||
1126 | if (bp->flags & USING_MSI_FLAG) | ||
1127 | flags = 0; | ||
1128 | else | ||
1129 | flags = IRQF_SHARED; | ||
1130 | |||
1131 | rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, | ||
1132 | bp->dev->name, bp->dev); | ||
1133 | if (!rc) | ||
1134 | bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ; | ||
1135 | |||
1136 | return rc; | ||
1137 | } | ||
1138 | |||
1139 | static void bnx2x_napi_enable(struct bnx2x *bp) | ||
1140 | { | ||
1141 | int i; | ||
1142 | |||
1143 | for_each_queue(bp, i) | ||
1144 | napi_enable(&bnx2x_fp(bp, i, napi)); | ||
1145 | } | ||
1146 | |||
1147 | static void bnx2x_napi_disable(struct bnx2x *bp) | ||
1148 | { | ||
1149 | int i; | ||
1150 | |||
1151 | for_each_queue(bp, i) | ||
1152 | napi_disable(&bnx2x_fp(bp, i, napi)); | ||
1153 | } | ||
1154 | |||
1155 | void bnx2x_netif_start(struct bnx2x *bp) | ||
1156 | { | ||
1157 | int intr_sem; | ||
1158 | |||
1159 | intr_sem = atomic_dec_and_test(&bp->intr_sem); | ||
1160 | smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ | ||
1161 | |||
1162 | if (intr_sem) { | ||
1163 | if (netif_running(bp->dev)) { | ||
1164 | bnx2x_napi_enable(bp); | ||
1165 | bnx2x_int_enable(bp); | ||
1166 | if (bp->state == BNX2X_STATE_OPEN) | ||
1167 | netif_tx_wake_all_queues(bp->dev); | ||
1168 | } | ||
1169 | } | ||
1170 | } | ||
1171 | |||
1172 | void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) | ||
1173 | { | ||
1174 | bnx2x_int_disable_sync(bp, disable_hw); | ||
1175 | bnx2x_napi_disable(bp); | ||
1176 | netif_tx_disable(bp->dev); | ||
1177 | } | ||
1178 | static int bnx2x_set_num_queues(struct bnx2x *bp) | ||
1179 | { | ||
1180 | int rc = 0; | ||
1181 | |||
1182 | switch (bp->int_mode) { | ||
1183 | case INT_MODE_INTx: | ||
1184 | case INT_MODE_MSI: | ||
1185 | bp->num_queues = 1; | ||
1186 | DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); | ||
1187 | break; | ||
1188 | default: | ||
1189 | /* Set number of queues according to bp->multi_mode value */ | ||
1190 | bnx2x_set_num_queues_msix(bp); | ||
1191 | |||
1192 | DP(NETIF_MSG_IFUP, "set number of queues to %d\n", | ||
1193 | bp->num_queues); | ||
1194 | |||
1195 | /* if we can't use MSI-X we only need one fp, | ||
1196 | * so try to enable MSI-X with the requested number of fp's | ||
1197 | * and fall back to MSI or legacy INTx with one fp | ||
1198 | */ | ||
1199 | rc = bnx2x_enable_msix(bp); | ||
1200 | if (rc) | ||
1201 | /* failed to enable MSI-X */ | ||
1202 | bp->num_queues = 1; | ||
1203 | break; | ||
1204 | } | ||
1205 | bp->dev->real_num_tx_queues = bp->num_queues; | ||
1206 | return rc; | ||
1207 | } | ||
1208 | |||
1209 | /* must be called with rtnl_lock */ | ||
1210 | int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | ||
1211 | { | ||
1212 | u32 load_code; | ||
1213 | int i, rc; | ||
1214 | |||
1215 | #ifdef BNX2X_STOP_ON_ERROR | ||
1216 | if (unlikely(bp->panic)) | ||
1217 | return -EPERM; | ||
1218 | #endif | ||
1219 | |||
1220 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; | ||
1221 | |||
1222 | rc = bnx2x_set_num_queues(bp); | ||
1223 | |||
1224 | if (bnx2x_alloc_mem(bp)) { | ||
1225 | bnx2x_free_irq(bp, true); | ||
1226 | return -ENOMEM; | ||
1227 | } | ||
1228 | |||
1229 | for_each_queue(bp, i) | ||
1230 | bnx2x_fp(bp, i, disable_tpa) = | ||
1231 | ((bp->flags & TPA_ENABLE_FLAG) == 0); | ||
1232 | |||
1233 | for_each_queue(bp, i) | ||
1234 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), | ||
1235 | bnx2x_poll, 128); | ||
1236 | |||
1237 | bnx2x_napi_enable(bp); | ||
1238 | |||
1239 | if (bp->flags & USING_MSIX_FLAG) { | ||
1240 | rc = bnx2x_req_msix_irqs(bp); | ||
1241 | if (rc) { | ||
1242 | bnx2x_free_irq(bp, true); | ||
1243 | goto load_error1; | ||
1244 | } | ||
1245 | } else { | ||
1246 | /* Fall back to INTx if we failed to enable MSI-X due to lack of | ||
1247 | memory (in bnx2x_set_num_queues()) */ | ||
1248 | if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx)) | ||
1249 | bnx2x_enable_msi(bp); | ||
1250 | bnx2x_ack_int(bp); | ||
1251 | rc = bnx2x_req_irq(bp); | ||
1252 | if (rc) { | ||
1253 | BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); | ||
1254 | bnx2x_free_irq(bp, true); | ||
1255 | goto load_error1; | ||
1256 | } | ||
1257 | if (bp->flags & USING_MSI_FLAG) { | ||
1258 | bp->dev->irq = bp->pdev->irq; | ||
1259 | netdev_info(bp->dev, "using MSI IRQ %d\n", | ||
1260 | bp->pdev->irq); | ||
1261 | } | ||
1262 | } | ||
1263 | |||
1264 | /* Send the LOAD_REQUEST command to the MCP. | ||
1265 | It returns the type of LOAD command: | ||
1266 | if this is the first port to be initialized, | ||
1267 | the common blocks should be initialized; otherwise not | ||
1268 | */ | ||
1269 | if (!BP_NOMCP(bp)) { | ||
1270 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); | ||
1271 | if (!load_code) { | ||
1272 | BNX2X_ERR("MCP response failure, aborting\n"); | ||
1273 | rc = -EBUSY; | ||
1274 | goto load_error2; | ||
1275 | } | ||
1276 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { | ||
1277 | rc = -EBUSY; /* other port in diagnostic mode */ | ||
1278 | goto load_error2; | ||
1279 | } | ||
1280 | |||
1281 | } else { | ||
1282 | int port = BP_PORT(bp); | ||
1283 | |||
1284 | DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n", | ||
1285 | load_count[0], load_count[1], load_count[2]); | ||
1286 | load_count[0]++; | ||
1287 | load_count[1 + port]++; | ||
1288 | DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n", | ||
1289 | load_count[0], load_count[1], load_count[2]); | ||
1290 | if (load_count[0] == 1) | ||
1291 | load_code = FW_MSG_CODE_DRV_LOAD_COMMON; | ||
1292 | else if (load_count[1 + port] == 1) | ||
1293 | load_code = FW_MSG_CODE_DRV_LOAD_PORT; | ||
1294 | else | ||
1295 | load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; | ||
1296 | } | ||
1297 | |||
1298 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || | ||
1299 | (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) | ||
1300 | bp->port.pmf = 1; | ||
1301 | else | ||
1302 | bp->port.pmf = 0; | ||
1303 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); | ||
1304 | |||
1305 | /* Initialize HW */ | ||
1306 | rc = bnx2x_init_hw(bp, load_code); | ||
1307 | if (rc) { | ||
1308 | BNX2X_ERR("HW init failed, aborting\n"); | ||
1309 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); | ||
1310 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); | ||
1311 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | ||
1312 | goto load_error2; | ||
1313 | } | ||
1314 | |||
1315 | /* Setup NIC internals and enable interrupts */ | ||
1316 | bnx2x_nic_init(bp, load_code); | ||
1317 | |||
1318 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) && | ||
1319 | (bp->common.shmem2_base)) | ||
1320 | SHMEM2_WR(bp, dcc_support, | ||
1321 | (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | | ||
1322 | SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); | ||
1323 | |||
1324 | /* Send LOAD_DONE command to MCP */ | ||
1325 | if (!BP_NOMCP(bp)) { | ||
1326 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); | ||
1327 | if (!load_code) { | ||
1328 | BNX2X_ERR("MCP response failure, aborting\n"); | ||
1329 | rc = -EBUSY; | ||
1330 | goto load_error3; | ||
1331 | } | ||
1332 | } | ||
1333 | |||
1334 | bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; | ||
1335 | |||
1336 | rc = bnx2x_setup_leading(bp); | ||
1337 | if (rc) { | ||
1338 | BNX2X_ERR("Setup leading failed!\n"); | ||
1339 | #ifndef BNX2X_STOP_ON_ERROR | ||
1340 | goto load_error3; | ||
1341 | #else | ||
1342 | bp->panic = 1; | ||
1343 | return -EBUSY; | ||
1344 | #endif | ||
1345 | } | ||
1346 | |||
1347 | if (CHIP_IS_E1H(bp)) | ||
1348 | if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { | ||
1349 | DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); | ||
1350 | bp->flags |= MF_FUNC_DIS; | ||
1351 | } | ||
1352 | |||
1353 | if (bp->state == BNX2X_STATE_OPEN) { | ||
1354 | #ifdef BCM_CNIC | ||
1355 | /* Enable Timer scan */ | ||
1356 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1); | ||
1357 | #endif | ||
1358 | for_each_nondefault_queue(bp, i) { | ||
1359 | rc = bnx2x_setup_multi(bp, i); | ||
1360 | if (rc) | ||
1361 | #ifdef BCM_CNIC | ||
1362 | goto load_error4; | ||
1363 | #else | ||
1364 | goto load_error3; | ||
1365 | #endif | ||
1366 | } | ||
1367 | |||
1368 | if (CHIP_IS_E1(bp)) | ||
1369 | bnx2x_set_eth_mac_addr_e1(bp, 1); | ||
1370 | else | ||
1371 | bnx2x_set_eth_mac_addr_e1h(bp, 1); | ||
1372 | #ifdef BCM_CNIC | ||
1373 | /* Set iSCSI L2 MAC */ | ||
1374 | mutex_lock(&bp->cnic_mutex); | ||
1375 | if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) { | ||
1376 | bnx2x_set_iscsi_eth_mac_addr(bp, 1); | ||
1377 | bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET; | ||
1378 | bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, | ||
1379 | CNIC_SB_ID(bp)); | ||
1380 | } | ||
1381 | mutex_unlock(&bp->cnic_mutex); | ||
1382 | #endif | ||
1383 | } | ||
1384 | |||
1385 | if (bp->port.pmf) | ||
1386 | bnx2x_initial_phy_init(bp, load_mode); | ||
1387 | |||
1388 | /* Start fast path */ | ||
1389 | switch (load_mode) { | ||
1390 | case LOAD_NORMAL: | ||
1391 | if (bp->state == BNX2X_STATE_OPEN) { | ||
1392 | /* Tx queues should only be re-enabled */ | ||
1393 | netif_tx_wake_all_queues(bp->dev); | ||
1394 | } | ||
1395 | /* Initialize the receive filter. */ | ||
1396 | bnx2x_set_rx_mode(bp->dev); | ||
1397 | break; | ||
1398 | |||
1399 | case LOAD_OPEN: | ||
1400 | netif_tx_start_all_queues(bp->dev); | ||
1401 | if (bp->state != BNX2X_STATE_OPEN) | ||
1402 | netif_tx_disable(bp->dev); | ||
1403 | /* Initialize the receive filter. */ | ||
1404 | bnx2x_set_rx_mode(bp->dev); | ||
1405 | break; | ||
1406 | |||
1407 | case LOAD_DIAG: | ||
1408 | /* Initialize the receive filter. */ | ||
1409 | bnx2x_set_rx_mode(bp->dev); | ||
1410 | bp->state = BNX2X_STATE_DIAG; | ||
1411 | break; | ||
1412 | |||
1413 | default: | ||
1414 | break; | ||
1415 | } | ||
1416 | |||
1417 | if (!bp->port.pmf) | ||
1418 | bnx2x__link_status_update(bp); | ||
1419 | |||
1420 | /* start the timer */ | ||
1421 | mod_timer(&bp->timer, jiffies + bp->current_interval); | ||
1422 | |||
1423 | #ifdef BCM_CNIC | ||
1424 | bnx2x_setup_cnic_irq_info(bp); | ||
1425 | if (bp->state == BNX2X_STATE_OPEN) | ||
1426 | bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); | ||
1427 | #endif | ||
1428 | bnx2x_inc_load_cnt(bp); | ||
1429 | |||
1430 | return 0; | ||
1431 | |||
1432 | #ifdef BCM_CNIC | ||
1433 | load_error4: | ||
1434 | /* Disable Timer scan */ | ||
1435 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0); | ||
1436 | #endif | ||
1437 | load_error3: | ||
1438 | bnx2x_int_disable_sync(bp, 1); | ||
1439 | if (!BP_NOMCP(bp)) { | ||
1440 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); | ||
1441 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | ||
1442 | } | ||
1443 | bp->port.pmf = 0; | ||
1444 | /* Free SKBs, SGEs, TPA pool and driver internals */ | ||
1445 | bnx2x_free_skbs(bp); | ||
1446 | for_each_queue(bp, i) | ||
1447 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | ||
1448 | load_error2: | ||
1449 | /* Release IRQs */ | ||
1450 | bnx2x_free_irq(bp, false); | ||
1451 | load_error1: | ||
1452 | bnx2x_napi_disable(bp); | ||
1453 | for_each_queue(bp, i) | ||
1454 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | ||
1455 | bnx2x_free_mem(bp); | ||
1456 | |||
1457 | return rc; | ||
1458 | } | ||
1459 | |||
1460 | /* must be called with rtnl_lock */ | ||
1461 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | ||
1462 | { | ||
1463 | int i; | ||
1464 | |||
1465 | if (bp->state == BNX2X_STATE_CLOSED) { | ||
1466 | /* Interface has been removed - nothing to recover */ | ||
1467 | bp->recovery_state = BNX2X_RECOVERY_DONE; | ||
1468 | bp->is_leader = 0; | ||
1469 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08); | ||
1470 | smp_wmb(); | ||
1471 | |||
1472 | return -EINVAL; | ||
1473 | } | ||
1474 | |||
1475 | #ifdef BCM_CNIC | ||
1476 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); | ||
1477 | #endif | ||
1478 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | ||
1479 | |||
1480 | /* Set "drop all" */ | ||
1481 | bp->rx_mode = BNX2X_RX_MODE_NONE; | ||
1482 | bnx2x_set_storm_rx_mode(bp); | ||
1483 | |||
1484 | /* Disable HW interrupts, NAPI and Tx */ | ||
1485 | bnx2x_netif_stop(bp, 1); | ||
1486 | netif_carrier_off(bp->dev); | ||
1487 | |||
1488 | del_timer_sync(&bp->timer); | ||
1489 | SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, | ||
1490 | (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); | ||
1491 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
1492 | |||
1493 | /* Release IRQs */ | ||
1494 | bnx2x_free_irq(bp, false); | ||
1495 | |||
1496 | /* Cleanup the chip if needed */ | ||
1497 | if (unload_mode != UNLOAD_RECOVERY) | ||
1498 | bnx2x_chip_cleanup(bp, unload_mode); | ||
1499 | |||
1500 | bp->port.pmf = 0; | ||
1501 | |||
1502 | /* Free SKBs, SGEs, TPA pool and driver internals */ | ||
1503 | bnx2x_free_skbs(bp); | ||
1504 | for_each_queue(bp, i) | ||
1505 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | ||
1506 | for_each_queue(bp, i) | ||
1507 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | ||
1508 | bnx2x_free_mem(bp); | ||
1509 | |||
1510 | bp->state = BNX2X_STATE_CLOSED; | ||
1511 | |||
1512 | /* The last driver must disable "close the gate" if there is no | ||
1513 | * parity attention or "process kill" pending. | ||
1514 | */ | ||
1515 | if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) && | ||
1516 | bnx2x_reset_is_done(bp)) | ||
1517 | bnx2x_disable_close_the_gate(bp); | ||
1518 | |||
1519 | /* Reset the MCP mailbox sequence if there is an ongoing recovery */ | ||
1520 | if (unload_mode == UNLOAD_RECOVERY) | ||
1521 | bp->fw_seq = 0; | ||
1522 | |||
1523 | return 0; | ||
1524 | } | ||
1525 | int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) | ||
1526 | { | ||
1527 | u16 pmcsr; | ||
1528 | |||
1529 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); | ||
1530 | |||
1531 | switch (state) { | ||
1532 | case PCI_D0: | ||
1533 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, | ||
1534 | ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | | ||
1535 | PCI_PM_CTRL_PME_STATUS)); | ||
1536 | |||
1537 | if (pmcsr & PCI_PM_CTRL_STATE_MASK) | ||
1538 | /* delay required during transition out of D3hot */ | ||
1539 | msleep(20); | ||
1540 | break; | ||
1541 | |||
1542 | case PCI_D3hot: | ||
1543 | /* If there are other clients above, don't | ||
1544 | shut down the power */ | ||
1545 | if (atomic_read(&bp->pdev->enable_cnt) != 1) | ||
1546 | return 0; | ||
1547 | /* Don't shut down the power for emulation and FPGA */ | ||
1548 | if (CHIP_REV_IS_SLOW(bp)) | ||
1549 | return 0; | ||
1550 | |||
1551 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | ||
1552 | pmcsr |= 3; | ||
1553 | |||
1554 | if (bp->wol) | ||
1555 | pmcsr |= PCI_PM_CTRL_PME_ENABLE; | ||
1556 | |||
1557 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, | ||
1558 | pmcsr); | ||
1559 | |||
1560 | /* No more memory access after this point until | ||
1561 | * device is brought back to D0. | ||
1562 | */ | ||
1563 | break; | ||
1564 | |||
1565 | default: | ||
1566 | return -EINVAL; | ||
1567 | } | ||
1568 | return 0; | ||
1569 | } | ||
1570 | |||
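
The D3hot branch above clears the PMCSR power-state field and writes 3 into it. A tiny standalone illustration of that register arithmetic; the starting register value is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    /* The low two bits of PMCSR select the power state per the PCI PM
     * spec (0 = D0 ... 3 = D3hot); the mask matches PCI_PM_CTRL_STATE_MASK. */
    #define PM_CTRL_STATE_MASK 0x3

    int main(void)
    {
        uint16_t pmcsr = 0x4100;  /* arbitrary starting register value */

        pmcsr &= ~PM_CTRL_STATE_MASK;
        pmcsr |= 3;               /* request D3hot, as the code above does */
        printf("pmcsr %#06x -> D%u\n",
               (unsigned)pmcsr, (unsigned)(pmcsr & PM_CTRL_STATE_MASK));
        return 0;
    }
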
1571 | |||
1572 | |||
1573 | /* | ||
1574 | * net_device service functions | ||
1575 | */ | ||
1576 | |||
1577 | static int bnx2x_poll(struct napi_struct *napi, int budget) | ||
1578 | { | ||
1579 | int work_done = 0; | ||
1580 | struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, | ||
1581 | napi); | ||
1582 | struct bnx2x *bp = fp->bp; | ||
1583 | |||
1584 | while (1) { | ||
1585 | #ifdef BNX2X_STOP_ON_ERROR | ||
1586 | if (unlikely(bp->panic)) { | ||
1587 | napi_complete(napi); | ||
1588 | return 0; | ||
1589 | } | ||
1590 | #endif | ||
1591 | |||
1592 | if (bnx2x_has_tx_work(fp)) | ||
1593 | bnx2x_tx_int(fp); | ||
1594 | |||
1595 | if (bnx2x_has_rx_work(fp)) { | ||
1596 | work_done += bnx2x_rx_int(fp, budget - work_done); | ||
1597 | |||
1598 | /* must not complete if we consumed full budget */ | ||
1599 | if (work_done >= budget) | ||
1600 | break; | ||
1601 | } | ||
1602 | |||
1603 | /* Fall out from the NAPI loop if needed */ | ||
1604 | if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { | ||
1605 | bnx2x_update_fpsb_idx(fp); | ||
1606 | /* bnx2x_has_rx_work() reads the status block, thus we need | ||
1607 | * to ensure that status block indices have been actually read | ||
1608 | * (bnx2x_update_fpsb_idx) prior to this check | ||
1609 | * (bnx2x_has_rx_work) so that we won't write the "newer" | ||
1610 | * value of the status block to IGU (if there was a DMA right | ||
1611 | * after bnx2x_has_rx_work and if there is no rmb, the memory | ||
1612 | * reading (bnx2x_update_fpsb_idx) may be postponed to right | ||
1613 | * before bnx2x_ack_sb). In this case there will never be | ||
1614 | * another interrupt until there is another update of the | ||
1615 | * status block, while there is still unhandled work. | ||
1616 | */ | ||
1617 | rmb(); | ||
1618 | |||
1619 | if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { | ||
1620 | napi_complete(napi); | ||
1621 | /* Re-enable interrupts */ | ||
1622 | bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, | ||
1623 | le16_to_cpu(fp->fp_c_idx), | ||
1624 | IGU_INT_NOP, 1); | ||
1625 | bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, | ||
1626 | le16_to_cpu(fp->fp_u_idx), | ||
1627 | IGU_INT_ENABLE, 1); | ||
1628 | break; | ||
1629 | } | ||
1630 | } | ||
1631 | } | ||
1632 | |||
1633 | return work_done; | ||
1634 | } | ||
1635 | |||
1636 | |||
1637 | /* We split the first BD into a headers BD and a data BD | ||
1638 | * to ease the pain of our fellow microcode engineers; | ||
1639 | * we use one mapping for both BDs. | ||
1640 | * So far this has only been observed to happen | ||
1641 | * in Other Operating Systems(TM) | ||
1642 | */ | ||
1643 | static noinline u16 bnx2x_tx_split(struct bnx2x *bp, | ||
1644 | struct bnx2x_fastpath *fp, | ||
1645 | struct sw_tx_bd *tx_buf, | ||
1646 | struct eth_tx_start_bd **tx_bd, u16 hlen, | ||
1647 | u16 bd_prod, int nbd) | ||
1648 | { | ||
1649 | struct eth_tx_start_bd *h_tx_bd = *tx_bd; | ||
1650 | struct eth_tx_bd *d_tx_bd; | ||
1651 | dma_addr_t mapping; | ||
1652 | int old_len = le16_to_cpu(h_tx_bd->nbytes); | ||
1653 | |||
1654 | /* first fix first BD */ | ||
1655 | h_tx_bd->nbd = cpu_to_le16(nbd); | ||
1656 | h_tx_bd->nbytes = cpu_to_le16(hlen); | ||
1657 | |||
1658 | DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d " | ||
1659 | "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi, | ||
1660 | h_tx_bd->addr_lo, h_tx_bd->nbd); | ||
1661 | |||
1662 | /* now get a new data BD | ||
1663 | * (after the pbd) and fill it */ | ||
1664 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
1665 | d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd; | ||
1666 | |||
1667 | mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), | ||
1668 | le32_to_cpu(h_tx_bd->addr_lo)) + hlen; | ||
1669 | |||
1670 | d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
1671 | d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
1672 | d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); | ||
1673 | |||
1674 | /* this marks the BD as one that has no individual mapping */ | ||
1675 | tx_buf->flags |= BNX2X_TSO_SPLIT_BD; | ||
1676 | |||
1677 | DP(NETIF_MSG_TX_QUEUED, | ||
1678 | "TSO split data size is %d (%x:%x)\n", | ||
1679 | d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); | ||
1680 | |||
1681 | /* update tx_bd */ | ||
1682 | *tx_bd = (struct eth_tx_start_bd *)d_tx_bd; | ||
1683 | |||
1684 | return bd_prod; | ||
1685 | } | ||
1686 | |||
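
The split above reuses a single DMA mapping: the header BD keeps the original address with nbytes = hlen, and the data BD starts hlen bytes into the same mapping. A tiny standalone illustration with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t mapping = 0x12345000;  /* assumed DMA address of linear data */
        unsigned old_len = 54 + 1460;   /* headers + payload in the first BD */
        unsigned hlen = 54;             /* eth + ip + tcp header bytes */

        /* header BD keeps the original address, data BD starts hlen bytes in */
        printf("header BD: addr %#llx nbytes %u\n",
               (unsigned long long)mapping, hlen);
        printf("data BD:   addr %#llx nbytes %u\n",
               (unsigned long long)(mapping + hlen), old_len - hlen);
        return 0;
    }
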
1687 | static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) | ||
1688 | { | ||
1689 | if (fix > 0) | ||
1690 | csum = (u16) ~csum_fold(csum_sub(csum, | ||
1691 | csum_partial(t_header - fix, fix, 0))); | ||
1692 | |||
1693 | else if (fix < 0) | ||
1694 | csum = (u16) ~csum_fold(csum_add(csum, | ||
1695 | csum_partial(t_header, -fix, 0))); | ||
1696 | |||
1697 | return swab16(csum); | ||
1698 | } | ||
1699 | |||
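
bnx2x_csum_fix() works because the Internet checksum is distributive over concatenation: bytes the hardware summed but shouldn't have can be subtracted out (fix > 0), and bytes it missed can be added back (fix < 0), before folding and byte-swapping. A simplified userspace demonstration, using a plain 32-bit accumulator instead of the kernel's one's-complement csum_sub()/csum_add() and assuming an even-length prefix:

    #include <stdint.h>
    #include <stdio.h>

    /* 32-bit accumulator version of the RFC 1071 Internet checksum, close
     * enough to csum_partial()/csum_fold() to show the identity: the fold
     * of (whole - prefix) equals the fold of the suffix alone. */
    static uint32_t csum_partial32(const uint8_t *buf, int len, uint32_t sum)
    {
        while (len > 1) {
            sum += (uint32_t)buf[0] << 8 | buf[1];
            buf += 2;
            len -= 2;
        }
        if (len)
            sum += (uint32_t)buf[0] << 8;
        return sum;
    }

    static uint16_t csum_fold16(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        uint8_t pkt[64];
        int fix = 14;  /* pretend the HW started summing 14 bytes too early */
        int i;

        for (i = 0; i < 64; i++)
            pkt[i] = (uint8_t)(i * 7 + 3);

        uint32_t hw_sum = csum_partial32(pkt, 64, 0);   /* what HW reports */
        uint32_t prefix = csum_partial32(pkt, fix, 0);  /* bytes to remove */
        uint32_t want = csum_partial32(pkt + fix, 64 - fix, 0);

        printf("fixed %#06x, expected %#06x\n",
               (unsigned)csum_fold16(hw_sum - prefix),
               (unsigned)csum_fold16(want));
        return 0;
    }
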
1700 | static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) | ||
1701 | { | ||
1702 | u32 rc; | ||
1703 | |||
1704 | if (skb->ip_summed != CHECKSUM_PARTIAL) | ||
1705 | rc = XMIT_PLAIN; | ||
1706 | |||
1707 | else { | ||
1708 | if (skb->protocol == htons(ETH_P_IPV6)) { | ||
1709 | rc = XMIT_CSUM_V6; | ||
1710 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | ||
1711 | rc |= XMIT_CSUM_TCP; | ||
1712 | |||
1713 | } else { | ||
1714 | rc = XMIT_CSUM_V4; | ||
1715 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | ||
1716 | rc |= XMIT_CSUM_TCP; | ||
1717 | } | ||
1718 | } | ||
1719 | |||
1720 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) | ||
1721 | rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); | ||
1722 | |||
1723 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | ||
1724 | rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6); | ||
1725 | |||
1726 | return rc; | ||
1727 | } | ||
1728 | |||
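
xmit_type is a small bitmask, so one packet can carry several of these flags at once (a TSO IPv4 packet sets GSO, CSUM_V4 and CSUM_TCP together, as the last branch above shows). A sketch with assumed flag values - the real definitions live in the driver header:

    #include <stdio.h>

    /* Flag values here are assumptions for illustration. What matters is
     * that they are disjoint bits, so one packet can select both the
     * CSUM and the GSO transmit paths. */
    #define XMIT_CSUM_V4  0x01
    #define XMIT_CSUM_V6  0x02
    #define XMIT_CSUM_TCP 0x04
    #define XMIT_GSO_V4   0x08
    #define XMIT_GSO_V6   0x10
    #define XMIT_CSUM     (XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_TCP)
    #define XMIT_GSO      (XMIT_GSO_V4 | XMIT_GSO_V6)

    int main(void)
    {
        /* a TSO IPv4 packet, composed as bnx2x_xmit_type() does */
        unsigned t = XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;

        printf("csum path: %s, gso path: %s\n",
               (t & XMIT_CSUM) ? "yes" : "no",
               (t & XMIT_GSO) ? "yes" : "no");
        return 0;
    }
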
1729 | #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) | ||
1730 | /* check if packet requires linearization (packet is too fragmented) | ||
1731 | no need to check fragmentation if page size > 8K (there will be no | ||
1732 | violation of FW restrictions) */ | ||
1733 | static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, | ||
1734 | u32 xmit_type) | ||
1735 | { | ||
1736 | int to_copy = 0; | ||
1737 | int hlen = 0; | ||
1738 | int first_bd_sz = 0; | ||
1739 | |||
1740 | /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ | ||
1741 | if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) { | ||
1742 | |||
1743 | if (xmit_type & XMIT_GSO) { | ||
1744 | unsigned short lso_mss = skb_shinfo(skb)->gso_size; | ||
1745 | /* Check if LSO packet needs to be copied: | ||
1746 | 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ | ||
1747 | int wnd_size = MAX_FETCH_BD - 3; | ||
1748 | /* Number of windows to check */ | ||
1749 | int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; | ||
1750 | int wnd_idx = 0; | ||
1751 | int frag_idx = 0; | ||
1752 | u32 wnd_sum = 0; | ||
1753 | |||
1754 | /* Headers length */ | ||
1755 | hlen = (int)(skb_transport_header(skb) - skb->data) + | ||
1756 | tcp_hdrlen(skb); | ||
1757 | |||
1758 | /* Amount of data (w/o headers) on the linear part of the SKB */ | ||
1759 | first_bd_sz = skb_headlen(skb) - hlen; | ||
1760 | |||
1761 | wnd_sum = first_bd_sz; | ||
1762 | |||
1763 | /* Calculate the first sum - it's special */ | ||
1764 | for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) | ||
1765 | wnd_sum += | ||
1766 | skb_shinfo(skb)->frags[frag_idx].size; | ||
1767 | |||
1768 | /* If there was data in the linear part of the skb - check it */ | ||
1769 | if (first_bd_sz > 0) { | ||
1770 | if (unlikely(wnd_sum < lso_mss)) { | ||
1771 | to_copy = 1; | ||
1772 | goto exit_lbl; | ||
1773 | } | ||
1774 | |||
1775 | wnd_sum -= first_bd_sz; | ||
1776 | } | ||
1777 | |||
1778 | /* Others are easier: run through the frag list and | ||
1779 | check all windows */ | ||
1780 | for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { | ||
1781 | wnd_sum += | ||
1782 | skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size; | ||
1783 | |||
1784 | if (unlikely(wnd_sum < lso_mss)) { | ||
1785 | to_copy = 1; | ||
1786 | break; | ||
1787 | } | ||
1788 | wnd_sum -= | ||
1789 | skb_shinfo(skb)->frags[wnd_idx].size; | ||
1790 | } | ||
1791 | } else { | ||
1792 | /* in the non-LSO case, a too fragmented packet should | ||
1793 | always be linearized */ | ||
1794 | to_copy = 1; | ||
1795 | } | ||
1796 | } | ||
1797 | |||
1798 | exit_lbl: | ||
1799 | if (unlikely(to_copy)) | ||
1800 | DP(NETIF_MSG_TX_QUEUED, | ||
1801 | "Linearization IS REQUIRED for %s packet. " | ||
1802 | "num_frags %d hlen %d first_bd_sz %d\n", | ||
1803 | (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", | ||
1804 | skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); | ||
1805 | |||
1806 | return to_copy; | ||
1807 | } | ||
1808 | #endif | ||
1809 | |||
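
The check above slides a window of wnd_size fragments across the SGL and flags the packet for linearization as soon as any window carries less than one MSS, since the FW could then not build a full packet from within a single window. A standalone version of that test, ignoring the skb's linear part for brevity and using made-up fragment sizes:

    #include <stdio.h>

    static int needs_linearization(const int *frag, int nfrags,
                                   int wnd_size, int mss)
    {
        int wnd_sum = 0, i;

        for (i = 0; i < nfrags && i < wnd_size; i++)
            wnd_sum += frag[i];
        if (nfrags <= wnd_size)
            return 0;  /* everything fits in one window */

        for (i = 0; i + wnd_size <= nfrags; i++) {
            if (wnd_sum < mss)
                return 1;
            if (i + wnd_size < nfrags) {
                wnd_sum -= frag[i];             /* slide the window right */
                wnd_sum += frag[i + wnd_size];
            }
        }
        return 0;
    }

    int main(void)
    {
        int frags[] = { 200, 300, 100, 150, 800, 900, 250, 120 };

        printf("linearize: %d\n", needs_linearization(frags, 8, 3, 1460));
        return 0;
    }
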
1810 | /* called with netif_tx_lock | ||
1811 | * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call | ||
1812 | * netif_wake_queue() | ||
1813 | */ | ||
1814 | netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
1815 | { | ||
1816 | struct bnx2x *bp = netdev_priv(dev); | ||
1817 | struct bnx2x_fastpath *fp; | ||
1818 | struct netdev_queue *txq; | ||
1819 | struct sw_tx_bd *tx_buf; | ||
1820 | struct eth_tx_start_bd *tx_start_bd; | ||
1821 | struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; | ||
1822 | struct eth_tx_parse_bd *pbd = NULL; | ||
1823 | u16 pkt_prod, bd_prod; | ||
1824 | int nbd, fp_index; | ||
1825 | dma_addr_t mapping; | ||
1826 | u32 xmit_type = bnx2x_xmit_type(bp, skb); | ||
1827 | int i; | ||
1828 | u8 hlen = 0; | ||
1829 | __le16 pkt_size = 0; | ||
1830 | struct ethhdr *eth; | ||
1831 | u8 mac_type = UNICAST_ADDRESS; | ||
1832 | |||
1833 | #ifdef BNX2X_STOP_ON_ERROR | ||
1834 | if (unlikely(bp->panic)) | ||
1835 | return NETDEV_TX_BUSY; | ||
1836 | #endif | ||
1837 | |||
1838 | fp_index = skb_get_queue_mapping(skb); | ||
1839 | txq = netdev_get_tx_queue(dev, fp_index); | ||
1840 | |||
1841 | fp = &bp->fp[fp_index]; | ||
1842 | |||
1843 | if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { | ||
1844 | fp->eth_q_stats.driver_xoff++; | ||
1845 | netif_tx_stop_queue(txq); | ||
1846 | BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); | ||
1847 | return NETDEV_TX_BUSY; | ||
1848 | } | ||
1849 | |||
1850 | DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)" | ||
1851 | " gso type %x xmit_type %x\n", | ||
1852 | skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, | ||
1853 | ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); | ||
1854 | |||
1855 | eth = (struct ethhdr *)skb->data; | ||
1856 | |||
1857 | /* set flag according to packet type (UNICAST_ADDRESS is default)*/ | ||
1858 | if (unlikely(is_multicast_ether_addr(eth->h_dest))) { | ||
1859 | if (is_broadcast_ether_addr(eth->h_dest)) | ||
1860 | mac_type = BROADCAST_ADDRESS; | ||
1861 | else | ||
1862 | mac_type = MULTICAST_ADDRESS; | ||
1863 | } | ||
1864 | |||
1865 | #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) | ||
1866 | /* First, check if we need to linearize the skb (due to FW | ||
1867 | restrictions). No need to check fragmentation if page size > 8K | ||
1868 | (there will be no violation of FW restrictions) */ | ||
1869 | if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { | ||
1870 | /* Statistics of linearization */ | ||
1871 | bp->lin_cnt++; | ||
1872 | if (skb_linearize(skb) != 0) { | ||
1873 | DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " | ||
1874 | "silently dropping this SKB\n"); | ||
1875 | dev_kfree_skb_any(skb); | ||
1876 | return NETDEV_TX_OK; | ||
1877 | } | ||
1878 | } | ||
1879 | #endif | ||
1880 | |||
1881 | /* | ||
1882 | Please read carefully. First we use one BD which we mark as start, | ||
1883 | then we have a parsing info BD (used for TSO or xsum), | ||
1884 | and only then we have the rest of the TSO BDs. | ||
1885 | (don't forget to mark the last one as last, | ||
1886 | and to unmap only AFTER you write to the BD ...) | ||
1887 | And above all, all pbd sizes are in words - NOT DWORDS! | ||
1888 | */ | ||
1889 | |||
1890 | pkt_prod = fp->tx_pkt_prod++; | ||
1891 | bd_prod = TX_BD(fp->tx_bd_prod); | ||
1892 | |||
1893 | /* get a tx_buf and first BD */ | ||
1894 | tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; | ||
1895 | tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; | ||
1896 | |||
1897 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; | ||
1898 | tx_start_bd->general_data = (mac_type << | ||
1899 | ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); | ||
1900 | /* header nbd */ | ||
1901 | tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); | ||
1902 | |||
1903 | /* remember the first BD of the packet */ | ||
1904 | tx_buf->first_bd = fp->tx_bd_prod; | ||
1905 | tx_buf->skb = skb; | ||
1906 | tx_buf->flags = 0; | ||
1907 | |||
1908 | DP(NETIF_MSG_TX_QUEUED, | ||
1909 | "sending pkt %u @%p next_idx %u bd %u @%p\n", | ||
1910 | pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); | ||
1911 | |||
1912 | #ifdef BCM_VLAN | ||
1913 | if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && | ||
1914 | (bp->flags & HW_VLAN_TX_FLAG)) { | ||
1915 | tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); | ||
1916 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; | ||
1917 | } else | ||
1918 | #endif | ||
1919 | tx_start_bd->vlan = cpu_to_le16(pkt_prod); | ||
1920 | |||
1921 | /* turn on parsing and get a BD */ | ||
1922 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
1923 | pbd = &fp->tx_desc_ring[bd_prod].parse_bd; | ||
1924 | |||
1925 | memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); | ||
1926 | |||
1927 | if (xmit_type & XMIT_CSUM) { | ||
1928 | hlen = (skb_network_header(skb) - skb->data) / 2; | ||
1929 | |||
1930 | /* for now the NS flag is not used in Linux */ | ||
1931 | pbd->global_data = | ||
1932 | (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << | ||
1933 | ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT)); | ||
1934 | |||
1935 | pbd->ip_hlen = (skb_transport_header(skb) - | ||
1936 | skb_network_header(skb)) / 2; | ||
1937 | |||
1938 | hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2; | ||
1939 | |||
1940 | pbd->total_hlen = cpu_to_le16(hlen); | ||
1941 | hlen = hlen*2; | ||
1942 | |||
1943 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; | ||
1944 | |||
1945 | if (xmit_type & XMIT_CSUM_V4) | ||
1946 | tx_start_bd->bd_flags.as_bitfield |= | ||
1947 | ETH_TX_BD_FLAGS_IP_CSUM; | ||
1948 | else | ||
1949 | tx_start_bd->bd_flags.as_bitfield |= | ||
1950 | ETH_TX_BD_FLAGS_IPV6; | ||
1951 | |||
1952 | if (xmit_type & XMIT_CSUM_TCP) { | ||
1953 | pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); | ||
1954 | |||
1955 | } else { | ||
1956 | s8 fix = SKB_CS_OFF(skb); /* signed! */ | ||
1957 | |||
1958 | pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG; | ||
1959 | |||
1960 | DP(NETIF_MSG_TX_QUEUED, | ||
1961 | "hlen %d fix %d csum before fix %x\n", | ||
1962 | le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb)); | ||
1963 | |||
1964 | /* HW bug: fixup the CSUM */ | ||
1965 | pbd->tcp_pseudo_csum = | ||
1966 | bnx2x_csum_fix(skb_transport_header(skb), | ||
1967 | SKB_CS(skb), fix); | ||
1968 | |||
1969 | DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", | ||
1970 | pbd->tcp_pseudo_csum); | ||
1971 | } | ||
1972 | } | ||
1973 | |||
1974 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
1975 | skb_headlen(skb), DMA_TO_DEVICE); | ||
1976 | |||
1977 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
1978 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
1979 | nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */ | ||
1980 | tx_start_bd->nbd = cpu_to_le16(nbd); | ||
1981 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); | ||
1982 | pkt_size = tx_start_bd->nbytes; | ||
1983 | |||
1984 | DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" | ||
1985 | " nbytes %d flags %x vlan %x\n", | ||
1986 | tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, | ||
1987 | le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), | ||
1988 | tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan)); | ||
1989 | |||
1990 | if (xmit_type & XMIT_GSO) { | ||
1991 | |||
1992 | DP(NETIF_MSG_TX_QUEUED, | ||
1993 | "TSO packet len %d hlen %d total len %d tso size %d\n", | ||
1994 | skb->len, hlen, skb_headlen(skb), | ||
1995 | skb_shinfo(skb)->gso_size); | ||
1996 | |||
1997 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; | ||
1998 | |||
1999 | if (unlikely(skb_headlen(skb) > hlen)) | ||
2000 | bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, | ||
2001 | hlen, bd_prod, ++nbd); | ||
2002 | |||
2003 | pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | ||
2004 | pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); | ||
2005 | pbd->tcp_flags = pbd_tcp_flags(skb); | ||
2006 | |||
2007 | if (xmit_type & XMIT_GSO_V4) { | ||
2008 | pbd->ip_id = swab16(ip_hdr(skb)->id); | ||
2009 | pbd->tcp_pseudo_csum = | ||
2010 | swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, | ||
2011 | ip_hdr(skb)->daddr, | ||
2012 | 0, IPPROTO_TCP, 0)); | ||
2013 | |||
2014 | } else | ||
2015 | pbd->tcp_pseudo_csum = | ||
2016 | swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
2017 | &ipv6_hdr(skb)->daddr, | ||
2018 | 0, IPPROTO_TCP, 0)); | ||
2019 | |||
2020 | pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN; | ||
2021 | } | ||
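	/* Note on the pseudo checksums above: both are computed with a zero
	 * length argument, which is presumably what the
	 * ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN flag tells the chip -
	 * the HW adds the per-segment length itself when it rebuilds the
	 * headers of each TSO segment. */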
2022 | tx_data_bd = (struct eth_tx_bd *)tx_start_bd; | ||
2023 | |||
2024 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
2025 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
2026 | |||
2027 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
2028 | tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd; | ||
2029 | if (total_pkt_bd == NULL) | ||
2030 | total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; | ||
2031 | |||
2032 | mapping = dma_map_page(&bp->pdev->dev, frag->page, | ||
2033 | frag->page_offset, | ||
2034 | frag->size, DMA_TO_DEVICE); | ||
2035 | |||
2036 | tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
2037 | tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
2038 | tx_data_bd->nbytes = cpu_to_le16(frag->size); | ||
2039 | le16_add_cpu(&pkt_size, frag->size); | ||
2040 | |||
2041 | DP(NETIF_MSG_TX_QUEUED, | ||
2042 | "frag %d bd @%p addr (%x:%x) nbytes %d\n", | ||
2043 | i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, | ||
2044 | le16_to_cpu(tx_data_bd->nbytes)); | ||
2045 | } | ||
2046 | |||
2047 | DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); | ||
2048 | |||
2049 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
2050 | |||
2051 | /* now send a tx doorbell, counting the next BD | ||
2052 | * if the packet contains or ends with it | ||
2053 | */ | ||
2054 | if (TX_BD_POFF(bd_prod) < nbd) | ||
2055 | nbd++; | ||
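	/* Assumption based on the comment above: TX_BD_POFF(bd_prod) < nbd
	 * means the chain wrapped past a "next-page" element, which occupies
	 * a BD of its own and must therefore be included in the doorbell
	 * count. */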
2056 | |||
2057 | if (total_pkt_bd != NULL) | ||
2058 | total_pkt_bd->total_pkt_bytes = pkt_size; | ||
2059 | |||
2060 | if (pbd) | ||
2061 | DP(NETIF_MSG_TX_QUEUED, | ||
2062 | "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" | ||
2063 | " tcp_flags %x xsum %x seq %u hlen %u\n", | ||
2064 | pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id, | ||
2065 | pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum, | ||
2066 | pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen)); | ||
2067 | |||
2068 | DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); | ||
2069 | |||
2070 | /* | ||
2071 | * Make sure that the BD data is updated before updating the producer | ||
2072 | * since FW might read the BD right after the producer is updated. | ||
2073 | * This is only applicable for weak-ordered memory model archs such | ||
2074 | * as IA-64. The following barrier is also mandatory since FW will | ||
2075 | * assume packets must have BDs. | ||
2076 | */ | ||
2077 | wmb(); | ||
2078 | |||
2079 | fp->tx_db.data.prod += nbd; | ||
2080 | barrier(); | ||
2081 | DOORBELL(bp, fp->index, fp->tx_db.raw); | ||
2082 | |||
2083 | mmiowb(); | ||
2084 | |||
2085 | fp->tx_bd_prod += nbd; | ||
2086 | |||
2087 | if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { | ||
2088 | netif_tx_stop_queue(txq); | ||
2089 | |||
2090 | /* paired memory barrier is in bnx2x_tx_int(), we have to keep | ||
2091 | * ordering of set_bit() in netif_tx_stop_queue() and read of | ||
2092 | * fp->bd_tx_cons */ | ||
2093 | smp_mb(); | ||
2094 | |||
2095 | fp->eth_q_stats.driver_xoff++; | ||
2096 | if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) | ||
2097 | netif_tx_wake_queue(txq); | ||
2098 | } | ||
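	/* This is the classic lost-wakeup guard: stop the queue, issue a
	 * full barrier, then re-check the free BD count. If the completion
	 * path freed BDs between our check and the stop, we wake the queue
	 * ourselves instead of waiting for an interrupt that has already
	 * passed. */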
2099 | fp->tx_pkt++; | ||
2100 | |||
2101 | return NETDEV_TX_OK; | ||
2102 | } | ||
2103 | /* called with rtnl_lock */ | ||
2104 | int bnx2x_change_mac_addr(struct net_device *dev, void *p) | ||
2105 | { | ||
2106 | struct sockaddr *addr = p; | ||
2107 | struct bnx2x *bp = netdev_priv(dev); | ||
2108 | |||
2109 | if (!is_valid_ether_addr((u8 *)(addr->sa_data))) | ||
2110 | return -EINVAL; | ||
2111 | |||
2112 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
2113 | if (netif_running(dev)) { | ||
2114 | if (CHIP_IS_E1(bp)) | ||
2115 | bnx2x_set_eth_mac_addr_e1(bp, 1); | ||
2116 | else | ||
2117 | bnx2x_set_eth_mac_addr_e1h(bp, 1); | ||
2118 | } | ||
2119 | |||
2120 | return 0; | ||
2121 | } | ||
2122 | |||
2123 | /* called with rtnl_lock */ | ||
2124 | int bnx2x_change_mtu(struct net_device *dev, int new_mtu) | ||
2125 | { | ||
2126 | struct bnx2x *bp = netdev_priv(dev); | ||
2127 | int rc = 0; | ||
2128 | |||
2129 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
2130 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
2131 | return -EAGAIN; | ||
2132 | } | ||
2133 | |||
2134 | if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || | ||
2135 | ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) | ||
2136 | return -EINVAL; | ||
2137 | |||
2138 | /* This does not race with packet allocation | ||
2139 | * because the actual alloc size is | ||
2140 | * only updated as part of load | ||
2141 | */ | ||
2142 | dev->mtu = new_mtu; | ||
2143 | |||
2144 | if (netif_running(dev)) { | ||
2145 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
2146 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); | ||
2147 | } | ||
2148 | |||
2149 | return rc; | ||
2150 | } | ||
2151 | |||
2152 | void bnx2x_tx_timeout(struct net_device *dev) | ||
2153 | { | ||
2154 | struct bnx2x *bp = netdev_priv(dev); | ||
2155 | |||
2156 | #ifdef BNX2X_STOP_ON_ERROR | ||
2157 | if (!bp->panic) | ||
2158 | bnx2x_panic(); | ||
2159 | #endif | ||
2160 | /* This allows the netif to be shutdown gracefully before resetting */ | ||
2161 | schedule_delayed_work(&bp->reset_task, 0); | ||
2162 | } | ||
2163 | |||
2164 | #ifdef BCM_VLAN | ||
2165 | /* called with rtnl_lock */ | ||
2166 | void bnx2x_vlan_rx_register(struct net_device *dev, | ||
2167 | struct vlan_group *vlgrp) | ||
2168 | { | ||
2169 | struct bnx2x *bp = netdev_priv(dev); | ||
2170 | |||
2171 | bp->vlgrp = vlgrp; | ||
2172 | |||
2173 | /* Set flags according to the required capabilities */ | ||
2174 | bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG); | ||
2175 | |||
2176 | if (dev->features & NETIF_F_HW_VLAN_TX) | ||
2177 | bp->flags |= HW_VLAN_TX_FLAG; | ||
2178 | |||
2179 | if (dev->features & NETIF_F_HW_VLAN_RX) | ||
2180 | bp->flags |= HW_VLAN_RX_FLAG; | ||
2181 | |||
2182 | if (netif_running(dev)) | ||
2183 | bnx2x_set_client_config(bp); | ||
2184 | } | ||
2185 | |||
2186 | #endif | ||
2187 | int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) | ||
2188 | { | ||
2189 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2190 | struct bnx2x *bp; | ||
2191 | |||
2192 | if (!dev) { | ||
2193 | dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); | ||
2194 | return -ENODEV; | ||
2195 | } | ||
2196 | bp = netdev_priv(dev); | ||
2197 | |||
2198 | rtnl_lock(); | ||
2199 | |||
2200 | pci_save_state(pdev); | ||
2201 | |||
2202 | if (!netif_running(dev)) { | ||
2203 | rtnl_unlock(); | ||
2204 | return 0; | ||
2205 | } | ||
2206 | |||
2207 | netif_device_detach(dev); | ||
2208 | |||
2209 | bnx2x_nic_unload(bp, UNLOAD_CLOSE); | ||
2210 | |||
2211 | bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); | ||
2212 | |||
2213 | rtnl_unlock(); | ||
2214 | |||
2215 | return 0; | ||
2216 | } | ||
2217 | |||
2218 | int bnx2x_resume(struct pci_dev *pdev) | ||
2219 | { | ||
2220 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2221 | struct bnx2x *bp; | ||
2222 | int rc; | ||
2223 | |||
2224 | if (!dev) { | ||
2225 | dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); | ||
2226 | return -ENODEV; | ||
2227 | } | ||
2228 | bp = netdev_priv(dev); | ||
2229 | |||
2230 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
2231 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
2232 | return -EAGAIN; | ||
2233 | } | ||
2234 | |||
2235 | rtnl_lock(); | ||
2236 | |||
2237 | pci_restore_state(pdev); | ||
2238 | |||
2239 | if (!netif_running(dev)) { | ||
2240 | rtnl_unlock(); | ||
2241 | return 0; | ||
2242 | } | ||
2243 | |||
2244 | bnx2x_set_power_state(bp, PCI_D0); | ||
2245 | netif_device_attach(dev); | ||
2246 | |||
2247 | rc = bnx2x_nic_load(bp, LOAD_OPEN); | ||
2248 | |||
2249 | rtnl_unlock(); | ||
2250 | |||
2251 | return rc; | ||
2252 | } | ||
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h new file mode 100644 index 000000000000..d1979b1a7ed2 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_cmn.h | |||
@@ -0,0 +1,652 @@ | |||
1 | /* bnx2x_cmn.h: Broadcom Everest network driver. | ||
2 | * | ||
3 | * Copyright (c) 2007-2010 Broadcom Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> | ||
10 | * Written by: Eliezer Tamir | ||
11 | * Based on code from Michael Chan's bnx2 driver | ||
12 | * UDP CSUM errata workaround by Arik Gendelman | ||
13 | * Slowpath and fastpath rework by Vladislav Zolotarov | ||
14 | * Statistics and Link management by Yitchak Gertner | ||
15 | * | ||
16 | */ | ||
17 | #ifndef BNX2X_CMN_H | ||
18 | #define BNX2X_CMN_H | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | #include <linux/netdevice.h> | ||
22 | |||
23 | |||
24 | #include "bnx2x.h" | ||
25 | |||
26 | |||
27 | /*********************** Interfaces **************************** | ||
28 | * Functions that need to be implemented by each driver version | ||
29 | */ | ||
30 | |||
31 | /** | ||
32 | * Initialize link parameters structure variables. | ||
33 | * | ||
34 | * @param bp | ||
35 | * @param load_mode | ||
36 | * | ||
37 | * @return u8 | ||
38 | */ | ||
39 | u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); | ||
40 | |||
41 | /** | ||
42 | * Configure hw according to link parameters structure. | ||
43 | * | ||
44 | * @param bp | ||
45 | */ | ||
46 | void bnx2x_link_set(struct bnx2x *bp); | ||
47 | |||
48 | /** | ||
49 | * Query link status | ||
50 | * | ||
51 | * @param bp | ||
52 | * | ||
53 | * @return 0 - link is UP | ||
54 | */ | ||
55 | u8 bnx2x_link_test(struct bnx2x *bp); | ||
56 | |||
57 | /** | ||
58 | * Handles link status change | ||
59 | * | ||
60 | * @param bp | ||
61 | */ | ||
62 | void bnx2x__link_status_update(struct bnx2x *bp); | ||
63 | |||
64 | /** | ||
65 | * MSI-X slowpath interrupt handler | ||
66 | * | ||
67 | * @param irq | ||
68 | * @param dev_instance | ||
69 | * | ||
70 | * @return irqreturn_t | ||
71 | */ | ||
72 | irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance); | ||
73 | |||
74 | /** | ||
75 | * non MSI-X interrupt handler | ||
76 | * | ||
77 | * @param irq | ||
78 | * @param dev_instance | ||
79 | * | ||
80 | * @return irqreturn_t | ||
81 | */ | ||
82 | irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); | ||
83 | #ifdef BCM_CNIC | ||
84 | |||
85 | /** | ||
86 | * Send command to cnic driver | ||
87 | * | ||
88 | * @param bp | ||
89 | * @param cmd | ||
90 | */ | ||
91 | int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); | ||
92 | |||
93 | /** | ||
94 | * Provides cnic information for proper interrupt handling | ||
95 | * | ||
96 | * @param bp | ||
97 | */ | ||
98 | void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); | ||
99 | #endif | ||
100 | |||
101 | /** | ||
102 | * Enable HW interrupts. | ||
103 | * | ||
104 | * @param bp | ||
105 | */ | ||
106 | void bnx2x_int_enable(struct bnx2x *bp); | ||
107 | |||
108 | /** | ||
109 | * Disable interrupts. This function ensures that no | ||
110 | * ISRs or SP DPCs (sp_task) are running after it returns. | ||
111 | * | ||
112 | * @param bp | ||
113 | * @param disable_hw if true, disable HW interrupts. | ||
114 | */ | ||
115 | void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); | ||
116 | |||
117 | /** | ||
118 | * Init HW blocks according to current initialization stage: | ||
119 | * COMMON, PORT or FUNCTION. | ||
120 | * | ||
121 | * @param bp | ||
122 | * @param load_code: COMMON, PORT or FUNCTION | ||
123 | * | ||
124 | * @return int | ||
125 | */ | ||
126 | int bnx2x_init_hw(struct bnx2x *bp, u32 load_code); | ||
127 | |||
128 | /** | ||
129 | * Init driver internals: | ||
130 | * - rings | ||
131 | * - status blocks | ||
132 | * - etc. | ||
133 | * | ||
134 | * @param bp | ||
135 | * @param load_code COMMON, PORT or FUNCTION | ||
136 | */ | ||
137 | void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); | ||
138 | |||
139 | /** | ||
140 | * Allocate driver's memory. | ||
141 | * | ||
142 | * @param bp | ||
143 | * | ||
144 | * @return int | ||
145 | */ | ||
146 | int bnx2x_alloc_mem(struct bnx2x *bp); | ||
147 | |||
148 | /** | ||
149 | * Release driver's memory. | ||
150 | * | ||
151 | * @param bp | ||
152 | */ | ||
153 | void bnx2x_free_mem(struct bnx2x *bp); | ||
154 | |||
155 | /** | ||
156 | * Bring up a leading (the first) eth Client. | ||
157 | * | ||
158 | * @param bp | ||
159 | * | ||
160 | * @return int | ||
161 | */ | ||
162 | int bnx2x_setup_leading(struct bnx2x *bp); | ||
163 | |||
164 | /** | ||
165 | * Setup non-leading eth Client. | ||
166 | * | ||
167 | * @param bp | ||
168 | * @param index | ||
169 | * | ||
170 | * @return int | ||
171 | */ | ||
172 | int bnx2x_setup_multi(struct bnx2x *bp, int index); | ||
173 | |||
174 | /** | ||
175 | * Set number of queues according to mode and number of available | ||
176 | * msi-x vectors | ||
177 | * | ||
178 | * @param bp | ||
179 | * | ||
180 | */ | ||
181 | void bnx2x_set_num_queues_msix(struct bnx2x *bp); | ||
182 | |||
183 | /** | ||
184 | * Cleanup chip internals: | ||
185 | * - Cleanup MAC configuration. | ||
186 | * - Close clients. | ||
187 | * - etc. | ||
188 | * | ||
189 | * @param bp | ||
190 | * @param unload_mode | ||
191 | */ | ||
192 | void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); | ||
193 | |||
194 | /** | ||
195 | * Acquire HW lock. | ||
196 | * | ||
197 | * @param bp | ||
198 | * @param resource Resource bit which was locked | ||
199 | * | ||
200 | * @return int | ||
201 | */ | ||
202 | int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); | ||
203 | |||
204 | /** | ||
205 | * Release HW lock. | ||
206 | * | ||
207 | * @param bp driver handle | ||
208 | * @param resource Resource bit which was locked | ||
209 | * | ||
210 | * @return int | ||
211 | */ | ||
212 | int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); | ||
213 | |||
214 | /** | ||
215 | * Configure eth MAC address in the HW according to the value in | ||
216 | * netdev->dev_addr for 57711 | ||
217 | * | ||
218 | * @param bp driver handle | ||
219 | * @param set | ||
220 | */ | ||
221 | void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set); | ||
222 | |||
223 | /** | ||
224 | * Configure eth MAC address in the HW according to the value in | ||
225 | * netdev->dev_addr for 57710 | ||
226 | * | ||
227 | * @param bp driver handle | ||
228 | * @param set | ||
229 | */ | ||
230 | void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set); | ||
231 | |||
232 | #ifdef BCM_CNIC | ||
233 | /** | ||
234 | * Set iSCSI MAC(s) at the next entries in the CAM after the ETH | ||
235 | * MAC(s). The function will wait until the ramrod completion | ||
236 | * returns. | ||
237 | * | ||
238 | * @param bp driver handle | ||
239 | * @param set set or clear the CAM entry | ||
240 | * | ||
241 | * @return 0 on success, -ENODEV if the ramrod doesn't return. | ||
242 | */ | ||
243 | int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set); | ||
244 | #endif | ||
245 | |||
246 | /** | ||
247 | * Initialize status block in FW and HW | ||
248 | * | ||
249 | * @param bp driver handle | ||
250 | * @param sb host_status_block | ||
251 | * @param mapping | ||
252 | * @param sb_id | ||
253 | */ | ||
254 | void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, | ||
255 | dma_addr_t mapping, int sb_id); | ||
256 | |||
257 | /** | ||
258 | * Reconfigure FW/HW according to dev->flags rx mode | ||
259 | * | ||
260 | * @param dev net_device | ||
261 | * | ||
262 | */ | ||
263 | void bnx2x_set_rx_mode(struct net_device *dev); | ||
264 | |||
265 | /** | ||
266 | * Configure MAC filtering rules in a FW. | ||
267 | * | ||
268 | * @param bp driver handle | ||
269 | */ | ||
270 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp); | ||
271 | |||
272 | /* Parity errors related */ | ||
273 | void bnx2x_inc_load_cnt(struct bnx2x *bp); | ||
274 | u32 bnx2x_dec_load_cnt(struct bnx2x *bp); | ||
275 | bool bnx2x_chk_parity_attn(struct bnx2x *bp); | ||
276 | bool bnx2x_reset_is_done(struct bnx2x *bp); | ||
277 | void bnx2x_disable_close_the_gate(struct bnx2x *bp); | ||
278 | |||
279 | /** | ||
280 | * Perform statistics handling according to event | ||
281 | * | ||
282 | * @param bp driver handle | ||
283 | * @param event bnx2x_stats_event | ||
284 | */ | ||
285 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | ||
286 | |||
287 | /** | ||
288 | * Configures FW with client parameters (like HW VLAN removal) | ||
289 | * for each active client. | ||
290 | * | ||
291 | * @param bp | ||
292 | */ | ||
293 | void bnx2x_set_client_config(struct bnx2x *bp); | ||
294 | |||
295 | /** | ||
296 | * Handle sp events | ||
297 | * | ||
298 | * @param fp fastpath handle for the event | ||
299 | * @param rr_cqe eth_rx_cqe | ||
300 | */ | ||
301 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); | ||
302 | |||
303 | |||
304 | static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) | ||
305 | { | ||
306 | struct host_status_block *fpsb = fp->status_blk; | ||
307 | |||
308 | barrier(); /* status block is written to by the chip */ | ||
309 | fp->fp_c_idx = fpsb->c_status_block.status_block_index; | ||
310 | fp->fp_u_idx = fpsb->u_status_block.status_block_index; | ||
311 | } | ||
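/* barrier() above is only a compiler barrier: it forces fp_c_idx and
 * fp_u_idx to be re-read from the DMA'd status block instead of a stale
 * cached value; no CPU memory ordering is implied. */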
312 | |||
313 | static inline void bnx2x_update_rx_prod(struct bnx2x *bp, | ||
314 | struct bnx2x_fastpath *fp, | ||
315 | u16 bd_prod, u16 rx_comp_prod, | ||
316 | u16 rx_sge_prod) | ||
317 | { | ||
318 | struct ustorm_eth_rx_producers rx_prods = {0}; | ||
319 | int i; | ||
320 | |||
321 | /* Update producers */ | ||
322 | rx_prods.bd_prod = bd_prod; | ||
323 | rx_prods.cqe_prod = rx_comp_prod; | ||
324 | rx_prods.sge_prod = rx_sge_prod; | ||
325 | |||
326 | /* | ||
327 | * Make sure that the BD and SGE data is updated before updating the | ||
328 | * producers since FW might read the BD/SGE right after the producer | ||
329 | * is updated. | ||
330 | * This is only applicable for weak-ordered memory model archs such | ||
331 | * as IA-64. The following barrier is also mandatory since FW will | ||
332 | * assume BDs must have buffers. | ||
333 | */ | ||
334 | wmb(); | ||
335 | |||
336 | for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++) | ||
337 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
338 | USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4, | ||
339 | ((u32 *)&rx_prods)[i]); | ||
340 | |||
341 | mmiowb(); /* keep prod updates ordered */ | ||
342 | |||
343 | DP(NETIF_MSG_RX_STATUS, | ||
344 | "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", | ||
345 | fp->index, bd_prod, rx_comp_prod, rx_sge_prod); | ||
346 | } | ||
347 | |||
348 | |||
349 | |||
350 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, | ||
351 | u8 storm, u16 index, u8 op, u8 update) | ||
352 | { | ||
353 | u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + | ||
354 | COMMAND_REG_INT_ACK); | ||
355 | struct igu_ack_register igu_ack; | ||
356 | |||
357 | igu_ack.status_block_index = index; | ||
358 | igu_ack.sb_id_and_flags = | ||
359 | ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | | ||
360 | (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | | ||
361 | (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | | ||
362 | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); | ||
363 | |||
364 | DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", | ||
365 | (*(u32 *)&igu_ack), hc_addr); | ||
366 | REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); | ||
367 | |||
368 | /* Make sure that ACK is written */ | ||
369 | mmiowb(); | ||
370 | barrier(); | ||
371 | } | ||
372 | static inline u16 bnx2x_ack_int(struct bnx2x *bp) | ||
373 | { | ||
374 | u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + | ||
375 | COMMAND_REG_SIMD_MASK); | ||
376 | u32 result = REG_RD(bp, hc_addr); | ||
377 | |||
378 | DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", | ||
379 | result, hc_addr); | ||
380 | |||
381 | return result; | ||
382 | } | ||
383 | |||
384 | /* | ||
385 | * fast path service functions | ||
386 | */ | ||
387 | |||
388 | static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) | ||
389 | { | ||
390 | /* Tell compiler that consumer and producer can change */ | ||
391 | barrier(); | ||
392 | return (fp->tx_pkt_prod != fp->tx_pkt_cons); | ||
393 | } | ||
394 | |||
395 | static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) | ||
396 | { | ||
397 | s16 used; | ||
398 | u16 prod; | ||
399 | u16 cons; | ||
400 | |||
401 | prod = fp->tx_bd_prod; | ||
402 | cons = fp->tx_bd_cons; | ||
403 | |||
404 | /* NUM_TX_RINGS = number of "next-page" entries | ||
405 | It will be used as a threshold */ | ||
406 | used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; | ||
407 | |||
408 | #ifdef BNX2X_STOP_ON_ERROR | ||
409 | WARN_ON(used < 0); | ||
410 | WARN_ON(used > fp->bp->tx_ring_size); | ||
411 | WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL); | ||
412 | #endif | ||
413 | |||
414 | return (s16)(fp->bp->tx_ring_size) - used; | ||
415 | } | ||
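/* Worked example with hypothetical numbers: tx_ring_size = 4096,
 * prod = 110, cons = 100 and NUM_TX_RINGS = 16 give
 * used = (110 - 100) + 16 = 26 and avail = 4096 - 26 = 4070.
 * Counting NUM_TX_RINGS as "used" keeps the next-page BDs from ever
 * being handed out as data descriptors. */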
416 | |||
417 | static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp) | ||
418 | { | ||
419 | u16 hw_cons; | ||
420 | |||
421 | /* Tell compiler that status block fields can change */ | ||
422 | barrier(); | ||
423 | hw_cons = le16_to_cpu(*fp->tx_cons_sb); | ||
424 | return hw_cons != fp->tx_pkt_cons; | ||
425 | } | ||
426 | |||
427 | static inline void bnx2x_free_rx_sge(struct bnx2x *bp, | ||
428 | struct bnx2x_fastpath *fp, u16 index) | ||
429 | { | ||
430 | struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; | ||
431 | struct page *page = sw_buf->page; | ||
432 | struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; | ||
433 | |||
434 | /* Skip "next page" elements */ | ||
435 | if (!page) | ||
436 | return; | ||
437 | |||
438 | dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), | ||
439 | SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); | ||
440 | __free_pages(page, PAGES_PER_SGE_SHIFT); | ||
441 | |||
442 | sw_buf->page = NULL; | ||
443 | sge->addr_hi = 0; | ||
444 | sge->addr_lo = 0; | ||
445 | } | ||
446 | |||
447 | static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, | ||
448 | struct bnx2x_fastpath *fp, int last) | ||
449 | { | ||
450 | int i; | ||
451 | |||
452 | for (i = 0; i < last; i++) | ||
453 | bnx2x_free_rx_sge(bp, fp, i); | ||
454 | } | ||
455 | |||
456 | static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, | ||
457 | struct bnx2x_fastpath *fp, u16 index) | ||
458 | { | ||
459 | struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); | ||
460 | struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; | ||
461 | struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; | ||
462 | dma_addr_t mapping; | ||
463 | |||
464 | if (unlikely(page == NULL)) | ||
465 | return -ENOMEM; | ||
466 | |||
467 | mapping = dma_map_page(&bp->pdev->dev, page, 0, | ||
468 | SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); | ||
469 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
470 | __free_pages(page, PAGES_PER_SGE_SHIFT); | ||
471 | return -ENOMEM; | ||
472 | } | ||
473 | |||
474 | sw_buf->page = page; | ||
475 | dma_unmap_addr_set(sw_buf, mapping, mapping); | ||
476 | |||
477 | sge->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
478 | sge->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
479 | |||
480 | return 0; | ||
481 | } | ||
482 | static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, | ||
483 | struct bnx2x_fastpath *fp, u16 index) | ||
484 | { | ||
485 | struct sk_buff *skb; | ||
486 | struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; | ||
487 | struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; | ||
488 | dma_addr_t mapping; | ||
489 | |||
490 | skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); | ||
491 | if (unlikely(skb == NULL)) | ||
492 | return -ENOMEM; | ||
493 | |||
494 | mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size, | ||
495 | DMA_FROM_DEVICE); | ||
496 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
497 | dev_kfree_skb(skb); | ||
498 | return -ENOMEM; | ||
499 | } | ||
500 | |||
501 | rx_buf->skb = skb; | ||
502 | dma_unmap_addr_set(rx_buf, mapping, mapping); | ||
503 | |||
504 | rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
505 | rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
506 | |||
507 | return 0; | ||
508 | } | ||
509 | |||
510 | /* note that we are not allocating a new skb, | ||
511 | * we are just moving one from cons to prod; | ||
512 | * we are not creating a new mapping, | ||
513 | * so there is no need to check for dma_mapping_error(). | ||
514 | */ | ||
515 | static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, | ||
516 | struct sk_buff *skb, u16 cons, u16 prod) | ||
517 | { | ||
518 | struct bnx2x *bp = fp->bp; | ||
519 | struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; | ||
520 | struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; | ||
521 | struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; | ||
522 | struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; | ||
523 | |||
524 | dma_sync_single_for_device(&bp->pdev->dev, | ||
525 | dma_unmap_addr(cons_rx_buf, mapping), | ||
526 | RX_COPY_THRESH, DMA_FROM_DEVICE); | ||
527 | |||
528 | prod_rx_buf->skb = cons_rx_buf->skb; | ||
529 | dma_unmap_addr_set(prod_rx_buf, mapping, | ||
530 | dma_unmap_addr(cons_rx_buf, mapping)); | ||
531 | *prod_bd = *cons_bd; | ||
532 | } | ||
533 | |||
534 | static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) | ||
535 | { | ||
536 | int i, j; | ||
537 | |||
538 | for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { | ||
539 | int idx = RX_SGE_CNT * i - 1; | ||
540 | |||
541 | for (j = 0; j < 2; j++) { | ||
542 | SGE_MASK_CLEAR_BIT(fp, idx); | ||
543 | idx--; | ||
544 | } | ||
545 | } | ||
546 | } | ||
547 | |||
548 | static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) | ||
549 | { | ||
550 | /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ | ||
551 | memset(fp->sge_mask, 0xff, | ||
552 | (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64)); | ||
553 | |||
554 | /* Clear the two last indices in the page to 0: | ||
555 | these are the indices that correspond to the "next" element, | ||
556 | hence will never be indicated and should be removed from | ||
557 | the calculations. */ | ||
558 | bnx2x_clear_sge_mask_next_elems(fp); | ||
559 | } | ||
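/* Sizing note (assumption): sge_mask holds one bit per SGE, so
 * NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT is presumably the number of
 * 64-bit mask words (64 SGEs per u64), hence the sizeof(u64) scaling
 * in the memset above. */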
560 | static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, | ||
561 | struct bnx2x_fastpath *fp, int last) | ||
562 | { | ||
563 | int i; | ||
564 | |||
565 | for (i = 0; i < last; i++) { | ||
566 | struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]); | ||
567 | struct sk_buff *skb = rx_buf->skb; | ||
568 | |||
569 | if (skb == NULL) { | ||
570 | DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); | ||
571 | continue; | ||
572 | } | ||
573 | |||
574 | if (fp->tpa_state[i] == BNX2X_TPA_START) | ||
575 | dma_unmap_single(&bp->pdev->dev, | ||
576 | dma_unmap_addr(rx_buf, mapping), | ||
577 | bp->rx_buf_size, DMA_FROM_DEVICE); | ||
578 | |||
579 | dev_kfree_skb(skb); | ||
580 | rx_buf->skb = NULL; | ||
581 | } | ||
582 | } | ||
583 | |||
584 | |||
585 | static inline void bnx2x_init_tx_ring(struct bnx2x *bp) | ||
586 | { | ||
587 | int i, j; | ||
588 | |||
589 | for_each_queue(bp, j) { | ||
590 | struct bnx2x_fastpath *fp = &bp->fp[j]; | ||
591 | |||
592 | for (i = 1; i <= NUM_TX_RINGS; i++) { | ||
593 | struct eth_tx_next_bd *tx_next_bd = | ||
594 | &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; | ||
595 | |||
596 | tx_next_bd->addr_hi = | ||
597 | cpu_to_le32(U64_HI(fp->tx_desc_mapping + | ||
598 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); | ||
599 | tx_next_bd->addr_lo = | ||
600 | cpu_to_le32(U64_LO(fp->tx_desc_mapping + | ||
601 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); | ||
602 | } | ||
603 | |||
604 | fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE; | ||
605 | fp->tx_db.data.zero_fill1 = 0; | ||
606 | fp->tx_db.data.prod = 0; | ||
607 | |||
608 | fp->tx_pkt_prod = 0; | ||
609 | fp->tx_pkt_cons = 0; | ||
610 | fp->tx_bd_prod = 0; | ||
611 | fp->tx_bd_cons = 0; | ||
612 | fp->tx_cons_sb = BNX2X_TX_SB_INDEX; | ||
613 | fp->tx_pkt = 0; | ||
614 | } | ||
615 | } | ||
616 | static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) | ||
617 | { | ||
618 | u16 rx_cons_sb; | ||
619 | |||
620 | /* Tell compiler that status block fields can change */ | ||
621 | barrier(); | ||
622 | rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); | ||
623 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | ||
624 | rx_cons_sb++; | ||
625 | return (fp->rx_comp_cons != rx_cons_sb); | ||
626 | } | ||
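/* The MAX_RCQ_DESC_CNT adjustment above presumably mirrors the TX/SGE
 * rings: the last entry of every RCQ page is a "next-page" element that
 * never carries a completion, so a status-block consumer landing on it
 * is bumped past it before the comparison. */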
627 | |||
628 | /* HW Lock for shared dual port PHYs */ | ||
629 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); | ||
630 | void bnx2x_release_phy_lock(struct bnx2x *bp); | ||
631 | |||
632 | void bnx2x_link_report(struct bnx2x *bp); | ||
633 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); | ||
634 | int bnx2x_tx_int(struct bnx2x_fastpath *fp); | ||
635 | void bnx2x_init_rx_rings(struct bnx2x *bp); | ||
636 | netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
637 | |||
638 | int bnx2x_change_mac_addr(struct net_device *dev, void *p); | ||
639 | void bnx2x_tx_timeout(struct net_device *dev); | ||
640 | void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp); | ||
641 | void bnx2x_netif_start(struct bnx2x *bp); | ||
642 | void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); | ||
643 | void bnx2x_free_irq(struct bnx2x *bp, bool disable_only); | ||
644 | int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); | ||
645 | int bnx2x_resume(struct pci_dev *pdev); | ||
646 | void bnx2x_free_skbs(struct bnx2x *bp); | ||
647 | int bnx2x_change_mtu(struct net_device *dev, int new_mtu); | ||
648 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); | ||
649 | int bnx2x_nic_load(struct bnx2x *bp, int load_mode); | ||
650 | int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); | ||
651 | |||
652 | #endif /* BNX2X_CMN_H */ | ||
diff --git a/drivers/net/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h index 3bb9a91bb3f7..3bb9a91bb3f7 100644 --- a/drivers/net/bnx2x_dump.h +++ b/drivers/net/bnx2x/bnx2x_dump.h | |||
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c new file mode 100644 index 000000000000..8b75b05e34c5 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_ethtool.c | |||
@@ -0,0 +1,1971 @@ | |||
1 | /* bnx2x_ethtool.c: Broadcom Everest network driver. | ||
2 | * | ||
3 | * Copyright (c) 2007-2010 Broadcom Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> | ||
10 | * Written by: Eliezer Tamir | ||
11 | * Based on code from Michael Chan's bnx2 driver | ||
12 | * UDP CSUM errata workaround by Arik Gendelman | ||
13 | * Slowpath and fastpath rework by Vladislav Zolotarov | ||
14 | * Statistics and Link management by Yitchak Gertner | ||
15 | * | ||
16 | */ | ||
17 | #include <linux/ethtool.h> | ||
18 | #include <linux/netdevice.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/crc32.h> | ||
22 | |||
23 | |||
24 | #include "bnx2x.h" | ||
25 | #include "bnx2x_cmn.h" | ||
26 | #include "bnx2x_dump.h" | ||
27 | |||
28 | |||
29 | static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
30 | { | ||
31 | struct bnx2x *bp = netdev_priv(dev); | ||
32 | |||
33 | cmd->supported = bp->port.supported; | ||
34 | cmd->advertising = bp->port.advertising; | ||
35 | |||
36 | if ((bp->state == BNX2X_STATE_OPEN) && | ||
37 | !(bp->flags & MF_FUNC_DIS) && | ||
38 | (bp->link_vars.link_up)) { | ||
39 | cmd->speed = bp->link_vars.line_speed; | ||
40 | cmd->duplex = bp->link_vars.duplex; | ||
41 | if (IS_E1HMF(bp)) { | ||
42 | u16 vn_max_rate; | ||
43 | |||
44 | vn_max_rate = | ||
45 | ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
46 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | ||
47 | if (vn_max_rate < cmd->speed) | ||
48 | cmd->speed = vn_max_rate; | ||
49 | } | ||
50 | } else { | ||
51 | cmd->speed = -1; | ||
52 | cmd->duplex = -1; | ||
53 | } | ||
54 | |||
55 | if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { | ||
56 | u32 ext_phy_type = | ||
57 | XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); | ||
58 | |||
59 | switch (ext_phy_type) { | ||
60 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: | ||
61 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: | ||
62 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: | ||
63 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: | ||
64 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: | ||
65 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: | ||
66 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: | ||
67 | cmd->port = PORT_FIBRE; | ||
68 | break; | ||
69 | |||
70 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | ||
71 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: | ||
72 | cmd->port = PORT_TP; | ||
73 | break; | ||
74 | |||
75 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: | ||
76 | BNX2X_ERR("XGXS PHY Failure detected 0x%x\n", | ||
77 | bp->link_params.ext_phy_config); | ||
78 | break; | ||
79 | |||
80 | default: | ||
81 | DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", | ||
82 | bp->link_params.ext_phy_config); | ||
83 | break; | ||
84 | } | ||
85 | } else | ||
86 | cmd->port = PORT_TP; | ||
87 | |||
88 | cmd->phy_address = bp->mdio.prtad; | ||
89 | cmd->transceiver = XCVR_INTERNAL; | ||
90 | |||
91 | if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) | ||
92 | cmd->autoneg = AUTONEG_ENABLE; | ||
93 | else | ||
94 | cmd->autoneg = AUTONEG_DISABLE; | ||
95 | |||
96 | cmd->maxtxpkt = 0; | ||
97 | cmd->maxrxpkt = 0; | ||
98 | |||
99 | DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" | ||
100 | DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" | ||
101 | DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" | ||
102 | DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", | ||
103 | cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, | ||
104 | cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, | ||
105 | cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
111 | { | ||
112 | struct bnx2x *bp = netdev_priv(dev); | ||
113 | u32 advertising; | ||
114 | |||
115 | if (IS_E1HMF(bp)) | ||
116 | return 0; | ||
117 | |||
118 | DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" | ||
119 | DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" | ||
120 | DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" | ||
121 | DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", | ||
122 | cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, | ||
123 | cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, | ||
124 | cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); | ||
125 | |||
126 | if (cmd->autoneg == AUTONEG_ENABLE) { | ||
127 | if (!(bp->port.supported & SUPPORTED_Autoneg)) { | ||
128 | DP(NETIF_MSG_LINK, "Autoneg not supported\n"); | ||
129 | return -EINVAL; | ||
130 | } | ||
131 | |||
132 | /* advertise the requested speed and duplex if supported */ | ||
133 | cmd->advertising &= bp->port.supported; | ||
134 | |||
135 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; | ||
136 | bp->link_params.req_duplex = DUPLEX_FULL; | ||
137 | bp->port.advertising |= (ADVERTISED_Autoneg | | ||
138 | cmd->advertising); | ||
139 | |||
140 | } else { /* forced speed */ | ||
141 | /* advertise the requested speed and duplex if supported */ | ||
142 | switch (cmd->speed) { | ||
143 | case SPEED_10: | ||
144 | if (cmd->duplex == DUPLEX_FULL) { | ||
145 | if (!(bp->port.supported & | ||
146 | SUPPORTED_10baseT_Full)) { | ||
147 | DP(NETIF_MSG_LINK, | ||
148 | "10M full not supported\n"); | ||
149 | return -EINVAL; | ||
150 | } | ||
151 | |||
152 | advertising = (ADVERTISED_10baseT_Full | | ||
153 | ADVERTISED_TP); | ||
154 | } else { | ||
155 | if (!(bp->port.supported & | ||
156 | SUPPORTED_10baseT_Half)) { | ||
157 | DP(NETIF_MSG_LINK, | ||
158 | "10M half not supported\n"); | ||
159 | return -EINVAL; | ||
160 | } | ||
161 | |||
162 | advertising = (ADVERTISED_10baseT_Half | | ||
163 | ADVERTISED_TP); | ||
164 | } | ||
165 | break; | ||
166 | |||
167 | case SPEED_100: | ||
168 | if (cmd->duplex == DUPLEX_FULL) { | ||
169 | if (!(bp->port.supported & | ||
170 | SUPPORTED_100baseT_Full)) { | ||
171 | DP(NETIF_MSG_LINK, | ||
172 | "100M full not supported\n"); | ||
173 | return -EINVAL; | ||
174 | } | ||
175 | |||
176 | advertising = (ADVERTISED_100baseT_Full | | ||
177 | ADVERTISED_TP); | ||
178 | } else { | ||
179 | if (!(bp->port.supported & | ||
180 | SUPPORTED_100baseT_Half)) { | ||
181 | DP(NETIF_MSG_LINK, | ||
182 | "100M half not supported\n"); | ||
183 | return -EINVAL; | ||
184 | } | ||
185 | |||
186 | advertising = (ADVERTISED_100baseT_Half | | ||
187 | ADVERTISED_TP); | ||
188 | } | ||
189 | break; | ||
190 | |||
191 | case SPEED_1000: | ||
192 | if (cmd->duplex != DUPLEX_FULL) { | ||
193 | DP(NETIF_MSG_LINK, "1G half not supported\n"); | ||
194 | return -EINVAL; | ||
195 | } | ||
196 | |||
197 | if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) { | ||
198 | DP(NETIF_MSG_LINK, "1G full not supported\n"); | ||
199 | return -EINVAL; | ||
200 | } | ||
201 | |||
202 | advertising = (ADVERTISED_1000baseT_Full | | ||
203 | ADVERTISED_TP); | ||
204 | break; | ||
205 | |||
206 | case SPEED_2500: | ||
207 | if (cmd->duplex != DUPLEX_FULL) { | ||
208 | DP(NETIF_MSG_LINK, | ||
209 | "2.5G half not supported\n"); | ||
210 | return -EINVAL; | ||
211 | } | ||
212 | |||
213 | if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) { | ||
214 | DP(NETIF_MSG_LINK, | ||
215 | "2.5G full not supported\n"); | ||
216 | return -EINVAL; | ||
217 | } | ||
218 | |||
219 | advertising = (ADVERTISED_2500baseX_Full | | ||
220 | ADVERTISED_TP); | ||
221 | break; | ||
222 | |||
223 | case SPEED_10000: | ||
224 | if (cmd->duplex != DUPLEX_FULL) { | ||
225 | DP(NETIF_MSG_LINK, "10G half not supported\n"); | ||
226 | return -EINVAL; | ||
227 | } | ||
228 | |||
229 | if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) { | ||
230 | DP(NETIF_MSG_LINK, "10G full not supported\n"); | ||
231 | return -EINVAL; | ||
232 | } | ||
233 | |||
234 | advertising = (ADVERTISED_10000baseT_Full | | ||
235 | ADVERTISED_FIBRE); | ||
236 | break; | ||
237 | |||
238 | default: | ||
239 | DP(NETIF_MSG_LINK, "Unsupported speed\n"); | ||
240 | return -EINVAL; | ||
241 | } | ||
242 | |||
243 | bp->link_params.req_line_speed = cmd->speed; | ||
244 | bp->link_params.req_duplex = cmd->duplex; | ||
245 | bp->port.advertising = advertising; | ||
246 | } | ||
247 | |||
248 | DP(NETIF_MSG_LINK, "req_line_speed %d\n" | ||
249 | DP_LEVEL " req_duplex %d advertising 0x%x\n", | ||
250 | bp->link_params.req_line_speed, bp->link_params.req_duplex, | ||
251 | bp->port.advertising); | ||
252 | |||
253 | if (netif_running(dev)) { | ||
254 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
255 | bnx2x_link_set(bp); | ||
256 | } | ||
257 | |||
258 | return 0; | ||
259 | } | ||
260 | |||
261 | #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE) | ||
262 | #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE) | ||
263 | |||
264 | static int bnx2x_get_regs_len(struct net_device *dev) | ||
265 | { | ||
266 | struct bnx2x *bp = netdev_priv(dev); | ||
267 | int regdump_len = 0; | ||
268 | int i; | ||
269 | |||
270 | if (CHIP_IS_E1(bp)) { | ||
271 | for (i = 0; i < REGS_COUNT; i++) | ||
272 | if (IS_E1_ONLINE(reg_addrs[i].info)) | ||
273 | regdump_len += reg_addrs[i].size; | ||
274 | |||
275 | for (i = 0; i < WREGS_COUNT_E1; i++) | ||
276 | if (IS_E1_ONLINE(wreg_addrs_e1[i].info)) | ||
277 | regdump_len += wreg_addrs_e1[i].size * | ||
278 | (1 + wreg_addrs_e1[i].read_regs_count); | ||
279 | |||
280 | } else { /* E1H */ | ||
281 | for (i = 0; i < REGS_COUNT; i++) | ||
282 | if (IS_E1H_ONLINE(reg_addrs[i].info)) | ||
283 | regdump_len += reg_addrs[i].size; | ||
284 | |||
285 | for (i = 0; i < WREGS_COUNT_E1H; i++) | ||
286 | if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info)) | ||
287 | regdump_len += wreg_addrs_e1h[i].size * | ||
288 | (1 + wreg_addrs_e1h[i].read_regs_count); | ||
289 | } | ||
290 | regdump_len *= 4; | ||
291 | regdump_len += sizeof(struct dump_hdr); | ||
292 | |||
293 | return regdump_len; | ||
294 | } | ||
295 | |||
296 | static void bnx2x_get_regs(struct net_device *dev, | ||
297 | struct ethtool_regs *regs, void *_p) | ||
298 | { | ||
299 | u32 *p = _p, i, j; | ||
300 | struct bnx2x *bp = netdev_priv(dev); | ||
301 | struct dump_hdr dump_hdr = {0}; | ||
302 | |||
303 | regs->version = 0; | ||
304 | memset(p, 0, regs->len); | ||
305 | |||
306 | if (!netif_running(bp->dev)) | ||
307 | return; | ||
308 | |||
309 | dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; | ||
310 | dump_hdr.dump_sign = dump_sign_all; | ||
311 | dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); | ||
312 | dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR); | ||
313 | dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR); | ||
314 | dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR); | ||
315 | dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE; | ||
316 | |||
317 | memcpy(p, &dump_hdr, sizeof(struct dump_hdr)); | ||
318 | p += dump_hdr.hdr_size + 1; | ||
319 | |||
320 | if (CHIP_IS_E1(bp)) { | ||
321 | for (i = 0; i < REGS_COUNT; i++) | ||
322 | if (IS_E1_ONLINE(reg_addrs[i].info)) | ||
323 | for (j = 0; j < reg_addrs[i].size; j++) | ||
324 | *p++ = REG_RD(bp, | ||
325 | reg_addrs[i].addr + j*4); | ||
326 | |||
327 | } else { /* E1H */ | ||
328 | for (i = 0; i < REGS_COUNT; i++) | ||
329 | if (IS_E1H_ONLINE(reg_addrs[i].info)) | ||
330 | for (j = 0; j < reg_addrs[i].size; j++) | ||
331 | *p++ = REG_RD(bp, | ||
332 | reg_addrs[i].addr + j*4); | ||
333 | } | ||
334 | } | ||
335 | |||
336 | #define PHY_FW_VER_LEN 10 | ||
337 | |||
338 | static void bnx2x_get_drvinfo(struct net_device *dev, | ||
339 | struct ethtool_drvinfo *info) | ||
340 | { | ||
341 | struct bnx2x *bp = netdev_priv(dev); | ||
342 | u8 phy_fw_ver[PHY_FW_VER_LEN]; | ||
343 | |||
344 | strcpy(info->driver, DRV_MODULE_NAME); | ||
345 | strcpy(info->version, DRV_MODULE_VERSION); | ||
346 | |||
347 | phy_fw_ver[0] = '\0'; | ||
348 | if (bp->port.pmf) { | ||
349 | bnx2x_acquire_phy_lock(bp); | ||
350 | bnx2x_get_ext_phy_fw_version(&bp->link_params, | ||
351 | (bp->state != BNX2X_STATE_CLOSED), | ||
352 | phy_fw_ver, PHY_FW_VER_LEN); | ||
353 | bnx2x_release_phy_lock(bp); | ||
354 | } | ||
355 | |||
356 | strncpy(info->fw_version, bp->fw_ver, 32); | ||
357 | snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), | ||
358 | "bc %d.%d.%d%s%s", | ||
359 | (bp->common.bc_ver & 0xff0000) >> 16, | ||
360 | (bp->common.bc_ver & 0xff00) >> 8, | ||
361 | (bp->common.bc_ver & 0xff), | ||
362 | ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); | ||
363 | strcpy(info->bus_info, pci_name(bp->pdev)); | ||
364 | info->n_stats = BNX2X_NUM_STATS; | ||
365 | info->testinfo_len = BNX2X_NUM_TESTS; | ||
366 | info->eedump_len = bp->common.flash_size; | ||
367 | info->regdump_len = bnx2x_get_regs_len(dev); | ||
368 | } | ||
369 | |||
370 | static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
371 | { | ||
372 | struct bnx2x *bp = netdev_priv(dev); | ||
373 | |||
374 | if (bp->flags & NO_WOL_FLAG) { | ||
375 | wol->supported = 0; | ||
376 | wol->wolopts = 0; | ||
377 | } else { | ||
378 | wol->supported = WAKE_MAGIC; | ||
379 | if (bp->wol) | ||
380 | wol->wolopts = WAKE_MAGIC; | ||
381 | else | ||
382 | wol->wolopts = 0; | ||
383 | } | ||
384 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | ||
385 | } | ||
386 | |||
387 | static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
388 | { | ||
389 | struct bnx2x *bp = netdev_priv(dev); | ||
390 | |||
391 | if (wol->wolopts & ~WAKE_MAGIC) | ||
392 | return -EINVAL; | ||
393 | |||
394 | if (wol->wolopts & WAKE_MAGIC) { | ||
395 | if (bp->flags & NO_WOL_FLAG) | ||
396 | return -EINVAL; | ||
397 | |||
398 | bp->wol = 1; | ||
399 | } else | ||
400 | bp->wol = 0; | ||
401 | |||
402 | return 0; | ||
403 | } | ||
404 | |||
405 | static u32 bnx2x_get_msglevel(struct net_device *dev) | ||
406 | { | ||
407 | struct bnx2x *bp = netdev_priv(dev); | ||
408 | |||
409 | return bp->msg_enable; | ||
410 | } | ||
411 | |||
412 | static void bnx2x_set_msglevel(struct net_device *dev, u32 level) | ||
413 | { | ||
414 | struct bnx2x *bp = netdev_priv(dev); | ||
415 | |||
416 | if (capable(CAP_NET_ADMIN)) | ||
417 | bp->msg_enable = level; | ||
418 | } | ||
419 | |||
420 | static int bnx2x_nway_reset(struct net_device *dev) | ||
421 | { | ||
422 | struct bnx2x *bp = netdev_priv(dev); | ||
423 | |||
424 | if (!bp->port.pmf) | ||
425 | return 0; | ||
426 | |||
427 | if (netif_running(dev)) { | ||
428 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
429 | bnx2x_link_set(bp); | ||
430 | } | ||
431 | |||
432 | return 0; | ||
433 | } | ||
434 | |||
435 | static u32 bnx2x_get_link(struct net_device *dev) | ||
436 | { | ||
437 | struct bnx2x *bp = netdev_priv(dev); | ||
438 | |||
439 | if (bp->flags & MF_FUNC_DIS) | ||
440 | return 0; | ||
441 | |||
442 | return bp->link_vars.link_up; | ||
443 | } | ||
444 | |||
445 | static int bnx2x_get_eeprom_len(struct net_device *dev) | ||
446 | { | ||
447 | struct bnx2x *bp = netdev_priv(dev); | ||
448 | |||
449 | return bp->common.flash_size; | ||
450 | } | ||
451 | |||
452 | static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) | ||
453 | { | ||
454 | int port = BP_PORT(bp); | ||
455 | int count, i; | ||
456 | u32 val = 0; | ||
457 | |||
458 | /* adjust timeout for emulation/FPGA */ | ||
459 | count = NVRAM_TIMEOUT_COUNT; | ||
460 | if (CHIP_REV_IS_SLOW(bp)) | ||
461 | count *= 100; | ||
462 | |||
463 | /* request access to nvram interface */ | ||
464 | REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, | ||
465 | (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); | ||
466 | |||
467 | for (i = 0; i < count*10; i++) { | ||
468 | val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); | ||
469 | if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) | ||
470 | break; | ||
471 | |||
472 | udelay(5); | ||
473 | } | ||
474 | |||
475 | if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { | ||
476 | DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n"); | ||
477 | return -EBUSY; | ||
478 | } | ||
479 | |||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | static int bnx2x_release_nvram_lock(struct bnx2x *bp) | ||
484 | { | ||
485 | int port = BP_PORT(bp); | ||
486 | int count, i; | ||
487 | u32 val = 0; | ||
488 | |||
489 | /* adjust timeout for emulation/FPGA */ | ||
490 | count = NVRAM_TIMEOUT_COUNT; | ||
491 | if (CHIP_REV_IS_SLOW(bp)) | ||
492 | count *= 100; | ||
493 | |||
494 | /* relinquish nvram interface */ | ||
495 | REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, | ||
496 | (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); | ||
497 | |||
498 | for (i = 0; i < count*10; i++) { | ||
499 | val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); | ||
500 | if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) | ||
501 | break; | ||
502 | |||
503 | udelay(5); | ||
504 | } | ||
505 | |||
506 | if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { | ||
507 | DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n"); | ||
508 | return -EBUSY; | ||
509 | } | ||
510 | |||
511 | return 0; | ||
512 | } | ||
513 | |||
514 | static void bnx2x_enable_nvram_access(struct bnx2x *bp) | ||
515 | { | ||
516 | u32 val; | ||
517 | |||
518 | val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); | ||
519 | |||
520 | /* enable both bits, even on read */ | ||
521 | REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, | ||
522 | (val | MCPR_NVM_ACCESS_ENABLE_EN | | ||
523 | MCPR_NVM_ACCESS_ENABLE_WR_EN)); | ||
524 | } | ||
525 | |||
526 | static void bnx2x_disable_nvram_access(struct bnx2x *bp) | ||
527 | { | ||
528 | u32 val; | ||
529 | |||
530 | val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); | ||
531 | |||
532 | /* disable both bits, even after read */ | ||
533 | REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, | ||
534 | (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | | ||
535 | MCPR_NVM_ACCESS_ENABLE_WR_EN))); | ||
536 | } | ||
537 | |||
538 | static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, | ||
539 | u32 cmd_flags) | ||
540 | { | ||
541 | int count, i, rc; | ||
542 | u32 val; | ||
543 | |||
544 | /* build the command word */ | ||
545 | cmd_flags |= MCPR_NVM_COMMAND_DOIT; | ||
546 | |||
547 | /* need to clear DONE bit separately */ | ||
548 | REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); | ||
549 | |||
550 | /* address of the NVRAM to read from */ | ||
551 | REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, | ||
552 | (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); | ||
553 | |||
554 | /* issue a read command */ | ||
555 | REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); | ||
556 | |||
557 | /* adjust timeout for emulation/FPGA */ | ||
558 | count = NVRAM_TIMEOUT_COUNT; | ||
559 | if (CHIP_REV_IS_SLOW(bp)) | ||
560 | count *= 100; | ||
561 | |||
562 | /* wait for completion */ | ||
563 | *ret_val = 0; | ||
564 | rc = -EBUSY; | ||
565 | for (i = 0; i < count; i++) { | ||
566 | udelay(5); | ||
567 | val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); | ||
568 | |||
569 | if (val & MCPR_NVM_COMMAND_DONE) { | ||
570 | val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); | ||
571 | /* we read nvram data in cpu order | ||
572 | * but ethtool sees it as an array of bytes; | ||
573 | * converting to big-endian will do the work */ | ||
574 | *ret_val = cpu_to_be32(val); | ||
575 | rc = 0; | ||
576 | break; | ||
577 | } | ||
578 | } | ||
579 | |||
580 | return rc; | ||
581 | } | ||
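/* Example of the cpu_to_be32() above: on a little-endian CPU a register
 * value of 0x11223344 is stored to *ret_val as the byte sequence
 * 11 22 33 44 - exactly the byte order in which ethtool presents the
 * flash contents to the user. */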
582 | |||
583 | static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, | ||
584 | int buf_size) | ||
585 | { | ||
586 | int rc; | ||
587 | u32 cmd_flags; | ||
588 | __be32 val; | ||
589 | |||
590 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { | ||
591 | DP(BNX2X_MSG_NVM, | ||
592 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", | ||
593 | offset, buf_size); | ||
594 | return -EINVAL; | ||
595 | } | ||
596 | |||
597 | if (offset + buf_size > bp->common.flash_size) { | ||
598 | DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" | ||
599 | " buf_size (0x%x) > flash_size (0x%x)\n", | ||
600 | offset, buf_size, bp->common.flash_size); | ||
601 | return -EINVAL; | ||
602 | } | ||
603 | |||
604 | /* request access to nvram interface */ | ||
605 | rc = bnx2x_acquire_nvram_lock(bp); | ||
606 | if (rc) | ||
607 | return rc; | ||
608 | |||
609 | /* enable access to nvram interface */ | ||
610 | bnx2x_enable_nvram_access(bp); | ||
611 | |||
612 | /* read the first word(s) */ | ||
613 | cmd_flags = MCPR_NVM_COMMAND_FIRST; | ||
614 | while ((buf_size > sizeof(u32)) && (rc == 0)) { | ||
615 | rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); | ||
616 | memcpy(ret_buf, &val, 4); | ||
617 | |||
618 | /* advance to the next dword */ | ||
619 | offset += sizeof(u32); | ||
620 | ret_buf += sizeof(u32); | ||
621 | buf_size -= sizeof(u32); | ||
622 | cmd_flags = 0; | ||
623 | } | ||
624 | |||
625 | if (rc == 0) { | ||
626 | cmd_flags |= MCPR_NVM_COMMAND_LAST; | ||
627 | rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); | ||
628 | memcpy(ret_buf, &val, 4); | ||
629 | } | ||
630 | |||
631 | /* disable access to nvram interface */ | ||
632 | bnx2x_disable_nvram_access(bp); | ||
633 | bnx2x_release_nvram_lock(bp); | ||
634 | |||
635 | return rc; | ||
636 | } | ||
637 | |||
638 | static int bnx2x_get_eeprom(struct net_device *dev, | ||
639 | struct ethtool_eeprom *eeprom, u8 *eebuf) | ||
640 | { | ||
641 | struct bnx2x *bp = netdev_priv(dev); | ||
642 | int rc; | ||
643 | |||
644 | if (!netif_running(dev)) | ||
645 | return -EAGAIN; | ||
646 | |||
647 | DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" | ||
648 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", | ||
649 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, | ||
650 | eeprom->len, eeprom->len); | ||
651 | |||
652 | /* parameters already validated in ethtool_get_eeprom */ | ||
653 | |||
654 | rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); | ||
655 | |||
656 | return rc; | ||
657 | } | ||
658 | |||
659 | static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, | ||
660 | u32 cmd_flags) | ||
661 | { | ||
662 | int count, i, rc; | ||
663 | |||
664 | /* build the command word */ | ||
665 | cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR; | ||
666 | |||
667 | /* need to clear DONE bit separately */ | ||
668 | REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); | ||
669 | |||
670 | /* write the data */ | ||
671 | REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val); | ||
672 | |||
673 | /* address of the NVRAM to write to */ | ||
674 | REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, | ||
675 | (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); | ||
676 | |||
677 | /* issue the write command */ | ||
678 | REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); | ||
679 | |||
680 | /* adjust timeout for emulation/FPGA */ | ||
681 | count = NVRAM_TIMEOUT_COUNT; | ||
682 | if (CHIP_REV_IS_SLOW(bp)) | ||
683 | count *= 100; | ||
684 | |||
685 | /* wait for completion */ | ||
686 | rc = -EBUSY; | ||
687 | for (i = 0; i < count; i++) { | ||
688 | udelay(5); | ||
689 | val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); | ||
690 | if (val & MCPR_NVM_COMMAND_DONE) { | ||
691 | rc = 0; | ||
692 | break; | ||
693 | } | ||
694 | } | ||
695 | |||
696 | return rc; | ||
697 | } | ||
698 | |||
699 | #define BYTE_OFFSET(offset) (8 * (offset & 0x03)) | ||
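/* BYTE_OFFSET() yields the bit position of a byte lane within an aligned
 * dword: e.g. BYTE_OFFSET(5) = 8 * (5 & 0x03) = 8, so the single-byte
 * write below patches bits 15:8 of the dword at aligned offset 4. */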
700 | |||
701 | static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, | ||
702 | int buf_size) | ||
703 | { | ||
704 | int rc; | ||
705 | u32 cmd_flags; | ||
706 | u32 align_offset; | ||
707 | __be32 val; | ||
708 | |||
709 | if (offset + buf_size > bp->common.flash_size) { | ||
710 | DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" | ||
711 | " buf_size (0x%x) > flash_size (0x%x)\n", | ||
712 | offset, buf_size, bp->common.flash_size); | ||
713 | return -EINVAL; | ||
714 | } | ||
715 | |||
716 | /* request access to nvram interface */ | ||
717 | rc = bnx2x_acquire_nvram_lock(bp); | ||
718 | if (rc) | ||
719 | return rc; | ||
720 | |||
721 | /* enable access to nvram interface */ | ||
722 | bnx2x_enable_nvram_access(bp); | ||
723 | |||
724 | cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); | ||
725 | align_offset = (offset & ~0x03); | ||
726 | rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags); | ||
727 | |||
728 | if (rc == 0) { | ||
729 | val &= ~(0xff << BYTE_OFFSET(offset)); | ||
730 | val |= (*data_buf << BYTE_OFFSET(offset)); | ||
731 | |||
732 | /* nvram data is returned as an array of bytes; | ||
733 | * convert it back to cpu order */ | ||
734 | val = be32_to_cpu(val); | ||
735 | |||
736 | rc = bnx2x_nvram_write_dword(bp, align_offset, val, | ||
737 | cmd_flags); | ||
738 | } | ||
739 | |||
740 | /* disable access to nvram interface */ | ||
741 | bnx2x_disable_nvram_access(bp); | ||
742 | bnx2x_release_nvram_lock(bp); | ||
743 | |||
744 | return rc; | ||
745 | } | ||
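[Editor's note] bnx2x_nvram_write1() above is a classic read-modify-write: the flash interface works in dwords, so a single-byte write reads the aligned dword, splices the byte in at BYTE_OFFSET(offset), i.e. 8 * (offset & 3), and writes the dword back. A sketch of just the splice step, mirroring the val &= ~... / val |= ... pair above:

        /* Patch one byte into the aligned dword that contains it. */
        static u32 patch_byte(u32 dword, u32 offset, u8 byte)
        {
                dword &= ~(0xffU << (8 * (offset & 3)));        /* clear old byte */
                dword |= (u32)byte << (8 * (offset & 3));       /* insert new one */
                return dword;
        }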
746 | |||
747 | static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, | ||
748 | int buf_size) | ||
749 | { | ||
750 | int rc; | ||
751 | u32 cmd_flags; | ||
752 | u32 val; | ||
753 | u32 written_so_far; | ||
754 | |||
755 | if (buf_size == 1) /* ethtool */ | ||
756 | return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); | ||
757 | |||
758 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { | ||
759 | DP(BNX2X_MSG_NVM, | ||
760 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", | ||
761 | offset, buf_size); | ||
762 | return -EINVAL; | ||
763 | } | ||
764 | |||
765 | if (offset + buf_size > bp->common.flash_size) { | ||
766 | DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" | ||
767 | " buf_size (0x%x) > flash_size (0x%x)\n", | ||
768 | offset, buf_size, bp->common.flash_size); | ||
769 | return -EINVAL; | ||
770 | } | ||
771 | |||
772 | /* request access to nvram interface */ | ||
773 | rc = bnx2x_acquire_nvram_lock(bp); | ||
774 | if (rc) | ||
775 | return rc; | ||
776 | |||
777 | /* enable access to nvram interface */ | ||
778 | bnx2x_enable_nvram_access(bp); | ||
779 | |||
780 | written_so_far = 0; | ||
781 | cmd_flags = MCPR_NVM_COMMAND_FIRST; | ||
782 | while ((written_so_far < buf_size) && (rc == 0)) { | ||
783 | if (written_so_far == (buf_size - sizeof(u32))) | ||
784 | cmd_flags |= MCPR_NVM_COMMAND_LAST; | ||
785 | else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) | ||
786 | cmd_flags |= MCPR_NVM_COMMAND_LAST; | ||
787 | else if ((offset % NVRAM_PAGE_SIZE) == 0) | ||
788 | cmd_flags |= MCPR_NVM_COMMAND_FIRST; | ||
789 | |||
790 | memcpy(&val, data_buf, 4); | ||
791 | |||
792 | rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); | ||
793 | |||
794 | /* advance to the next dword */ | ||
795 | offset += sizeof(u32); | ||
796 | data_buf += sizeof(u32); | ||
797 | written_so_far += sizeof(u32); | ||
798 | cmd_flags = 0; | ||
799 | } | ||
800 | |||
801 | /* disable access to nvram interface */ | ||
802 | bnx2x_disable_nvram_access(bp); | ||
803 | bnx2x_release_nvram_lock(bp); | ||
804 | |||
805 | return rc; | ||
806 | } | ||
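[Editor's note] The write loop above steers the flash burst with FIRST/LAST flags: LAST closes a burst on the final dword of the buffer or of an NVRAM page, and FIRST opens a new burst on a dword that starts a page. A sketch of that flag selection, with CMD_FIRST/CMD_LAST as hypothetical stand-ins for the MCPR_NVM_COMMAND_FIRST/_LAST bits:

        /* CMD_FIRST/CMD_LAST are hypothetical stand-ins. */
        static u32 burst_flags(u32 offset, u32 written, u32 total, u32 page)
        {
                u32 flags = 0;

                if (written == total - 4)               /* last dword of buffer */
                        flags |= CMD_LAST;
                else if (((offset + 4) % page) == 0)    /* dword closes the page */
                        flags |= CMD_LAST;
                else if ((offset % page) == 0)          /* dword opens a page */
                        flags |= CMD_FIRST;
                return flags;
        }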
807 | |||
808 | static int bnx2x_set_eeprom(struct net_device *dev, | ||
809 | struct ethtool_eeprom *eeprom, u8 *eebuf) | ||
810 | { | ||
811 | struct bnx2x *bp = netdev_priv(dev); | ||
812 | int port = BP_PORT(bp); | ||
813 | int rc = 0; | ||
814 | |||
815 | if (!netif_running(dev)) | ||
816 | return -EAGAIN; | ||
817 | |||
818 | DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" | ||
819 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", | ||
820 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, | ||
821 | eeprom->len, eeprom->len); | ||
822 | |||
823 | /* parameters already validated in ethtool_set_eeprom */ | ||
824 | |||
825 | /* PHY eeprom can be accessed only by the PMF */ | ||
826 | if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) && | ||
827 | !bp->port.pmf) | ||
828 | return -EINVAL; | ||
829 | |||
830 | if (eeprom->magic == 0x50485950) { | ||
831 | /* 'PHYP' (0x50485950): prepare phy for FW upgrade */ | ||
832 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
833 | |||
834 | bnx2x_acquire_phy_lock(bp); | ||
835 | rc |= bnx2x_link_reset(&bp->link_params, | ||
836 | &bp->link_vars, 0); | ||
837 | if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == | ||
838 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) | ||
839 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, | ||
840 | MISC_REGISTERS_GPIO_HIGH, port); | ||
841 | bnx2x_release_phy_lock(bp); | ||
842 | bnx2x_link_report(bp); | ||
843 | |||
844 | } else if (eeprom->magic == 0x50485952) { | ||
845 | /* 'PHYR' (0x50485952): re-init link after FW upgrade */ | ||
846 | if (bp->state == BNX2X_STATE_OPEN) { | ||
847 | bnx2x_acquire_phy_lock(bp); | ||
848 | rc |= bnx2x_link_reset(&bp->link_params, | ||
849 | &bp->link_vars, 1); | ||
850 | |||
851 | rc |= bnx2x_phy_init(&bp->link_params, | ||
852 | &bp->link_vars); | ||
853 | bnx2x_release_phy_lock(bp); | ||
854 | bnx2x_calc_fc_adv(bp); | ||
855 | } | ||
856 | } else if (eeprom->magic == 0x53985943) { | ||
857 | /* 'PHYC' (0x53985943): PHY FW upgrade completed */ | ||
858 | if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == | ||
859 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) { | ||
860 | u8 ext_phy_addr = | ||
861 | XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config); | ||
862 | |||
863 | /* DSP Remove Download Mode */ | ||
864 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, | ||
865 | MISC_REGISTERS_GPIO_LOW, port); | ||
866 | |||
867 | bnx2x_acquire_phy_lock(bp); | ||
868 | |||
869 | bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); | ||
870 | |||
871 | /* wait 0.5 sec to allow it to run */ | ||
872 | msleep(500); | ||
873 | bnx2x_ext_phy_hw_reset(bp, port); | ||
874 | msleep(500); | ||
875 | bnx2x_release_phy_lock(bp); | ||
876 | } | ||
877 | } else | ||
878 | rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); | ||
879 | |||
880 | return rc; | ||
881 | } | ||
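[Editor's note] The eeprom->magic handshake values in bnx2x_set_eeprom() are ASCII tags packed big-endian into a u32 ('P' = 0x50, 'H' = 0x48, 'Y' = 0x59), so "PHYP" is 0x50485950 and "PHYR" is 0x50485952; note that the 'PHYC' branch compares against 0x53985943, which does not actually decode to "PHYC" (that would be 0x50485943). A sketch of the packing:

        /* Pack four ASCII characters into a magic value, MSB first. */
        #define PHY_MAGIC(a, b, c, d) \
                (((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | (u32)(d))
        /* PHY_MAGIC('P', 'H', 'Y', 'P') == 0x50485950 */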
882 | static int bnx2x_get_coalesce(struct net_device *dev, | ||
883 | struct ethtool_coalesce *coal) | ||
884 | { | ||
885 | struct bnx2x *bp = netdev_priv(dev); | ||
886 | |||
887 | memset(coal, 0, sizeof(struct ethtool_coalesce)); | ||
888 | |||
889 | coal->rx_coalesce_usecs = bp->rx_ticks; | ||
890 | coal->tx_coalesce_usecs = bp->tx_ticks; | ||
891 | |||
892 | return 0; | ||
893 | } | ||
894 | |||
895 | static int bnx2x_set_coalesce(struct net_device *dev, | ||
896 | struct ethtool_coalesce *coal) | ||
897 | { | ||
898 | struct bnx2x *bp = netdev_priv(dev); | ||
899 | |||
900 | bp->rx_ticks = (u16)coal->rx_coalesce_usecs; | ||
901 | if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT) | ||
902 | bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT; | ||
903 | |||
904 | bp->tx_ticks = (u16)coal->tx_coalesce_usecs; | ||
905 | if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT) | ||
906 | bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT; | ||
907 | |||
908 | if (netif_running(dev)) | ||
909 | bnx2x_update_coalesce(bp); | ||
910 | |||
911 | return 0; | ||
912 | } | ||
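[Editor's note] bnx2x_set_coalesce() (what services "ethtool -C ethX rx-usecs N tx-usecs N") simply truncates the requested microsecond values to u16 and caps them at BNX2X_MAX_COALESCE_TOUT before reprogramming the chip. The operation applied to both tick values, as a sketch:

        /* Truncate-and-clamp, mirroring the two branches above. */
        static u16 clamp_ticks(u32 usecs, u16 max)
        {
                return (usecs > max) ? max : (u16)usecs;
        }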
913 | |||
914 | static void bnx2x_get_ringparam(struct net_device *dev, | ||
915 | struct ethtool_ringparam *ering) | ||
916 | { | ||
917 | struct bnx2x *bp = netdev_priv(dev); | ||
918 | |||
919 | ering->rx_max_pending = MAX_RX_AVAIL; | ||
920 | ering->rx_mini_max_pending = 0; | ||
921 | ering->rx_jumbo_max_pending = 0; | ||
922 | |||
923 | ering->rx_pending = bp->rx_ring_size; | ||
924 | ering->rx_mini_pending = 0; | ||
925 | ering->rx_jumbo_pending = 0; | ||
926 | |||
927 | ering->tx_max_pending = MAX_TX_AVAIL; | ||
928 | ering->tx_pending = bp->tx_ring_size; | ||
929 | } | ||
930 | |||
931 | static int bnx2x_set_ringparam(struct net_device *dev, | ||
932 | struct ethtool_ringparam *ering) | ||
933 | { | ||
934 | struct bnx2x *bp = netdev_priv(dev); | ||
935 | int rc = 0; | ||
936 | |||
937 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
938 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
939 | return -EAGAIN; | ||
940 | } | ||
941 | |||
942 | if ((ering->rx_pending > MAX_RX_AVAIL) || | ||
943 | (ering->tx_pending > MAX_TX_AVAIL) || | ||
944 | (ering->tx_pending <= MAX_SKB_FRAGS + 4)) | ||
945 | return -EINVAL; | ||
946 | |||
947 | bp->rx_ring_size = ering->rx_pending; | ||
948 | bp->tx_ring_size = ering->tx_pending; | ||
949 | |||
950 | if (netif_running(dev)) { | ||
951 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
952 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); | ||
953 | } | ||
954 | |||
955 | return rc; | ||
956 | } | ||
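[Editor's note] bnx2x_set_ringparam() ("ethtool -G ethX rx N tx N") rejects a TX ring too small to hold one worst-case skb: MAX_SKB_FRAGS fragments plus the start/parse and chaining BDs, hence the MAX_SKB_FRAGS + 4 floor; on a running device the new sizes only take effect through the full unload/reload. A sketch of the validation:

        /* Ring bounds check mirroring the tests above; returns 0 on bad input. */
        static int rings_valid(u32 rx, u32 tx, u32 rx_max, u32 tx_max, u32 max_frags)
        {
                if (rx > rx_max || tx > tx_max)
                        return 0;
                return tx > max_frags + 4;      /* room for one worst-case skb */
        }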
957 | |||
958 | static void bnx2x_get_pauseparam(struct net_device *dev, | ||
959 | struct ethtool_pauseparam *epause) | ||
960 | { | ||
961 | struct bnx2x *bp = netdev_priv(dev); | ||
962 | |||
963 | epause->autoneg = (bp->link_params.req_flow_ctrl == | ||
964 | BNX2X_FLOW_CTRL_AUTO) && | ||
965 | (bp->link_params.req_line_speed == SPEED_AUTO_NEG); | ||
966 | |||
967 | epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) == | ||
968 | BNX2X_FLOW_CTRL_RX); | ||
969 | epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) == | ||
970 | BNX2X_FLOW_CTRL_TX); | ||
971 | |||
972 | DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" | ||
973 | DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", | ||
974 | epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); | ||
975 | } | ||
976 | |||
977 | static int bnx2x_set_pauseparam(struct net_device *dev, | ||
978 | struct ethtool_pauseparam *epause) | ||
979 | { | ||
980 | struct bnx2x *bp = netdev_priv(dev); | ||
981 | |||
982 | if (IS_E1HMF(bp)) | ||
983 | return 0; | ||
984 | |||
985 | DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" | ||
986 | DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", | ||
987 | epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); | ||
988 | |||
989 | bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; | ||
990 | |||
991 | if (epause->rx_pause) | ||
992 | bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX; | ||
993 | |||
994 | if (epause->tx_pause) | ||
995 | bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX; | ||
996 | |||
997 | if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) | ||
998 | bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; | ||
999 | |||
1000 | if (epause->autoneg) { | ||
1001 | if (!(bp->port.supported & SUPPORTED_Autoneg)) { | ||
1002 | DP(NETIF_MSG_LINK, "autoneg not supported\n"); | ||
1003 | return -EINVAL; | ||
1004 | } | ||
1005 | |||
1006 | if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) | ||
1007 | bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; | ||
1008 | } | ||
1009 | |||
1010 | DP(NETIF_MSG_LINK, | ||
1011 | "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl); | ||
1012 | |||
1013 | if (netif_running(dev)) { | ||
1014 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
1015 | bnx2x_link_set(bp); | ||
1016 | } | ||
1017 | |||
1018 | return 0; | ||
1019 | } | ||
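[Editor's note] The flow-control resolution above builds req_flow_ctrl from the user's rx/tx pause bits; the "if still AUTO then NONE" step only works if, as the final compare suggests, BNX2X_FLOW_CTRL_AUTO is the zero value, so a request with neither bit set falls through to NONE, while a genuine autoneg request (with autonegotiated line speed) restores AUTO. A sketch under that assumption, with FC_* as hypothetical stand-ins for BNX2X_FLOW_CTRL_*:

        static u32 resolve_fc(int rx, int tx, int autoneg, int speed_is_aneg)
        {
                u32 fc = FC_AUTO;               /* assumed to be 0 */

                if (rx)
                        fc |= FC_RX;
                if (tx)
                        fc |= FC_TX;
                if (fc == FC_AUTO)              /* neither pause bit set */
                        fc = FC_NONE;
                if (autoneg && speed_is_aneg)   /* let autoneg pick pause */
                        fc = FC_AUTO;
                return fc;
        }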
1020 | |||
1021 | static int bnx2x_set_flags(struct net_device *dev, u32 data) | ||
1022 | { | ||
1023 | struct bnx2x *bp = netdev_priv(dev); | ||
1024 | int changed = 0; | ||
1025 | int rc = 0; | ||
1026 | |||
1027 | if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH)) | ||
1028 | return -EINVAL; | ||
1029 | |||
1030 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
1031 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
1032 | return -EAGAIN; | ||
1033 | } | ||
1034 | |||
1035 | /* TPA requires Rx CSUM offloading */ | ||
1036 | if ((data & ETH_FLAG_LRO) && bp->rx_csum) { | ||
1037 | if (!bp->disable_tpa) { | ||
1038 | if (!(dev->features & NETIF_F_LRO)) { | ||
1039 | dev->features |= NETIF_F_LRO; | ||
1040 | bp->flags |= TPA_ENABLE_FLAG; | ||
1041 | changed = 1; | ||
1042 | } | ||
1043 | } else | ||
1044 | rc = -EINVAL; | ||
1045 | } else if (dev->features & NETIF_F_LRO) { | ||
1046 | dev->features &= ~NETIF_F_LRO; | ||
1047 | bp->flags &= ~TPA_ENABLE_FLAG; | ||
1048 | changed = 1; | ||
1049 | } | ||
1050 | |||
1051 | if (data & ETH_FLAG_RXHASH) | ||
1052 | dev->features |= NETIF_F_RXHASH; | ||
1053 | else | ||
1054 | dev->features &= ~NETIF_F_RXHASH; | ||
1055 | |||
1056 | if (changed && netif_running(dev)) { | ||
1057 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
1058 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); | ||
1059 | } | ||
1060 | |||
1061 | return rc; | ||
1062 | } | ||
1063 | |||
1064 | static u32 bnx2x_get_rx_csum(struct net_device *dev) | ||
1065 | { | ||
1066 | struct bnx2x *bp = netdev_priv(dev); | ||
1067 | |||
1068 | return bp->rx_csum; | ||
1069 | } | ||
1070 | |||
1071 | static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) | ||
1072 | { | ||
1073 | struct bnx2x *bp = netdev_priv(dev); | ||
1074 | int rc = 0; | ||
1075 | |||
1076 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
1077 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
1078 | return -EAGAIN; | ||
1079 | } | ||
1080 | |||
1081 | bp->rx_csum = data; | ||
1082 | |||
1083 | /* Disable TPA when Rx CSUM is disabled; otherwise all | ||
1084 | TPA'ed packets would be discarded due to a wrong TCP CSUM */ | ||
1085 | if (!data) { | ||
1086 | u32 flags = ethtool_op_get_flags(dev); | ||
1087 | |||
1088 | rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO)); | ||
1089 | } | ||
1090 | |||
1091 | return rc; | ||
1092 | } | ||
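[Editor's note] Turning RX checksum off above also drops ETH_FLAG_LRO through bnx2x_set_flags(), since TPA-aggregated packets rely on hardware checksumming and would otherwise be discarded with bad TCP checksums. The flag dependency, as a sketch:

        /* LRO is only kept while RX csum offload stays enabled. */
        static u32 strip_lro_if_no_csum(u32 flags, int rx_csum)
        {
                return rx_csum ? flags : (flags & ~ETH_FLAG_LRO);
        }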
1093 | |||
1094 | static int bnx2x_set_tso(struct net_device *dev, u32 data) | ||
1095 | { | ||
1096 | if (data) { | ||
1097 | dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); | ||
1098 | dev->features |= NETIF_F_TSO6; | ||
1099 | } else { | ||
1100 | dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN); | ||
1101 | dev->features &= ~NETIF_F_TSO6; | ||
1102 | } | ||
1103 | |||
1104 | return 0; | ||
1105 | } | ||
1106 | |||
1107 | static const struct { | ||
1108 | char string[ETH_GSTRING_LEN]; | ||
1109 | } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { | ||
1110 | { "register_test (offline)" }, | ||
1111 | { "memory_test (offline)" }, | ||
1112 | { "loopback_test (offline)" }, | ||
1113 | { "nvram_test (online)" }, | ||
1114 | { "interrupt_test (online)" }, | ||
1115 | { "link_test (online)" }, | ||
1116 | { "idle check (online)" } | ||
1117 | }; | ||
1118 | |||
1119 | static int bnx2x_test_registers(struct bnx2x *bp) | ||
1120 | { | ||
1121 | int idx, i, rc = -ENODEV; | ||
1122 | u32 wr_val = 0; | ||
1123 | int port = BP_PORT(bp); | ||
1124 | static const struct { | ||
1125 | u32 offset0; | ||
1126 | u32 offset1; | ||
1127 | u32 mask; | ||
1128 | } reg_tbl[] = { | ||
1129 | /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, | ||
1130 | { DORQ_REG_DB_ADDR0, 4, 0xffffffff }, | ||
1131 | { HC_REG_AGG_INT_0, 4, 0x000003ff }, | ||
1132 | { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, | ||
1133 | { PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, | ||
1134 | { PRS_REG_CID_PORT_0, 4, 0x00ffffff }, | ||
1135 | { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, | ||
1136 | { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, | ||
1137 | { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, | ||
1138 | { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, | ||
1139 | /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, | ||
1140 | { QM_REG_CONNNUM_0, 4, 0x000fffff }, | ||
1141 | { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, | ||
1142 | { SRC_REG_KEYRSS0_0, 40, 0xffffffff }, | ||
1143 | { SRC_REG_KEYRSS0_7, 40, 0xffffffff }, | ||
1144 | { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, | ||
1145 | { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, | ||
1146 | { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, | ||
1147 | { NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, | ||
1148 | { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, | ||
1149 | /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, | ||
1150 | { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, | ||
1151 | { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, | ||
1152 | { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, | ||
1153 | { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, | ||
1154 | { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, | ||
1155 | { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, | ||
1156 | { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, | ||
1157 | { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, | ||
1158 | { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, | ||
1159 | /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, | ||
1160 | { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, | ||
1161 | { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, | ||
1162 | { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 }, | ||
1163 | { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, | ||
1164 | { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, | ||
1165 | { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, | ||
1166 | |||
1167 | { 0xffffffff, 0, 0x00000000 } | ||
1168 | }; | ||
1169 | |||
1170 | if (!netif_running(bp->dev)) | ||
1171 | return rc; | ||
1172 | |||
1173 | /* Run the test twice: | ||
1174 | first writing 0x00000000, then writing 0xffffffff */ | ||
1175 | for (idx = 0; idx < 2; idx++) { | ||
1176 | |||
1177 | switch (idx) { | ||
1178 | case 0: | ||
1179 | wr_val = 0; | ||
1180 | break; | ||
1181 | case 1: | ||
1182 | wr_val = 0xffffffff; | ||
1183 | break; | ||
1184 | } | ||
1185 | |||
1186 | for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { | ||
1187 | u32 offset, mask, save_val, val; | ||
1188 | |||
1189 | offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; | ||
1190 | mask = reg_tbl[i].mask; | ||
1191 | |||
1192 | save_val = REG_RD(bp, offset); | ||
1193 | |||
1194 | REG_WR(bp, offset, (wr_val & mask)); | ||
1195 | val = REG_RD(bp, offset); | ||
1196 | |||
1197 | /* Restore the original register's value */ | ||
1198 | REG_WR(bp, offset, save_val); | ||
1199 | |||
1200 | /* verify value is as expected */ | ||
1201 | if ((val & mask) != (wr_val & mask)) { | ||
1202 | DP(NETIF_MSG_PROBE, | ||
1203 | "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", | ||
1204 | offset, val, wr_val, mask); | ||
1205 | goto test_reg_exit; | ||
1206 | } | ||
1207 | } | ||
1208 | } | ||
1209 | |||
1210 | rc = 0; | ||
1211 | |||
1212 | test_reg_exit: | ||
1213 | return rc; | ||
1214 | } | ||
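[Editor's note] Each reg_tbl entry above is probed with a write/read-back/restore cycle, once with an all-zeros and once with an all-ones pattern, comparing only the bits the mask declares writable (offset1 is the per-port stride added to offset0). The per-register step, factored out as a sketch:

        static int probe_reg(struct bnx2x *bp, u32 off, u32 mask, u32 pattern)
        {
                u32 saved = REG_RD(bp, off);
                u32 val;

                REG_WR(bp, off, pattern & mask);
                val = REG_RD(bp, off);
                REG_WR(bp, off, saved);         /* always restore */

                return ((val & mask) == (pattern & mask)) ? 0 : -ENODEV;
        }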
1215 | |||
1216 | static int bnx2x_test_memory(struct bnx2x *bp) | ||
1217 | { | ||
1218 | int i, j, rc = -ENODEV; | ||
1219 | u32 val; | ||
1220 | static const struct { | ||
1221 | u32 offset; | ||
1222 | int size; | ||
1223 | } mem_tbl[] = { | ||
1224 | { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE }, | ||
1225 | { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE }, | ||
1226 | { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE }, | ||
1227 | { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE }, | ||
1228 | { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE }, | ||
1229 | { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE }, | ||
1230 | { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE }, | ||
1231 | |||
1232 | { 0xffffffff, 0 } | ||
1233 | }; | ||
1234 | static const struct { | ||
1235 | char *name; | ||
1236 | u32 offset; | ||
1237 | u32 e1_mask; | ||
1238 | u32 e1h_mask; | ||
1239 | } prty_tbl[] = { | ||
1240 | { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 }, | ||
1241 | { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 }, | ||
1242 | { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 }, | ||
1243 | { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 }, | ||
1244 | { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 }, | ||
1245 | { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 }, | ||
1246 | |||
1247 | { NULL, 0xffffffff, 0, 0 } | ||
1248 | }; | ||
1249 | |||
1250 | if (!netif_running(bp->dev)) | ||
1251 | return rc; | ||
1252 | |||
1253 | /* Go through all the memories */ | ||
1254 | for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) | ||
1255 | for (j = 0; j < mem_tbl[i].size; j++) | ||
1256 | REG_RD(bp, mem_tbl[i].offset + j*4); | ||
1257 | |||
1258 | /* Check the parity status */ | ||
1259 | for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { | ||
1260 | val = REG_RD(bp, prty_tbl[i].offset); | ||
1261 | if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || | ||
1262 | (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) { | ||
1263 | DP(NETIF_MSG_HW, | ||
1264 | "%s is 0x%x\n", prty_tbl[i].name, val); | ||
1265 | goto test_mem_exit; | ||
1266 | } | ||
1267 | } | ||
1268 | |||
1269 | rc = 0; | ||
1270 | |||
1271 | test_mem_exit: | ||
1272 | return rc; | ||
1273 | } | ||
1274 | |||
1275 | static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up) | ||
1276 | { | ||
1277 | int cnt = 1000; | ||
1278 | |||
1279 | if (link_up) | ||
1280 | while (bnx2x_link_test(bp) && cnt--) | ||
1281 | msleep(10); | ||
1282 | } | ||
1283 | |||
1284 | static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | ||
1285 | { | ||
1286 | unsigned int pkt_size, num_pkts, i; | ||
1287 | struct sk_buff *skb; | ||
1288 | unsigned char *packet; | ||
1289 | struct bnx2x_fastpath *fp_rx = &bp->fp[0]; | ||
1290 | struct bnx2x_fastpath *fp_tx = &bp->fp[0]; | ||
1291 | u16 tx_start_idx, tx_idx; | ||
1292 | u16 rx_start_idx, rx_idx; | ||
1293 | u16 pkt_prod, bd_prod; | ||
1294 | struct sw_tx_bd *tx_buf; | ||
1295 | struct eth_tx_start_bd *tx_start_bd; | ||
1296 | struct eth_tx_parse_bd *pbd = NULL; | ||
1297 | dma_addr_t mapping; | ||
1298 | union eth_rx_cqe *cqe; | ||
1299 | u8 cqe_fp_flags; | ||
1300 | struct sw_rx_bd *rx_buf; | ||
1301 | u16 len; | ||
1302 | int rc = -ENODEV; | ||
1303 | |||
1304 | /* check the loopback mode */ | ||
1305 | switch (loopback_mode) { | ||
1306 | case BNX2X_PHY_LOOPBACK: | ||
1307 | if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10) | ||
1308 | return -EINVAL; | ||
1309 | break; | ||
1310 | case BNX2X_MAC_LOOPBACK: | ||
1311 | bp->link_params.loopback_mode = LOOPBACK_BMAC; | ||
1312 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | ||
1313 | break; | ||
1314 | default: | ||
1315 | return -EINVAL; | ||
1316 | } | ||
1317 | |||
1318 | /* prepare the loopback packet */ | ||
1319 | pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? | ||
1320 | bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); | ||
1321 | skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); | ||
1322 | if (!skb) { | ||
1323 | rc = -ENOMEM; | ||
1324 | goto test_loopback_exit; | ||
1325 | } | ||
1326 | packet = skb_put(skb, pkt_size); | ||
1327 | memcpy(packet, bp->dev->dev_addr, ETH_ALEN); | ||
1328 | memset(packet + ETH_ALEN, 0, ETH_ALEN); | ||
1329 | memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); | ||
1330 | for (i = ETH_HLEN; i < pkt_size; i++) | ||
1331 | packet[i] = (unsigned char) (i & 0xff); | ||
1332 | |||
1333 | /* send the loopback packet */ | ||
1334 | num_pkts = 0; | ||
1335 | tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb); | ||
1336 | rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb); | ||
1337 | |||
1338 | pkt_prod = fp_tx->tx_pkt_prod++; | ||
1339 | tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)]; | ||
1340 | tx_buf->first_bd = fp_tx->tx_bd_prod; | ||
1341 | tx_buf->skb = skb; | ||
1342 | tx_buf->flags = 0; | ||
1343 | |||
1344 | bd_prod = TX_BD(fp_tx->tx_bd_prod); | ||
1345 | tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; | ||
1346 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
1347 | skb_headlen(skb), DMA_TO_DEVICE); | ||
1348 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
1349 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
1350 | tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ | ||
1351 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); | ||
1352 | tx_start_bd->vlan = cpu_to_le16(pkt_prod); | ||
1353 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; | ||
1354 | tx_start_bd->general_data = ((UNICAST_ADDRESS << | ||
1355 | ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1); | ||
1356 | |||
1357 | /* turn on parsing and get a BD */ | ||
1358 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
1359 | pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd; | ||
1360 | |||
1361 | memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); | ||
1362 | |||
1363 | wmb(); | ||
1364 | |||
1365 | fp_tx->tx_db.data.prod += 2; | ||
1366 | barrier(); | ||
1367 | DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw); | ||
1368 | |||
1369 | mmiowb(); | ||
1370 | |||
1371 | num_pkts++; | ||
1372 | fp_tx->tx_bd_prod += 2; /* start + pbd */ | ||
1373 | |||
1374 | udelay(100); | ||
1375 | |||
1376 | tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb); | ||
1377 | if (tx_idx != tx_start_idx + num_pkts) | ||
1378 | goto test_loopback_exit; | ||
1379 | |||
1380 | rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); | ||
1381 | if (rx_idx != rx_start_idx + num_pkts) | ||
1382 | goto test_loopback_exit; | ||
1383 | |||
1384 | cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; | ||
1385 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; | ||
1386 | if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) | ||
1387 | goto test_loopback_rx_exit; | ||
1388 | |||
1389 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); | ||
1390 | if (len != pkt_size) | ||
1391 | goto test_loopback_rx_exit; | ||
1392 | |||
1393 | rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; | ||
1394 | skb = rx_buf->skb; | ||
1395 | skb_reserve(skb, cqe->fast_path_cqe.placement_offset); | ||
1396 | for (i = ETH_HLEN; i < pkt_size; i++) | ||
1397 | if (*(skb->data + i) != (unsigned char) (i & 0xff)) | ||
1398 | goto test_loopback_rx_exit; | ||
1399 | |||
1400 | rc = 0; | ||
1401 | |||
1402 | test_loopback_rx_exit: | ||
1403 | |||
1404 | fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons); | ||
1405 | fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod); | ||
1406 | fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons); | ||
1407 | fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod); | ||
1408 | |||
1409 | /* Update producers */ | ||
1410 | bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod, | ||
1411 | fp_rx->rx_sge_prod); | ||
1412 | |||
1413 | test_loopback_exit: | ||
1414 | bp->link_params.loopback_mode = LOOPBACK_NONE; | ||
1415 | |||
1416 | return rc; | ||
1417 | } | ||
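[Editor's note] The loopback frame built above carries a recognizable body (byte i holds i & 0xff), so the RX side can verify the payload survived the round trip byte-for-byte. That check, factored out as a sketch:

        /* Verify the (i & 0xff) fill pattern after the Ethernet header. */
        static int payload_ok(const u8 *pkt, unsigned int hdr_len, unsigned int len)
        {
                unsigned int i;

                for (i = hdr_len; i < len; i++)
                        if (pkt[i] != (u8)(i & 0xff))
                                return 0;
                return 1;
        }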
1418 | |||
1419 | static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up) | ||
1420 | { | ||
1421 | int rc = 0, res; | ||
1422 | |||
1423 | if (BP_NOMCP(bp)) | ||
1424 | return rc; | ||
1425 | |||
1426 | if (!netif_running(bp->dev)) | ||
1427 | return BNX2X_LOOPBACK_FAILED; | ||
1428 | |||
1429 | bnx2x_netif_stop(bp, 1); | ||
1430 | bnx2x_acquire_phy_lock(bp); | ||
1431 | |||
1432 | res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up); | ||
1433 | if (res) { | ||
1434 | DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); | ||
1435 | rc |= BNX2X_PHY_LOOPBACK_FAILED; | ||
1436 | } | ||
1437 | |||
1438 | res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up); | ||
1439 | if (res) { | ||
1440 | DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res); | ||
1441 | rc |= BNX2X_MAC_LOOPBACK_FAILED; | ||
1442 | } | ||
1443 | |||
1444 | bnx2x_release_phy_lock(bp); | ||
1445 | bnx2x_netif_start(bp); | ||
1446 | |||
1447 | return rc; | ||
1448 | } | ||
1449 | |||
1450 | #define CRC32_RESIDUAL 0xdebb20e3 | ||
1451 | |||
1452 | static int bnx2x_test_nvram(struct bnx2x *bp) | ||
1453 | { | ||
1454 | static const struct { | ||
1455 | int offset; | ||
1456 | int size; | ||
1457 | } nvram_tbl[] = { | ||
1458 | { 0, 0x14 }, /* bootstrap */ | ||
1459 | { 0x14, 0xec }, /* dir */ | ||
1460 | { 0x100, 0x350 }, /* manuf_info */ | ||
1461 | { 0x450, 0xf0 }, /* feature_info */ | ||
1462 | { 0x640, 0x64 }, /* upgrade_key_info */ | ||
1463 | { 0x6a4, 0x64 }, | ||
1464 | { 0x708, 0x70 }, /* manuf_key_info */ | ||
1465 | { 0x778, 0x70 }, | ||
1466 | { 0, 0 } | ||
1467 | }; | ||
1468 | __be32 buf[0x350 / 4]; | ||
1469 | u8 *data = (u8 *)buf; | ||
1470 | int i, rc; | ||
1471 | u32 magic, crc; | ||
1472 | |||
1473 | if (BP_NOMCP(bp)) | ||
1474 | return 0; | ||
1475 | |||
1476 | rc = bnx2x_nvram_read(bp, 0, data, 4); | ||
1477 | if (rc) { | ||
1478 | DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc); | ||
1479 | goto test_nvram_exit; | ||
1480 | } | ||
1481 | |||
1482 | magic = be32_to_cpu(buf[0]); | ||
1483 | if (magic != 0x669955aa) { | ||
1484 | DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic); | ||
1485 | rc = -ENODEV; | ||
1486 | goto test_nvram_exit; | ||
1487 | } | ||
1488 | |||
1489 | for (i = 0; nvram_tbl[i].size; i++) { | ||
1490 | |||
1491 | rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data, | ||
1492 | nvram_tbl[i].size); | ||
1493 | if (rc) { | ||
1494 | DP(NETIF_MSG_PROBE, | ||
1495 | "nvram_tbl[%d] read data (rc %d)\n", i, rc); | ||
1496 | goto test_nvram_exit; | ||
1497 | } | ||
1498 | |||
1499 | crc = ether_crc_le(nvram_tbl[i].size, data); | ||
1500 | if (crc != CRC32_RESIDUAL) { | ||
1501 | DP(NETIF_MSG_PROBE, | ||
1502 | "nvram_tbl[%d] crc value (0x%08x)\n", i, crc); | ||
1503 | rc = -ENODEV; | ||
1504 | goto test_nvram_exit; | ||
1505 | } | ||
1506 | } | ||
1507 | |||
1508 | test_nvram_exit: | ||
1509 | return rc; | ||
1510 | } | ||
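[Editor's note] The check above relies on the standard CRC-32 residual property: each NVRAM area stores its own little-endian CRC-32 at the end, and running ether_crc_le() over the data plus the stored CRC yields the fixed constant 0xdebb20e3 whenever the block is intact, so no per-area expected value is needed. As a sketch:

        /* A block carrying its own trailing CRC-32 checks out iff the
         * CRC over (data + stored crc) equals the fixed residual. */
        static int block_intact(const u8 *data, int len_incl_crc)
        {
                return ether_crc_le(len_incl_crc, data) == CRC32_RESIDUAL;
        }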
1511 | |||
1512 | static int bnx2x_test_intr(struct bnx2x *bp) | ||
1513 | { | ||
1514 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); | ||
1515 | int i, rc; | ||
1516 | |||
1517 | if (!netif_running(bp->dev)) | ||
1518 | return -ENODEV; | ||
1519 | |||
1520 | config->hdr.length = 0; | ||
1521 | if (CHIP_IS_E1(bp)) | ||
1522 | /* use last unicast entries */ | ||
1523 | config->hdr.offset = (BP_PORT(bp) ? 63 : 31); | ||
1524 | else | ||
1525 | config->hdr.offset = BP_FUNC(bp); | ||
1526 | config->hdr.client_id = bp->fp->cl_id; | ||
1527 | config->hdr.reserved1 = 0; | ||
1528 | |||
1529 | bp->set_mac_pending++; | ||
1530 | smp_wmb(); | ||
1531 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | ||
1532 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | ||
1533 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); | ||
1534 | if (rc == 0) { | ||
1535 | for (i = 0; i < 10; i++) { | ||
1536 | if (!bp->set_mac_pending) | ||
1537 | break; | ||
1538 | smp_rmb(); | ||
1539 | msleep_interruptible(10); | ||
1540 | } | ||
1541 | if (i == 10) | ||
1542 | rc = -ENODEV; | ||
1543 | } | ||
1544 | |||
1545 | return rc; | ||
1546 | } | ||
1547 | |||
1548 | static void bnx2x_self_test(struct net_device *dev, | ||
1549 | struct ethtool_test *etest, u64 *buf) | ||
1550 | { | ||
1551 | struct bnx2x *bp = netdev_priv(dev); | ||
1552 | |||
1553 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
1554 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
1555 | etest->flags |= ETH_TEST_FL_FAILED; | ||
1556 | return; | ||
1557 | } | ||
1558 | |||
1559 | memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); | ||
1560 | |||
1561 | if (!netif_running(dev)) | ||
1562 | return; | ||
1563 | |||
1564 | /* offline tests are not supported in MF mode */ | ||
1565 | if (IS_E1HMF(bp)) | ||
1566 | etest->flags &= ~ETH_TEST_FL_OFFLINE; | ||
1567 | |||
1568 | if (etest->flags & ETH_TEST_FL_OFFLINE) { | ||
1569 | int port = BP_PORT(bp); | ||
1570 | u32 val; | ||
1571 | u8 link_up; | ||
1572 | |||
1573 | /* save current value of input enable for TX port IF */ | ||
1574 | val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4); | ||
1575 | /* disable input for TX port IF */ | ||
1576 | REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); | ||
1577 | |||
1578 | link_up = (bnx2x_link_test(bp) == 0); | ||
1579 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
1580 | bnx2x_nic_load(bp, LOAD_DIAG); | ||
1581 | /* wait until link state is restored */ | ||
1582 | bnx2x_wait_for_link(bp, link_up); | ||
1583 | |||
1584 | if (bnx2x_test_registers(bp) != 0) { | ||
1585 | buf[0] = 1; | ||
1586 | etest->flags |= ETH_TEST_FL_FAILED; | ||
1587 | } | ||
1588 | if (bnx2x_test_memory(bp) != 0) { | ||
1589 | buf[1] = 1; | ||
1590 | etest->flags |= ETH_TEST_FL_FAILED; | ||
1591 | } | ||
1592 | buf[2] = bnx2x_test_loopback(bp, link_up); | ||
1593 | if (buf[2] != 0) | ||
1594 | etest->flags |= ETH_TEST_FL_FAILED; | ||
1595 | |||
1596 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
1597 | |||
1598 | /* restore input for TX port IF */ | ||
1599 | REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); | ||
1600 | |||
1601 | bnx2x_nic_load(bp, LOAD_NORMAL); | ||
1602 | /* wait until link state is restored */ | ||
1603 | bnx2x_wait_for_link(bp, link_up); | ||
1604 | } | ||
1605 | if (bnx2x_test_nvram(bp) != 0) { | ||
1606 | buf[3] = 1; | ||
1607 | etest->flags |= ETH_TEST_FL_FAILED; | ||
1608 | } | ||
1609 | if (bnx2x_test_intr(bp) != 0) { | ||
1610 | buf[4] = 1; | ||
1611 | etest->flags |= ETH_TEST_FL_FAILED; | ||
1612 | } | ||
1613 | if (bp->port.pmf) | ||
1614 | if (bnx2x_link_test(bp) != 0) { | ||
1615 | buf[5] = 1; | ||
1616 | etest->flags |= ETH_TEST_FL_FAILED; | ||
1617 | } | ||
1618 | |||
1619 | #ifdef BNX2X_EXTRA_DEBUG | ||
1620 | bnx2x_panic_dump(bp); | ||
1621 | #endif | ||
1622 | } | ||
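[Editor's note] bnx2x_self_test() backs "ethtool -t ethX [online|offline]"; the u64 result slots line up with bnx2x_tests_str_arr[] (0 register, 1 memory, 2 loopback, 3 nvram, 4 interrupt, 5 link; the trailing "idle check" slot is not written in this path), and a nonzero slot marks a failure. A lookup sketch for that mapping:

        /* Map a result slot back to its test name; indices follow
         * bnx2x_tests_str_arr[] above. */
        static const char *test_name(int slot)
        {
                static const char * const names[] = {
                        "register_test", "memory_test", "loopback_test",
                        "nvram_test", "interrupt_test", "link_test", "idle check",
                };
                return (slot >= 0 && slot < 7) ? names[slot] : "unknown";
        }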
1623 | |||
1624 | static const struct { | ||
1625 | long offset; | ||
1626 | int size; | ||
1627 | u8 string[ETH_GSTRING_LEN]; | ||
1628 | } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = { | ||
1629 | /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" }, | ||
1630 | { Q_STATS_OFFSET32(error_bytes_received_hi), | ||
1631 | 8, "[%d]: rx_error_bytes" }, | ||
1632 | { Q_STATS_OFFSET32(total_unicast_packets_received_hi), | ||
1633 | 8, "[%d]: rx_ucast_packets" }, | ||
1634 | { Q_STATS_OFFSET32(total_multicast_packets_received_hi), | ||
1635 | 8, "[%d]: rx_mcast_packets" }, | ||
1636 | { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), | ||
1637 | 8, "[%d]: rx_bcast_packets" }, | ||
1638 | { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" }, | ||
1639 | { Q_STATS_OFFSET32(rx_err_discard_pkt), | ||
1640 | 4, "[%d]: rx_phy_ip_err_discards"}, | ||
1641 | { Q_STATS_OFFSET32(rx_skb_alloc_failed), | ||
1642 | 4, "[%d]: rx_skb_alloc_discard" }, | ||
1643 | { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" }, | ||
1644 | |||
1645 | /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" }, | ||
1646 | { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), | ||
1647 | 8, "[%d]: tx_ucast_packets" }, | ||
1648 | { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), | ||
1649 | 8, "[%d]: tx_mcast_packets" }, | ||
1650 | { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), | ||
1651 | 8, "[%d]: tx_bcast_packets" } | ||
1652 | }; | ||
1653 | |||
1654 | static const struct { | ||
1655 | long offset; | ||
1656 | int size; | ||
1657 | u32 flags; | ||
1658 | #define STATS_FLAGS_PORT 1 | ||
1659 | #define STATS_FLAGS_FUNC 2 | ||
1660 | #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) | ||
1661 | u8 string[ETH_GSTRING_LEN]; | ||
1662 | } bnx2x_stats_arr[BNX2X_NUM_STATS] = { | ||
1663 | /* 1 */ { STATS_OFFSET32(total_bytes_received_hi), | ||
1664 | 8, STATS_FLAGS_BOTH, "rx_bytes" }, | ||
1665 | { STATS_OFFSET32(error_bytes_received_hi), | ||
1666 | 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, | ||
1667 | { STATS_OFFSET32(total_unicast_packets_received_hi), | ||
1668 | 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, | ||
1669 | { STATS_OFFSET32(total_multicast_packets_received_hi), | ||
1670 | 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, | ||
1671 | { STATS_OFFSET32(total_broadcast_packets_received_hi), | ||
1672 | 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, | ||
1673 | { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), | ||
1674 | 8, STATS_FLAGS_PORT, "rx_crc_errors" }, | ||
1675 | { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), | ||
1676 | 8, STATS_FLAGS_PORT, "rx_align_errors" }, | ||
1677 | { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), | ||
1678 | 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, | ||
1679 | { STATS_OFFSET32(etherstatsoverrsizepkts_hi), | ||
1680 | 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, | ||
1681 | /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), | ||
1682 | 8, STATS_FLAGS_PORT, "rx_fragments" }, | ||
1683 | { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), | ||
1684 | 8, STATS_FLAGS_PORT, "rx_jabbers" }, | ||
1685 | { STATS_OFFSET32(no_buff_discard_hi), | ||
1686 | 8, STATS_FLAGS_BOTH, "rx_discards" }, | ||
1687 | { STATS_OFFSET32(mac_filter_discard), | ||
1688 | 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, | ||
1689 | { STATS_OFFSET32(xxoverflow_discard), | ||
1690 | 4, STATS_FLAGS_PORT, "rx_fw_discards" }, | ||
1691 | { STATS_OFFSET32(brb_drop_hi), | ||
1692 | 8, STATS_FLAGS_PORT, "rx_brb_discard" }, | ||
1693 | { STATS_OFFSET32(brb_truncate_hi), | ||
1694 | 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, | ||
1695 | { STATS_OFFSET32(pause_frames_received_hi), | ||
1696 | 8, STATS_FLAGS_PORT, "rx_pause_frames" }, | ||
1697 | { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), | ||
1698 | 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, | ||
1699 | { STATS_OFFSET32(nig_timer_max), | ||
1700 | 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, | ||
1701 | /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt), | ||
1702 | 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"}, | ||
1703 | { STATS_OFFSET32(rx_skb_alloc_failed), | ||
1704 | 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" }, | ||
1705 | { STATS_OFFSET32(hw_csum_err), | ||
1706 | 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" }, | ||
1707 | |||
1708 | { STATS_OFFSET32(total_bytes_transmitted_hi), | ||
1709 | 8, STATS_FLAGS_BOTH, "tx_bytes" }, | ||
1710 | { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), | ||
1711 | 8, STATS_FLAGS_PORT, "tx_error_bytes" }, | ||
1712 | { STATS_OFFSET32(total_unicast_packets_transmitted_hi), | ||
1713 | 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, | ||
1714 | { STATS_OFFSET32(total_multicast_packets_transmitted_hi), | ||
1715 | 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, | ||
1716 | { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), | ||
1717 | 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, | ||
1718 | { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), | ||
1719 | 8, STATS_FLAGS_PORT, "tx_mac_errors" }, | ||
1720 | { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), | ||
1721 | 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, | ||
1722 | /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), | ||
1723 | 8, STATS_FLAGS_PORT, "tx_single_collisions" }, | ||
1724 | { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), | ||
1725 | 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, | ||
1726 | { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), | ||
1727 | 8, STATS_FLAGS_PORT, "tx_deferred" }, | ||
1728 | { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), | ||
1729 | 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, | ||
1730 | { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), | ||
1731 | 8, STATS_FLAGS_PORT, "tx_late_collisions" }, | ||
1732 | { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), | ||
1733 | 8, STATS_FLAGS_PORT, "tx_total_collisions" }, | ||
1734 | { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), | ||
1735 | 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, | ||
1736 | { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), | ||
1737 | 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, | ||
1738 | { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), | ||
1739 | 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, | ||
1740 | { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), | ||
1741 | 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, | ||
1742 | /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), | ||
1743 | 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, | ||
1744 | { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), | ||
1745 | 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, | ||
1746 | { STATS_OFFSET32(etherstatspktsover1522octets_hi), | ||
1747 | 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, | ||
1748 | { STATS_OFFSET32(pause_frames_sent_hi), | ||
1749 | 8, STATS_FLAGS_PORT, "tx_pause_frames" } | ||
1750 | }; | ||
1751 | |||
1752 | #define IS_PORT_STAT(i) \ | ||
1753 | ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) | ||
1754 | #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) | ||
1755 | #define IS_E1HMF_MODE_STAT(bp) \ | ||
1756 | (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) | ||
1757 | |||
1758 | static int bnx2x_get_sset_count(struct net_device *dev, int stringset) | ||
1759 | { | ||
1760 | struct bnx2x *bp = netdev_priv(dev); | ||
1761 | int i, num_stats; | ||
1762 | |||
1763 | switch (stringset) { | ||
1764 | case ETH_SS_STATS: | ||
1765 | if (is_multi(bp)) { | ||
1766 | num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; | ||
1767 | if (!IS_E1HMF_MODE_STAT(bp)) | ||
1768 | num_stats += BNX2X_NUM_STATS; | ||
1769 | } else { | ||
1770 | if (IS_E1HMF_MODE_STAT(bp)) { | ||
1771 | num_stats = 0; | ||
1772 | for (i = 0; i < BNX2X_NUM_STATS; i++) | ||
1773 | if (IS_FUNC_STAT(i)) | ||
1774 | num_stats++; | ||
1775 | } else | ||
1776 | num_stats = BNX2X_NUM_STATS; | ||
1777 | } | ||
1778 | return num_stats; | ||
1779 | |||
1780 | case ETH_SS_TEST: | ||
1781 | return BNX2X_NUM_TESTS; | ||
1782 | |||
1783 | default: | ||
1784 | return -EINVAL; | ||
1785 | } | ||
1786 | } | ||
1787 | |||
1788 | static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | ||
1789 | { | ||
1790 | struct bnx2x *bp = netdev_priv(dev); | ||
1791 | int i, j, k; | ||
1792 | |||
1793 | switch (stringset) { | ||
1794 | case ETH_SS_STATS: | ||
1795 | if (is_multi(bp)) { | ||
1796 | k = 0; | ||
1797 | for_each_queue(bp, i) { | ||
1798 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) | ||
1799 | sprintf(buf + (k + j)*ETH_GSTRING_LEN, | ||
1800 | bnx2x_q_stats_arr[j].string, i); | ||
1801 | k += BNX2X_NUM_Q_STATS; | ||
1802 | } | ||
1803 | if (IS_E1HMF_MODE_STAT(bp)) | ||
1804 | break; | ||
1805 | for (j = 0; j < BNX2X_NUM_STATS; j++) | ||
1806 | strcpy(buf + (k + j)*ETH_GSTRING_LEN, | ||
1807 | bnx2x_stats_arr[j].string); | ||
1808 | } else { | ||
1809 | for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { | ||
1810 | if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i)) | ||
1811 | continue; | ||
1812 | strcpy(buf + j*ETH_GSTRING_LEN, | ||
1813 | bnx2x_stats_arr[i].string); | ||
1814 | j++; | ||
1815 | } | ||
1816 | } | ||
1817 | break; | ||
1818 | |||
1819 | case ETH_SS_TEST: | ||
1820 | memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr)); | ||
1821 | break; | ||
1822 | } | ||
1823 | } | ||
1824 | |||
1825 | static void bnx2x_get_ethtool_stats(struct net_device *dev, | ||
1826 | struct ethtool_stats *stats, u64 *buf) | ||
1827 | { | ||
1828 | struct bnx2x *bp = netdev_priv(dev); | ||
1829 | u32 *hw_stats, *offset; | ||
1830 | int i, j, k; | ||
1831 | |||
1832 | if (is_multi(bp)) { | ||
1833 | k = 0; | ||
1834 | for_each_queue(bp, i) { | ||
1835 | hw_stats = (u32 *)&bp->fp[i].eth_q_stats; | ||
1836 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { | ||
1837 | if (bnx2x_q_stats_arr[j].size == 0) { | ||
1838 | /* skip this counter */ | ||
1839 | buf[k + j] = 0; | ||
1840 | continue; | ||
1841 | } | ||
1842 | offset = (hw_stats + | ||
1843 | bnx2x_q_stats_arr[j].offset); | ||
1844 | if (bnx2x_q_stats_arr[j].size == 4) { | ||
1845 | /* 4-byte counter */ | ||
1846 | buf[k + j] = (u64) *offset; | ||
1847 | continue; | ||
1848 | } | ||
1849 | /* 8-byte counter */ | ||
1850 | buf[k + j] = HILO_U64(*offset, *(offset + 1)); | ||
1851 | } | ||
1852 | k += BNX2X_NUM_Q_STATS; | ||
1853 | } | ||
1854 | if (IS_E1HMF_MODE_STAT(bp)) | ||
1855 | return; | ||
1856 | hw_stats = (u32 *)&bp->eth_stats; | ||
1857 | for (j = 0; j < BNX2X_NUM_STATS; j++) { | ||
1858 | if (bnx2x_stats_arr[j].size == 0) { | ||
1859 | /* skip this counter */ | ||
1860 | buf[k + j] = 0; | ||
1861 | continue; | ||
1862 | } | ||
1863 | offset = (hw_stats + bnx2x_stats_arr[j].offset); | ||
1864 | if (bnx2x_stats_arr[j].size == 4) { | ||
1865 | /* 4-byte counter */ | ||
1866 | buf[k + j] = (u64) *offset; | ||
1867 | continue; | ||
1868 | } | ||
1869 | /* 8-byte counter */ | ||
1870 | buf[k + j] = HILO_U64(*offset, *(offset + 1)); | ||
1871 | } | ||
1872 | } else { | ||
1873 | hw_stats = (u32 *)&bp->eth_stats; | ||
1874 | for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { | ||
1875 | if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i)) | ||
1876 | continue; | ||
1877 | if (bnx2x_stats_arr[i].size == 0) { | ||
1878 | /* skip this counter */ | ||
1879 | buf[j] = 0; | ||
1880 | j++; | ||
1881 | continue; | ||
1882 | } | ||
1883 | offset = (hw_stats + bnx2x_stats_arr[i].offset); | ||
1884 | if (bnx2x_stats_arr[i].size == 4) { | ||
1885 | /* 4-byte counter */ | ||
1886 | buf[j] = (u64) *offset; | ||
1887 | j++; | ||
1888 | continue; | ||
1889 | } | ||
1890 | /* 8-byte counter */ | ||
1891 | buf[j] = HILO_U64(*offset, *(offset + 1)); | ||
1892 | j++; | ||
1893 | } | ||
1894 | } | ||
1895 | } | ||
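[Editor's note] The statistics firmware keeps 64-bit counters as separate hi/lo 32-bit words (the *_hi offsets the tables above point at); HILO_U64 reassembles them, while 4-byte counters are widened directly. The reassembly amounts to:

        /* Join the hi/lo 32-bit halves of a firmware counter. */
        static u64 hilo_u64(u32 hi, u32 lo)
        {
                return ((u64)hi << 32) | lo;
        }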
1896 | |||
1897 | static int bnx2x_phys_id(struct net_device *dev, u32 data) | ||
1898 | { | ||
1899 | struct bnx2x *bp = netdev_priv(dev); | ||
1900 | int i; | ||
1901 | |||
1902 | if (!netif_running(dev)) | ||
1903 | return 0; | ||
1904 | |||
1905 | if (!bp->port.pmf) | ||
1906 | return 0; | ||
1907 | |||
1908 | if (data == 0) | ||
1909 | data = 2; | ||
1910 | |||
1911 | for (i = 0; i < (data * 2); i++) { | ||
1912 | if ((i % 2) == 0) | ||
1913 | bnx2x_set_led(&bp->link_params, LED_MODE_OPER, | ||
1914 | SPEED_1000); | ||
1915 | else | ||
1916 | bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0); | ||
1917 | |||
1918 | msleep_interruptible(500); | ||
1919 | if (signal_pending(current)) | ||
1920 | break; | ||
1921 | } | ||
1922 | |||
1923 | if (bp->link_vars.link_up) | ||
1924 | bnx2x_set_led(&bp->link_params, LED_MODE_OPER, | ||
1925 | bp->link_vars.line_speed); | ||
1926 | |||
1927 | return 0; | ||
1928 | } | ||
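[Editor's note] bnx2x_phys_id() services "ethtool -p ethX N": each second of identify time is two 500 ms phases (LED forced to 1G operational blink, then off), data == 0 defaults to 2 seconds, and the normal LED state is restored afterwards if the link is up. The phase count driven by the loop above, as a sketch:

        /* Two half-second phases per second of blink time. */
        static unsigned int blink_phases(u32 seconds)
        {
                return (seconds ? seconds : 2) * 2;     /* 0 means 2 s default */
        }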
1929 | |||
1930 | static const struct ethtool_ops bnx2x_ethtool_ops = { | ||
1931 | .get_settings = bnx2x_get_settings, | ||
1932 | .set_settings = bnx2x_set_settings, | ||
1933 | .get_drvinfo = bnx2x_get_drvinfo, | ||
1934 | .get_regs_len = bnx2x_get_regs_len, | ||
1935 | .get_regs = bnx2x_get_regs, | ||
1936 | .get_wol = bnx2x_get_wol, | ||
1937 | .set_wol = bnx2x_set_wol, | ||
1938 | .get_msglevel = bnx2x_get_msglevel, | ||
1939 | .set_msglevel = bnx2x_set_msglevel, | ||
1940 | .nway_reset = bnx2x_nway_reset, | ||
1941 | .get_link = bnx2x_get_link, | ||
1942 | .get_eeprom_len = bnx2x_get_eeprom_len, | ||
1943 | .get_eeprom = bnx2x_get_eeprom, | ||
1944 | .set_eeprom = bnx2x_set_eeprom, | ||
1945 | .get_coalesce = bnx2x_get_coalesce, | ||
1946 | .set_coalesce = bnx2x_set_coalesce, | ||
1947 | .get_ringparam = bnx2x_get_ringparam, | ||
1948 | .set_ringparam = bnx2x_set_ringparam, | ||
1949 | .get_pauseparam = bnx2x_get_pauseparam, | ||
1950 | .set_pauseparam = bnx2x_set_pauseparam, | ||
1951 | .get_rx_csum = bnx2x_get_rx_csum, | ||
1952 | .set_rx_csum = bnx2x_set_rx_csum, | ||
1953 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
1954 | .set_tx_csum = ethtool_op_set_tx_hw_csum, | ||
1955 | .set_flags = bnx2x_set_flags, | ||
1956 | .get_flags = ethtool_op_get_flags, | ||
1957 | .get_sg = ethtool_op_get_sg, | ||
1958 | .set_sg = ethtool_op_set_sg, | ||
1959 | .get_tso = ethtool_op_get_tso, | ||
1960 | .set_tso = bnx2x_set_tso, | ||
1961 | .self_test = bnx2x_self_test, | ||
1962 | .get_sset_count = bnx2x_get_sset_count, | ||
1963 | .get_strings = bnx2x_get_strings, | ||
1964 | .phys_id = bnx2x_phys_id, | ||
1965 | .get_ethtool_stats = bnx2x_get_ethtool_stats, | ||
1966 | }; | ||
1967 | |||
1968 | void bnx2x_set_ethtool_ops(struct net_device *netdev) | ||
1969 | { | ||
1970 | SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops); | ||
1971 | } | ||
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
index 08d71bf438d6..08d71bf438d6 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
diff --git a/drivers/net/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
index 3f5ee5d7cc2a..3f5ee5d7cc2a 100644
--- a/drivers/net/bnx2x_fw_file_hdr.h
+++ b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index fd1f29e0317d..fd1f29e0317d 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 65b26cbfe3e7..65b26cbfe3e7 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
diff --git a/drivers/net/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index 2b1363a6fe78..2b1363a6fe78 100644
--- a/drivers/net/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 0383e3066313..0383e3066313 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 40c2981de8ed..40c2981de8ed 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 51b788339c90..b4ec2b02a465 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -51,15 +51,12 @@ | |||
51 | #include <linux/io.h> | 51 | #include <linux/io.h> |
52 | #include <linux/stringify.h> | 52 | #include <linux/stringify.h> |
53 | 53 | ||
54 | 54 | #define BNX2X_MAIN | |
55 | #include "bnx2x.h" | 55 | #include "bnx2x.h" |
56 | #include "bnx2x_init.h" | 56 | #include "bnx2x_init.h" |
57 | #include "bnx2x_init_ops.h" | 57 | #include "bnx2x_init_ops.h" |
58 | #include "bnx2x_dump.h" | 58 | #include "bnx2x_cmn.h" |
59 | 59 | ||
60 | #define DRV_MODULE_VERSION "1.52.53-1" | ||
61 | #define DRV_MODULE_RELDATE "2010/18/04" | ||
62 | #define BNX2X_BC_VER 0x040200 | ||
63 | 60 | ||
64 | #include <linux/firmware.h> | 61 | #include <linux/firmware.h> |
65 | #include "bnx2x_fw_file_hdr.h" | 62 | #include "bnx2x_fw_file_hdr.h" |
@@ -121,8 +118,6 @@ static int debug; | |||
121 | module_param(debug, int, 0); | 118 | module_param(debug, int, 0); |
122 | MODULE_PARM_DESC(debug, " Default debug msglevel"); | 119 | MODULE_PARM_DESC(debug, " Default debug msglevel"); |
123 | 120 | ||
124 | static int load_count[3]; /* 0-common, 1-port0, 2-port1 */ | ||
125 | |||
126 | static struct workqueue_struct *bnx2x_wq; | 121 | static struct workqueue_struct *bnx2x_wq; |
127 | 122 | ||
128 | enum bnx2x_board_type { | 123 | enum bnx2x_board_type { |
@@ -177,7 +172,7 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) | |||
177 | return val; | 172 | return val; |
178 | } | 173 | } |
179 | 174 | ||
180 | static const u32 dmae_reg_go_c[] = { | 175 | const u32 dmae_reg_go_c[] = { |
181 | DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, | 176 | DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, |
182 | DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, | 177 | DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, |
183 | DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, | 178 | DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, |
@@ -185,8 +180,7 @@ static const u32 dmae_reg_go_c[] = { | |||
185 | }; | 180 | }; |
186 | 181 | ||
187 | /* copy command into DMAE command memory and set DMAE command go */ | 182 | /* copy command into DMAE command memory and set DMAE command go */ |
188 | static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, | 183 | void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) |
189 | int idx) | ||
190 | { | 184 | { |
191 | u32 cmd_offset; | 185 | u32 cmd_offset; |
192 | int i; | 186 | int i; |
@@ -541,7 +535,7 @@ static void bnx2x_fw_dump(struct bnx2x *bp) | |||
541 | pr_err("end of fw dump\n"); | 535 | pr_err("end of fw dump\n"); |
542 | } | 536 | } |
543 | 537 | ||
544 | static void bnx2x_panic_dump(struct bnx2x *bp) | 538 | void bnx2x_panic_dump(struct bnx2x *bp) |
545 | { | 539 | { |
546 | int i; | 540 | int i; |
547 | u16 j, start, end; | 541 | u16 j, start, end; |
@@ -654,7 +648,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
654 | BNX2X_ERR("end crash dump -----------------\n"); | 648 | BNX2X_ERR("end crash dump -----------------\n"); |
655 | } | 649 | } |
656 | 650 | ||
657 | static void bnx2x_int_enable(struct bnx2x *bp) | 651 | void bnx2x_int_enable(struct bnx2x *bp) |
658 | { | 652 | { |
659 | int port = BP_PORT(bp); | 653 | int port = BP_PORT(bp); |
660 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; | 654 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; |
@@ -736,7 +730,7 @@ static void bnx2x_int_disable(struct bnx2x *bp) | |||
736 | BNX2X_ERR("BUG! proper val not read from IGU!\n"); | 730 | BNX2X_ERR("BUG! proper val not read from IGU!\n"); |
737 | } | 731 | } |
738 | 732 | ||
739 | static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) | 733 | void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) |
740 | { | 734 | { |
741 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; | 735 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; |
742 | int i, offset; | 736 | int i, offset; |
@@ -806,235 +800,12 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) | |||
806 | return false; | 800 | return false; |
807 | } | 801 | } |
808 | 802 | ||
809 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, | ||
810 | u8 storm, u16 index, u8 op, u8 update) | ||
811 | { | ||
812 | u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + | ||
813 | COMMAND_REG_INT_ACK); | ||
814 | struct igu_ack_register igu_ack; | ||
815 | |||
816 | igu_ack.status_block_index = index; | ||
817 | igu_ack.sb_id_and_flags = | ||
818 | ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | | ||
819 | (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | | ||
820 | (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | | ||
821 | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); | ||
822 | |||
823 | DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", | ||
824 | (*(u32 *)&igu_ack), hc_addr); | ||
825 | REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); | ||
826 | |||
827 | /* Make sure that ACK is written */ | ||
828 | mmiowb(); | ||
829 | barrier(); | ||
830 | } | ||
831 | |||
832 | static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) | ||
833 | { | ||
834 | struct host_status_block *fpsb = fp->status_blk; | ||
835 | |||
836 | barrier(); /* status block is written to by the chip */ | ||
837 | fp->fp_c_idx = fpsb->c_status_block.status_block_index; | ||
838 | fp->fp_u_idx = fpsb->u_status_block.status_block_index; | ||
839 | } | ||
840 | |||
841 | static u16 bnx2x_ack_int(struct bnx2x *bp) | ||
842 | { | ||
843 | u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + | ||
844 | COMMAND_REG_SIMD_MASK); | ||
845 | u32 result = REG_RD(bp, hc_addr); | ||
846 | |||
847 | DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", | ||
848 | result, hc_addr); | ||
849 | |||
850 | return result; | ||
851 | } | ||
852 | |||
853 | |||
854 | /* | ||
855 | * fast path service functions | ||
856 | */ | ||
857 | |||
858 | static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) | ||
859 | { | ||
860 | /* Tell compiler that consumer and producer can change */ | ||
861 | barrier(); | ||
862 | return (fp->tx_pkt_prod != fp->tx_pkt_cons); | ||
863 | } | ||
864 | |||
865 | /* free skb in the packet ring at pos idx | ||
866 | * return idx of last bd freed | ||
867 | */ | ||
868 | static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
869 | u16 idx) | ||
870 | { | ||
871 | struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; | ||
872 | struct eth_tx_start_bd *tx_start_bd; | ||
873 | struct eth_tx_bd *tx_data_bd; | ||
874 | struct sk_buff *skb = tx_buf->skb; | ||
875 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; | ||
876 | int nbd; | ||
877 | |||
878 | /* prefetch skb end pointer to speed up dev_kfree_skb() */ | ||
879 | prefetch(&skb->end); | ||
880 | |||
881 | DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", | ||
882 | idx, tx_buf, skb); | ||
883 | |||
884 | /* unmap first bd */ | ||
885 | DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); | ||
886 | tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd; | ||
887 | dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), | ||
888 | BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE); | ||
889 | |||
890 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; | ||
891 | #ifdef BNX2X_STOP_ON_ERROR | ||
892 | if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { | ||
893 | BNX2X_ERR("BAD nbd!\n"); | ||
894 | bnx2x_panic(); | ||
895 | } | ||
896 | #endif | ||
897 | new_cons = nbd + tx_buf->first_bd; | ||
898 | |||
899 | /* Get the next bd */ | ||
900 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
901 | |||
902 | /* Skip a parse bd... */ | ||
903 | --nbd; | ||
904 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
905 | |||
906 | /* ...and the TSO split header bd since they have no mapping */ | ||
907 | if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { | ||
908 | --nbd; | ||
909 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
910 | } | ||
911 | |||
912 | /* now free frags */ | ||
913 | while (nbd > 0) { | ||
914 | |||
915 | DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); | ||
916 | tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd; | ||
917 | dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), | ||
918 | BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); | ||
919 | if (--nbd) | ||
920 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
921 | } | ||
922 | |||
923 | /* release skb */ | ||
924 | WARN_ON(!skb); | ||
925 | dev_kfree_skb(skb); | ||
926 | tx_buf->first_bd = 0; | ||
927 | tx_buf->skb = NULL; | ||
928 | |||
929 | return new_cons; | ||
930 | } | ||
931 | |||
932 | static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) | ||
933 | { | ||
934 | s16 used; | ||
935 | u16 prod; | ||
936 | u16 cons; | ||
937 | |||
938 | prod = fp->tx_bd_prod; | ||
939 | cons = fp->tx_bd_cons; | ||
940 | |||
941 | /* NUM_TX_RINGS = number of "next-page" entries; | ||
942 | it is used as a threshold */ | ||
943 | used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; | ||
944 | |||
945 | #ifdef BNX2X_STOP_ON_ERROR | ||
946 | WARN_ON(used < 0); | ||
947 | WARN_ON(used > fp->bp->tx_ring_size); | ||
948 | WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL); | ||
949 | #endif | ||
950 | |||
951 | return (s16)(fp->bp->tx_ring_size) - used; | ||
952 | } | ||
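
The signed subtraction here is what makes the u16 indices wrap-safe, and the NUM_TX_RINGS bias permanently reserves the "next-page" BDs. A standalone sketch of the same arithmetic, with made-up ring sizes rather than the driver's:

    /* Standalone sketch of the availability arithmetic above; ring
     * sizes are illustrative. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 512                   /* hypothetical total BD slots */
    #define NUM_PAGES 2                     /* one "next-page" BD per page */

    static int16_t sub_s16(uint16_t a, uint16_t b)  /* SUB_S16() */
    {
            return (int16_t)(a - b);
    }

    static uint16_t tx_avail(uint16_t prod, uint16_t cons)
    {
            /* next-page BDs never carry packets, so treat them as used */
            int16_t used = sub_s16(prod, cons) + NUM_PAGES;

            return (uint16_t)(RING_SIZE - used);
    }

    int main(void)
    {
            /* empty ring: everything but the next-page slots is free */
            assert(tx_avail(100, 100) == RING_SIZE - NUM_PAGES);
            /* prod wrapped past 0xffff: signed distance still comes out 11 */
            assert(tx_avail(5, 65530) == RING_SIZE - NUM_PAGES - 11);
            printf("ok\n");
            return 0;
    }
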
953 | |||
954 | static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp) | ||
955 | { | ||
956 | u16 hw_cons; | ||
957 | |||
958 | /* Tell compiler that status block fields can change */ | ||
959 | barrier(); | ||
960 | hw_cons = le16_to_cpu(*fp->tx_cons_sb); | ||
961 | return hw_cons != fp->tx_pkt_cons; | ||
962 | } | ||
963 | |||
964 | static int bnx2x_tx_int(struct bnx2x_fastpath *fp) | ||
965 | { | ||
966 | struct bnx2x *bp = fp->bp; | ||
967 | struct netdev_queue *txq; | ||
968 | u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons; | ||
969 | |||
970 | #ifdef BNX2X_STOP_ON_ERROR | ||
971 | if (unlikely(bp->panic)) | ||
972 | return -1; | ||
973 | #endif | ||
974 | |||
975 | txq = netdev_get_tx_queue(bp->dev, fp->index); | ||
976 | hw_cons = le16_to_cpu(*fp->tx_cons_sb); | ||
977 | sw_cons = fp->tx_pkt_cons; | ||
978 | |||
979 | while (sw_cons != hw_cons) { | ||
980 | u16 pkt_cons; | ||
981 | |||
982 | pkt_cons = TX_BD(sw_cons); | ||
983 | |||
984 | /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */ | ||
985 | |||
986 | DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n", | ||
987 | hw_cons, sw_cons, pkt_cons); | ||
988 | |||
989 | /* if (NEXT_TX_IDX(sw_cons) != hw_cons) { | ||
990 | rmb(); | ||
991 | prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb); | ||
992 | } | ||
993 | */ | ||
994 | bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); | ||
995 | sw_cons++; | ||
996 | } | ||
997 | |||
998 | fp->tx_pkt_cons = sw_cons; | ||
999 | fp->tx_bd_cons = bd_cons; | ||
1000 | |||
1001 | /* Need to make the tx_bd_cons update visible to start_xmit() | ||
1002 | * before checking for netif_tx_queue_stopped(). Without the | ||
1003 | * memory barrier, there is a small possibility that | ||
1004 | * start_xmit() will miss it and cause the queue to be stopped | ||
1005 | * forever. | ||
1006 | */ | ||
1007 | smp_mb(); | ||
1008 | |||
1009 | /* TBD need a thresh? */ | ||
1010 | if (unlikely(netif_tx_queue_stopped(txq))) { | ||
1011 | /* Taking tx_lock() is needed to prevent reenabling the queue | ||
1012 | * while it's empty. This could have happened if rx_action() gets | ||
1013 | * suspended in bnx2x_tx_int() after the condition before | ||
1014 | * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): | ||
1015 | * | ||
1016 | * stops the queue->sees fresh tx_bd_cons->releases the queue-> | ||
1017 | * sends some packets consuming the whole queue again-> | ||
1018 | * stops the queue | ||
1019 | */ | ||
1020 | |||
1021 | __netif_tx_lock(txq, smp_processor_id()); | ||
1022 | |||
1023 | if ((netif_tx_queue_stopped(txq)) && | ||
1024 | (bp->state == BNX2X_STATE_OPEN) && | ||
1025 | (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) | ||
1026 | netif_tx_wake_queue(txq); | ||
1027 | |||
1028 | __netif_tx_unlock(txq); | ||
1029 | } | ||
1030 | return 0; | ||
1031 | } | ||
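
The barrier-and-recheck dance above is one half of a classic lost-wakeup protocol; the other half lives in bnx2x_start_xmit(). Below is a userspace model of the whole protocol with C11 atomics and fences standing in for smp_mb() and the netif_tx_* helpers; the structure is paraphrased for illustration, not copied from the driver.

    /* Userspace model of the queue stop/wake protocol. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define RING 64u
    #define PKTS 1000000u

    static atomic_uint prod, cons;
    static atomic_bool stopped;

    static unsigned tx_avail(void)
    {
            return RING - (atomic_load(&prod) - atomic_load(&cons));
    }

    static void *xmit(void *arg)            /* the start_xmit() side */
    {
            (void)arg;
            for (unsigned i = 0; i < PKTS; i++) {
                    while (atomic_load(&stopped))
                            ;               /* queue stopped: wait for wake */
                    atomic_fetch_add(&prod, 1);
                    if (tx_avail() == 0) {
                            atomic_store(&stopped, true);
                            /* pairs with the completion-side barrier:
                             * re-check after stopping, in case slots were
                             * freed in the meantime */
                            atomic_thread_fence(memory_order_seq_cst);
                            if (tx_avail() > 0)
                                    atomic_store(&stopped, false);
                    }
            }
            return NULL;
    }

    static void *complete(void *arg)        /* the tx completion side */
    {
            (void)arg;
            while (atomic_load(&cons) < PKTS) {
                    if (atomic_load(&cons) != atomic_load(&prod))
                            atomic_fetch_add(&cons, 1);
                    atomic_thread_fence(memory_order_seq_cst); /* smp_mb() */
                    if (atomic_load(&stopped) && tx_avail() > 0)
                            atomic_store(&stopped, false);     /* wake */
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t1, t2;

            pthread_create(&t1, NULL, xmit, NULL);
            pthread_create(&t2, NULL, complete, NULL);
            pthread_join(t1, NULL);
            pthread_join(t2, NULL);
            printf("moved %u packets without a lost wakeup\n",
                   atomic_load(&cons));
            return 0;
    }

Without the fence and the re-check on both sides, the stop can race the final completion and the queue stays stopped forever, which is exactly the scenario the comment in bnx2x_tx_int() describes.
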
1032 | 803 | ||
1033 | #ifdef BCM_CNIC | 804 | #ifdef BCM_CNIC |
1034 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid); | 805 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid); |
1035 | #endif | 806 | #endif |
1036 | 807 | ||
1037 | static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | 808 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, |
1038 | union eth_rx_cqe *rr_cqe) | 809 | union eth_rx_cqe *rr_cqe) |
1039 | { | 810 | { |
1040 | struct bnx2x *bp = fp->bp; | 811 | struct bnx2x *bp = fp->bp; |
@@ -1118,717 +889,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | |||
1118 | mb(); /* force bnx2x_wait_ramrod() to see the change */ | 889 | mb(); /* force bnx2x_wait_ramrod() to see the change */ |
1119 | } | 890 | } |
1120 | 891 | ||
1121 | static inline void bnx2x_free_rx_sge(struct bnx2x *bp, | 892 | irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) |
1122 | struct bnx2x_fastpath *fp, u16 index) | ||
1123 | { | ||
1124 | struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; | ||
1125 | struct page *page = sw_buf->page; | ||
1126 | struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; | ||
1127 | |||
1128 | /* Skip "next page" elements */ | ||
1129 | if (!page) | ||
1130 | return; | ||
1131 | |||
1132 | dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), | ||
1133 | SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); | ||
1134 | __free_pages(page, PAGES_PER_SGE_SHIFT); | ||
1135 | |||
1136 | sw_buf->page = NULL; | ||
1137 | sge->addr_hi = 0; | ||
1138 | sge->addr_lo = 0; | ||
1139 | } | ||
1140 | |||
1141 | static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, | ||
1142 | struct bnx2x_fastpath *fp, int last) | ||
1143 | { | ||
1144 | int i; | ||
1145 | |||
1146 | for (i = 0; i < last; i++) | ||
1147 | bnx2x_free_rx_sge(bp, fp, i); | ||
1148 | } | ||
1149 | |||
1150 | static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, | ||
1151 | struct bnx2x_fastpath *fp, u16 index) | ||
1152 | { | ||
1153 | struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); | ||
1154 | struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; | ||
1155 | struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; | ||
1156 | dma_addr_t mapping; | ||
1157 | |||
1158 | if (unlikely(page == NULL)) | ||
1159 | return -ENOMEM; | ||
1160 | |||
1161 | mapping = dma_map_page(&bp->pdev->dev, page, 0, | ||
1162 | SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); | ||
1163 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
1164 | __free_pages(page, PAGES_PER_SGE_SHIFT); | ||
1165 | return -ENOMEM; | ||
1166 | } | ||
1167 | |||
1168 | sw_buf->page = page; | ||
1169 | dma_unmap_addr_set(sw_buf, mapping, mapping); | ||
1170 | |||
1171 | sge->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
1172 | sge->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
1173 | |||
1174 | return 0; | ||
1175 | } | ||
1176 | |||
1177 | static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, | ||
1178 | struct bnx2x_fastpath *fp, u16 index) | ||
1179 | { | ||
1180 | struct sk_buff *skb; | ||
1181 | struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; | ||
1182 | struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; | ||
1183 | dma_addr_t mapping; | ||
1184 | |||
1185 | skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); | ||
1186 | if (unlikely(skb == NULL)) | ||
1187 | return -ENOMEM; | ||
1188 | |||
1189 | mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size, | ||
1190 | DMA_FROM_DEVICE); | ||
1191 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
1192 | dev_kfree_skb(skb); | ||
1193 | return -ENOMEM; | ||
1194 | } | ||
1195 | |||
1196 | rx_buf->skb = skb; | ||
1197 | dma_unmap_addr_set(rx_buf, mapping, mapping); | ||
1198 | |||
1199 | rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
1200 | rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
1201 | |||
1202 | return 0; | ||
1203 | } | ||
1204 | |||
1205 | /* note that we are not allocating a new skb, | ||
1206 | * we are just moving one from cons to prod; | ||
1207 | * we are not creating a new mapping, | ||
1208 | * so there is no need to check for dma_mapping_error(). | ||
1209 | */ | ||
1210 | static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, | ||
1211 | struct sk_buff *skb, u16 cons, u16 prod) | ||
1212 | { | ||
1213 | struct bnx2x *bp = fp->bp; | ||
1214 | struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; | ||
1215 | struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; | ||
1216 | struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; | ||
1217 | struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; | ||
1218 | |||
1219 | dma_sync_single_for_device(&bp->pdev->dev, | ||
1220 | dma_unmap_addr(cons_rx_buf, mapping), | ||
1221 | RX_COPY_THRESH, DMA_FROM_DEVICE); | ||
1222 | |||
1223 | prod_rx_buf->skb = cons_rx_buf->skb; | ||
1224 | dma_unmap_addr_set(prod_rx_buf, mapping, | ||
1225 | dma_unmap_addr(cons_rx_buf, mapping)); | ||
1226 | *prod_bd = *cons_bd; | ||
1227 | } | ||
1228 | |||
1229 | static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, | ||
1230 | u16 idx) | ||
1231 | { | ||
1232 | u16 last_max = fp->last_max_sge; | ||
1233 | |||
1234 | if (SUB_S16(idx, last_max) > 0) | ||
1235 | fp->last_max_sge = idx; | ||
1236 | } | ||
1237 | |||
1238 | static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) | ||
1239 | { | ||
1240 | int i, j; | ||
1241 | |||
1242 | for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { | ||
1243 | int idx = RX_SGE_CNT * i - 1; | ||
1244 | |||
1245 | for (j = 0; j < 2; j++) { | ||
1246 | SGE_MASK_CLEAR_BIT(fp, idx); | ||
1247 | idx--; | ||
1248 | } | ||
1249 | } | ||
1250 | } | ||
1251 | |||
1252 | static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, | ||
1253 | struct eth_fast_path_rx_cqe *fp_cqe) | ||
1254 | { | ||
1255 | struct bnx2x *bp = fp->bp; | ||
1256 | u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) - | ||
1257 | le16_to_cpu(fp_cqe->len_on_bd)) >> | ||
1258 | SGE_PAGE_SHIFT; | ||
1259 | u16 last_max, last_elem, first_elem; | ||
1260 | u16 delta = 0; | ||
1261 | u16 i; | ||
1262 | |||
1263 | if (!sge_len) | ||
1264 | return; | ||
1265 | |||
1266 | /* First mark all used pages */ | ||
1267 | for (i = 0; i < sge_len; i++) | ||
1268 | SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i]))); | ||
1269 | |||
1270 | DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", | ||
1271 | sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); | ||
1272 | |||
1273 | /* Here we assume that the last SGE index is the biggest */ | ||
1274 | prefetch((void *)(fp->sge_mask)); | ||
1275 | bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); | ||
1276 | |||
1277 | last_max = RX_SGE(fp->last_max_sge); | ||
1278 | last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT; | ||
1279 | first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT; | ||
1280 | |||
1281 | /* If ring is not full */ | ||
1282 | if (last_elem + 1 != first_elem) | ||
1283 | last_elem++; | ||
1284 | |||
1285 | /* Now update the prod */ | ||
1286 | for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) { | ||
1287 | if (likely(fp->sge_mask[i])) | ||
1288 | break; | ||
1289 | |||
1290 | fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK; | ||
1291 | delta += RX_SGE_MASK_ELEM_SZ; | ||
1292 | } | ||
1293 | |||
1294 | if (delta > 0) { | ||
1295 | fp->rx_sge_prod += delta; | ||
1296 | /* clear page-end entries */ | ||
1297 | bnx2x_clear_sge_mask_next_elems(fp); | ||
1298 | } | ||
1299 | |||
1300 | DP(NETIF_MSG_RX_STATUS, | ||
1301 | "fp->last_max_sge = %d fp->rx_sge_prod = %d\n", | ||
1302 | fp->last_max_sge, fp->rx_sge_prod); | ||
1303 | } | ||
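
The mask bookkeeping above is compact but non-obvious: each 64-bit word of sge_mask tracks one stretch of SGEs, and the producer may only advance past a word once every bit in it has been cleared, at which point the word is re-armed to all ones for the next lap. A standalone model with illustrative sizes:

    /* Standalone model of the mask-based SGE producer advance. */
    #include <stdint.h>
    #include <stdio.h>

    #define ELEMS    4                      /* mask words in the ring */
    #define BITS     64u
    #define ONE_MASK (~0ULL)

    static uint64_t mask[ELEMS];
    static unsigned prod;

    static void consume(unsigned idx)       /* FW completed SGE idx */
    {
            mask[(idx / BITS) % ELEMS] &= ~(1ULL << (idx % BITS));
    }

    static void update_prod(void)
    {
            unsigned first = (prod / BITS) % ELEMS;
            unsigned delta = 0;

            for (unsigned i = first, n = 0; n < ELEMS;
                 i = (i + 1) % ELEMS, n++) {
                    if (mask[i])            /* word still has live SGEs */
                            break;
                    mask[i] = ONE_MASK;     /* fully consumed: re-arm */
                    delta += BITS;
            }
            prod += delta;
    }

    int main(void)
    {
            for (int i = 0; i < ELEMS; i++)
                    mask[i] = ONE_MASK;

            for (unsigned i = 0; i < 64; i++)
                    consume(i);             /* FW eats all of word 0 */
            consume(70);                    /* and one bit of word 1 */

            update_prod();
            printf("prod advanced to %u\n", prod);  /* 64: word 1 blocks */
            return 0;
    }
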
1304 | |||
1305 | static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) | ||
1306 | { | ||
1307 | /* Set the mask to all ones: it's faster to compare a word to 0 than to all-0xf */ | ||
1308 | memset(fp->sge_mask, 0xff, | ||
1309 | (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64)); | ||
1310 | |||
1311 | /* Clear the last two indices in each page: | ||
1312 | these are the indices that correspond to the "next" element, | ||
1313 | hence will never be indicated and should be removed from | ||
1314 | the calculations. */ | ||
1315 | bnx2x_clear_sge_mask_next_elems(fp); | ||
1316 | } | ||
1317 | |||
1318 | static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | ||
1319 | struct sk_buff *skb, u16 cons, u16 prod) | ||
1320 | { | ||
1321 | struct bnx2x *bp = fp->bp; | ||
1322 | struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; | ||
1323 | struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; | ||
1324 | struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; | ||
1325 | dma_addr_t mapping; | ||
1326 | |||
1327 | /* move empty skb from pool to prod and map it */ | ||
1328 | prod_rx_buf->skb = fp->tpa_pool[queue].skb; | ||
1329 | mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data, | ||
1330 | bp->rx_buf_size, DMA_FROM_DEVICE); | ||
1331 | dma_unmap_addr_set(prod_rx_buf, mapping, mapping); | ||
1332 | |||
1333 | /* move partial skb from cons to pool (don't unmap yet) */ | ||
1334 | fp->tpa_pool[queue] = *cons_rx_buf; | ||
1335 | |||
1336 | /* mark bin state as start - print error if current state != stop */ | ||
1337 | if (fp->tpa_state[queue] != BNX2X_TPA_STOP) | ||
1338 | BNX2X_ERR("start of bin not in stop [%d]\n", queue); | ||
1339 | |||
1340 | fp->tpa_state[queue] = BNX2X_TPA_START; | ||
1341 | |||
1342 | /* point prod_bd to new skb */ | ||
1343 | prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
1344 | prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
1345 | |||
1346 | #ifdef BNX2X_STOP_ON_ERROR | ||
1347 | fp->tpa_queue_used |= (1 << queue); | ||
1348 | #ifdef _ASM_GENERIC_INT_L64_H | ||
1349 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", | ||
1350 | #else | ||
1351 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", | ||
1352 | #endif | ||
1353 | fp->tpa_queue_used); | ||
1354 | #endif | ||
1355 | } | ||
1356 | |||
1357 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
1358 | struct sk_buff *skb, | ||
1359 | struct eth_fast_path_rx_cqe *fp_cqe, | ||
1360 | u16 cqe_idx) | ||
1361 | { | ||
1362 | struct sw_rx_page *rx_pg, old_rx_pg; | ||
1363 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); | ||
1364 | u32 i, frag_len, frag_size, pages; | ||
1365 | int err; | ||
1366 | int j; | ||
1367 | |||
1368 | frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd; | ||
1369 | pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; | ||
1370 | |||
1371 | /* This is needed in order to enable forwarding support */ | ||
1372 | if (frag_size) | ||
1373 | skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, | ||
1374 | max(frag_size, (u32)len_on_bd)); | ||
1375 | |||
1376 | #ifdef BNX2X_STOP_ON_ERROR | ||
1377 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { | ||
1378 | BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", | ||
1379 | pages, cqe_idx); | ||
1380 | BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n", | ||
1381 | fp_cqe->pkt_len, len_on_bd); | ||
1382 | bnx2x_panic(); | ||
1383 | return -EINVAL; | ||
1384 | } | ||
1385 | #endif | ||
1386 | |||
1387 | /* Run through the SGL and compose the fragmented skb */ | ||
1388 | for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { | ||
1389 | u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j])); | ||
1390 | |||
1391 | /* FW gives the indices of the SGE as if the ring is an array | ||
1392 | (meaning that "next" element will consume 2 indices) */ | ||
1393 | frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE)); | ||
1394 | rx_pg = &fp->rx_page_ring[sge_idx]; | ||
1395 | old_rx_pg = *rx_pg; | ||
1396 | |||
1397 | /* If we fail to allocate a substitute page, we simply stop | ||
1398 | where we are and drop the whole packet */ | ||
1399 | err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); | ||
1400 | if (unlikely(err)) { | ||
1401 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
1402 | return err; | ||
1403 | } | ||
1404 | |||
1405 | /* Unmap the page as we are going to pass it to the stack */ | ||
1406 | dma_unmap_page(&bp->pdev->dev, | ||
1407 | dma_unmap_addr(&old_rx_pg, mapping), | ||
1408 | SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); | ||
1409 | |||
1410 | /* Add one frag and update the appropriate fields in the skb */ | ||
1411 | skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); | ||
1412 | |||
1413 | skb->data_len += frag_len; | ||
1414 | skb->truesize += frag_len; | ||
1415 | skb->len += frag_len; | ||
1416 | |||
1417 | frag_size -= frag_len; | ||
1418 | } | ||
1419 | |||
1420 | return 0; | ||
1421 | } | ||
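
The carving of frag_size into page-sized frags is worth seeing in isolation: every SGE contributes at most SGE_PAGE_SIZE*PAGES_PER_SGE bytes, and the last one takes the remainder. A minimal model, with illustrative packet and page sizes:

    /* Minimal model of the frag_len carving in bnx2x_fill_frag_skb(). */
    #include <stdint.h>
    #include <stdio.h>

    #define SGE_BYTES 4096u                 /* SGE_PAGE_SIZE*PAGES_PER_SGE */

    int main(void)
    {
            uint32_t frag_size = 9000 - 1460;   /* pkt_len - len_on_bd */
            uint32_t pages = (frag_size + SGE_BYTES - 1) / SGE_BYTES;

            for (uint32_t i = 0; i < pages; i++) {
                    uint32_t frag_len = frag_size < SGE_BYTES ?
                                        frag_size : SGE_BYTES;
                    printf("frag %u: %u bytes\n", i, frag_len);
                    frag_size -= frag_len;
            }
            return 0;
    }
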
1422 | |||
1423 | static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
1424 | u16 queue, int pad, int len, union eth_rx_cqe *cqe, | ||
1425 | u16 cqe_idx) | ||
1426 | { | ||
1427 | struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue]; | ||
1428 | struct sk_buff *skb = rx_buf->skb; | ||
1429 | /* alloc new skb */ | ||
1430 | struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); | ||
1431 | |||
1432 | /* Unmap skb in the pool anyway, as we are going to change | ||
1433 | pool entry status to BNX2X_TPA_STOP even if new skb allocation | ||
1434 | fails. */ | ||
1435 | dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), | ||
1436 | bp->rx_buf_size, DMA_FROM_DEVICE); | ||
1437 | |||
1438 | if (likely(new_skb)) { | ||
1439 | /* fix ip xsum and give it to the stack */ | ||
1440 | /* (no need to map the new skb) */ | ||
1441 | #ifdef BCM_VLAN | ||
1442 | int is_vlan_cqe = | ||
1443 | (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & | ||
1444 | PARSING_FLAGS_VLAN); | ||
1445 | int is_not_hwaccel_vlan_cqe = | ||
1446 | (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG))); | ||
1447 | #endif | ||
1448 | |||
1449 | prefetch(skb); | ||
1450 | prefetch(((char *)(skb)) + 128); | ||
1451 | |||
1452 | #ifdef BNX2X_STOP_ON_ERROR | ||
1453 | if (pad + len > bp->rx_buf_size) { | ||
1454 | BNX2X_ERR("skb_put is about to fail... " | ||
1455 | "pad %d len %d rx_buf_size %d\n", | ||
1456 | pad, len, bp->rx_buf_size); | ||
1457 | bnx2x_panic(); | ||
1458 | return; | ||
1459 | } | ||
1460 | #endif | ||
1461 | |||
1462 | skb_reserve(skb, pad); | ||
1463 | skb_put(skb, len); | ||
1464 | |||
1465 | skb->protocol = eth_type_trans(skb, bp->dev); | ||
1466 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1467 | |||
1468 | { | ||
1469 | struct iphdr *iph; | ||
1470 | |||
1471 | iph = (struct iphdr *)skb->data; | ||
1472 | #ifdef BCM_VLAN | ||
1473 | /* If there is no Rx VLAN offloading - | ||
1474 | take the VLAN tag into account */ | ||
1475 | if (unlikely(is_not_hwaccel_vlan_cqe)) | ||
1476 | iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN); | ||
1477 | #endif | ||
1478 | iph->check = 0; | ||
1479 | iph->check = ip_fast_csum((u8 *)iph, iph->ihl); | ||
1480 | } | ||
1481 | |||
1482 | if (!bnx2x_fill_frag_skb(bp, fp, skb, | ||
1483 | &cqe->fast_path_cqe, cqe_idx)) { | ||
1484 | #ifdef BCM_VLAN | ||
1485 | if ((bp->vlgrp != NULL) && is_vlan_cqe && | ||
1486 | (!is_not_hwaccel_vlan_cqe)) | ||
1487 | vlan_gro_receive(&fp->napi, bp->vlgrp, | ||
1488 | le16_to_cpu(cqe->fast_path_cqe. | ||
1489 | vlan_tag), skb); | ||
1490 | else | ||
1491 | #endif | ||
1492 | napi_gro_receive(&fp->napi, skb); | ||
1493 | } else { | ||
1494 | DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" | ||
1495 | " - dropping packet!\n"); | ||
1496 | dev_kfree_skb(skb); | ||
1497 | } | ||
1498 | |||
1499 | |||
1500 | /* put new skb in bin */ | ||
1501 | fp->tpa_pool[queue].skb = new_skb; | ||
1502 | |||
1503 | } else { | ||
1504 | /* else drop the packet and keep the buffer in the bin */ | ||
1505 | DP(NETIF_MSG_RX_STATUS, | ||
1506 | "Failed to allocate new skb - dropping packet!\n"); | ||
1507 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
1508 | } | ||
1509 | |||
1510 | fp->tpa_state[queue] = BNX2X_TPA_STOP; | ||
1511 | } | ||
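
The iph->check recompute above exists because the aggregated super-packet no longer matches the checksum computed for the original header. The driver uses ip_fast_csum(); below is a portable userspace equivalent of the same 16-bit one's-complement fold, so the fix-up can be verified in isolation.

    /* Standalone IPv4 header checksum, equivalent in result to the
     * ip_fast_csum() call above. */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t ip_csum(const uint8_t *hdr, unsigned ihl_words)
    {
            uint32_t sum = 0;

            for (unsigned i = 0; i < ihl_words * 4; i += 2)
                    sum += (uint32_t)hdr[i] << 8 | hdr[i + 1];
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            /* 20-byte header; checksum field (bytes 10-11) zeroed
             * before computing, as the driver does with iph->check */
            uint8_t hdr[20] = { 0x45, 0x00, 0x00, 0x73, 0x00, 0x00,
                                0x40, 0x00, 0x40, 0x11, 0x00, 0x00,
                                0xc0, 0xa8, 0x00, 0x01,
                                0xc0, 0xa8, 0x00, 0xc7 };

            printf("checksum = 0x%04x\n", ip_csum(hdr, 5)); /* 0xb861 */
            return 0;
    }
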
1512 | |||
1513 | static inline void bnx2x_update_rx_prod(struct bnx2x *bp, | ||
1514 | struct bnx2x_fastpath *fp, | ||
1515 | u16 bd_prod, u16 rx_comp_prod, | ||
1516 | u16 rx_sge_prod) | ||
1517 | { | ||
1518 | struct ustorm_eth_rx_producers rx_prods = {0}; | ||
1519 | int i; | ||
1520 | |||
1521 | /* Update producers */ | ||
1522 | rx_prods.bd_prod = bd_prod; | ||
1523 | rx_prods.cqe_prod = rx_comp_prod; | ||
1524 | rx_prods.sge_prod = rx_sge_prod; | ||
1525 | |||
1526 | /* | ||
1527 | * Make sure that the BD and SGE data is updated before updating the | ||
1528 | * producers since FW might read the BD/SGE right after the producer | ||
1529 | * is updated. | ||
1530 | * This is only applicable for weak-ordered memory model archs such | ||
1531 | * as IA-64. The following barrier is also mandatory since the FW | ||
1532 | * assumes BDs must have buffers. | ||
1533 | */ | ||
1534 | wmb(); | ||
1535 | |||
1536 | for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++) | ||
1537 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
1538 | USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4, | ||
1539 | ((u32 *)&rx_prods)[i]); | ||
1540 | |||
1541 | mmiowb(); /* keep prod updates ordered */ | ||
1542 | |||
1543 | DP(NETIF_MSG_RX_STATUS, | ||
1544 | "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", | ||
1545 | fp->index, bd_prod, rx_comp_prod, rx_sge_prod); | ||
1546 | } | ||
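
Two details above are easy to miss: the wmb() orders the BD/SGE writes before the producer update, and the producers struct is pushed as consecutive 32-bit stores into the device window. A userspace illustration of the word-by-word copy; the field layout is illustrative, memcpy stands in for the driver's u32-pointer cast (an aliasing hazard in portable code), and a plain array stands in for REG_WR().

    /* Userspace illustration of the word-by-word producer push. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct rx_producers {                   /* illustrative layout */
            uint16_t bd_prod;
            uint16_t cqe_prod;
            uint16_t sge_prod;
            uint16_t reserved;
    };

    int main(void)
    {
            struct rx_producers p = {
                    .bd_prod = 100, .cqe_prod = 200, .sge_prod = 50,
            };
            volatile uint32_t bar[sizeof(p) / 4];   /* REG_WR() stand-in */
            uint32_t words[sizeof(p) / 4];
            unsigned i;

            memcpy(words, &p, sizeof(p));
            /* the driver's wmb() would go here: BD/SGE writes first */
            for (i = 0; i < sizeof(p) / 4; i++)
                    bar[i] = words[i];

            printf("wrote %u words\n", i);
            return 0;
    }
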
1547 | |||
1548 | /* Set Toeplitz hash value in the skb using the value from the | ||
1549 | * CQE (calculated by HW). | ||
1550 | */ | ||
1551 | static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe, | ||
1552 | struct sk_buff *skb) | ||
1553 | { | ||
1554 | /* Set Toeplitz hash from CQE */ | ||
1555 | if ((bp->dev->features & NETIF_F_RXHASH) && | ||
1556 | (cqe->fast_path_cqe.status_flags & | ||
1557 | ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) | ||
1558 | skb->rxhash = | ||
1559 | le32_to_cpu(cqe->fast_path_cqe.rss_hash_result); | ||
1560 | } | ||
1561 | |||
1562 | static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | ||
1563 | { | ||
1564 | struct bnx2x *bp = fp->bp; | ||
1565 | u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; | ||
1566 | u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; | ||
1567 | int rx_pkt = 0; | ||
1568 | |||
1569 | #ifdef BNX2X_STOP_ON_ERROR | ||
1570 | if (unlikely(bp->panic)) | ||
1571 | return 0; | ||
1572 | #endif | ||
1573 | |||
1574 | /* The CQ "next element" is the same size as a regular element, | ||
1575 | that's why it's OK here */ | ||
1576 | hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); | ||
1577 | if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | ||
1578 | hw_comp_cons++; | ||
1579 | |||
1580 | bd_cons = fp->rx_bd_cons; | ||
1581 | bd_prod = fp->rx_bd_prod; | ||
1582 | bd_prod_fw = bd_prod; | ||
1583 | sw_comp_cons = fp->rx_comp_cons; | ||
1584 | sw_comp_prod = fp->rx_comp_prod; | ||
1585 | |||
1586 | /* Memory barrier necessary as speculative reads of the rx | ||
1587 | * buffer can be ahead of the index in the status block | ||
1588 | */ | ||
1589 | rmb(); | ||
1590 | |||
1591 | DP(NETIF_MSG_RX_STATUS, | ||
1592 | "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", | ||
1593 | fp->index, hw_comp_cons, sw_comp_cons); | ||
1594 | |||
1595 | while (sw_comp_cons != hw_comp_cons) { | ||
1596 | struct sw_rx_bd *rx_buf = NULL; | ||
1597 | struct sk_buff *skb; | ||
1598 | union eth_rx_cqe *cqe; | ||
1599 | u8 cqe_fp_flags; | ||
1600 | u16 len, pad; | ||
1601 | |||
1602 | comp_ring_cons = RCQ_BD(sw_comp_cons); | ||
1603 | bd_prod = RX_BD(bd_prod); | ||
1604 | bd_cons = RX_BD(bd_cons); | ||
1605 | |||
1606 | /* Prefetch the page containing the BD descriptor | ||
1607 | at the producer's index. It will be needed when a new skb is | ||
1608 | allocated */ | ||
1609 | prefetch((void *)(PAGE_ALIGN((unsigned long) | ||
1610 | (&fp->rx_desc_ring[bd_prod])) - | ||
1611 | PAGE_SIZE + 1)); | ||
1612 | |||
1613 | cqe = &fp->rx_comp_ring[comp_ring_cons]; | ||
1614 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; | ||
1615 | |||
1616 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" | ||
1617 | " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), | ||
1618 | cqe_fp_flags, cqe->fast_path_cqe.status_flags, | ||
1619 | le32_to_cpu(cqe->fast_path_cqe.rss_hash_result), | ||
1620 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag), | ||
1621 | le16_to_cpu(cqe->fast_path_cqe.pkt_len)); | ||
1622 | |||
1623 | /* is this a slowpath msg? */ | ||
1624 | if (unlikely(CQE_TYPE(cqe_fp_flags))) { | ||
1625 | bnx2x_sp_event(fp, cqe); | ||
1626 | goto next_cqe; | ||
1627 | |||
1628 | /* this is an rx packet */ | ||
1629 | } else { | ||
1630 | rx_buf = &fp->rx_buf_ring[bd_cons]; | ||
1631 | skb = rx_buf->skb; | ||
1632 | prefetch(skb); | ||
1633 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); | ||
1634 | pad = cqe->fast_path_cqe.placement_offset; | ||
1635 | |||
1636 | /* If CQE is marked both TPA_START and TPA_END | ||
1637 | it is a non-TPA CQE */ | ||
1638 | if ((!fp->disable_tpa) && | ||
1639 | (TPA_TYPE(cqe_fp_flags) != | ||
1640 | (TPA_TYPE_START | TPA_TYPE_END))) { | ||
1641 | u16 queue = cqe->fast_path_cqe.queue_index; | ||
1642 | |||
1643 | if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) { | ||
1644 | DP(NETIF_MSG_RX_STATUS, | ||
1645 | "calling tpa_start on queue %d\n", | ||
1646 | queue); | ||
1647 | |||
1648 | bnx2x_tpa_start(fp, queue, skb, | ||
1649 | bd_cons, bd_prod); | ||
1650 | |||
1651 | /* Set Toeplitz hash for an LRO skb */ | ||
1652 | bnx2x_set_skb_rxhash(bp, cqe, skb); | ||
1653 | |||
1654 | goto next_rx; | ||
1655 | } | ||
1656 | |||
1657 | if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) { | ||
1658 | DP(NETIF_MSG_RX_STATUS, | ||
1659 | "calling tpa_stop on queue %d\n", | ||
1660 | queue); | ||
1661 | |||
1662 | if (!BNX2X_RX_SUM_FIX(cqe)) | ||
1663 | BNX2X_ERR("STOP on none TCP " | ||
1664 | "data\n"); | ||
1665 | |||
1666 | /* This is the size of the linear data | ||
1667 | on this skb */ | ||
1668 | len = le16_to_cpu(cqe->fast_path_cqe. | ||
1669 | len_on_bd); | ||
1670 | bnx2x_tpa_stop(bp, fp, queue, pad, | ||
1671 | len, cqe, comp_ring_cons); | ||
1672 | #ifdef BNX2X_STOP_ON_ERROR | ||
1673 | if (bp->panic) | ||
1674 | return 0; | ||
1675 | #endif | ||
1676 | |||
1677 | bnx2x_update_sge_prod(fp, | ||
1678 | &cqe->fast_path_cqe); | ||
1679 | goto next_cqe; | ||
1680 | } | ||
1681 | } | ||
1682 | |||
1683 | dma_sync_single_for_device(&bp->pdev->dev, | ||
1684 | dma_unmap_addr(rx_buf, mapping), | ||
1685 | pad + RX_COPY_THRESH, | ||
1686 | DMA_FROM_DEVICE); | ||
1687 | prefetch(((char *)(skb)) + 128); | ||
1688 | |||
1689 | /* is this an error packet? */ | ||
1690 | if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { | ||
1691 | DP(NETIF_MSG_RX_ERR, | ||
1692 | "ERROR flags %x rx packet %u\n", | ||
1693 | cqe_fp_flags, sw_comp_cons); | ||
1694 | fp->eth_q_stats.rx_err_discard_pkt++; | ||
1695 | goto reuse_rx; | ||
1696 | } | ||
1697 | |||
1698 | /* Since we don't have a jumbo ring, | ||
1699 | * copy small packets if mtu > 1500 | ||
1700 | */ | ||
1701 | if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && | ||
1702 | (len <= RX_COPY_THRESH)) { | ||
1703 | struct sk_buff *new_skb; | ||
1704 | |||
1705 | new_skb = netdev_alloc_skb(bp->dev, | ||
1706 | len + pad); | ||
1707 | if (new_skb == NULL) { | ||
1708 | DP(NETIF_MSG_RX_ERR, | ||
1709 | "ERROR packet dropped " | ||
1710 | "because of alloc failure\n"); | ||
1711 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
1712 | goto reuse_rx; | ||
1713 | } | ||
1714 | |||
1715 | /* aligned copy */ | ||
1716 | skb_copy_from_linear_data_offset(skb, pad, | ||
1717 | new_skb->data + pad, len); | ||
1718 | skb_reserve(new_skb, pad); | ||
1719 | skb_put(new_skb, len); | ||
1720 | |||
1721 | bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); | ||
1722 | |||
1723 | skb = new_skb; | ||
1724 | |||
1725 | } else | ||
1726 | if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { | ||
1727 | dma_unmap_single(&bp->pdev->dev, | ||
1728 | dma_unmap_addr(rx_buf, mapping), | ||
1729 | bp->rx_buf_size, | ||
1730 | DMA_FROM_DEVICE); | ||
1731 | skb_reserve(skb, pad); | ||
1732 | skb_put(skb, len); | ||
1733 | |||
1734 | } else { | ||
1735 | DP(NETIF_MSG_RX_ERR, | ||
1736 | "ERROR packet dropped because " | ||
1737 | "of alloc failure\n"); | ||
1738 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
1739 | reuse_rx: | ||
1740 | bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); | ||
1741 | goto next_rx; | ||
1742 | } | ||
1743 | |||
1744 | skb->protocol = eth_type_trans(skb, bp->dev); | ||
1745 | |||
1746 | /* Set Toeplitz hash for a non-LRO skb */ | ||
1747 | bnx2x_set_skb_rxhash(bp, cqe, skb); | ||
1748 | |||
1749 | skb->ip_summed = CHECKSUM_NONE; | ||
1750 | if (bp->rx_csum) { | ||
1751 | if (likely(BNX2X_RX_CSUM_OK(cqe))) | ||
1752 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1753 | else | ||
1754 | fp->eth_q_stats.hw_csum_err++; | ||
1755 | } | ||
1756 | } | ||
1757 | |||
1758 | skb_record_rx_queue(skb, fp->index); | ||
1759 | |||
1760 | #ifdef BCM_VLAN | ||
1761 | if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) && | ||
1762 | (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & | ||
1763 | PARSING_FLAGS_VLAN)) | ||
1764 | vlan_gro_receive(&fp->napi, bp->vlgrp, | ||
1765 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb); | ||
1766 | else | ||
1767 | #endif | ||
1768 | napi_gro_receive(&fp->napi, skb); | ||
1769 | |||
1770 | |||
1771 | next_rx: | ||
1772 | rx_buf->skb = NULL; | ||
1773 | |||
1774 | bd_cons = NEXT_RX_IDX(bd_cons); | ||
1775 | bd_prod = NEXT_RX_IDX(bd_prod); | ||
1776 | bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); | ||
1777 | rx_pkt++; | ||
1778 | next_cqe: | ||
1779 | sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); | ||
1780 | sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); | ||
1781 | |||
1782 | if (rx_pkt == budget) | ||
1783 | break; | ||
1784 | } /* while */ | ||
1785 | |||
1786 | fp->rx_bd_cons = bd_cons; | ||
1787 | fp->rx_bd_prod = bd_prod_fw; | ||
1788 | fp->rx_comp_cons = sw_comp_cons; | ||
1789 | fp->rx_comp_prod = sw_comp_prod; | ||
1790 | |||
1791 | /* Update producers */ | ||
1792 | bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, | ||
1793 | fp->rx_sge_prod); | ||
1794 | |||
1795 | fp->rx_pkt += rx_pkt; | ||
1796 | fp->rx_calls++; | ||
1797 | |||
1798 | return rx_pkt; | ||
1799 | } | ||
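
Both the hw_comp_cons adjustment at the top of bnx2x_rx_int() and the NEXT_*_IDX() macros hop over the last descriptor of each page, which holds a "next page" pointer rather than a real entry. A standalone model of the index advance, assuming the power-of-two page geometry the real masks imply; the page size here is illustrative.

    /* Model of the completion-ring index advance: the last slot in
     * each page is a next-page pointer and is skipped. */
    #include <stdint.h>
    #include <stdio.h>

    #define DESC_CNT     8u                 /* descriptors per page */
    #define MAX_DESC_CNT (DESC_CNT - 1)     /* last slot = next-page */

    static uint16_t next_idx(uint16_t idx)
    {
            idx++;
            if ((idx & MAX_DESC_CNT) == MAX_DESC_CNT)
                    idx++;                  /* skips 7, 15, 23, ... */
            return idx;
    }

    int main(void)
    {
            uint16_t idx = 0;

            for (int i = 0; i < 10; i++) {
                    printf("%u ", idx);     /* 0 1 2 3 4 5 6 8 9 10 */
                    idx = next_idx(idx);
            }
            printf("\n");
            return 0;
    }
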
1800 | |||
1801 | static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | ||
1802 | { | ||
1803 | struct bnx2x_fastpath *fp = fp_cookie; | ||
1804 | struct bnx2x *bp = fp->bp; | ||
1805 | |||
1806 | /* Return here if interrupt is disabled */ | ||
1807 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | ||
1808 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); | ||
1809 | return IRQ_HANDLED; | ||
1810 | } | ||
1811 | |||
1812 | DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", | ||
1813 | fp->index, fp->sb_id); | ||
1814 | bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); | ||
1815 | |||
1816 | #ifdef BNX2X_STOP_ON_ERROR | ||
1817 | if (unlikely(bp->panic)) | ||
1818 | return IRQ_HANDLED; | ||
1819 | #endif | ||
1820 | |||
1821 | /* Handle Rx and Tx according to MSI-X vector */ | ||
1822 | prefetch(fp->rx_cons_sb); | ||
1823 | prefetch(fp->tx_cons_sb); | ||
1824 | prefetch(&fp->status_blk->u_status_block.status_block_index); | ||
1825 | prefetch(&fp->status_blk->c_status_block.status_block_index); | ||
1826 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); | ||
1827 | |||
1828 | return IRQ_HANDLED; | ||
1829 | } | ||
1830 | |||
1831 | static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | ||
1832 | { | 893 | { |
1833 | struct bnx2x *bp = netdev_priv(dev_instance); | 894 | struct bnx2x *bp = netdev_priv(dev_instance); |
1834 | u16 status = bnx2x_ack_int(bp); | 895 | u16 status = bnx2x_ack_int(bp); |
@@ -1902,7 +963,6 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1902 | 963 | ||
1903 | /* end of fast path */ | 964 | /* end of fast path */ |
1904 | 965 | ||
1905 | static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | ||
1906 | 966 | ||
1907 | /* Link */ | 967 | /* Link */ |
1908 | 968 | ||
@@ -1910,7 +970,7 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | |||
1910 | * General service functions | 970 | * General service functions |
1911 | */ | 971 | */ |
1912 | 972 | ||
1913 | static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) | 973 | int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) |
1914 | { | 974 | { |
1915 | u32 lock_status; | 975 | u32 lock_status; |
1916 | u32 resource_bit = (1 << resource); | 976 | u32 resource_bit = (1 << resource); |
@@ -1955,7 +1015,7 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) | |||
1955 | return -EAGAIN; | 1015 | return -EAGAIN; |
1956 | } | 1016 | } |
1957 | 1017 | ||
1958 | static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) | 1018 | int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) |
1959 | { | 1019 | { |
1960 | u32 lock_status; | 1020 | u32 lock_status; |
1961 | u32 resource_bit = (1 << resource); | 1021 | u32 resource_bit = (1 << resource); |
@@ -1991,22 +1051,6 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) | |||
1991 | return 0; | 1051 | return 0; |
1992 | } | 1052 | } |
1993 | 1053 | ||
1994 | /* HW Lock for shared dual port PHYs */ | ||
1995 | static void bnx2x_acquire_phy_lock(struct bnx2x *bp) | ||
1996 | { | ||
1997 | mutex_lock(&bp->port.phy_mutex); | ||
1998 | |||
1999 | if (bp->port.need_hw_lock) | ||
2000 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); | ||
2001 | } | ||
2002 | |||
2003 | static void bnx2x_release_phy_lock(struct bnx2x *bp) | ||
2004 | { | ||
2005 | if (bp->port.need_hw_lock) | ||
2006 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); | ||
2007 | |||
2008 | mutex_unlock(&bp->port.phy_mutex); | ||
2009 | } | ||
2010 | 1054 | ||
2011 | int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) | 1055 | int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) |
2012 | { | 1056 | { |
@@ -2183,7 +1227,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) | |||
2183 | return 0; | 1227 | return 0; |
2184 | } | 1228 | } |
2185 | 1229 | ||
2186 | static void bnx2x_calc_fc_adv(struct bnx2x *bp) | 1230 | void bnx2x_calc_fc_adv(struct bnx2x *bp) |
2187 | { | 1231 | { |
2188 | switch (bp->link_vars.ieee_fc & | 1232 | switch (bp->link_vars.ieee_fc & |
2189 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { | 1233 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { |
@@ -2208,58 +1252,8 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp) | |||
2208 | } | 1252 | } |
2209 | } | 1253 | } |
2210 | 1254 | ||
2211 | static void bnx2x_link_report(struct bnx2x *bp) | ||
2212 | { | ||
2213 | if (bp->flags & MF_FUNC_DIS) { | ||
2214 | netif_carrier_off(bp->dev); | ||
2215 | netdev_err(bp->dev, "NIC Link is Down\n"); | ||
2216 | return; | ||
2217 | } | ||
2218 | |||
2219 | if (bp->link_vars.link_up) { | ||
2220 | u16 line_speed; | ||
2221 | 1255 | ||
2222 | if (bp->state == BNX2X_STATE_OPEN) | 1256 | u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) |
2223 | netif_carrier_on(bp->dev); | ||
2224 | netdev_info(bp->dev, "NIC Link is Up, "); | ||
2225 | |||
2226 | line_speed = bp->link_vars.line_speed; | ||
2227 | if (IS_E1HMF(bp)) { | ||
2228 | u16 vn_max_rate; | ||
2229 | |||
2230 | vn_max_rate = | ||
2231 | ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
2232 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | ||
2233 | if (vn_max_rate < line_speed) | ||
2234 | line_speed = vn_max_rate; | ||
2235 | } | ||
2236 | pr_cont("%d Mbps ", line_speed); | ||
2237 | |||
2238 | if (bp->link_vars.duplex == DUPLEX_FULL) | ||
2239 | pr_cont("full duplex"); | ||
2240 | else | ||
2241 | pr_cont("half duplex"); | ||
2242 | |||
2243 | if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) { | ||
2244 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) { | ||
2245 | pr_cont(", receive "); | ||
2246 | if (bp->link_vars.flow_ctrl & | ||
2247 | BNX2X_FLOW_CTRL_TX) | ||
2248 | pr_cont("& transmit "); | ||
2249 | } else { | ||
2250 | pr_cont(", transmit "); | ||
2251 | } | ||
2252 | pr_cont("flow control ON"); | ||
2253 | } | ||
2254 | pr_cont("\n"); | ||
2255 | |||
2256 | } else { /* link_down */ | ||
2257 | netif_carrier_off(bp->dev); | ||
2258 | netdev_err(bp->dev, "NIC Link is Down\n"); | ||
2259 | } | ||
2260 | } | ||
2261 | |||
2262 | static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) | ||
2263 | { | 1257 | { |
2264 | if (!BP_NOMCP(bp)) { | 1258 | if (!BP_NOMCP(bp)) { |
2265 | u8 rc; | 1259 | u8 rc; |
@@ -2294,7 +1288,7 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) | |||
2294 | return -EINVAL; | 1288 | return -EINVAL; |
2295 | } | 1289 | } |
2296 | 1290 | ||
2297 | static void bnx2x_link_set(struct bnx2x *bp) | 1291 | void bnx2x_link_set(struct bnx2x *bp) |
2298 | { | 1292 | { |
2299 | if (!BP_NOMCP(bp)) { | 1293 | if (!BP_NOMCP(bp)) { |
2300 | bnx2x_acquire_phy_lock(bp); | 1294 | bnx2x_acquire_phy_lock(bp); |
@@ -2316,7 +1310,7 @@ static void bnx2x__link_reset(struct bnx2x *bp) | |||
2316 | BNX2X_ERR("Bootcode is missing - can not reset link\n"); | 1310 | BNX2X_ERR("Bootcode is missing - can not reset link\n"); |
2317 | } | 1311 | } |
2318 | 1312 | ||
2319 | static u8 bnx2x_link_test(struct bnx2x *bp) | 1313 | u8 bnx2x_link_test(struct bnx2x *bp) |
2320 | { | 1314 | { |
2321 | u8 rc = 0; | 1315 | u8 rc = 0; |
2322 | 1316 | ||
@@ -2548,7 +1542,7 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2548 | } | 1542 | } |
2549 | } | 1543 | } |
2550 | 1544 | ||
2551 | static void bnx2x__link_status_update(struct bnx2x *bp) | 1545 | void bnx2x__link_status_update(struct bnx2x *bp) |
2552 | { | 1546 | { |
2553 | if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS)) | 1547 | if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS)) |
2554 | return; | 1548 | return; |
@@ -2629,9 +1623,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) | |||
2629 | return rc; | 1623 | return rc; |
2630 | } | 1624 | } |
2631 | 1625 | ||
2632 | static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set); | ||
2633 | static void bnx2x_set_rx_mode(struct net_device *dev); | ||
2634 | |||
2635 | static void bnx2x_e1h_disable(struct bnx2x *bp) | 1626 | static void bnx2x_e1h_disable(struct bnx2x *bp) |
2636 | { | 1627 | { |
2637 | int port = BP_PORT(bp); | 1628 | int port = BP_PORT(bp); |
@@ -2759,7 +1750,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp) | |||
2759 | } | 1750 | } |
2760 | 1751 | ||
2761 | /* the slow path queue is odd since completions arrive on the fastpath ring */ | 1752 | /* the slow path queue is odd since completions arrive on the fastpath ring */ |
2762 | static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | 1753 | int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, |
2763 | u32 data_hi, u32 data_lo, int common) | 1754 | u32 data_hi, u32 data_lo, int common) |
2764 | { | 1755 | { |
2765 | struct eth_spe *spe; | 1756 | struct eth_spe *spe; |
@@ -3171,10 +2162,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) | |||
3171 | } | 2162 | } |
3172 | } | 2163 | } |
3173 | 2164 | ||
3174 | static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); | ||
3175 | static int bnx2x_nic_load(struct bnx2x *bp, int load_mode); | ||
3176 | |||
3177 | |||
3178 | #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1 | 2165 | #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1 |
3179 | #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */ | 2166 | #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */ |
3180 | #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) | 2167 | #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) |
@@ -3208,7 +2195,7 @@ static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp) | |||
3208 | /* | 2195 | /* |
3209 | * should be run under rtnl lock | 2196 | * should be run under rtnl lock |
3210 | */ | 2197 | */ |
3211 | static inline bool bnx2x_reset_is_done(struct bnx2x *bp) | 2198 | bool bnx2x_reset_is_done(struct bnx2x *bp) |
3212 | { | 2199 | { |
3213 | u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); | 2200 | u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); |
3214 | DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); | 2201 | DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); |
@@ -3218,7 +2205,7 @@ static inline bool bnx2x_reset_is_done(struct bnx2x *bp) | |||
3218 | /* | 2205 | /* |
3219 | * should be run under rtnl lock | 2206 | * should be run under rtnl lock |
3220 | */ | 2207 | */ |
3221 | static inline void bnx2x_inc_load_cnt(struct bnx2x *bp) | 2208 | inline void bnx2x_inc_load_cnt(struct bnx2x *bp) |
3222 | { | 2209 | { |
3223 | u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); | 2210 | u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); |
3224 | 2211 | ||
@@ -3233,7 +2220,7 @@ static inline void bnx2x_inc_load_cnt(struct bnx2x *bp) | |||
3233 | /* | 2220 | /* |
3234 | * should be run under rtnl lock | 2221 | * should be run under rtnl lock |
3235 | */ | 2222 | */ |
3236 | static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp) | 2223 | u32 bnx2x_dec_load_cnt(struct bnx2x *bp) |
3237 | { | 2224 | { |
3238 | u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); | 2225 | u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); |
3239 | 2226 | ||
@@ -3451,7 +2438,7 @@ static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1, | |||
3451 | return false; | 2438 | return false; |
3452 | } | 2439 | } |
3453 | 2440 | ||
3454 | static bool bnx2x_chk_parity_attn(struct bnx2x *bp) | 2441 | bool bnx2x_chk_parity_attn(struct bnx2x *bp) |
3455 | { | 2442 | { |
3456 | struct attn_route attn; | 2443 | struct attn_route attn; |
3457 | int port = BP_PORT(bp); | 2444 | int port = BP_PORT(bp); |
@@ -3629,7 +2616,7 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
3629 | IGU_INT_ENABLE, 1); | 2616 | IGU_INT_ENABLE, 1); |
3630 | } | 2617 | } |
3631 | 2618 | ||
3632 | static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | 2619 | irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) |
3633 | { | 2620 | { |
3634 | struct net_device *dev = dev_instance; | 2621 | struct net_device *dev = dev_instance; |
3635 | struct bnx2x *bp = netdev_priv(dev); | 2622 | struct bnx2x *bp = netdev_priv(dev); |
@@ -3665,1387 +2652,6 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | |||
3665 | 2652 | ||
3666 | /* end of slow path */ | 2653 | /* end of slow path */ |
3667 | 2654 | ||
3668 | /* Statistics */ | ||
3669 | |||
3670 | /**************************************************************************** | ||
3671 | * Macros | ||
3672 | ****************************************************************************/ | ||
3673 | |||
3674 | /* sum[hi:lo] += add[hi:lo] */ | ||
3675 | #define ADD_64(s_hi, a_hi, s_lo, a_lo) \ | ||
3676 | do { \ | ||
3677 | s_lo += a_lo; \ | ||
3678 | s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ | ||
3679 | } while (0) | ||
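
These helpers exist because the MAC exposes counters as {hi, lo} u32 pairs rather than native u64s. A standalone check of the carry logic in ADD_64 against 64-bit arithmetic:

    /* Verify the split-64-bit add above against a native u64. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
            do { \
                    s_lo += a_lo; \
                    s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
            } while (0)

    int main(void)
    {
            uint32_t s_hi = 0, s_lo = 0xfffffff0u;  /* close to carry */
            uint64_t ref  = ((uint64_t)s_hi << 32) | s_lo;

            ADD_64(s_hi, 0, s_lo, 0x20u);           /* forces lo overflow */
            ref += 0x20;

            assert((((uint64_t)s_hi << 32) | s_lo) == ref);
            printf("0x%08x%08x\n", s_hi, s_lo);     /* 0x0000000100000010 */
            return 0;
    }
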
3680 | |||
3681 | /* difference = minuend - subtrahend */ | ||
3682 | #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ | ||
3683 | do { \ | ||
3684 | if (m_lo < s_lo) { \ | ||
3685 | /* underflow */ \ | ||
3686 | d_hi = m_hi - s_hi; \ | ||
3687 | if (d_hi > 0) { \ | ||
3688 | /* we can 'borrow' 1 */ \ | ||
3689 | d_hi--; \ | ||
3690 | d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ | ||
3691 | } else { \ | ||
3692 | /* m_hi <= s_hi */ \ | ||
3693 | d_hi = 0; \ | ||
3694 | d_lo = 0; \ | ||
3695 | } \ | ||
3696 | } else { \ | ||
3697 | /* m_lo >= s_lo */ \ | ||
3698 | if (m_hi < s_hi) { \ | ||
3699 | d_hi = 0; \ | ||
3700 | d_lo = 0; \ | ||
3701 | } else { \ | ||
3702 | /* m_hi >= s_hi */ \ | ||
3703 | d_hi = m_hi - s_hi; \ | ||
3704 | d_lo = m_lo - s_lo; \ | ||
3705 | } \ | ||
3706 | } \ | ||
3707 | } while (0) | ||
3708 | |||
3709 | #define UPDATE_STAT64(s, t) \ | ||
3710 | do { \ | ||
3711 | DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ | ||
3712 | diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ | ||
3713 | pstats->mac_stx[0].t##_hi = new->s##_hi; \ | ||
3714 | pstats->mac_stx[0].t##_lo = new->s##_lo; \ | ||
3715 | ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ | ||
3716 | pstats->mac_stx[1].t##_lo, diff.lo); \ | ||
3717 | } while (0) | ||
3718 | |||
3719 | #define UPDATE_STAT64_NIG(s, t) \ | ||
3720 | do { \ | ||
3721 | DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ | ||
3722 | diff.lo, new->s##_lo, old->s##_lo); \ | ||
3723 | ADD_64(estats->t##_hi, diff.hi, \ | ||
3724 | estats->t##_lo, diff.lo); \ | ||
3725 | } while (0) | ||
3726 | |||
3727 | /* sum[hi:lo] += add */ | ||
3728 | #define ADD_EXTEND_64(s_hi, s_lo, a) \ | ||
3729 | do { \ | ||
3730 | s_lo += a; \ | ||
3731 | s_hi += (s_lo < a) ? 1 : 0; \ | ||
3732 | } while (0) | ||
3733 | |||
3734 | #define UPDATE_EXTEND_STAT(s) \ | ||
3735 | do { \ | ||
3736 | ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ | ||
3737 | pstats->mac_stx[1].s##_lo, \ | ||
3738 | new->s); \ | ||
3739 | } while (0) | ||
3740 | |||
3741 | #define UPDATE_EXTEND_TSTAT(s, t) \ | ||
3742 | do { \ | ||
3743 | diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ | ||
3744 | old_tclient->s = tclient->s; \ | ||
3745 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
3746 | } while (0) | ||
3747 | |||
3748 | #define UPDATE_EXTEND_USTAT(s, t) \ | ||
3749 | do { \ | ||
3750 | diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ | ||
3751 | old_uclient->s = uclient->s; \ | ||
3752 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
3753 | } while (0) | ||
3754 | |||
3755 | #define UPDATE_EXTEND_XSTAT(s, t) \ | ||
3756 | do { \ | ||
3757 | diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ | ||
3758 | old_xclient->s = xclient->s; \ | ||
3759 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
3760 | } while (0) | ||
3761 | |||
3762 | /* minuend -= subtrahend */ | ||
3763 | #define SUB_64(m_hi, s_hi, m_lo, s_lo) \ | ||
3764 | do { \ | ||
3765 | DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ | ||
3766 | } while (0) | ||
3767 | |||
3768 | /* minuend[hi:lo] -= subtrahend */ | ||
3769 | #define SUB_EXTEND_64(m_hi, m_lo, s) \ | ||
3770 | do { \ | ||
3771 | SUB_64(m_hi, 0, m_lo, s); \ | ||
3772 | } while (0) | ||
3773 | |||
3774 | #define SUB_EXTEND_USTAT(s, t) \ | ||
3775 | do { \ | ||
3776 | diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ | ||
3777 | SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
3778 | } while (0) | ||
3779 | |||
3780 | /* | ||
3781 | * General service functions | ||
3782 | */ | ||
3783 | |||
3784 | static inline long bnx2x_hilo(u32 *hiref) | ||
3785 | { | ||
3786 | u32 lo = *(hiref + 1); | ||
3787 | #if (BITS_PER_LONG == 64) | ||
3788 | u32 hi = *hiref; | ||
3789 | |||
3790 | return HILO_U64(hi, lo); | ||
3791 | #else | ||
3792 | return lo; | ||
3793 | #endif | ||
3794 | } | ||
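
bnx2x_hilo() returns a long, so 32-bit builds deliberately report only the low word (the net_device_stats fields it feeds are unsigned long). A sketch of the same truncation; __SIZEOF_LONG__ stands in for the kernel's BITS_PER_LONG here.

    /* Standalone sketch of the bnx2x_hilo() truncation; the {hi, lo}
     * pair layout mirrors the driver's stats storage. */
    #include <stdint.h>
    #include <stdio.h>

    static long hilo(const uint32_t *hiref)
    {
            uint32_t lo = *(hiref + 1);
    #if (__SIZEOF_LONG__ == 8)
            return ((uint64_t)*hiref << 32) | lo;   /* full 64-bit value */
    #else
            return lo;                              /* low word only */
    #endif
    }

    int main(void)
    {
            uint32_t ctr[2] = { 0x1, 0x2 };         /* {hi, lo} */

            printf("%ld\n", hilo(ctr));             /* 4294967298 on LP64 */
            return 0;
    }
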
3795 | |||
3796 | /* | ||
3797 | * Init service functions | ||
3798 | */ | ||
3799 | |||
3800 | static void bnx2x_storm_stats_post(struct bnx2x *bp) | ||
3801 | { | ||
3802 | if (!bp->stats_pending) { | ||
3803 | struct eth_query_ramrod_data ramrod_data = {0}; | ||
3804 | int i, rc; | ||
3805 | |||
3806 | ramrod_data.drv_counter = bp->stats_counter++; | ||
3807 | ramrod_data.collect_port = bp->port.pmf ? 1 : 0; | ||
3808 | for_each_queue(bp, i) | ||
3809 | ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); | ||
3810 | |||
3811 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, | ||
3812 | ((u32 *)&ramrod_data)[1], | ||
3813 | ((u32 *)&ramrod_data)[0], 0); | ||
3814 | if (rc == 0) { | ||
3815 | /* the stats ramrod has its own slot on the spq */ | ||
3816 | bp->spq_left++; | ||
3817 | bp->stats_pending = 1; | ||
3818 | } | ||
3819 | } | ||
3820 | } | ||
3821 | |||
3822 | static void bnx2x_hw_stats_post(struct bnx2x *bp) | ||
3823 | { | ||
3824 | struct dmae_command *dmae = &bp->stats_dmae; | ||
3825 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
3826 | |||
3827 | *stats_comp = DMAE_COMP_VAL; | ||
3828 | if (CHIP_REV_IS_SLOW(bp)) | ||
3829 | return; | ||
3830 | |||
3831 | /* loader */ | ||
3832 | if (bp->executer_idx) { | ||
3833 | int loader_idx = PMF_DMAE_C(bp); | ||
3834 | |||
3835 | memset(dmae, 0, sizeof(struct dmae_command)); | ||
3836 | |||
3837 | dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | | ||
3838 | DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | | ||
3839 | DMAE_CMD_DST_RESET | | ||
3840 | #ifdef __BIG_ENDIAN | ||
3841 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
3842 | #else | ||
3843 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
3844 | #endif | ||
3845 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : | ||
3846 | DMAE_CMD_PORT_0) | | ||
3847 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
3848 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); | ||
3849 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); | ||
3850 | dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + | ||
3851 | sizeof(struct dmae_command) * | ||
3852 | (loader_idx + 1)) >> 2; | ||
3853 | dmae->dst_addr_hi = 0; | ||
3854 | dmae->len = sizeof(struct dmae_command) >> 2; | ||
3855 | if (CHIP_IS_E1(bp)) | ||
3856 | dmae->len--; | ||
3857 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; | ||
3858 | dmae->comp_addr_hi = 0; | ||
3859 | dmae->comp_val = 1; | ||
3860 | |||
3861 | *stats_comp = 0; | ||
3862 | bnx2x_post_dmae(bp, dmae, loader_idx); | ||
3863 | |||
3864 | } else if (bp->func_stx) { | ||
3865 | *stats_comp = 0; | ||
3866 | bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); | ||
3867 | } | ||
3868 | } | ||
3869 | |||
3870 | static int bnx2x_stats_comp(struct bnx2x *bp) | ||
3871 | { | ||
3872 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
3873 | int cnt = 10; | ||
3874 | |||
3875 | might_sleep(); | ||
3876 | while (*stats_comp != DMAE_COMP_VAL) { | ||
3877 | if (!cnt) { | ||
3878 | BNX2X_ERR("timeout waiting for stats finished\n"); | ||
3879 | break; | ||
3880 | } | ||
3881 | cnt--; | ||
3882 | msleep(1); | ||
3883 | } | ||
3884 | return 1; | ||
3885 | } | ||
3886 | |||
3887 | /* | ||
3888 | * Statistics service functions | ||
3889 | */ | ||
3890 | |||
3891 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
3892 | { | ||
3893 | struct dmae_command *dmae; | ||
3894 | u32 opcode; | ||
3895 | int loader_idx = PMF_DMAE_C(bp); | ||
3896 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
3897 | |||
3898 | /* sanity */ | ||
3899 | if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) { | ||
3900 | BNX2X_ERR("BUG!\n"); | ||
3901 | return; | ||
3902 | } | ||
3903 | |||
3904 | bp->executer_idx = 0; | ||
3905 | |||
3906 | opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | | ||
3907 | DMAE_CMD_C_ENABLE | | ||
3908 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
3909 | #ifdef __BIG_ENDIAN | ||
3910 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
3911 | #else | ||
3912 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
3913 | #endif | ||
3914 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
3915 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
3916 | |||
3917 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
3918 | dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); | ||
3919 | dmae->src_addr_lo = bp->port.port_stx >> 2; | ||
3920 | dmae->src_addr_hi = 0; | ||
3921 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); | ||
3922 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); | ||
3923 | dmae->len = DMAE_LEN32_RD_MAX; | ||
3924 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
3925 | dmae->comp_addr_hi = 0; | ||
3926 | dmae->comp_val = 1; | ||
3927 | |||
3928 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
3929 | dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); | ||
3930 | dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; | ||
3931 | dmae->src_addr_hi = 0; | ||
3932 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + | ||
3933 | DMAE_LEN32_RD_MAX * 4); | ||
3934 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + | ||
3935 | DMAE_LEN32_RD_MAX * 4); | ||
3936 | dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX; | ||
3937 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
3938 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
3939 | dmae->comp_val = DMAE_COMP_VAL; | ||
3940 | |||
3941 | *stats_comp = 0; | ||
3942 | bnx2x_hw_stats_post(bp); | ||
3943 | bnx2x_stats_comp(bp); | ||
3944 | } | ||
3945 | |||
3946 | static void bnx2x_port_stats_init(struct bnx2x *bp) | ||
3947 | { | ||
3948 | struct dmae_command *dmae; | ||
3949 | int port = BP_PORT(bp); | ||
3950 | int vn = BP_E1HVN(bp); | ||
3951 | u32 opcode; | ||
3952 | int loader_idx = PMF_DMAE_C(bp); | ||
3953 | u32 mac_addr; | ||
3954 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
3955 | |||
3956 | /* sanity */ | ||
3957 | if (!bp->link_vars.link_up || !bp->port.pmf) { | ||
3958 | BNX2X_ERR("BUG!\n"); | ||
3959 | return; | ||
3960 | } | ||
3961 | |||
3962 | bp->executer_idx = 0; | ||
3963 | |||
3964 | /* MCP */ | ||
3965 | opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | | ||
3966 | DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | | ||
3967 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
3968 | #ifdef __BIG_ENDIAN | ||
3969 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
3970 | #else | ||
3971 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
3972 | #endif | ||
3973 | (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
3974 | (vn << DMAE_CMD_E1HVN_SHIFT)); | ||
3975 | |||
3976 | if (bp->port.port_stx) { | ||
3977 | |||
3978 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
3979 | dmae->opcode = opcode; | ||
3980 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); | ||
3981 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); | ||
3982 | dmae->dst_addr_lo = bp->port.port_stx >> 2; | ||
3983 | dmae->dst_addr_hi = 0; | ||
3984 | dmae->len = sizeof(struct host_port_stats) >> 2; | ||
3985 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
3986 | dmae->comp_addr_hi = 0; | ||
3987 | dmae->comp_val = 1; | ||
3988 | } | ||
3989 | |||
3990 | if (bp->func_stx) { | ||
3991 | |||
3992 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
3993 | dmae->opcode = opcode; | ||
3994 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); | ||
3995 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); | ||
3996 | dmae->dst_addr_lo = bp->func_stx >> 2; | ||
3997 | dmae->dst_addr_hi = 0; | ||
3998 | dmae->len = sizeof(struct host_func_stats) >> 2; | ||
3999 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
4000 | dmae->comp_addr_hi = 0; | ||
4001 | dmae->comp_val = 1; | ||
4002 | } | ||
4003 | |||
4004 | /* MAC */ | ||
4005 | opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | | ||
4006 | DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | | ||
4007 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
4008 | #ifdef __BIG_ENDIAN | ||
4009 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
4010 | #else | ||
4011 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
4012 | #endif | ||
4013 | (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
4014 | (vn << DMAE_CMD_E1HVN_SHIFT)); | ||
4015 | |||
4016 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { | ||
4017 | |||
4018 | mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : | ||
4019 | NIG_REG_INGRESS_BMAC0_MEM); | ||
4020 | |||
4021 | /* BIGMAC_REGISTER_TX_STAT_GTPKT .. | ||
4022 | BIGMAC_REGISTER_TX_STAT_GTBYT */ | ||
4023 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
4024 | dmae->opcode = opcode; | ||
4025 | dmae->src_addr_lo = (mac_addr + | ||
4026 | BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; | ||
4027 | dmae->src_addr_hi = 0; | ||
4028 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); | ||
4029 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); | ||
4030 | dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - | ||
4031 | BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; | ||
4032 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
4033 | dmae->comp_addr_hi = 0; | ||
4034 | dmae->comp_val = 1; | ||
4035 | |||
4036 | /* BIGMAC_REGISTER_RX_STAT_GR64 .. | ||
4037 | BIGMAC_REGISTER_RX_STAT_GRIPJ */ | ||
4038 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
4039 | dmae->opcode = opcode; | ||
4040 | dmae->src_addr_lo = (mac_addr + | ||
4041 | BIGMAC_REGISTER_RX_STAT_GR64) >> 2; | ||
4042 | dmae->src_addr_hi = 0; | ||
4043 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + | ||
4044 | offsetof(struct bmac_stats, rx_stat_gr64_lo)); | ||
4045 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + | ||
4046 | offsetof(struct bmac_stats, rx_stat_gr64_lo)); | ||
4047 | dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - | ||
4048 | BIGMAC_REGISTER_RX_STAT_GR64) >> 2; | ||
4049 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
4050 | dmae->comp_addr_hi = 0; | ||
4051 | dmae->comp_val = 1; | ||
4052 | |||
4053 | } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { | ||
4054 | |||
4055 | mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); | ||
4056 | |||
4057 | /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ | ||
4058 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
4059 | dmae->opcode = opcode; | ||
4060 | dmae->src_addr_lo = (mac_addr + | ||
4061 | EMAC_REG_EMAC_RX_STAT_AC) >> 2; | ||
4062 | dmae->src_addr_hi = 0; | ||
4063 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); | ||
4064 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); | ||
4065 | dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; | ||
4066 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
4067 | dmae->comp_addr_hi = 0; | ||
4068 | dmae->comp_val = 1; | ||
4069 | |||
4070 | /* EMAC_REG_EMAC_RX_STAT_AC_28 */ | ||
4071 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
4072 | dmae->opcode = opcode; | ||
4073 | dmae->src_addr_lo = (mac_addr + | ||
4074 | EMAC_REG_EMAC_RX_STAT_AC_28) >> 2; | ||
4075 | dmae->src_addr_hi = 0; | ||
4076 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + | ||
4077 | offsetof(struct emac_stats, rx_stat_falsecarriererrors)); | ||
4078 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + | ||
4079 | offsetof(struct emac_stats, rx_stat_falsecarriererrors)); | ||
4080 | dmae->len = 1; | ||
4081 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
4082 | dmae->comp_addr_hi = 0; | ||
4083 | dmae->comp_val = 1; | ||
4084 | |||
4085 | /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ | ||
4086 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
4087 | dmae->opcode = opcode; | ||
4088 | dmae->src_addr_lo = (mac_addr + | ||
4089 | EMAC_REG_EMAC_TX_STAT_AC) >> 2; | ||
4090 | dmae->src_addr_hi = 0; | ||
4091 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + | ||
4092 | offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); | ||
4093 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + | ||
4094 | offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); | ||
4095 | dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; | ||
4096 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
4097 | dmae->comp_addr_hi = 0; | ||
4098 | dmae->comp_val = 1; | ||
4099 | } | ||
4100 | |||
4101 | /* NIG */ | ||
4102 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
4103 | dmae->opcode = opcode; | ||
4104 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD : | ||
4105 | NIG_REG_STAT0_BRB_DISCARD) >> 2; | ||
4106 | dmae->src_addr_hi = 0; | ||
4107 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); | ||
4108 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); | ||
4109 | dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; | ||
4110 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
4111 | dmae->comp_addr_hi = 0; | ||
4112 | dmae->comp_val = 1; | ||
4113 | |||
4114 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
4115 | dmae->opcode = opcode; | ||
4116 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : | ||
4117 | NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; | ||
4118 | dmae->src_addr_hi = 0; | ||
4119 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + | ||
4120 | offsetof(struct nig_stats, egress_mac_pkt0_lo)); | ||
4121 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + | ||
4122 | offsetof(struct nig_stats, egress_mac_pkt0_lo)); | ||
4123 | dmae->len = (2*sizeof(u32)) >> 2; | ||
4124 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
4125 | dmae->comp_addr_hi = 0; | ||
4126 | dmae->comp_val = 1; | ||
4127 | |||
4128 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
4129 | dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | | ||
4130 | DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | | ||
4131 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
4132 | #ifdef __BIG_ENDIAN | ||
4133 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
4134 | #else | ||
4135 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
4136 | #endif | ||
4137 | (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
4138 | (vn << DMAE_CMD_E1HVN_SHIFT)); | ||
4139 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : | ||
4140 | NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; | ||
4141 | dmae->src_addr_hi = 0; | ||
4142 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + | ||
4143 | offsetof(struct nig_stats, egress_mac_pkt1_lo)); | ||
4144 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + | ||
4145 | offsetof(struct nig_stats, egress_mac_pkt1_lo)); | ||
4146 | dmae->len = (2*sizeof(u32)) >> 2; | ||
4147 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
4148 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
4149 | dmae->comp_val = DMAE_COMP_VAL; | ||
4150 | |||
4151 | *stats_comp = 0; | ||
4152 | } | ||
4153 | |||
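[Editor's note] The port-stats chain built above follows one completion convention throughout: every intermediate DMAE command completes by writing 1 to dmae_reg_go_c[loader_idx], the GRC "go" register that launches the next command in the chain, while only the final command completes to the stats_comp word in host memory with DMAE_COMP_VAL, which the driver can poll. A minimal sketch of that convention; struct dmae_cmd, COMP_MAGIC, and the parameters are illustrative stand-ins, not the driver's definitions.

    #include <stdint.h>

    #define COMP_MAGIC 0xe0d0e0d0u  /* stands in for DMAE_COMP_VAL */

    struct dmae_cmd {               /* trimmed stand-in for struct dmae_command */
        uint32_t comp_addr_lo, comp_addr_hi;
        uint32_t comp_val;
    };

    /*
     * Chain n commands: every command but the last completes by writing 1
     * to the loader's GRC "go" register, which kicks the next command in
     * hardware; only the final command completes to a host-memory word the
     * driver polls for COMP_MAGIC.
     */
    static void chain_dmae(struct dmae_cmd *cmds, int n,
                           uint32_t go_reg, uint64_t comp_dma_addr)
    {
        for (int i = 0; i < n - 1; i++) {
            cmds[i].comp_addr_lo = go_reg >> 2;  /* GRC addresses are in dwords */
            cmds[i].comp_addr_hi = 0;
            cmds[i].comp_val = 1;                /* the "go" pulse */
        }
        cmds[n - 1].comp_addr_lo = (uint32_t)comp_dma_addr;
        cmds[n - 1].comp_addr_hi = (uint32_t)(comp_dma_addr >> 32);
        cmds[n - 1].comp_val = COMP_MAGIC;       /* host polls for this value */
    }
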
4154 | static void bnx2x_func_stats_init(struct bnx2x *bp) | ||
4155 | { | ||
4156 | struct dmae_command *dmae = &bp->stats_dmae; | ||
4157 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
4158 | |||
4159 | /* sanity */ | ||
4160 | if (!bp->func_stx) { | ||
4161 | BNX2X_ERR("BUG!\n"); | ||
4162 | return; | ||
4163 | } | ||
4164 | |||
4165 | bp->executer_idx = 0; | ||
4166 | memset(dmae, 0, sizeof(struct dmae_command)); | ||
4167 | |||
4168 | dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | | ||
4169 | DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | | ||
4170 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
4171 | #ifdef __BIG_ENDIAN | ||
4172 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
4173 | #else | ||
4174 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
4175 | #endif | ||
4176 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
4177 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
4178 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); | ||
4179 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); | ||
4180 | dmae->dst_addr_lo = bp->func_stx >> 2; | ||
4181 | dmae->dst_addr_hi = 0; | ||
4182 | dmae->len = sizeof(struct host_func_stats) >> 2; | ||
4183 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
4184 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
4185 | dmae->comp_val = DMAE_COMP_VAL; | ||
4186 | |||
4187 | *stats_comp = 0; | ||
4188 | } | ||
4189 | |||
4190 | static void bnx2x_stats_start(struct bnx2x *bp) | ||
4191 | { | ||
4192 | if (bp->port.pmf) | ||
4193 | bnx2x_port_stats_init(bp); | ||
4194 | |||
4195 | else if (bp->func_stx) | ||
4196 | bnx2x_func_stats_init(bp); | ||
4197 | |||
4198 | bnx2x_hw_stats_post(bp); | ||
4199 | bnx2x_storm_stats_post(bp); | ||
4200 | } | ||
4201 | |||
4202 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) | ||
4203 | { | ||
4204 | bnx2x_stats_comp(bp); | ||
4205 | bnx2x_stats_pmf_update(bp); | ||
4206 | bnx2x_stats_start(bp); | ||
4207 | } | ||
4208 | |||
4209 | static void bnx2x_stats_restart(struct bnx2x *bp) | ||
4210 | { | ||
4211 | bnx2x_stats_comp(bp); | ||
4212 | bnx2x_stats_start(bp); | ||
4213 | } | ||
4214 | |||
4215 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) | ||
4216 | { | ||
4217 | struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats); | ||
4218 | struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); | ||
4219 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
4220 | struct { | ||
4221 | u32 lo; | ||
4222 | u32 hi; | ||
4223 | } diff; | ||
4224 | |||
4225 | UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); | ||
4226 | UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); | ||
4227 | UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); | ||
4228 | UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); | ||
4229 | UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); | ||
4230 | UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); | ||
4231 | UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); | ||
4232 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); | ||
4233 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); | ||
4234 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); | ||
4235 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); | ||
4236 | UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); | ||
4237 | UPDATE_STAT64(tx_stat_gt127, | ||
4238 | tx_stat_etherstatspkts65octetsto127octets); | ||
4239 | UPDATE_STAT64(tx_stat_gt255, | ||
4240 | tx_stat_etherstatspkts128octetsto255octets); | ||
4241 | UPDATE_STAT64(tx_stat_gt511, | ||
4242 | tx_stat_etherstatspkts256octetsto511octets); | ||
4243 | UPDATE_STAT64(tx_stat_gt1023, | ||
4244 | tx_stat_etherstatspkts512octetsto1023octets); | ||
4245 | UPDATE_STAT64(tx_stat_gt1518, | ||
4246 | tx_stat_etherstatspkts1024octetsto1522octets); | ||
4247 | UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); | ||
4248 | UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); | ||
4249 | UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); | ||
4250 | UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); | ||
4251 | UPDATE_STAT64(tx_stat_gterr, | ||
4252 | tx_stat_dot3statsinternalmactransmiterrors); | ||
4253 | UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); | ||
4254 | |||
4255 | estats->pause_frames_received_hi = | ||
4256 | pstats->mac_stx[1].rx_stat_bmac_xpf_hi; | ||
4257 | estats->pause_frames_received_lo = | ||
4258 | pstats->mac_stx[1].rx_stat_bmac_xpf_lo; | ||
4259 | |||
4260 | estats->pause_frames_sent_hi = | ||
4261 | pstats->mac_stx[1].tx_stat_outxoffsent_hi; | ||
4262 | estats->pause_frames_sent_lo = | ||
4263 | pstats->mac_stx[1].tx_stat_outxoffsent_lo; | ||
4264 | } | ||
4265 | |||
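[Editor's note] The UPDATE_STAT64 and ADD_64 uses above keep every counter as a {hi, lo} pair of u32s so the host/firmware statistics layout stays fixed-width on every architecture; each addition must therefore propagate the carry out of the low word by hand. A self-contained model of that arithmetic (names are illustrative):

    #include <assert.h>
    #include <stdint.h>

    struct stat64 { uint32_t hi, lo; };

    /* Mirrors the driver's ADD_64(): hi += a_hi plus the carry out of lo += a_lo. */
    static void add64(struct stat64 *s, uint32_t a_hi, uint32_t a_lo)
    {
        uint32_t old_lo = s->lo;

        s->lo += a_lo;
        s->hi += a_hi + (s->lo < old_lo);   /* carry if the low word wrapped */
    }

    int main(void)
    {
        struct stat64 s = { .hi = 0, .lo = 0xfffffff0u };

        add64(&s, 0, 0x20);                 /* crosses the 32-bit boundary */
        assert(s.hi == 1 && s.lo == 0x10);
        return 0;
    }
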
4266 | static void bnx2x_emac_stats_update(struct bnx2x *bp) | ||
4267 | { | ||
4268 | struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); | ||
4269 | struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); | ||
4270 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
4271 | |||
4272 | UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); | ||
4273 | UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); | ||
4274 | UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); | ||
4275 | UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); | ||
4276 | UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); | ||
4277 | UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); | ||
4278 | UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); | ||
4279 | UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); | ||
4280 | UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); | ||
4281 | UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); | ||
4282 | UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); | ||
4283 | UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); | ||
4284 | UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); | ||
4285 | UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); | ||
4286 | UPDATE_EXTEND_STAT(tx_stat_outxonsent); | ||
4287 | UPDATE_EXTEND_STAT(tx_stat_outxoffsent); | ||
4288 | UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); | ||
4289 | UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); | ||
4290 | UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); | ||
4291 | UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); | ||
4292 | UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); | ||
4293 | UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); | ||
4294 | UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); | ||
4295 | UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); | ||
4296 | UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); | ||
4297 | UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); | ||
4298 | UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); | ||
4299 | UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); | ||
4300 | UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); | ||
4301 | UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); | ||
4302 | UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); | ||
4303 | |||
4304 | estats->pause_frames_received_hi = | ||
4305 | pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; | ||
4306 | estats->pause_frames_received_lo = | ||
4307 | pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; | ||
4308 | ADD_64(estats->pause_frames_received_hi, | ||
4309 | pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, | ||
4310 | estats->pause_frames_received_lo, | ||
4311 | pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); | ||
4312 | |||
4313 | estats->pause_frames_sent_hi = | ||
4314 | pstats->mac_stx[1].tx_stat_outxonsent_hi; | ||
4315 | estats->pause_frames_sent_lo = | ||
4316 | pstats->mac_stx[1].tx_stat_outxonsent_lo; | ||
4317 | ADD_64(estats->pause_frames_sent_hi, | ||
4318 | pstats->mac_stx[1].tx_stat_outxoffsent_hi, | ||
4319 | estats->pause_frames_sent_lo, | ||
4320 | pstats->mac_stx[1].tx_stat_outxoffsent_lo); | ||
4321 | } | ||
4322 | |||
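[Editor's note] The UPDATE_EXTEND_STAT calls widen 32-bit hardware counters into the 64-bit pairs by adding the unsigned difference between the current snapshot and the previous one; because the subtraction is done in u32 arithmetic, one wrap of the hardware counter between polls is absorbed for free. A sketch under exactly that assumption (at most one wrap per poll interval):

    #include <stdint.h>

    struct stat64 { uint32_t hi, lo; };

    static void add64(struct stat64 *s, uint32_t delta)
    {
        uint32_t old_lo = s->lo;

        s->lo += delta;
        s->hi += (s->lo < old_lo);          /* propagate the carry */
    }

    /*
     * Extend a free-running 32-bit hw counter into a 64-bit total.
     * Correct as long as the counter wraps at most once per poll.
     */
    static void update_extend(struct stat64 *total, uint32_t *old, uint32_t new_val)
    {
        uint32_t diff = new_val - *old;     /* u32 arithmetic absorbs one wrap */

        *old = new_val;
        add64(total, diff);
    }
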
4323 | static int bnx2x_hw_stats_update(struct bnx2x *bp) | ||
4324 | { | ||
4325 | struct nig_stats *new = bnx2x_sp(bp, nig_stats); | ||
4326 | struct nig_stats *old = &(bp->port.old_nig_stats); | ||
4327 | struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); | ||
4328 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
4329 | struct { | ||
4330 | u32 lo; | ||
4331 | u32 hi; | ||
4332 | } diff; | ||
4333 | |||
4334 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) | ||
4335 | bnx2x_bmac_stats_update(bp); | ||
4336 | |||
4337 | else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) | ||
4338 | bnx2x_emac_stats_update(bp); | ||
4339 | |||
4340 | else { /* unreached */ | ||
4341 | BNX2X_ERR("stats updated by DMAE but no MAC active\n"); | ||
4342 | return -1; | ||
4343 | } | ||
4344 | |||
4345 | ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, | ||
4346 | new->brb_discard - old->brb_discard); | ||
4347 | ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, | ||
4348 | new->brb_truncate - old->brb_truncate); | ||
4349 | |||
4350 | UPDATE_STAT64_NIG(egress_mac_pkt0, | ||
4351 | etherstatspkts1024octetsto1522octets); | ||
4352 | UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets); | ||
4353 | |||
4354 | memcpy(old, new, sizeof(struct nig_stats)); | ||
4355 | |||
4356 | memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), | ||
4357 | sizeof(struct mac_stx)); | ||
4358 | estats->brb_drop_hi = pstats->brb_drop_hi; | ||
4359 | estats->brb_drop_lo = pstats->brb_drop_lo; | ||
4360 | |||
4361 | pstats->host_port_stats_start = ++pstats->host_port_stats_end; | ||
4362 | |||
4363 | if (!BP_NOMCP(bp)) { | ||
4364 | u32 nig_timer_max = | ||
4365 | SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); | ||
4366 | if (nig_timer_max != estats->nig_timer_max) { | ||
4367 | estats->nig_timer_max = nig_timer_max; | ||
4368 | BNX2X_ERR("NIG timer max (%u)\n", | ||
4369 | estats->nig_timer_max); | ||
4370 | } | ||
4371 | } | ||
4372 | |||
4373 | return 0; | ||
4374 | } | ||
4375 | |||
4376 | static int bnx2x_storm_stats_update(struct bnx2x *bp) | ||
4377 | { | ||
4378 | struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats); | ||
4379 | struct tstorm_per_port_stats *tport = | ||
4380 | &stats->tstorm_common.port_statistics; | ||
4381 | struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); | ||
4382 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
4383 | int i; | ||
4384 | |||
4385 | memcpy(&(fstats->total_bytes_received_hi), | ||
4386 | &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), | ||
4387 | sizeof(struct host_func_stats) - 2*sizeof(u32)); | ||
4388 | estats->error_bytes_received_hi = 0; | ||
4389 | estats->error_bytes_received_lo = 0; | ||
4390 | estats->etherstatsoverrsizepkts_hi = 0; | ||
4391 | estats->etherstatsoverrsizepkts_lo = 0; | ||
4392 | estats->no_buff_discard_hi = 0; | ||
4393 | estats->no_buff_discard_lo = 0; | ||
4394 | |||
4395 | for_each_queue(bp, i) { | ||
4396 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
4397 | int cl_id = fp->cl_id; | ||
4398 | struct tstorm_per_client_stats *tclient = | ||
4399 | &stats->tstorm_common.client_statistics[cl_id]; | ||
4400 | struct tstorm_per_client_stats *old_tclient = &fp->old_tclient; | ||
4401 | struct ustorm_per_client_stats *uclient = | ||
4402 | &stats->ustorm_common.client_statistics[cl_id]; | ||
4403 | struct ustorm_per_client_stats *old_uclient = &fp->old_uclient; | ||
4404 | struct xstorm_per_client_stats *xclient = | ||
4405 | &stats->xstorm_common.client_statistics[cl_id]; | ||
4406 | struct xstorm_per_client_stats *old_xclient = &fp->old_xclient; | ||
4407 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; | ||
4408 | u32 diff; | ||
4409 | |||
4410 | /* are storm stats valid? */ | ||
4411 | if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) != | ||
4412 | bp->stats_counter) { | ||
4413 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm" | ||
4414 | " xstorm counter (0x%x) != stats_counter (0x%x)\n", | ||
4415 | i, xclient->stats_counter, bp->stats_counter); | ||
4416 | return -1; | ||
4417 | } | ||
4418 | if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) != | ||
4419 | bp->stats_counter) { | ||
4420 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm" | ||
4421 | " tstorm counter (0x%x) != stats_counter (0x%x)\n", | ||
4422 | i, tclient->stats_counter, bp->stats_counter); | ||
4423 | return -2; | ||
4424 | } | ||
4425 | if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) != | ||
4426 | bp->stats_counter) { | ||
4427 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm" | ||
4428 | " ustorm counter (0x%x) != stats_counter (0x%x)\n", | ||
4429 | i, uclient->stats_counter, bp->stats_counter); | ||
4430 | return -4; | ||
4431 | } | ||
4432 | |||
4433 | qstats->total_bytes_received_hi = | ||
4434 | le32_to_cpu(tclient->rcv_broadcast_bytes.hi); | ||
4435 | qstats->total_bytes_received_lo = | ||
4436 | le32_to_cpu(tclient->rcv_broadcast_bytes.lo); | ||
4437 | |||
4438 | ADD_64(qstats->total_bytes_received_hi, | ||
4439 | le32_to_cpu(tclient->rcv_multicast_bytes.hi), | ||
4440 | qstats->total_bytes_received_lo, | ||
4441 | le32_to_cpu(tclient->rcv_multicast_bytes.lo)); | ||
4442 | |||
4443 | ADD_64(qstats->total_bytes_received_hi, | ||
4444 | le32_to_cpu(tclient->rcv_unicast_bytes.hi), | ||
4445 | qstats->total_bytes_received_lo, | ||
4446 | le32_to_cpu(tclient->rcv_unicast_bytes.lo)); | ||
4447 | |||
4448 | SUB_64(qstats->total_bytes_received_hi, | ||
4449 | le32_to_cpu(uclient->bcast_no_buff_bytes.hi), | ||
4450 | qstats->total_bytes_received_lo, | ||
4451 | le32_to_cpu(uclient->bcast_no_buff_bytes.lo)); | ||
4452 | |||
4453 | SUB_64(qstats->total_bytes_received_hi, | ||
4454 | le32_to_cpu(uclient->mcast_no_buff_bytes.hi), | ||
4455 | qstats->total_bytes_received_lo, | ||
4456 | le32_to_cpu(uclient->mcast_no_buff_bytes.lo)); | ||
4457 | |||
4458 | SUB_64(qstats->total_bytes_received_hi, | ||
4459 | le32_to_cpu(uclient->ucast_no_buff_bytes.hi), | ||
4460 | qstats->total_bytes_received_lo, | ||
4461 | le32_to_cpu(uclient->ucast_no_buff_bytes.lo)); | ||
4462 | |||
4463 | qstats->valid_bytes_received_hi = | ||
4464 | qstats->total_bytes_received_hi; | ||
4465 | qstats->valid_bytes_received_lo = | ||
4466 | qstats->total_bytes_received_lo; | ||
4467 | |||
4468 | qstats->error_bytes_received_hi = | ||
4469 | le32_to_cpu(tclient->rcv_error_bytes.hi); | ||
4470 | qstats->error_bytes_received_lo = | ||
4471 | le32_to_cpu(tclient->rcv_error_bytes.lo); | ||
4472 | |||
4473 | ADD_64(qstats->total_bytes_received_hi, | ||
4474 | qstats->error_bytes_received_hi, | ||
4475 | qstats->total_bytes_received_lo, | ||
4476 | qstats->error_bytes_received_lo); | ||
4477 | |||
4478 | UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, | ||
4479 | total_unicast_packets_received); | ||
4480 | UPDATE_EXTEND_TSTAT(rcv_multicast_pkts, | ||
4481 | total_multicast_packets_received); | ||
4482 | UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts, | ||
4483 | total_broadcast_packets_received); | ||
4484 | UPDATE_EXTEND_TSTAT(packets_too_big_discard, | ||
4485 | etherstatsoverrsizepkts); | ||
4486 | UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); | ||
4487 | |||
4488 | SUB_EXTEND_USTAT(ucast_no_buff_pkts, | ||
4489 | total_unicast_packets_received); | ||
4490 | SUB_EXTEND_USTAT(mcast_no_buff_pkts, | ||
4491 | total_multicast_packets_received); | ||
4492 | SUB_EXTEND_USTAT(bcast_no_buff_pkts, | ||
4493 | total_broadcast_packets_received); | ||
4494 | UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard); | ||
4495 | UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); | ||
4496 | UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); | ||
4497 | |||
4498 | qstats->total_bytes_transmitted_hi = | ||
4499 | le32_to_cpu(xclient->unicast_bytes_sent.hi); | ||
4500 | qstats->total_bytes_transmitted_lo = | ||
4501 | le32_to_cpu(xclient->unicast_bytes_sent.lo); | ||
4502 | |||
4503 | ADD_64(qstats->total_bytes_transmitted_hi, | ||
4504 | le32_to_cpu(xclient->multicast_bytes_sent.hi), | ||
4505 | qstats->total_bytes_transmitted_lo, | ||
4506 | le32_to_cpu(xclient->multicast_bytes_sent.lo)); | ||
4507 | |||
4508 | ADD_64(qstats->total_bytes_transmitted_hi, | ||
4509 | le32_to_cpu(xclient->broadcast_bytes_sent.hi), | ||
4510 | qstats->total_bytes_transmitted_lo, | ||
4511 | le32_to_cpu(xclient->broadcast_bytes_sent.lo)); | ||
4512 | |||
4513 | UPDATE_EXTEND_XSTAT(unicast_pkts_sent, | ||
4514 | total_unicast_packets_transmitted); | ||
4515 | UPDATE_EXTEND_XSTAT(multicast_pkts_sent, | ||
4516 | total_multicast_packets_transmitted); | ||
4517 | UPDATE_EXTEND_XSTAT(broadcast_pkts_sent, | ||
4518 | total_broadcast_packets_transmitted); | ||
4519 | |||
4520 | old_tclient->checksum_discard = tclient->checksum_discard; | ||
4521 | old_tclient->ttl0_discard = tclient->ttl0_discard; | ||
4522 | |||
4523 | ADD_64(fstats->total_bytes_received_hi, | ||
4524 | qstats->total_bytes_received_hi, | ||
4525 | fstats->total_bytes_received_lo, | ||
4526 | qstats->total_bytes_received_lo); | ||
4527 | ADD_64(fstats->total_bytes_transmitted_hi, | ||
4528 | qstats->total_bytes_transmitted_hi, | ||
4529 | fstats->total_bytes_transmitted_lo, | ||
4530 | qstats->total_bytes_transmitted_lo); | ||
4531 | ADD_64(fstats->total_unicast_packets_received_hi, | ||
4532 | qstats->total_unicast_packets_received_hi, | ||
4533 | fstats->total_unicast_packets_received_lo, | ||
4534 | qstats->total_unicast_packets_received_lo); | ||
4535 | ADD_64(fstats->total_multicast_packets_received_hi, | ||
4536 | qstats->total_multicast_packets_received_hi, | ||
4537 | fstats->total_multicast_packets_received_lo, | ||
4538 | qstats->total_multicast_packets_received_lo); | ||
4539 | ADD_64(fstats->total_broadcast_packets_received_hi, | ||
4540 | qstats->total_broadcast_packets_received_hi, | ||
4541 | fstats->total_broadcast_packets_received_lo, | ||
4542 | qstats->total_broadcast_packets_received_lo); | ||
4543 | ADD_64(fstats->total_unicast_packets_transmitted_hi, | ||
4544 | qstats->total_unicast_packets_transmitted_hi, | ||
4545 | fstats->total_unicast_packets_transmitted_lo, | ||
4546 | qstats->total_unicast_packets_transmitted_lo); | ||
4547 | ADD_64(fstats->total_multicast_packets_transmitted_hi, | ||
4548 | qstats->total_multicast_packets_transmitted_hi, | ||
4549 | fstats->total_multicast_packets_transmitted_lo, | ||
4550 | qstats->total_multicast_packets_transmitted_lo); | ||
4551 | ADD_64(fstats->total_broadcast_packets_transmitted_hi, | ||
4552 | qstats->total_broadcast_packets_transmitted_hi, | ||
4553 | fstats->total_broadcast_packets_transmitted_lo, | ||
4554 | qstats->total_broadcast_packets_transmitted_lo); | ||
4555 | ADD_64(fstats->valid_bytes_received_hi, | ||
4556 | qstats->valid_bytes_received_hi, | ||
4557 | fstats->valid_bytes_received_lo, | ||
4558 | qstats->valid_bytes_received_lo); | ||
4559 | |||
4560 | ADD_64(estats->error_bytes_received_hi, | ||
4561 | qstats->error_bytes_received_hi, | ||
4562 | estats->error_bytes_received_lo, | ||
4563 | qstats->error_bytes_received_lo); | ||
4564 | ADD_64(estats->etherstatsoverrsizepkts_hi, | ||
4565 | qstats->etherstatsoverrsizepkts_hi, | ||
4566 | estats->etherstatsoverrsizepkts_lo, | ||
4567 | qstats->etherstatsoverrsizepkts_lo); | ||
4568 | ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi, | ||
4569 | estats->no_buff_discard_lo, qstats->no_buff_discard_lo); | ||
4570 | } | ||
4571 | |||
4572 | ADD_64(fstats->total_bytes_received_hi, | ||
4573 | estats->rx_stat_ifhcinbadoctets_hi, | ||
4574 | fstats->total_bytes_received_lo, | ||
4575 | estats->rx_stat_ifhcinbadoctets_lo); | ||
4576 | |||
4577 | memcpy(estats, &(fstats->total_bytes_received_hi), | ||
4578 | sizeof(struct host_func_stats) - 2*sizeof(u32)); | ||
4579 | |||
4580 | ADD_64(estats->etherstatsoverrsizepkts_hi, | ||
4581 | estats->rx_stat_dot3statsframestoolong_hi, | ||
4582 | estats->etherstatsoverrsizepkts_lo, | ||
4583 | estats->rx_stat_dot3statsframestoolong_lo); | ||
4584 | ADD_64(estats->error_bytes_received_hi, | ||
4585 | estats->rx_stat_ifhcinbadoctets_hi, | ||
4586 | estats->error_bytes_received_lo, | ||
4587 | estats->rx_stat_ifhcinbadoctets_lo); | ||
4588 | |||
4589 | if (bp->port.pmf) { | ||
4590 | estats->mac_filter_discard = | ||
4591 | le32_to_cpu(tport->mac_filter_discard); | ||
4592 | estats->xxoverflow_discard = | ||
4593 | le32_to_cpu(tport->xxoverflow_discard); | ||
4594 | estats->brb_truncate_discard = | ||
4595 | le32_to_cpu(tport->brb_truncate_discard); | ||
4596 | estats->mac_discard = le32_to_cpu(tport->mac_discard); | ||
4597 | } | ||
4598 | |||
4599 | fstats->host_func_stats_start = ++fstats->host_func_stats_end; | ||
4600 | |||
4601 | bp->stats_pending = 0; | ||
4602 | |||
4603 | return 0; | ||
4604 | } | ||
4605 | |||
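[Editor's note] The three checks at the top of the per-queue loop are a sequence-number handshake: each storm stamps its snapshot with a little-endian counter, and the driver accepts the block only when counter + 1 matches its own stats_counter, i.e. when the firmware has finished writing the generation that was requested. A minimal model of that check (the struct is an illustrative stand-in; le16toh is the glibc conversion standing in for le16_to_cpu):

    #include <endian.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct storm_snapshot {
        uint16_t stats_counter;   /* written last by firmware, little endian */
        /* ... per-client counters ... */
    };

    /*
     * Accept the DMA'd snapshot only if it belongs to the generation the
     * driver requested; otherwise it is stale and the caller must retry.
     */
    static bool snapshot_valid(const struct storm_snapshot *s,
                               uint16_t driver_counter)
    {
        return (uint16_t)(le16toh(s->stats_counter) + 1) == driver_counter;
    }
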
4606 | static void bnx2x_net_stats_update(struct bnx2x *bp) | ||
4607 | { | ||
4608 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
4609 | struct net_device_stats *nstats = &bp->dev->stats; | ||
4610 | int i; | ||
4611 | |||
4612 | nstats->rx_packets = | ||
4613 | bnx2x_hilo(&estats->total_unicast_packets_received_hi) + | ||
4614 | bnx2x_hilo(&estats->total_multicast_packets_received_hi) + | ||
4615 | bnx2x_hilo(&estats->total_broadcast_packets_received_hi); | ||
4616 | |||
4617 | nstats->tx_packets = | ||
4618 | bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) + | ||
4619 | bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) + | ||
4620 | bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi); | ||
4621 | |||
4622 | nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); | ||
4623 | |||
4624 | nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); | ||
4625 | |||
4626 | nstats->rx_dropped = estats->mac_discard; | ||
4627 | for_each_queue(bp, i) | ||
4628 | nstats->rx_dropped += | ||
4629 | le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); | ||
4630 | |||
4631 | nstats->tx_dropped = 0; | ||
4632 | |||
4633 | nstats->multicast = | ||
4634 | bnx2x_hilo(&estats->total_multicast_packets_received_hi); | ||
4635 | |||
4636 | nstats->collisions = | ||
4637 | bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi); | ||
4638 | |||
4639 | nstats->rx_length_errors = | ||
4640 | bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) + | ||
4641 | bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi); | ||
4642 | nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) + | ||
4643 | bnx2x_hilo(&estats->brb_truncate_hi); | ||
4644 | nstats->rx_crc_errors = | ||
4645 | bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi); | ||
4646 | nstats->rx_frame_errors = | ||
4647 | bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); | ||
4648 | nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); | ||
4649 | nstats->rx_missed_errors = estats->xxoverflow_discard; | ||
4650 | |||
4651 | nstats->rx_errors = nstats->rx_length_errors + | ||
4652 | nstats->rx_over_errors + | ||
4653 | nstats->rx_crc_errors + | ||
4654 | nstats->rx_frame_errors + | ||
4655 | nstats->rx_fifo_errors + | ||
4656 | nstats->rx_missed_errors; | ||
4657 | |||
4658 | nstats->tx_aborted_errors = | ||
4659 | bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) + | ||
4660 | bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi); | ||
4661 | nstats->tx_carrier_errors = | ||
4662 | bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi); | ||
4663 | nstats->tx_fifo_errors = 0; | ||
4664 | nstats->tx_heartbeat_errors = 0; | ||
4665 | nstats->tx_window_errors = 0; | ||
4666 | |||
4667 | nstats->tx_errors = nstats->tx_aborted_errors + | ||
4668 | nstats->tx_carrier_errors + | ||
4669 | bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi); | ||
4670 | } | ||
4671 | |||
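[Editor's note] bnx2x_net_stats_update folds each hi/lo pair back into a single counter for struct net_device_stats via bnx2x_hilo(): on 64-bit kernels that is (hi << 32) | lo, while 32-bit builds report only the low word. A sketch of the helper as the call sites above use it; the ULONG_MAX test here stands in for the driver's BITS_PER_LONG check.

    #include <limits.h>
    #include <stdint.h>

    /*
     * Combine a {hi, lo} pair (stored hi first, as in the stats layouts
     * above) into an unsigned long for struct net_device_stats.
     */
    static unsigned long hilo(const uint32_t *hiref)
    {
    #if ULONG_MAX > 0xffffffffUL
        return ((unsigned long)hiref[0] << 32) | hiref[1];  /* 64-bit long */
    #else
        return hiref[1];                    /* 32-bit: report the low word */
    #endif
    }
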
4672 | static void bnx2x_drv_stats_update(struct bnx2x *bp) | ||
4673 | { | ||
4674 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
4675 | int i; | ||
4676 | |||
4677 | estats->driver_xoff = 0; | ||
4678 | estats->rx_err_discard_pkt = 0; | ||
4679 | estats->rx_skb_alloc_failed = 0; | ||
4680 | estats->hw_csum_err = 0; | ||
4681 | for_each_queue(bp, i) { | ||
4682 | struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; | ||
4683 | |||
4684 | estats->driver_xoff += qstats->driver_xoff; | ||
4685 | estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt; | ||
4686 | estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed; | ||
4687 | estats->hw_csum_err += qstats->hw_csum_err; | ||
4688 | } | ||
4689 | } | ||
4690 | |||
4691 | static void bnx2x_stats_update(struct bnx2x *bp) | ||
4692 | { | ||
4693 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
4694 | |||
4695 | if (*stats_comp != DMAE_COMP_VAL) | ||
4696 | return; | ||
4697 | |||
4698 | if (bp->port.pmf) | ||
4699 | bnx2x_hw_stats_update(bp); | ||
4700 | |||
4701 | if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) { | ||
4702 | BNX2X_ERR("storm stats were not updated for 3 times\n"); | ||
4703 | bnx2x_panic(); | ||
4704 | return; | ||
4705 | } | ||
4706 | |||
4707 | bnx2x_net_stats_update(bp); | ||
4708 | bnx2x_drv_stats_update(bp); | ||
4709 | |||
4710 | if (netif_msg_timer(bp)) { | ||
4711 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
4712 | int i; | ||
4713 | |||
4714 | printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n", | ||
4715 | bp->dev->name, | ||
4716 | estats->brb_drop_lo, estats->brb_truncate_lo); | ||
4717 | |||
4718 | for_each_queue(bp, i) { | ||
4719 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
4720 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; | ||
4721 | |||
4722 | printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)" | ||
4723 | " rx pkt(%lu) rx calls(%lu %lu)\n", | ||
4724 | fp->name, (le16_to_cpu(*fp->rx_cons_sb) - | ||
4725 | fp->rx_comp_cons), | ||
4726 | le16_to_cpu(*fp->rx_cons_sb), | ||
4727 | bnx2x_hilo(&qstats-> | ||
4728 | total_unicast_packets_received_hi), | ||
4729 | fp->rx_calls, fp->rx_pkt); | ||
4730 | } | ||
4731 | |||
4732 | for_each_queue(bp, i) { | ||
4733 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
4734 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; | ||
4735 | struct netdev_queue *txq = | ||
4736 | netdev_get_tx_queue(bp->dev, i); | ||
4737 | |||
4738 | printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)" | ||
4739 | " tx pkt(%lu) tx calls (%lu)" | ||
4740 | " %s (Xoff events %u)\n", | ||
4741 | fp->name, bnx2x_tx_avail(fp), | ||
4742 | le16_to_cpu(*fp->tx_cons_sb), | ||
4743 | bnx2x_hilo(&qstats-> | ||
4744 | total_unicast_packets_transmitted_hi), | ||
4745 | fp->tx_pkt, | ||
4746 | (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"), | ||
4747 | qstats->driver_xoff); | ||
4748 | } | ||
4749 | } | ||
4750 | |||
4751 | bnx2x_hw_stats_post(bp); | ||
4752 | bnx2x_storm_stats_post(bp); | ||
4753 | } | ||
4754 | |||
4755 | static void bnx2x_port_stats_stop(struct bnx2x *bp) | ||
4756 | { | ||
4757 | struct dmae_command *dmae; | ||
4758 | u32 opcode; | ||
4759 | int loader_idx = PMF_DMAE_C(bp); | ||
4760 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
4761 | |||
4762 | bp->executer_idx = 0; | ||
4763 | |||
4764 | opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | | ||
4765 | DMAE_CMD_C_ENABLE | | ||
4766 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
4767 | #ifdef __BIG_ENDIAN | ||
4768 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
4769 | #else | ||
4770 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
4771 | #endif | ||
4772 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
4773 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
4774 | |||
4775 | if (bp->port.port_stx) { | ||
4776 | |||
4777 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
4778 | if (bp->func_stx) | ||
4779 | dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); | ||
4780 | else | ||
4781 | dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); | ||
4782 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); | ||
4783 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); | ||
4784 | dmae->dst_addr_lo = bp->port.port_stx >> 2; | ||
4785 | dmae->dst_addr_hi = 0; | ||
4786 | dmae->len = sizeof(struct host_port_stats) >> 2; | ||
4787 | if (bp->func_stx) { | ||
4788 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
4789 | dmae->comp_addr_hi = 0; | ||
4790 | dmae->comp_val = 1; | ||
4791 | } else { | ||
4792 | dmae->comp_addr_lo = | ||
4793 | U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
4794 | dmae->comp_addr_hi = | ||
4795 | U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
4796 | dmae->comp_val = DMAE_COMP_VAL; | ||
4797 | |||
4798 | *stats_comp = 0; | ||
4799 | } | ||
4800 | } | ||
4801 | |||
4802 | if (bp->func_stx) { | ||
4803 | |||
4804 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
4805 | dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); | ||
4806 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); | ||
4807 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); | ||
4808 | dmae->dst_addr_lo = bp->func_stx >> 2; | ||
4809 | dmae->dst_addr_hi = 0; | ||
4810 | dmae->len = sizeof(struct host_func_stats) >> 2; | ||
4811 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
4812 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
4813 | dmae->comp_val = DMAE_COMP_VAL; | ||
4814 | |||
4815 | *stats_comp = 0; | ||
4816 | } | ||
4817 | } | ||
4818 | |||
4819 | static void bnx2x_stats_stop(struct bnx2x *bp) | ||
4820 | { | ||
4821 | int update = 0; | ||
4822 | |||
4823 | bnx2x_stats_comp(bp); | ||
4824 | |||
4825 | if (bp->port.pmf) | ||
4826 | update = (bnx2x_hw_stats_update(bp) == 0); | ||
4827 | |||
4828 | update |= (bnx2x_storm_stats_update(bp) == 0); | ||
4829 | |||
4830 | if (update) { | ||
4831 | bnx2x_net_stats_update(bp); | ||
4832 | |||
4833 | if (bp->port.pmf) | ||
4834 | bnx2x_port_stats_stop(bp); | ||
4835 | |||
4836 | bnx2x_hw_stats_post(bp); | ||
4837 | bnx2x_stats_comp(bp); | ||
4838 | } | ||
4839 | } | ||
4840 | |||
4841 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) | ||
4842 | { | ||
4843 | } | ||
4844 | |||
4845 | static const struct { | ||
4846 | void (*action)(struct bnx2x *bp); | ||
4847 | enum bnx2x_stats_state next_state; | ||
4848 | } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = { | ||
4849 | /* state event */ | ||
4850 | { | ||
4851 | /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED}, | ||
4852 | /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED}, | ||
4853 | /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}, | ||
4854 | /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED} | ||
4855 | }, | ||
4856 | { | ||
4857 | /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED}, | ||
4858 | /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED}, | ||
4859 | /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED}, | ||
4860 | /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED} | ||
4861 | } | ||
4862 | }; | ||
4863 | |||
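[Editor's note] bnx2x_stats_stm is a textbook table-driven state machine: a 2-D array indexed by [state][event] whose cells hold an action callback and the next state, leaving bnx2x_stats_handle() as a two-line dispatcher. The same shape in miniature (states, events, and actions here are invented for illustration):

    #include <stdio.h>

    enum state { ST_DISABLED, ST_ENABLED, ST_MAX };
    enum event { EV_LINK_UP, EV_UPDATE, EV_STOP, EV_MAX };

    struct ctx { enum state state; };

    static void do_nothing(struct ctx *c) { (void)c; }
    static void do_start(struct ctx *c)   { (void)c; printf("start stats\n"); }
    static void do_update(struct ctx *c)  { (void)c; printf("update stats\n"); }
    static void do_stop(struct ctx *c)    { (void)c; printf("stop stats\n"); }

    static const struct {
        void (*action)(struct ctx *);
        enum state next;
    } stm[ST_MAX][EV_MAX] = {
        [ST_DISABLED] = {
            [EV_LINK_UP] = { do_start,   ST_ENABLED  },
            [EV_UPDATE]  = { do_nothing, ST_DISABLED },
            [EV_STOP]    = { do_nothing, ST_DISABLED },
        },
        [ST_ENABLED] = {
            [EV_LINK_UP] = { do_start,   ST_ENABLED  },
            [EV_UPDATE]  = { do_update,  ST_ENABLED  },
            [EV_STOP]    = { do_stop,    ST_DISABLED },
        },
    };

    static void handle(struct ctx *c, enum event ev)
    {
        enum state s = c->state;

        stm[s][ev].action(c);
        c->state = stm[s][ev].next;   /* transition after the action, as bnx2x does */
    }

    int main(void)
    {
        struct ctx c = { ST_DISABLED };

        handle(&c, EV_LINK_UP);
        handle(&c, EV_UPDATE);
        handle(&c, EV_STOP);
        return 0;
    }

Dispatch cost is constant, and adding a state or event is a table edit rather than new control flow, which is why the driver can share one handler across PMF and non-PMF paths.
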
4864 | static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | ||
4865 | { | ||
4866 | enum bnx2x_stats_state state = bp->stats_state; | ||
4867 | |||
4868 | if (unlikely(bp->panic)) | ||
4869 | return; | ||
4870 | |||
4871 | bnx2x_stats_stm[state][event].action(bp); | ||
4872 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | ||
4873 | |||
4874 | /* Make sure the state has been "changed" */ | ||
4875 | smp_wmb(); | ||
4876 | |||
4877 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | ||
4878 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | ||
4879 | state, event, bp->stats_state); | ||
4880 | } | ||
4881 | |||
4882 | static void bnx2x_port_stats_base_init(struct bnx2x *bp) | ||
4883 | { | ||
4884 | struct dmae_command *dmae; | ||
4885 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
4886 | |||
4887 | /* sanity */ | ||
4888 | if (!bp->port.pmf || !bp->port.port_stx) { | ||
4889 | BNX2X_ERR("BUG!\n"); | ||
4890 | return; | ||
4891 | } | ||
4892 | |||
4893 | bp->executer_idx = 0; | ||
4894 | |||
4895 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
4896 | dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | | ||
4897 | DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | | ||
4898 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
4899 | #ifdef __BIG_ENDIAN | ||
4900 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
4901 | #else | ||
4902 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
4903 | #endif | ||
4904 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
4905 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
4906 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); | ||
4907 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); | ||
4908 | dmae->dst_addr_lo = bp->port.port_stx >> 2; | ||
4909 | dmae->dst_addr_hi = 0; | ||
4910 | dmae->len = sizeof(struct host_port_stats) >> 2; | ||
4911 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
4912 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
4913 | dmae->comp_val = DMAE_COMP_VAL; | ||
4914 | |||
4915 | *stats_comp = 0; | ||
4916 | bnx2x_hw_stats_post(bp); | ||
4917 | bnx2x_stats_comp(bp); | ||
4918 | } | ||
4919 | |||
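[Editor's note] bnx2x_port_stats_base_init shows the synchronous variant of the flow: clear the completion word, post the DMAE command, then bnx2x_stats_comp() spins until the engine writes DMAE_COMP_VAL back to host memory. A stripped-down poll loop of that shape; the magic value and relax callback are illustrative, and in-kernel this would be a bounded udelay() loop.

    #include <stdbool.h>
    #include <stdint.h>

    #define COMP_MAGIC 0xe0d0e0d0u   /* stands in for DMAE_COMP_VAL */

    /*
     * Poll a completion word the DMA engine writes; returns false on
     * timeout.  relax() stands in for udelay()/cpu_relax() in kernel
     * context.
     */
    static bool wait_comp(volatile uint32_t *comp, unsigned int max_spins,
                          void (*relax)(void))
    {
        while (*comp != COMP_MAGIC) {
            if (max_spins-- == 0)
                return false;
            relax();
        }
        *comp = 0;                   /* re-arm for the next post */
        return true;
    }
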
4920 | static void bnx2x_func_stats_base_init(struct bnx2x *bp) | ||
4921 | { | ||
4922 | int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX; | ||
4923 | int port = BP_PORT(bp); | ||
4924 | int func; | ||
4925 | u32 func_stx; | ||
4926 | |||
4927 | /* sanity */ | ||
4928 | if (!bp->port.pmf || !bp->func_stx) { | ||
4929 | BNX2X_ERR("BUG!\n"); | ||
4930 | return; | ||
4931 | } | ||
4932 | |||
4933 | /* save our func_stx */ | ||
4934 | func_stx = bp->func_stx; | ||
4935 | |||
4936 | for (vn = VN_0; vn < vn_max; vn++) { | ||
4937 | func = 2*vn + port; | ||
4938 | |||
4939 | bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); | ||
4940 | bnx2x_func_stats_init(bp); | ||
4941 | bnx2x_hw_stats_post(bp); | ||
4942 | bnx2x_stats_comp(bp); | ||
4943 | } | ||
4944 | |||
4945 | /* restore our func_stx */ | ||
4946 | bp->func_stx = func_stx; | ||
4947 | } | ||
4948 | |||
4949 | static void bnx2x_func_stats_base_update(struct bnx2x *bp) | ||
4950 | { | ||
4951 | struct dmae_command *dmae = &bp->stats_dmae; | ||
4952 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
4953 | |||
4954 | /* sanity */ | ||
4955 | if (!bp->func_stx) { | ||
4956 | BNX2X_ERR("BUG!\n"); | ||
4957 | return; | ||
4958 | } | ||
4959 | |||
4960 | bp->executer_idx = 0; | ||
4961 | memset(dmae, 0, sizeof(struct dmae_command)); | ||
4962 | |||
4963 | dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | | ||
4964 | DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | | ||
4965 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
4966 | #ifdef __BIG_ENDIAN | ||
4967 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
4968 | #else | ||
4969 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
4970 | #endif | ||
4971 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
4972 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
4973 | dmae->src_addr_lo = bp->func_stx >> 2; | ||
4974 | dmae->src_addr_hi = 0; | ||
4975 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base)); | ||
4976 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base)); | ||
4977 | dmae->len = sizeof(struct host_func_stats) >> 2; | ||
4978 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
4979 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
4980 | dmae->comp_val = DMAE_COMP_VAL; | ||
4981 | |||
4982 | *stats_comp = 0; | ||
4983 | bnx2x_hw_stats_post(bp); | ||
4984 | bnx2x_stats_comp(bp); | ||
4985 | } | ||
4986 | |||
4987 | static void bnx2x_stats_init(struct bnx2x *bp) | ||
4988 | { | ||
4989 | int port = BP_PORT(bp); | ||
4990 | int func = BP_FUNC(bp); | ||
4991 | int i; | ||
4992 | |||
4993 | bp->stats_pending = 0; | ||
4994 | bp->executer_idx = 0; | ||
4995 | bp->stats_counter = 0; | ||
4996 | |||
4997 | /* port and func stats for management */ | ||
4998 | if (!BP_NOMCP(bp)) { | ||
4999 | bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); | ||
5000 | bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); | ||
5001 | |||
5002 | } else { | ||
5003 | bp->port.port_stx = 0; | ||
5004 | bp->func_stx = 0; | ||
5005 | } | ||
5006 | DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", | ||
5007 | bp->port.port_stx, bp->func_stx); | ||
5008 | |||
5009 | /* port stats */ | ||
5010 | memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); | ||
5011 | bp->port.old_nig_stats.brb_discard = | ||
5012 | REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); | ||
5013 | bp->port.old_nig_stats.brb_truncate = | ||
5014 | REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); | ||
5015 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, | ||
5016 | &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); | ||
5017 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, | ||
5018 | &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); | ||
5019 | |||
5020 | /* function stats */ | ||
5021 | for_each_queue(bp, i) { | ||
5022 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
5023 | |||
5024 | memset(&fp->old_tclient, 0, | ||
5025 | sizeof(struct tstorm_per_client_stats)); | ||
5026 | memset(&fp->old_uclient, 0, | ||
5027 | sizeof(struct ustorm_per_client_stats)); | ||
5028 | memset(&fp->old_xclient, 0, | ||
5029 | sizeof(struct xstorm_per_client_stats)); | ||
5030 | memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats)); | ||
5031 | } | ||
5032 | |||
5033 | memset(&bp->dev->stats, 0, sizeof(struct net_device_stats)); | ||
5034 | memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats)); | ||
5035 | |||
5036 | bp->stats_state = STATS_STATE_DISABLED; | ||
5037 | |||
5038 | if (bp->port.pmf) { | ||
5039 | if (bp->port.port_stx) | ||
5040 | bnx2x_port_stats_base_init(bp); | ||
5041 | |||
5042 | if (bp->func_stx) | ||
5043 | bnx2x_func_stats_base_init(bp); | ||
5044 | |||
5045 | } else if (bp->func_stx) | ||
5046 | bnx2x_func_stats_base_update(bp); | ||
5047 | } | ||
5048 | |||
5049 | static void bnx2x_timer(unsigned long data) | 2655 | static void bnx2x_timer(unsigned long data) |
5050 | { | 2656 | { |
5051 | struct bnx2x *bp = (struct bnx2x *) data; | 2657 | struct bnx2x *bp = (struct bnx2x *) data; |
@@ -5116,7 +2722,7 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id) | |||
5116 | CSTORM_SB_STATUS_BLOCK_C_SIZE / 4); | 2722 | CSTORM_SB_STATUS_BLOCK_C_SIZE / 4); |
5117 | } | 2723 | } |
5118 | 2724 | ||
5119 | static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, | 2725 | void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, |
5120 | dma_addr_t mapping, int sb_id) | 2726 | dma_addr_t mapping, int sb_id) |
5121 | { | 2727 | { |
5122 | int port = BP_PORT(bp); | 2728 | int port = BP_PORT(bp); |
@@ -5295,7 +2901,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
5295 | bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 2901 | bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
5296 | } | 2902 | } |
5297 | 2903 | ||
5298 | static void bnx2x_update_coalesce(struct bnx2x *bp) | 2904 | void bnx2x_update_coalesce(struct bnx2x *bp) |
5299 | { | 2905 | { |
5300 | int port = BP_PORT(bp); | 2906 | int port = BP_PORT(bp); |
5301 | int i; | 2907 | int i; |
@@ -5325,207 +2931,6 @@ static void bnx2x_update_coalesce(struct bnx2x *bp) | |||
5325 | } | 2931 | } |
5326 | } | 2932 | } |
5327 | 2933 | ||
5328 | static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, | ||
5329 | struct bnx2x_fastpath *fp, int last) | ||
5330 | { | ||
5331 | int i; | ||
5332 | |||
5333 | for (i = 0; i < last; i++) { | ||
5334 | struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]); | ||
5335 | struct sk_buff *skb = rx_buf->skb; | ||
5336 | |||
5337 | if (skb == NULL) { | ||
5338 | DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); | ||
5339 | continue; | ||
5340 | } | ||
5341 | |||
5342 | if (fp->tpa_state[i] == BNX2X_TPA_START) | ||
5343 | dma_unmap_single(&bp->pdev->dev, | ||
5344 | dma_unmap_addr(rx_buf, mapping), | ||
5345 | bp->rx_buf_size, DMA_FROM_DEVICE); | ||
5346 | |||
5347 | dev_kfree_skb(skb); | ||
5348 | rx_buf->skb = NULL; | ||
5349 | } | ||
5350 | } | ||
5351 | |||
5352 | static void bnx2x_init_rx_rings(struct bnx2x *bp) | ||
5353 | { | ||
5354 | int func = BP_FUNC(bp); | ||
5355 | int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
5356 | ETH_MAX_AGGREGATION_QUEUES_E1H; | ||
5357 | u16 ring_prod, cqe_ring_prod; | ||
5358 | int i, j; | ||
5359 | |||
5360 | bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN; | ||
5361 | DP(NETIF_MSG_IFUP, | ||
5362 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); | ||
5363 | |||
5364 | if (bp->flags & TPA_ENABLE_FLAG) { | ||
5365 | |||
5366 | for_each_queue(bp, j) { | ||
5367 | struct bnx2x_fastpath *fp = &bp->fp[j]; | ||
5368 | |||
5369 | for (i = 0; i < max_agg_queues; i++) { | ||
5370 | fp->tpa_pool[i].skb = | ||
5371 | netdev_alloc_skb(bp->dev, bp->rx_buf_size); | ||
5372 | if (!fp->tpa_pool[i].skb) { | ||
5373 | BNX2X_ERR("Failed to allocate TPA " | ||
5374 | "skb pool for queue[%d] - " | ||
5375 | "disabling TPA on this " | ||
5376 | "queue!\n", j); | ||
5377 | bnx2x_free_tpa_pool(bp, fp, i); | ||
5378 | fp->disable_tpa = 1; | ||
5379 | break; | ||
5380 | } | ||
5381 | dma_unmap_addr_set((struct sw_rx_bd *) | ||
5382 | &bp->fp->tpa_pool[i], | ||
5383 | mapping, 0); | ||
5384 | fp->tpa_state[i] = BNX2X_TPA_STOP; | ||
5385 | } | ||
5386 | } | ||
5387 | } | ||
5388 | |||
5389 | for_each_queue(bp, j) { | ||
5390 | struct bnx2x_fastpath *fp = &bp->fp[j]; | ||
5391 | |||
5392 | fp->rx_bd_cons = 0; | ||
5393 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; | ||
5394 | fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX; | ||
5395 | |||
5396 | /* "next page" elements initialization */ | ||
5397 | /* SGE ring */ | ||
5398 | for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { | ||
5399 | struct eth_rx_sge *sge; | ||
5400 | |||
5401 | sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; | ||
5402 | sge->addr_hi = | ||
5403 | cpu_to_le32(U64_HI(fp->rx_sge_mapping + | ||
5404 | BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); | ||
5405 | sge->addr_lo = | ||
5406 | cpu_to_le32(U64_LO(fp->rx_sge_mapping + | ||
5407 | BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); | ||
5408 | } | ||
5409 | |||
5410 | bnx2x_init_sge_ring_bit_mask(fp); | ||
5411 | |||
5412 | /* RX BD ring */ | ||
5413 | for (i = 1; i <= NUM_RX_RINGS; i++) { | ||
5414 | struct eth_rx_bd *rx_bd; | ||
5415 | |||
5416 | rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; | ||
5417 | rx_bd->addr_hi = | ||
5418 | cpu_to_le32(U64_HI(fp->rx_desc_mapping + | ||
5419 | BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); | ||
5420 | rx_bd->addr_lo = | ||
5421 | cpu_to_le32(U64_LO(fp->rx_desc_mapping + | ||
5422 | BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); | ||
5423 | } | ||
5424 | |||
5425 | /* CQ ring */ | ||
5426 | for (i = 1; i <= NUM_RCQ_RINGS; i++) { | ||
5427 | struct eth_rx_cqe_next_page *nextpg; | ||
5428 | |||
5429 | nextpg = (struct eth_rx_cqe_next_page *) | ||
5430 | &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; | ||
5431 | nextpg->addr_hi = | ||
5432 | cpu_to_le32(U64_HI(fp->rx_comp_mapping + | ||
5433 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); | ||
5434 | nextpg->addr_lo = | ||
5435 | cpu_to_le32(U64_LO(fp->rx_comp_mapping + | ||
5436 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); | ||
5437 | } | ||
5438 | |||
5439 | /* Allocate SGEs and initialize the ring elements */ | ||
5440 | for (i = 0, ring_prod = 0; | ||
5441 | i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { | ||
5442 | |||
5443 | if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { | ||
5444 | BNX2X_ERR("was only able to allocate " | ||
5445 | "%d rx sges\n", i); | ||
5446 | BNX2X_ERR("disabling TPA for queue[%d]\n", j); | ||
5447 | /* Cleanup already allocated elements */ | ||
5448 | bnx2x_free_rx_sge_range(bp, fp, ring_prod); | ||
5449 | bnx2x_free_tpa_pool(bp, fp, max_agg_queues); | ||
5450 | fp->disable_tpa = 1; | ||
5451 | ring_prod = 0; | ||
5452 | break; | ||
5453 | } | ||
5454 | ring_prod = NEXT_SGE_IDX(ring_prod); | ||
5455 | } | ||
5456 | fp->rx_sge_prod = ring_prod; | ||
5457 | |||
5458 | /* Allocate BDs and initialize BD ring */ | ||
5459 | fp->rx_comp_cons = 0; | ||
5460 | cqe_ring_prod = ring_prod = 0; | ||
5461 | for (i = 0; i < bp->rx_ring_size; i++) { | ||
5462 | if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { | ||
5463 | BNX2X_ERR("was only able to allocate " | ||
5464 | "%d rx skbs on queue[%d]\n", i, j); | ||
5465 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
5466 | break; | ||
5467 | } | ||
5468 | ring_prod = NEXT_RX_IDX(ring_prod); | ||
5469 | cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); | ||
5470 | WARN_ON(ring_prod <= i); | ||
5471 | } | ||
5472 | |||
5473 | fp->rx_bd_prod = ring_prod; | ||
5474 | /* must not have more available CQEs than BDs */ | ||
5475 | fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, | ||
5476 | cqe_ring_prod); | ||
5477 | fp->rx_pkt = fp->rx_calls = 0; | ||
5478 | |||
5479 | /* Warning! | ||
5480 | * this will generate an interrupt (to the TSTORM) | ||
5481 | * must only be done after chip is initialized | ||
5482 | */ | ||
5483 | bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod, | ||
5484 | fp->rx_sge_prod); | ||
5485 | if (j != 0) | ||
5486 | continue; | ||
5487 | |||
5488 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
5489 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), | ||
5490 | U64_LO(fp->rx_comp_mapping)); | ||
5491 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
5492 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, | ||
5493 | U64_HI(fp->rx_comp_mapping)); | ||
5494 | } | ||
5495 | } | ||
5496 | |||
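[Editor's note] The removed ring setup depends on "next page" elements: the final descriptor slot(s) of each page carry the DMA address of the following page, so the hardware can walk a long ring built from non-contiguous pages, and the i % NUM_..._RINGS arithmetic wraps the last page back to the first. A sketch of that chaining for a generic descriptor page; the sizes and two-word pointer layout are illustrative, and in-kernel the words would go through cpu_to_le32().

    #include <stdint.h>

    #define PAGE_SZ   4096u
    #define DESC_SZ   16u
    #define DESC_CNT  (PAGE_SZ / DESC_SZ)    /* descriptors per page */

    struct next_ptr { uint32_t addr_hi, addr_lo; };

    /*
     * Point the last slot of each page at the next page; the last page
     * wraps to the first, closing the ring.
     */
    static void chain_pages(void *pages[], const uint64_t dma[], int npages)
    {
        for (int i = 0; i < npages; i++) {
            struct next_ptr *np = (struct next_ptr *)
                ((char *)pages[i] + (DESC_CNT - 1) * DESC_SZ);
            uint64_t next = dma[(i + 1) % npages];

            np->addr_hi = (uint32_t)(next >> 32);  /* device expects LE;    */
            np->addr_lo = (uint32_t)next;          /* cpu_to_le32 in-kernel */
        }
    }
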
5497 | static void bnx2x_init_tx_ring(struct bnx2x *bp) | ||
5498 | { | ||
5499 | int i, j; | ||
5500 | |||
5501 | for_each_queue(bp, j) { | ||
5502 | struct bnx2x_fastpath *fp = &bp->fp[j]; | ||
5503 | |||
5504 | for (i = 1; i <= NUM_TX_RINGS; i++) { | ||
5505 | struct eth_tx_next_bd *tx_next_bd = | ||
5506 | &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; | ||
5507 | |||
5508 | tx_next_bd->addr_hi = | ||
5509 | cpu_to_le32(U64_HI(fp->tx_desc_mapping + | ||
5510 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); | ||
5511 | tx_next_bd->addr_lo = | ||
5512 | cpu_to_le32(U64_LO(fp->tx_desc_mapping + | ||
5513 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); | ||
5514 | } | ||
5515 | |||
5516 | fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE; | ||
5517 | fp->tx_db.data.zero_fill1 = 0; | ||
5518 | fp->tx_db.data.prod = 0; | ||
5519 | |||
5520 | fp->tx_pkt_prod = 0; | ||
5521 | fp->tx_pkt_cons = 0; | ||
5522 | fp->tx_bd_prod = 0; | ||
5523 | fp->tx_bd_cons = 0; | ||
5524 | fp->tx_cons_sb = BNX2X_TX_SB_INDEX; | ||
5525 | fp->tx_pkt = 0; | ||
5526 | } | ||
5527 | } | ||
5528 | |||
5529 | static void bnx2x_init_sp_ring(struct bnx2x *bp) | 2934 | static void bnx2x_init_sp_ring(struct bnx2x *bp) |
5530 | { | 2935 | { |
5531 | int func = BP_FUNC(bp); | 2936 | int func = BP_FUNC(bp); |
@@ -5640,7 +3045,7 @@ static void bnx2x_init_ind_table(struct bnx2x *bp) | |||
5640 | bp->fp->cl_id + (i % bp->num_queues)); | 3045 | bp->fp->cl_id + (i % bp->num_queues)); |
5641 | } | 3046 | } |
5642 | 3047 | ||
5643 | static void bnx2x_set_client_config(struct bnx2x *bp) | 3048 | void bnx2x_set_client_config(struct bnx2x *bp) |
5644 | { | 3049 | { |
5645 | struct tstorm_eth_client_config tstorm_client = {0}; | 3050 | struct tstorm_eth_client_config tstorm_client = {0}; |
5646 | int port = BP_PORT(bp); | 3051 | int port = BP_PORT(bp); |
@@ -5673,7 +3078,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp) | |||
5673 | ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); | 3078 | ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); |
5674 | } | 3079 | } |
5675 | 3080 | ||
5676 | static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | 3081 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp) |
5677 | { | 3082 | { |
5678 | struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; | 3083 | struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; |
5679 | int mode = bp->rx_mode; | 3084 | int mode = bp->rx_mode; |
@@ -5993,7 +3398,7 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) | |||
5993 | } | 3398 | } |
5994 | } | 3399 | } |
5995 | 3400 | ||
5996 | static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) | 3401 | void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) |
5997 | { | 3402 | { |
5998 | int i; | 3403 | int i; |
5999 | 3404 | ||
@@ -7074,7 +4479,7 @@ static int bnx2x_init_func(struct bnx2x *bp) | |||
7074 | return 0; | 4479 | return 0; |
7075 | } | 4480 | } |
7076 | 4481 | ||
7077 | static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) | 4482 | int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) |
7078 | { | 4483 | { |
7079 | int i, rc = 0; | 4484 | int i, rc = 0; |
7080 | 4485 | ||
@@ -7136,7 +4541,7 @@ init_hw_err: | |||
7136 | return rc; | 4541 | return rc; |
7137 | } | 4542 | } |
7138 | 4543 | ||
7139 | static void bnx2x_free_mem(struct bnx2x *bp) | 4544 | void bnx2x_free_mem(struct bnx2x *bp) |
7140 | { | 4545 | { |
7141 | 4546 | ||
7142 | #define BNX2X_PCI_FREE(x, y, size) \ | 4547 | #define BNX2X_PCI_FREE(x, y, size) \ |
@@ -7218,7 +4623,7 @@ static void bnx2x_free_mem(struct bnx2x *bp) | |||
7218 | #undef BNX2X_KFREE | 4623 | #undef BNX2X_KFREE |
7219 | } | 4624 | } |
7220 | 4625 | ||
7221 | static int bnx2x_alloc_mem(struct bnx2x *bp) | 4626 | int bnx2x_alloc_mem(struct bnx2x *bp) |
7222 | { | 4627 | { |
7223 | 4628 | ||
7224 | #define BNX2X_PCI_ALLOC(x, y, size) \ | 4629 | #define BNX2X_PCI_ALLOC(x, y, size) \ |
@@ -7324,264 +4729,6 @@ alloc_mem_err: | |||
7324 | #undef BNX2X_ALLOC | 4729 | #undef BNX2X_ALLOC |
7325 | } | 4730 | } |
7326 | 4731 | ||
7327 | static void bnx2x_free_tx_skbs(struct bnx2x *bp) | ||
7328 | { | ||
7329 | int i; | ||
7330 | |||
7331 | for_each_queue(bp, i) { | ||
7332 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
7333 | |||
7334 | u16 bd_cons = fp->tx_bd_cons; | ||
7335 | u16 sw_prod = fp->tx_pkt_prod; | ||
7336 | u16 sw_cons = fp->tx_pkt_cons; | ||
7337 | |||
7338 | while (sw_cons != sw_prod) { | ||
7339 | bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons)); | ||
7340 | sw_cons++; | ||
7341 | } | ||
7342 | } | ||
7343 | } | ||
7344 | |||
7345 | static void bnx2x_free_rx_skbs(struct bnx2x *bp) | ||
7346 | { | ||
7347 | int i, j; | ||
7348 | |||
7349 | for_each_queue(bp, j) { | ||
7350 | struct bnx2x_fastpath *fp = &bp->fp[j]; | ||
7351 | |||
7352 | for (i = 0; i < NUM_RX_BD; i++) { | ||
7353 | struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; | ||
7354 | struct sk_buff *skb = rx_buf->skb; | ||
7355 | |||
7356 | if (skb == NULL) | ||
7357 | continue; | ||
7358 | |||
7359 | dma_unmap_single(&bp->pdev->dev, | ||
7360 | dma_unmap_addr(rx_buf, mapping), | ||
7361 | bp->rx_buf_size, DMA_FROM_DEVICE); | ||
7362 | |||
7363 | rx_buf->skb = NULL; | ||
7364 | dev_kfree_skb(skb); | ||
7365 | } | ||
7366 | if (!fp->disable_tpa) | ||
7367 | bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? | ||
7368 | ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
7369 | ETH_MAX_AGGREGATION_QUEUES_E1H); | ||
7370 | } | ||
7371 | } | ||
7372 | |||
7373 | static void bnx2x_free_skbs(struct bnx2x *bp) | ||
7374 | { | ||
7375 | bnx2x_free_tx_skbs(bp); | ||
7376 | bnx2x_free_rx_skbs(bp); | ||
7377 | } | ||
7378 | |||
7379 | static void bnx2x_free_msix_irqs(struct bnx2x *bp) | ||
7380 | { | ||
7381 | int i, offset = 1; | ||
7382 | |||
7383 | free_irq(bp->msix_table[0].vector, bp->dev); | ||
7384 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", | ||
7385 | bp->msix_table[0].vector); | ||
7386 | |||
7387 | #ifdef BCM_CNIC | ||
7388 | offset++; | ||
7389 | #endif | ||
7390 | for_each_queue(bp, i) { | ||
7391 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " | ||
7392 | "state %x\n", i, bp->msix_table[i + offset].vector, | ||
7393 | bnx2x_fp(bp, i, state)); | ||
7394 | |||
7395 | free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]); | ||
7396 | } | ||
7397 | } | ||
7398 | |||
7399 | static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only) | ||
7400 | { | ||
7401 | if (bp->flags & USING_MSIX_FLAG) { | ||
7402 | if (!disable_only) | ||
7403 | bnx2x_free_msix_irqs(bp); | ||
7404 | pci_disable_msix(bp->pdev); | ||
7405 | bp->flags &= ~USING_MSIX_FLAG; | ||
7406 | |||
7407 | } else if (bp->flags & USING_MSI_FLAG) { | ||
7408 | if (!disable_only) | ||
7409 | free_irq(bp->pdev->irq, bp->dev); | ||
7410 | pci_disable_msi(bp->pdev); | ||
7411 | bp->flags &= ~USING_MSI_FLAG; | ||
7412 | |||
7413 | } else if (!disable_only) | ||
7414 | free_irq(bp->pdev->irq, bp->dev); | ||
7415 | } | ||
7416 | |||
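[Editor's note] bnx2x_free_irq undoes whichever of the three interrupt modes was negotiated at setup, keyed off the flags word, with disable_only skipping free_irq() when handlers were never requested. A compact model of that symmetric teardown; the enum and callbacks are illustrative stand-ins for the USING_MSIX_FLAG/USING_MSI_FLAG bits and the PCI helpers.

    #include <stdbool.h>

    enum irq_mode { IRQ_NONE, IRQ_MSIX, IRQ_MSI, IRQ_INTX };

    struct dev_irq {
        enum irq_mode mode;      /* plays the role of the USING_* flags */
    };

    /*
     * Teardown must follow the mode actually negotiated at setup time;
     * disable_only skips handler release when none was requested.
     */
    static void free_irqs(struct dev_irq *d, bool disable_only,
                          void (*free_vectors)(void), void (*pci_disable)(void))
    {
        switch (d->mode) {
        case IRQ_MSIX:
        case IRQ_MSI:
            if (!disable_only)
                free_vectors();
            pci_disable();       /* pci_disable_msix()/msi() in-kernel */
            break;
        case IRQ_INTX:
            if (!disable_only)
                free_vectors();  /* free_irq(pdev->irq, dev) in-kernel */
            break;
        case IRQ_NONE:
            break;
        }
        d->mode = IRQ_NONE;
    }
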
7417 | static int bnx2x_enable_msix(struct bnx2x *bp) | ||
7418 | { | ||
7419 | int i, rc, offset = 1; | ||
7420 | int igu_vec = 0; | ||
7421 | |||
7422 | bp->msix_table[0].entry = igu_vec; | ||
7423 | DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec); | ||
7424 | |||
7425 | #ifdef BCM_CNIC | ||
7426 | igu_vec = BP_L_ID(bp) + offset; | ||
7427 | bp->msix_table[1].entry = igu_vec; | ||
7428 | DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec); | ||
7429 | offset++; | ||
7430 | #endif | ||
7431 | for_each_queue(bp, i) { | ||
7432 | igu_vec = BP_L_ID(bp) + offset + i; | ||
7433 | bp->msix_table[i + offset].entry = igu_vec; | ||
7434 | DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " | ||
7435 | "(fastpath #%u)\n", i + offset, igu_vec, i); | ||
7436 | } | ||
7437 | |||
7438 | rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], | ||
7439 | BNX2X_NUM_QUEUES(bp) + offset); | ||
7440 | |||
7441 | /* | ||
7442 | * reconfigure number of tx/rx queues according to available | ||
7443 | * MSI-X vectors | ||
7444 | */ | ||
7445 | if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { | ||
7446 | /* vectors available for FP */ | ||
7447 | int fp_vec = rc - BNX2X_MSIX_VEC_FP_START; | ||
7448 | |||
7449 | DP(NETIF_MSG_IFUP, | ||
7450 | "Trying to use fewer MSI-X vectors: %d\n", rc); | ||
7451 | |||
7452 | rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); | ||
7453 | |||
7454 | if (rc) { | ||
7455 | DP(NETIF_MSG_IFUP, | ||
7456 | "MSI-X is not attainable rc %d\n", rc); | ||
7457 | return rc; | ||
7458 | } | ||
7459 | |||
7460 | bp->num_queues = min(bp->num_queues, fp_vec); | ||
7461 | |||
7462 | DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n", | ||
7463 | bp->num_queues); | ||
7464 | } else if (rc) { | ||
7465 | DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); | ||
7466 | return rc; | ||
7467 | } | ||
7468 | |||
7469 | bp->flags |= USING_MSIX_FLAG; | ||
7470 | |||
7471 | return 0; | ||
7472 | } | ||
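The fallback in bnx2x_enable_msix() above relies on the old pci_enable_msix() contract, where a positive return value means "only this many vectors are available": the driver retries with that count and shrinks its fastpath queue set to match. A minimal userspace sketch of just that arithmetic follows; the constants and the fake PCI call are illustrative stand-ins, not the driver's own.

#include <stdio.h>

#define MSIX_VEC_FP_START 2     /* sp vector + CNIC vector, mirroring the driver */
#define MIN_MSIX_VEC_CNT  3     /* assumed floor: sp + CNIC + one fastpath */

static int min_int(int a, int b) { return a < b ? a : b; }

/* Stand-in for pci_enable_msix(): 0 on success, >0 = "retry with this many". */
static int fake_enable_msix(int requested, int avail)
{
        return requested <= avail ? 0 : avail;
}

int main(void)
{
        int num_queues = 8, offset = 2, avail = 5;
        int rc = fake_enable_msix(num_queues + offset, avail);

        if (rc >= MIN_MSIX_VEC_CNT) {
                int fp_vec = rc - MSIX_VEC_FP_START; /* vectors left for fastpath */

                rc = fake_enable_msix(rc, avail);    /* second, smaller request */
                if (!rc)
                        num_queues = min_int(num_queues, fp_vec);
        }
        printf("rc=%d num_queues=%d\n", rc, num_queues); /* rc=0 num_queues=3 */
        return 0;
}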
7473 | |||
7474 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) | ||
7475 | { | ||
7476 | int i, rc, offset = 1; | ||
7477 | |||
7478 | rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, | ||
7479 | bp->dev->name, bp->dev); | ||
7480 | if (rc) { | ||
7481 | BNX2X_ERR("request sp irq failed\n"); | ||
7482 | return -EBUSY; | ||
7483 | } | ||
7484 | |||
7485 | #ifdef BCM_CNIC | ||
7486 | offset++; | ||
7487 | #endif | ||
7488 | for_each_queue(bp, i) { | ||
7489 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
7490 | snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", | ||
7491 | bp->dev->name, i); | ||
7492 | |||
7493 | rc = request_irq(bp->msix_table[i + offset].vector, | ||
7494 | bnx2x_msix_fp_int, 0, fp->name, fp); | ||
7495 | if (rc) { | ||
7496 | BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc); | ||
7497 | bnx2x_free_msix_irqs(bp); | ||
7498 | return -EBUSY; | ||
7499 | } | ||
7500 | |||
7501 | fp->state = BNX2X_FP_STATE_IRQ; | ||
7502 | } | ||
7503 | |||
7504 | i = BNX2X_NUM_QUEUES(bp); | ||
7505 | netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" | ||
7506 | " ... fp[%d] %d\n", | ||
7507 | bp->msix_table[0].vector, | ||
7508 | 0, bp->msix_table[offset].vector, | ||
7509 | i - 1, bp->msix_table[offset + i - 1].vector); | ||
7510 | |||
7511 | return 0; | ||
7512 | } | ||
7513 | |||
7514 | static int bnx2x_enable_msi(struct bnx2x *bp) | ||
7515 | { | ||
7516 | int rc; | ||
7517 | |||
7518 | rc = pci_enable_msi(bp->pdev); | ||
7519 | if (rc) { | ||
7520 | DP(NETIF_MSG_IFUP, "MSI is not attainable\n"); | ||
7521 | return -1; | ||
7522 | } | ||
7523 | bp->flags |= USING_MSI_FLAG; | ||
7524 | |||
7525 | return 0; | ||
7526 | } | ||
7527 | |||
7528 | static int bnx2x_req_irq(struct bnx2x *bp) | ||
7529 | { | ||
7530 | unsigned long flags; | ||
7531 | int rc; | ||
7532 | |||
7533 | if (bp->flags & USING_MSI_FLAG) | ||
7534 | flags = 0; | ||
7535 | else | ||
7536 | flags = IRQF_SHARED; | ||
7537 | |||
7538 | rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, | ||
7539 | bp->dev->name, bp->dev); | ||
7540 | if (!rc) | ||
7541 | bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ; | ||
7542 | |||
7543 | return rc; | ||
7544 | } | ||
7545 | |||
7546 | static void bnx2x_napi_enable(struct bnx2x *bp) | ||
7547 | { | ||
7548 | int i; | ||
7549 | |||
7550 | for_each_queue(bp, i) | ||
7551 | napi_enable(&bnx2x_fp(bp, i, napi)); | ||
7552 | } | ||
7553 | |||
7554 | static void bnx2x_napi_disable(struct bnx2x *bp) | ||
7555 | { | ||
7556 | int i; | ||
7557 | |||
7558 | for_each_queue(bp, i) | ||
7559 | napi_disable(&bnx2x_fp(bp, i, napi)); | ||
7560 | } | ||
7561 | |||
7562 | static void bnx2x_netif_start(struct bnx2x *bp) | ||
7563 | { | ||
7564 | int intr_sem; | ||
7565 | |||
7566 | intr_sem = atomic_dec_and_test(&bp->intr_sem); | ||
7567 | smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ | ||
7568 | |||
7569 | if (intr_sem) { | ||
7570 | if (netif_running(bp->dev)) { | ||
7571 | bnx2x_napi_enable(bp); | ||
7572 | bnx2x_int_enable(bp); | ||
7573 | if (bp->state == BNX2X_STATE_OPEN) | ||
7574 | netif_tx_wake_all_queues(bp->dev); | ||
7575 | } | ||
7576 | } | ||
7577 | } | ||
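bnx2x_netif_start() above only restarts NAPI, interrupts and Tx when atomic_dec_and_test() observes bp->intr_sem dropping to zero, i.e. when the last "keep interrupts off" holder releases the gate. A plain-C analogue of that counting-gate pattern; the atomics are elided, so this sketch is single-threaded by construction.

#include <stdio.h>

static int intr_sem = 2;        /* two nested "keep interrupts off" holders */

/* Stand-in for atomic_dec_and_test(): true only on the transition to 0. */
static int dec_and_test(int *v)
{
        return --(*v) == 0;
}

int main(void)
{
        printf("%d\n", dec_and_test(&intr_sem)); /* 0: someone still gates us */
        printf("%d\n", dec_and_test(&intr_sem)); /* 1: last holder, restart */
        return 0;
}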
7578 | |||
7579 | static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) | ||
7580 | { | ||
7581 | bnx2x_int_disable_sync(bp, disable_hw); | ||
7582 | bnx2x_napi_disable(bp); | ||
7583 | netif_tx_disable(bp->dev); | ||
7584 | } | ||
7585 | 4732 | ||
7586 | /* | 4733 | /* |
7587 | * Init service functions | 4734 | * Init service functions |
@@ -7752,7 +4899,7 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | |||
7752 | return -EBUSY; | 4899 | return -EBUSY; |
7753 | } | 4900 | } |
7754 | 4901 | ||
7755 | static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) | 4902 | void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) |
7756 | { | 4903 | { |
7757 | bp->set_mac_pending++; | 4904 | bp->set_mac_pending++; |
7758 | smp_wmb(); | 4905 | smp_wmb(); |
@@ -7764,7 +4911,7 @@ static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) | |||
7764 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); | 4911 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); |
7765 | } | 4912 | } |
7766 | 4913 | ||
7767 | static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) | 4914 | void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) |
7768 | { | 4915 | { |
7769 | bp->set_mac_pending++; | 4916 | bp->set_mac_pending++; |
7770 | smp_wmb(); | 4917 | smp_wmb(); |
@@ -7788,7 +4935,7 @@ static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) | |||
7788 | * | 4935 | * |
7789 | * @return 0 on success, -ENODEV if ramrod doesn't return. | 4936 | * @return 0 on success, -ENODEV if ramrod doesn't return. |
7790 | */ | 4937 | */ |
7791 | static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) | 4938 | int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) |
7792 | { | 4939 | { |
7793 | u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID); | 4940 | u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID); |
7794 | 4941 | ||
@@ -7815,7 +4962,7 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) | |||
7815 | } | 4962 | } |
7816 | #endif | 4963 | #endif |
7817 | 4964 | ||
7818 | static int bnx2x_setup_leading(struct bnx2x *bp) | 4965 | int bnx2x_setup_leading(struct bnx2x *bp) |
7819 | { | 4966 | { |
7820 | int rc; | 4967 | int rc; |
7821 | 4968 | ||
@@ -7831,7 +4978,7 @@ static int bnx2x_setup_leading(struct bnx2x *bp) | |||
7831 | return rc; | 4978 | return rc; |
7832 | } | 4979 | } |
7833 | 4980 | ||
7834 | static int bnx2x_setup_multi(struct bnx2x *bp, int index) | 4981 | int bnx2x_setup_multi(struct bnx2x *bp, int index) |
7835 | { | 4982 | { |
7836 | struct bnx2x_fastpath *fp = &bp->fp[index]; | 4983 | struct bnx2x_fastpath *fp = &bp->fp[index]; |
7837 | 4984 | ||
@@ -7848,9 +4995,8 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index) | |||
7848 | &(fp->state), 0); | 4995 | &(fp->state), 0); |
7849 | } | 4996 | } |
7850 | 4997 | ||
7851 | static int bnx2x_poll(struct napi_struct *napi, int budget); | ||
7852 | 4998 | ||
7853 | static void bnx2x_set_num_queues_msix(struct bnx2x *bp) | 4999 | void bnx2x_set_num_queues_msix(struct bnx2x *bp) |
7854 | { | 5000 | { |
7855 | 5001 | ||
7856 | switch (bp->multi_mode) { | 5002 | switch (bp->multi_mode) { |
@@ -7874,292 +5020,7 @@ static void bnx2x_set_num_queues_msix(struct bnx2x *bp) | |||
7874 | } | 5020 | } |
7875 | } | 5021 | } |
7876 | 5022 | ||
7877 | static int bnx2x_set_num_queues(struct bnx2x *bp) | ||
7878 | { | ||
7879 | int rc = 0; | ||
7880 | |||
7881 | switch (int_mode) { | ||
7882 | case INT_MODE_INTx: | ||
7883 | case INT_MODE_MSI: | ||
7884 | bp->num_queues = 1; | ||
7885 | DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); | ||
7886 | break; | ||
7887 | default: | ||
7888 | /* Set number of queues according to bp->multi_mode value */ | ||
7889 | bnx2x_set_num_queues_msix(bp); | ||
7890 | |||
7891 | DP(NETIF_MSG_IFUP, "set number of queues to %d\n", | ||
7892 | bp->num_queues); | ||
7893 | |||
7894 | /* if we can't use MSI-X we only need one fp, | ||
7895 | * so try to enable MSI-X with the requested number of fp's | ||
7896 | * and fall back to MSI or legacy INTx with one fp | ||
7897 | */ | ||
7898 | rc = bnx2x_enable_msix(bp); | ||
7899 | if (rc) | ||
7900 | /* failed to enable MSI-X */ | ||
7901 | bp->num_queues = 1; | ||
7902 | break; | ||
7903 | } | ||
7904 | bp->dev->real_num_tx_queues = bp->num_queues; | ||
7905 | return rc; | ||
7906 | } | ||
7907 | |||
7908 | #ifdef BCM_CNIC | ||
7909 | static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); | ||
7910 | static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); | ||
7911 | #endif | ||
7912 | |||
7913 | /* must be called with rtnl_lock */ | ||
7914 | static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | ||
7915 | { | ||
7916 | u32 load_code; | ||
7917 | int i, rc; | ||
7918 | |||
7919 | #ifdef BNX2X_STOP_ON_ERROR | ||
7920 | if (unlikely(bp->panic)) | ||
7921 | return -EPERM; | ||
7922 | #endif | ||
7923 | |||
7924 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; | ||
7925 | |||
7926 | rc = bnx2x_set_num_queues(bp); | ||
7927 | |||
7928 | if (bnx2x_alloc_mem(bp)) { | ||
7929 | bnx2x_free_irq(bp, true); | ||
7930 | return -ENOMEM; | ||
7931 | } | ||
7932 | |||
7933 | for_each_queue(bp, i) | ||
7934 | bnx2x_fp(bp, i, disable_tpa) = | ||
7935 | ((bp->flags & TPA_ENABLE_FLAG) == 0); | ||
7936 | |||
7937 | for_each_queue(bp, i) | ||
7938 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), | ||
7939 | bnx2x_poll, 128); | ||
7940 | 5023 | ||
7941 | bnx2x_napi_enable(bp); | ||
7942 | |||
7943 | if (bp->flags & USING_MSIX_FLAG) { | ||
7944 | rc = bnx2x_req_msix_irqs(bp); | ||
7945 | if (rc) { | ||
7946 | bnx2x_free_irq(bp, true); | ||
7947 | goto load_error1; | ||
7948 | } | ||
7949 | } else { | ||
7950 | /* Fall back to INTx if we failed to enable MSI-X due to lack of | ||
7951 | memory (in bnx2x_set_num_queues()) */ | ||
7952 | if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx)) | ||
7953 | bnx2x_enable_msi(bp); | ||
7954 | bnx2x_ack_int(bp); | ||
7955 | rc = bnx2x_req_irq(bp); | ||
7956 | if (rc) { | ||
7957 | BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); | ||
7958 | bnx2x_free_irq(bp, true); | ||
7959 | goto load_error1; | ||
7960 | } | ||
7961 | if (bp->flags & USING_MSI_FLAG) { | ||
7962 | bp->dev->irq = bp->pdev->irq; | ||
7963 | netdev_info(bp->dev, "using MSI IRQ %d\n", | ||
7964 | bp->pdev->irq); | ||
7965 | } | ||
7966 | } | ||
7967 | |||
7968 | /* Send LOAD_REQUEST command to MCP | ||
7969 | Returns the type of LOAD command: | ||
7970 | if this is the first port to be initialized, | ||
7971 | common blocks should be initialized as well; otherwise not | ||
7972 | */ | ||
7973 | if (!BP_NOMCP(bp)) { | ||
7974 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); | ||
7975 | if (!load_code) { | ||
7976 | BNX2X_ERR("MCP response failure, aborting\n"); | ||
7977 | rc = -EBUSY; | ||
7978 | goto load_error2; | ||
7979 | } | ||
7980 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { | ||
7981 | rc = -EBUSY; /* other port in diagnostic mode */ | ||
7982 | goto load_error2; | ||
7983 | } | ||
7984 | |||
7985 | } else { | ||
7986 | int port = BP_PORT(bp); | ||
7987 | |||
7988 | DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n", | ||
7989 | load_count[0], load_count[1], load_count[2]); | ||
7990 | load_count[0]++; | ||
7991 | load_count[1 + port]++; | ||
7992 | DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n", | ||
7993 | load_count[0], load_count[1], load_count[2]); | ||
7994 | if (load_count[0] == 1) | ||
7995 | load_code = FW_MSG_CODE_DRV_LOAD_COMMON; | ||
7996 | else if (load_count[1 + port] == 1) | ||
7997 | load_code = FW_MSG_CODE_DRV_LOAD_PORT; | ||
7998 | else | ||
7999 | load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; | ||
8000 | } | ||
8001 | |||
8002 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || | ||
8003 | (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) | ||
8004 | bp->port.pmf = 1; | ||
8005 | else | ||
8006 | bp->port.pmf = 0; | ||
8007 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); | ||
8008 | |||
8009 | /* Initialize HW */ | ||
8010 | rc = bnx2x_init_hw(bp, load_code); | ||
8011 | if (rc) { | ||
8012 | BNX2X_ERR("HW init failed, aborting\n"); | ||
8013 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); | ||
8014 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); | ||
8015 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | ||
8016 | goto load_error2; | ||
8017 | } | ||
8018 | |||
8019 | /* Setup NIC internals and enable interrupts */ | ||
8020 | bnx2x_nic_init(bp, load_code); | ||
8021 | |||
8022 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) && | ||
8023 | (bp->common.shmem2_base)) | ||
8024 | SHMEM2_WR(bp, dcc_support, | ||
8025 | (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | | ||
8026 | SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); | ||
8027 | |||
8028 | /* Send LOAD_DONE command to MCP */ | ||
8029 | if (!BP_NOMCP(bp)) { | ||
8030 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); | ||
8031 | if (!load_code) { | ||
8032 | BNX2X_ERR("MCP response failure, aborting\n"); | ||
8033 | rc = -EBUSY; | ||
8034 | goto load_error3; | ||
8035 | } | ||
8036 | } | ||
8037 | |||
8038 | bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; | ||
8039 | |||
8040 | rc = bnx2x_setup_leading(bp); | ||
8041 | if (rc) { | ||
8042 | BNX2X_ERR("Setup leading failed!\n"); | ||
8043 | #ifndef BNX2X_STOP_ON_ERROR | ||
8044 | goto load_error3; | ||
8045 | #else | ||
8046 | bp->panic = 1; | ||
8047 | return -EBUSY; | ||
8048 | #endif | ||
8049 | } | ||
8050 | |||
8051 | if (CHIP_IS_E1H(bp)) | ||
8052 | if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { | ||
8053 | DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); | ||
8054 | bp->flags |= MF_FUNC_DIS; | ||
8055 | } | ||
8056 | |||
8057 | if (bp->state == BNX2X_STATE_OPEN) { | ||
8058 | #ifdef BCM_CNIC | ||
8059 | /* Enable Timer scan */ | ||
8060 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1); | ||
8061 | #endif | ||
8062 | for_each_nondefault_queue(bp, i) { | ||
8063 | rc = bnx2x_setup_multi(bp, i); | ||
8064 | if (rc) | ||
8065 | #ifdef BCM_CNIC | ||
8066 | goto load_error4; | ||
8067 | #else | ||
8068 | goto load_error3; | ||
8069 | #endif | ||
8070 | } | ||
8071 | |||
8072 | if (CHIP_IS_E1(bp)) | ||
8073 | bnx2x_set_eth_mac_addr_e1(bp, 1); | ||
8074 | else | ||
8075 | bnx2x_set_eth_mac_addr_e1h(bp, 1); | ||
8076 | #ifdef BCM_CNIC | ||
8077 | /* Set iSCSI L2 MAC */ | ||
8078 | mutex_lock(&bp->cnic_mutex); | ||
8079 | if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) { | ||
8080 | bnx2x_set_iscsi_eth_mac_addr(bp, 1); | ||
8081 | bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET; | ||
8082 | bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, | ||
8083 | CNIC_SB_ID(bp)); | ||
8084 | } | ||
8085 | mutex_unlock(&bp->cnic_mutex); | ||
8086 | #endif | ||
8087 | } | ||
8088 | |||
8089 | if (bp->port.pmf) | ||
8090 | bnx2x_initial_phy_init(bp, load_mode); | ||
8091 | |||
8092 | /* Start fast path */ | ||
8093 | switch (load_mode) { | ||
8094 | case LOAD_NORMAL: | ||
8095 | if (bp->state == BNX2X_STATE_OPEN) { | ||
8096 | /* Tx queues should only be re-enabled */ | ||
8097 | netif_tx_wake_all_queues(bp->dev); | ||
8098 | } | ||
8099 | /* Initialize the receive filter. */ | ||
8100 | bnx2x_set_rx_mode(bp->dev); | ||
8101 | break; | ||
8102 | |||
8103 | case LOAD_OPEN: | ||
8104 | netif_tx_start_all_queues(bp->dev); | ||
8105 | if (bp->state != BNX2X_STATE_OPEN) | ||
8106 | netif_tx_disable(bp->dev); | ||
8107 | /* Initialize the receive filter. */ | ||
8108 | bnx2x_set_rx_mode(bp->dev); | ||
8109 | break; | ||
8110 | |||
8111 | case LOAD_DIAG: | ||
8112 | /* Initialize the receive filter. */ | ||
8113 | bnx2x_set_rx_mode(bp->dev); | ||
8114 | bp->state = BNX2X_STATE_DIAG; | ||
8115 | break; | ||
8116 | |||
8117 | default: | ||
8118 | break; | ||
8119 | } | ||
8120 | |||
8121 | if (!bp->port.pmf) | ||
8122 | bnx2x__link_status_update(bp); | ||
8123 | |||
8124 | /* start the timer */ | ||
8125 | mod_timer(&bp->timer, jiffies + bp->current_interval); | ||
8126 | |||
8127 | #ifdef BCM_CNIC | ||
8128 | bnx2x_setup_cnic_irq_info(bp); | ||
8129 | if (bp->state == BNX2X_STATE_OPEN) | ||
8130 | bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); | ||
8131 | #endif | ||
8132 | bnx2x_inc_load_cnt(bp); | ||
8133 | |||
8134 | return 0; | ||
8135 | |||
8136 | #ifdef BCM_CNIC | ||
8137 | load_error4: | ||
8138 | /* Disable Timer scan */ | ||
8139 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0); | ||
8140 | #endif | ||
8141 | load_error3: | ||
8142 | bnx2x_int_disable_sync(bp, 1); | ||
8143 | if (!BP_NOMCP(bp)) { | ||
8144 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); | ||
8145 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | ||
8146 | } | ||
8147 | bp->port.pmf = 0; | ||
8148 | /* Free SKBs, SGEs, TPA pool and driver internals */ | ||
8149 | bnx2x_free_skbs(bp); | ||
8150 | for_each_queue(bp, i) | ||
8151 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | ||
8152 | load_error2: | ||
8153 | /* Release IRQs */ | ||
8154 | bnx2x_free_irq(bp, false); | ||
8155 | load_error1: | ||
8156 | bnx2x_napi_disable(bp); | ||
8157 | for_each_queue(bp, i) | ||
8158 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | ||
8159 | bnx2x_free_mem(bp); | ||
8160 | |||
8161 | return rc; | ||
8162 | } | ||
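In the no-MCP branch of bnx2x_nic_load() above, load_count[0] counts loads chip-wide and load_count[1 + port] per port, and the first loader at each level gets the heavier init: COMMON, then PORT, then FUNCTION. A self-contained sketch of just that bookkeeping; the enum values and prints are illustrative.

#include <stdio.h>

enum load_code { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static int load_count[3];       /* [0] = whole chip, [1]/[2] = per port */

static enum load_code pick_load_code(int port)
{
        load_count[0]++;
        load_count[1 + port]++;
        if (load_count[0] == 1)
                return LOAD_COMMON;     /* first load anywhere on the chip */
        if (load_count[1 + port] == 1)
                return LOAD_PORT;       /* first load on this port */
        return LOAD_FUNCTION;           /* everything else: function init only */
}

int main(void)
{
        printf("%d %d %d\n", pick_load_code(0), pick_load_code(1),
               pick_load_code(0));      /* prints "0 1 2" */
        return 0;
}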
8163 | 5024 | ||
8164 | static int bnx2x_stop_multi(struct bnx2x *bp, int index) | 5025 | static int bnx2x_stop_multi(struct bnx2x *bp, int index) |
8165 | { | 5026 | { |
@@ -8317,7 +5178,7 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) | |||
8317 | } | 5178 | } |
8318 | } | 5179 | } |
8319 | 5180 | ||
8320 | static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | 5181 | void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) |
8321 | { | 5182 | { |
8322 | int port = BP_PORT(bp); | 5183 | int port = BP_PORT(bp); |
8323 | u32 reset_code = 0; | 5184 | u32 reset_code = 0; |
@@ -8465,7 +5326,7 @@ unload_error: | |||
8465 | 5326 | ||
8466 | } | 5327 | } |
8467 | 5328 | ||
8468 | static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp) | 5329 | void bnx2x_disable_close_the_gate(struct bnx2x *bp) |
8469 | { | 5330 | { |
8470 | u32 val; | 5331 | u32 val; |
8471 | 5332 | ||
@@ -8487,71 +5348,6 @@ static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp) | |||
8487 | } | 5348 | } |
8488 | } | 5349 | } |
8489 | 5350 | ||
8490 | /* must be called with rtnl_lock */ | ||
8491 | static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | ||
8492 | { | ||
8493 | int i; | ||
8494 | |||
8495 | if (bp->state == BNX2X_STATE_CLOSED) { | ||
8496 | /* Interface has been removed - nothing to recover */ | ||
8497 | bp->recovery_state = BNX2X_RECOVERY_DONE; | ||
8498 | bp->is_leader = 0; | ||
8499 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08); | ||
8500 | smp_wmb(); | ||
8501 | |||
8502 | return -EINVAL; | ||
8503 | } | ||
8504 | |||
8505 | #ifdef BCM_CNIC | ||
8506 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); | ||
8507 | #endif | ||
8508 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | ||
8509 | |||
8510 | /* Set "drop all" */ | ||
8511 | bp->rx_mode = BNX2X_RX_MODE_NONE; | ||
8512 | bnx2x_set_storm_rx_mode(bp); | ||
8513 | |||
8514 | /* Disable HW interrupts, NAPI and Tx */ | ||
8515 | bnx2x_netif_stop(bp, 1); | ||
8516 | netif_carrier_off(bp->dev); | ||
8517 | |||
8518 | del_timer_sync(&bp->timer); | ||
8519 | SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, | ||
8520 | (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); | ||
8521 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
8522 | |||
8523 | /* Release IRQs */ | ||
8524 | bnx2x_free_irq(bp, false); | ||
8525 | |||
8526 | /* Cleanup the chip if needed */ | ||
8527 | if (unload_mode != UNLOAD_RECOVERY) | ||
8528 | bnx2x_chip_cleanup(bp, unload_mode); | ||
8529 | |||
8530 | bp->port.pmf = 0; | ||
8531 | |||
8532 | /* Free SKBs, SGEs, TPA pool and driver internals */ | ||
8533 | bnx2x_free_skbs(bp); | ||
8534 | for_each_queue(bp, i) | ||
8535 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | ||
8536 | for_each_queue(bp, i) | ||
8537 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | ||
8538 | bnx2x_free_mem(bp); | ||
8539 | |||
8540 | bp->state = BNX2X_STATE_CLOSED; | ||
8541 | |||
8542 | /* The last driver must disable "close the gate" if there is no | ||
8543 | * parity attention or "process kill" pending. | ||
8544 | */ | ||
8545 | if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) && | ||
8546 | bnx2x_reset_is_done(bp)) | ||
8547 | bnx2x_disable_close_the_gate(bp); | ||
8548 | |||
8550 | /* Reset the MCP mailbox sequence if there is an ongoing recovery */ | ||
8550 | if (unload_mode == UNLOAD_RECOVERY) | ||
8551 | bp->fw_seq = 0; | ||
8552 | |||
8553 | return 0; | ||
8554 | } | ||
8555 | 5351 | ||
8556 | /* Close gates #2, #3 and #4: */ | 5352 | /* Close gates #2, #3 and #4: */ |
8557 | static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) | 5353 | static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) |
@@ -8864,8 +5660,6 @@ exit_leader_reset: | |||
8864 | return rc; | 5660 | return rc; |
8865 | } | 5661 | } |
8866 | 5662 | ||
8867 | static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); | ||
8868 | |||
8869 | /* Assumption: runs under rtnl lock. This together with the fact | 5663 | /* Assumption: runs under rtnl lock. This together with the fact |
8870 | * that it's called only from bnx2x_reset_task() ensure that it | 5664 | * that it's called only from bnx2x_reset_task() ensure that it |
8871 | * will never be called when netif_running(bp->dev) is false. | 5665 | * will never be called when netif_running(bp->dev) is false. |
@@ -9002,8 +5796,6 @@ reset_task_exit: | |||
9002 | 5796 | ||
9003 | /* end of nic load/unload */ | 5797 | /* end of nic load/unload */ |
9004 | 5798 | ||
9005 | /* ethtool_ops */ | ||
9006 | |||
9007 | /* | 5799 | /* |
9008 | * Init service functions | 5800 | * Init service functions |
9009 | */ | 5801 | */ |
@@ -9922,6 +6714,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
9922 | 6714 | ||
9923 | mutex_init(&bp->port.phy_mutex); | 6715 | mutex_init(&bp->port.phy_mutex); |
9924 | mutex_init(&bp->fw_mb_mutex); | 6716 | mutex_init(&bp->fw_mb_mutex); |
6717 | spin_lock_init(&bp->stats_lock); | ||
9925 | #ifdef BCM_CNIC | 6718 | #ifdef BCM_CNIC |
9926 | mutex_init(&bp->cnic_mutex); | 6719 | mutex_init(&bp->cnic_mutex); |
9927 | #endif | 6720 | #endif |
@@ -9951,7 +6744,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
9951 | multi_mode = ETH_RSS_MODE_DISABLED; | 6744 | multi_mode = ETH_RSS_MODE_DISABLED; |
9952 | } | 6745 | } |
9953 | bp->multi_mode = multi_mode; | 6746 | bp->multi_mode = multi_mode; |
9954 | 6747 | bp->int_mode = int_mode; | |
9955 | 6748 | ||
9956 | bp->dev->features |= NETIF_F_GRO; | 6749 | bp->dev->features |= NETIF_F_GRO; |
9957 | 6750 | ||
@@ -9963,6 +6756,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
9963 | bp->flags |= TPA_ENABLE_FLAG; | 6756 | bp->flags |= TPA_ENABLE_FLAG; |
9964 | bp->dev->features |= NETIF_F_LRO; | 6757 | bp->dev->features |= NETIF_F_LRO; |
9965 | } | 6758 | } |
6759 | bp->disable_tpa = disable_tpa; | ||
9966 | 6760 | ||
9967 | if (CHIP_IS_E1(bp)) | 6761 | if (CHIP_IS_E1(bp)) |
9968 | bp->dropless_fc = 0; | 6762 | bp->dropless_fc = 0; |
@@ -9991,2547 +6785,11 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
9991 | return rc; | 6785 | return rc; |
9992 | } | 6786 | } |
9993 | 6787 | ||
9994 | /* | ||
9995 | * ethtool service functions | ||
9996 | */ | ||
9997 | |||
9998 | /* All ethtool functions called with rtnl_lock */ | ||
9999 | |||
10000 | static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
10001 | { | ||
10002 | struct bnx2x *bp = netdev_priv(dev); | ||
10003 | |||
10004 | cmd->supported = bp->port.supported; | ||
10005 | cmd->advertising = bp->port.advertising; | ||
10006 | |||
10007 | if ((bp->state == BNX2X_STATE_OPEN) && | ||
10008 | !(bp->flags & MF_FUNC_DIS) && | ||
10009 | (bp->link_vars.link_up)) { | ||
10010 | cmd->speed = bp->link_vars.line_speed; | ||
10011 | cmd->duplex = bp->link_vars.duplex; | ||
10012 | if (IS_E1HMF(bp)) { | ||
10013 | u16 vn_max_rate; | ||
10014 | |||
10015 | vn_max_rate = | ||
10016 | ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
10017 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | ||
10018 | if (vn_max_rate < cmd->speed) | ||
10019 | cmd->speed = vn_max_rate; | ||
10020 | } | ||
10021 | } else { | ||
10022 | cmd->speed = -1; | ||
10023 | cmd->duplex = -1; | ||
10024 | } | ||
10025 | |||
10026 | if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { | ||
10027 | u32 ext_phy_type = | ||
10028 | XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); | ||
10029 | |||
10030 | switch (ext_phy_type) { | ||
10031 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: | ||
10032 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: | ||
10033 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: | ||
10034 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: | ||
10035 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: | ||
10036 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: | ||
10037 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: | ||
10038 | cmd->port = PORT_FIBRE; | ||
10039 | break; | ||
10040 | |||
10041 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | ||
10042 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: | ||
10043 | cmd->port = PORT_TP; | ||
10044 | break; | ||
10045 | |||
10046 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: | ||
10047 | BNX2X_ERR("XGXS PHY Failure detected 0x%x\n", | ||
10048 | bp->link_params.ext_phy_config); | ||
10049 | break; | ||
10050 | |||
10051 | default: | ||
10052 | DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", | ||
10053 | bp->link_params.ext_phy_config); | ||
10054 | break; | ||
10055 | } | ||
10056 | } else | ||
10057 | cmd->port = PORT_TP; | ||
10058 | |||
10059 | cmd->phy_address = bp->mdio.prtad; | ||
10060 | cmd->transceiver = XCVR_INTERNAL; | ||
10061 | |||
10062 | if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) | ||
10063 | cmd->autoneg = AUTONEG_ENABLE; | ||
10064 | else | ||
10065 | cmd->autoneg = AUTONEG_DISABLE; | ||
10066 | |||
10067 | cmd->maxtxpkt = 0; | ||
10068 | cmd->maxrxpkt = 0; | ||
10069 | |||
10070 | DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" | ||
10071 | DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" | ||
10072 | DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" | ||
10073 | DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", | ||
10074 | cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, | ||
10075 | cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, | ||
10076 | cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); | ||
10077 | |||
10078 | return 0; | ||
10079 | } | ||
10080 | |||
10081 | static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
10082 | { | ||
10083 | struct bnx2x *bp = netdev_priv(dev); | ||
10084 | u32 advertising; | ||
10085 | |||
10086 | if (IS_E1HMF(bp)) | ||
10087 | return 0; | ||
10088 | |||
10089 | DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" | ||
10090 | DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" | ||
10091 | DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" | ||
10092 | DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", | ||
10093 | cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, | ||
10094 | cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, | ||
10095 | cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); | ||
10096 | |||
10097 | if (cmd->autoneg == AUTONEG_ENABLE) { | ||
10098 | if (!(bp->port.supported & SUPPORTED_Autoneg)) { | ||
10099 | DP(NETIF_MSG_LINK, "Autoneg not supported\n"); | ||
10100 | return -EINVAL; | ||
10101 | } | ||
10102 | |||
10103 | /* advertise the requested speed and duplex if supported */ | ||
10104 | cmd->advertising &= bp->port.supported; | ||
10105 | |||
10106 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; | ||
10107 | bp->link_params.req_duplex = DUPLEX_FULL; | ||
10108 | bp->port.advertising |= (ADVERTISED_Autoneg | | ||
10109 | cmd->advertising); | ||
10110 | |||
10111 | } else { /* forced speed */ | ||
10112 | /* advertise the requested speed and duplex if supported */ | ||
10113 | switch (cmd->speed) { | ||
10114 | case SPEED_10: | ||
10115 | if (cmd->duplex == DUPLEX_FULL) { | ||
10116 | if (!(bp->port.supported & | ||
10117 | SUPPORTED_10baseT_Full)) { | ||
10118 | DP(NETIF_MSG_LINK, | ||
10119 | "10M full not supported\n"); | ||
10120 | return -EINVAL; | ||
10121 | } | ||
10122 | |||
10123 | advertising = (ADVERTISED_10baseT_Full | | ||
10124 | ADVERTISED_TP); | ||
10125 | } else { | ||
10126 | if (!(bp->port.supported & | ||
10127 | SUPPORTED_10baseT_Half)) { | ||
10128 | DP(NETIF_MSG_LINK, | ||
10129 | "10M half not supported\n"); | ||
10130 | return -EINVAL; | ||
10131 | } | ||
10132 | |||
10133 | advertising = (ADVERTISED_10baseT_Half | | ||
10134 | ADVERTISED_TP); | ||
10135 | } | ||
10136 | break; | ||
10137 | |||
10138 | case SPEED_100: | ||
10139 | if (cmd->duplex == DUPLEX_FULL) { | ||
10140 | if (!(bp->port.supported & | ||
10141 | SUPPORTED_100baseT_Full)) { | ||
10142 | DP(NETIF_MSG_LINK, | ||
10143 | "100M full not supported\n"); | ||
10144 | return -EINVAL; | ||
10145 | } | ||
10146 | |||
10147 | advertising = (ADVERTISED_100baseT_Full | | ||
10148 | ADVERTISED_TP); | ||
10149 | } else { | ||
10150 | if (!(bp->port.supported & | ||
10151 | SUPPORTED_100baseT_Half)) { | ||
10152 | DP(NETIF_MSG_LINK, | ||
10153 | "100M half not supported\n"); | ||
10154 | return -EINVAL; | ||
10155 | } | ||
10156 | |||
10157 | advertising = (ADVERTISED_100baseT_Half | | ||
10158 | ADVERTISED_TP); | ||
10159 | } | ||
10160 | break; | ||
10161 | |||
10162 | case SPEED_1000: | ||
10163 | if (cmd->duplex != DUPLEX_FULL) { | ||
10164 | DP(NETIF_MSG_LINK, "1G half not supported\n"); | ||
10165 | return -EINVAL; | ||
10166 | } | ||
10167 | |||
10168 | if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) { | ||
10169 | DP(NETIF_MSG_LINK, "1G full not supported\n"); | ||
10170 | return -EINVAL; | ||
10171 | } | ||
10172 | |||
10173 | advertising = (ADVERTISED_1000baseT_Full | | ||
10174 | ADVERTISED_TP); | ||
10175 | break; | ||
10176 | |||
10177 | case SPEED_2500: | ||
10178 | if (cmd->duplex != DUPLEX_FULL) { | ||
10179 | DP(NETIF_MSG_LINK, | ||
10180 | "2.5G half not supported\n"); | ||
10181 | return -EINVAL; | ||
10182 | } | ||
10183 | |||
10184 | if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) { | ||
10185 | DP(NETIF_MSG_LINK, | ||
10186 | "2.5G full not supported\n"); | ||
10187 | return -EINVAL; | ||
10188 | } | ||
10189 | |||
10190 | advertising = (ADVERTISED_2500baseX_Full | | ||
10191 | ADVERTISED_TP); | ||
10192 | break; | ||
10193 | |||
10194 | case SPEED_10000: | ||
10195 | if (cmd->duplex != DUPLEX_FULL) { | ||
10196 | DP(NETIF_MSG_LINK, "10G half not supported\n"); | ||
10197 | return -EINVAL; | ||
10198 | } | ||
10199 | |||
10200 | if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) { | ||
10201 | DP(NETIF_MSG_LINK, "10G full not supported\n"); | ||
10202 | return -EINVAL; | ||
10203 | } | ||
10204 | |||
10205 | advertising = (ADVERTISED_10000baseT_Full | | ||
10206 | ADVERTISED_FIBRE); | ||
10207 | break; | ||
10208 | |||
10209 | default: | ||
10210 | DP(NETIF_MSG_LINK, "Unsupported speed\n"); | ||
10211 | return -EINVAL; | ||
10212 | } | ||
10213 | |||
10214 | bp->link_params.req_line_speed = cmd->speed; | ||
10215 | bp->link_params.req_duplex = cmd->duplex; | ||
10216 | bp->port.advertising = advertising; | ||
10217 | } | ||
10218 | |||
10219 | DP(NETIF_MSG_LINK, "req_line_speed %d\n" | ||
10220 | DP_LEVEL " req_duplex %d advertising 0x%x\n", | ||
10221 | bp->link_params.req_line_speed, bp->link_params.req_duplex, | ||
10222 | bp->port.advertising); | ||
10223 | |||
10224 | if (netif_running(dev)) { | ||
10225 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
10226 | bnx2x_link_set(bp); | ||
10227 | } | ||
10228 | |||
10229 | return 0; | ||
10230 | } | ||
10231 | |||
10232 | #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE) | ||
10233 | #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE) | ||
10234 | |||
10235 | static int bnx2x_get_regs_len(struct net_device *dev) | ||
10236 | { | ||
10237 | struct bnx2x *bp = netdev_priv(dev); | ||
10238 | int regdump_len = 0; | ||
10239 | int i; | ||
10240 | |||
10241 | if (CHIP_IS_E1(bp)) { | ||
10242 | for (i = 0; i < REGS_COUNT; i++) | ||
10243 | if (IS_E1_ONLINE(reg_addrs[i].info)) | ||
10244 | regdump_len += reg_addrs[i].size; | ||
10245 | |||
10246 | for (i = 0; i < WREGS_COUNT_E1; i++) | ||
10247 | if (IS_E1_ONLINE(wreg_addrs_e1[i].info)) | ||
10248 | regdump_len += wreg_addrs_e1[i].size * | ||
10249 | (1 + wreg_addrs_e1[i].read_regs_count); | ||
10250 | |||
10251 | } else { /* E1H */ | ||
10252 | for (i = 0; i < REGS_COUNT; i++) | ||
10253 | if (IS_E1H_ONLINE(reg_addrs[i].info)) | ||
10254 | regdump_len += reg_addrs[i].size; | ||
10255 | |||
10256 | for (i = 0; i < WREGS_COUNT_E1H; i++) | ||
10257 | if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info)) | ||
10258 | regdump_len += wreg_addrs_e1h[i].size * | ||
10259 | (1 + wreg_addrs_e1h[i].read_regs_count); | ||
10260 | } | ||
10261 | regdump_len *= 4; | ||
10262 | regdump_len += sizeof(struct dump_hdr); | ||
10263 | |||
10264 | return regdump_len; | ||
10265 | } | ||
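The size computed by bnx2x_get_regs_len() above works out as: each plain register block contributes 'size' 32-bit words, each wide-bus block 'size * (1 + read_regs_count)' words, and the word total is converted to bytes and padded with the dump header. A worked example with made-up table entries:

#include <stdio.h>

struct reg  { int size; };
struct wreg { int size; int read_regs_count; };

int main(void)
{
        struct reg  regs[]  = { { 4 }, { 2 } };  /* 4 + 2 = 6 words */
        struct wreg wregs[] = { { 1, 3 } };      /* 1 * (1 + 3) = 4 words */
        int hdr_bytes = 64;     /* stand-in for sizeof(struct dump_hdr) */
        int words = 0, i;

        for (i = 0; i < 2; i++)
                words += regs[i].size;
        for (i = 0; i < 1; i++)
                words += wregs[i].size * (1 + wregs[i].read_regs_count);

        printf("%d bytes\n", words * 4 + hdr_bytes); /* (6+4)*4 + 64 = 104 */
        return 0;
}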
10266 | |||
10267 | static void bnx2x_get_regs(struct net_device *dev, | ||
10268 | struct ethtool_regs *regs, void *_p) | ||
10269 | { | ||
10270 | u32 *p = _p, i, j; | ||
10271 | struct bnx2x *bp = netdev_priv(dev); | ||
10272 | struct dump_hdr dump_hdr = {0}; | ||
10273 | |||
10274 | regs->version = 0; | ||
10275 | memset(p, 0, regs->len); | ||
10276 | |||
10277 | if (!netif_running(bp->dev)) | ||
10278 | return; | ||
10279 | |||
10280 | dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; | ||
10281 | dump_hdr.dump_sign = dump_sign_all; | ||
10282 | dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); | ||
10283 | dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR); | ||
10284 | dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR); | ||
10285 | dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR); | ||
10286 | dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE; | ||
10287 | |||
10288 | memcpy(p, &dump_hdr, sizeof(struct dump_hdr)); | ||
10289 | p += dump_hdr.hdr_size + 1; | ||
10290 | |||
10291 | if (CHIP_IS_E1(bp)) { | ||
10292 | for (i = 0; i < REGS_COUNT; i++) | ||
10293 | if (IS_E1_ONLINE(reg_addrs[i].info)) | ||
10294 | for (j = 0; j < reg_addrs[i].size; j++) | ||
10295 | *p++ = REG_RD(bp, | ||
10296 | reg_addrs[i].addr + j*4); | ||
10297 | |||
10298 | } else { /* E1H */ | ||
10299 | for (i = 0; i < REGS_COUNT; i++) | ||
10300 | if (IS_E1H_ONLINE(reg_addrs[i].info)) | ||
10301 | for (j = 0; j < reg_addrs[i].size; j++) | ||
10302 | *p++ = REG_RD(bp, | ||
10303 | reg_addrs[i].addr + j*4); | ||
10304 | } | ||
10305 | } | ||
10306 | |||
10307 | #define PHY_FW_VER_LEN 10 | ||
10308 | |||
10309 | static void bnx2x_get_drvinfo(struct net_device *dev, | ||
10310 | struct ethtool_drvinfo *info) | ||
10311 | { | ||
10312 | struct bnx2x *bp = netdev_priv(dev); | ||
10313 | u8 phy_fw_ver[PHY_FW_VER_LEN]; | ||
10314 | |||
10315 | strcpy(info->driver, DRV_MODULE_NAME); | ||
10316 | strcpy(info->version, DRV_MODULE_VERSION); | ||
10317 | |||
10318 | phy_fw_ver[0] = '\0'; | ||
10319 | if (bp->port.pmf) { | ||
10320 | bnx2x_acquire_phy_lock(bp); | ||
10321 | bnx2x_get_ext_phy_fw_version(&bp->link_params, | ||
10322 | (bp->state != BNX2X_STATE_CLOSED), | ||
10323 | phy_fw_ver, PHY_FW_VER_LEN); | ||
10324 | bnx2x_release_phy_lock(bp); | ||
10325 | } | ||
10326 | |||
10327 | strncpy(info->fw_version, bp->fw_ver, 32); | ||
10328 | snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), | ||
10329 | "bc %d.%d.%d%s%s", | ||
10330 | (bp->common.bc_ver & 0xff0000) >> 16, | ||
10331 | (bp->common.bc_ver & 0xff00) >> 8, | ||
10332 | (bp->common.bc_ver & 0xff), | ||
10333 | ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); | ||
10334 | strcpy(info->bus_info, pci_name(bp->pdev)); | ||
10335 | info->n_stats = BNX2X_NUM_STATS; | ||
10336 | info->testinfo_len = BNX2X_NUM_TESTS; | ||
10337 | info->eedump_len = bp->common.flash_size; | ||
10338 | info->regdump_len = bnx2x_get_regs_len(dev); | ||
10339 | } | ||
10340 | |||
10341 | static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
10342 | { | ||
10343 | struct bnx2x *bp = netdev_priv(dev); | ||
10344 | |||
10345 | if (bp->flags & NO_WOL_FLAG) { | ||
10346 | wol->supported = 0; | ||
10347 | wol->wolopts = 0; | ||
10348 | } else { | ||
10349 | wol->supported = WAKE_MAGIC; | ||
10350 | if (bp->wol) | ||
10351 | wol->wolopts = WAKE_MAGIC; | ||
10352 | else | ||
10353 | wol->wolopts = 0; | ||
10354 | } | ||
10355 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | ||
10356 | } | ||
10357 | |||
10358 | static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
10359 | { | ||
10360 | struct bnx2x *bp = netdev_priv(dev); | ||
10361 | |||
10362 | if (wol->wolopts & ~WAKE_MAGIC) | ||
10363 | return -EINVAL; | ||
10364 | |||
10365 | if (wol->wolopts & WAKE_MAGIC) { | ||
10366 | if (bp->flags & NO_WOL_FLAG) | ||
10367 | return -EINVAL; | ||
10368 | |||
10369 | bp->wol = 1; | ||
10370 | } else | ||
10371 | bp->wol = 0; | ||
10372 | |||
10373 | return 0; | ||
10374 | } | ||
10375 | |||
10376 | static u32 bnx2x_get_msglevel(struct net_device *dev) | ||
10377 | { | ||
10378 | struct bnx2x *bp = netdev_priv(dev); | ||
10379 | |||
10380 | return bp->msg_enable; | ||
10381 | } | ||
10382 | |||
10383 | static void bnx2x_set_msglevel(struct net_device *dev, u32 level) | ||
10384 | { | ||
10385 | struct bnx2x *bp = netdev_priv(dev); | ||
10386 | |||
10387 | if (capable(CAP_NET_ADMIN)) | ||
10388 | bp->msg_enable = level; | ||
10389 | } | ||
10390 | |||
10391 | static int bnx2x_nway_reset(struct net_device *dev) | ||
10392 | { | ||
10393 | struct bnx2x *bp = netdev_priv(dev); | ||
10394 | |||
10395 | if (!bp->port.pmf) | ||
10396 | return 0; | ||
10397 | |||
10398 | if (netif_running(dev)) { | ||
10399 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
10400 | bnx2x_link_set(bp); | ||
10401 | } | ||
10402 | |||
10403 | return 0; | ||
10404 | } | ||
10405 | |||
10406 | static u32 bnx2x_get_link(struct net_device *dev) | ||
10407 | { | ||
10408 | struct bnx2x *bp = netdev_priv(dev); | ||
10409 | |||
10410 | if (bp->flags & MF_FUNC_DIS) | ||
10411 | return 0; | ||
10412 | |||
10413 | return bp->link_vars.link_up; | ||
10414 | } | ||
10415 | |||
10416 | static int bnx2x_get_eeprom_len(struct net_device *dev) | ||
10417 | { | ||
10418 | struct bnx2x *bp = netdev_priv(dev); | ||
10419 | |||
10420 | return bp->common.flash_size; | ||
10421 | } | ||
10422 | |||
10423 | static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) | ||
10424 | { | ||
10425 | int port = BP_PORT(bp); | ||
10426 | int count, i; | ||
10427 | u32 val = 0; | ||
10428 | |||
10429 | /* adjust timeout for emulation/FPGA */ | ||
10430 | count = NVRAM_TIMEOUT_COUNT; | ||
10431 | if (CHIP_REV_IS_SLOW(bp)) | ||
10432 | count *= 100; | ||
10433 | |||
10434 | /* request access to nvram interface */ | ||
10435 | REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, | ||
10436 | (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); | ||
10437 | |||
10438 | for (i = 0; i < count*10; i++) { | ||
10439 | val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); | ||
10440 | if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) | ||
10441 | break; | ||
10442 | |||
10443 | udelay(5); | ||
10444 | } | ||
10445 | |||
10446 | if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { | ||
10447 | DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n"); | ||
10448 | return -EBUSY; | ||
10449 | } | ||
10450 | |||
10451 | return 0; | ||
10452 | } | ||
10453 | |||
10454 | static int bnx2x_release_nvram_lock(struct bnx2x *bp) | ||
10455 | { | ||
10456 | int port = BP_PORT(bp); | ||
10457 | int count, i; | ||
10458 | u32 val = 0; | ||
10459 | |||
10460 | /* adjust timeout for emulation/FPGA */ | ||
10461 | count = NVRAM_TIMEOUT_COUNT; | ||
10462 | if (CHIP_REV_IS_SLOW(bp)) | ||
10463 | count *= 100; | ||
10464 | |||
10465 | /* relinquish nvram interface */ | ||
10466 | REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, | ||
10467 | (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); | ||
10468 | |||
10469 | for (i = 0; i < count*10; i++) { | ||
10470 | val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); | ||
10471 | if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) | ||
10472 | break; | ||
10473 | |||
10474 | udelay(5); | ||
10475 | } | ||
10476 | |||
10477 | if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { | ||
10478 | DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n"); | ||
10479 | return -EBUSY; | ||
10480 | } | ||
10481 | |||
10482 | return 0; | ||
10483 | } | ||
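Both NVRAM lock helpers above follow the same shape: write a request (or clear) bit to the arbiter, then poll the status register with a short delay until the grant bit appears (or disappears) or the emulation-scaled timeout expires, returning -EBUSY on timeout. A pure-logic sketch of that poll loop with a fake register:

#include <stdio.h>

static int poll_calls;

/* Fake arbiter: pretend the grant bit shows up on the 4th read. */
static int grant_bit_set(void)
{
        return ++poll_calls >= 4;
}

static int poll_until(int (*done)(void), int count)
{
        int i;

        for (i = 0; i < count; i++)     /* udelay(5) between reads in the driver */
                if (done())
                        return 0;       /* acquired/released */
        return -1;                      /* the driver returns -EBUSY here */
}

int main(void)
{
        int rc = poll_until(grant_bit_set, 10 * 10);

        printf("rc=%d after %d reads\n", rc, poll_calls); /* rc=0 after 4 reads */
        return 0;
}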
10484 | |||
10485 | static void bnx2x_enable_nvram_access(struct bnx2x *bp) | ||
10486 | { | ||
10487 | u32 val; | ||
10488 | |||
10489 | val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); | ||
10490 | |||
10491 | /* enable both bits, even on read */ | ||
10492 | REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, | ||
10493 | (val | MCPR_NVM_ACCESS_ENABLE_EN | | ||
10494 | MCPR_NVM_ACCESS_ENABLE_WR_EN)); | ||
10495 | } | ||
10496 | |||
10497 | static void bnx2x_disable_nvram_access(struct bnx2x *bp) | ||
10498 | { | ||
10499 | u32 val; | ||
10500 | |||
10501 | val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); | ||
10502 | |||
10503 | /* disable both bits, even after read */ | ||
10504 | REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, | ||
10505 | (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | | ||
10506 | MCPR_NVM_ACCESS_ENABLE_WR_EN))); | ||
10507 | } | ||
10508 | |||
10509 | static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, | ||
10510 | u32 cmd_flags) | ||
10511 | { | ||
10512 | int count, i, rc; | ||
10513 | u32 val; | ||
10514 | |||
10515 | /* build the command word */ | ||
10516 | cmd_flags |= MCPR_NVM_COMMAND_DOIT; | ||
10517 | |||
10518 | /* need to clear DONE bit separately */ | ||
10519 | REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); | ||
10520 | |||
10521 | /* address of the NVRAM to read from */ | ||
10522 | REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, | ||
10523 | (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); | ||
10524 | |||
10525 | /* issue a read command */ | ||
10526 | REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); | ||
10527 | |||
10528 | /* adjust timeout for emulation/FPGA */ | ||
10529 | count = NVRAM_TIMEOUT_COUNT; | ||
10530 | if (CHIP_REV_IS_SLOW(bp)) | ||
10531 | count *= 100; | ||
10532 | |||
10533 | /* wait for completion */ | ||
10534 | *ret_val = 0; | ||
10535 | rc = -EBUSY; | ||
10536 | for (i = 0; i < count; i++) { | ||
10537 | udelay(5); | ||
10538 | val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); | ||
10539 | |||
10540 | if (val & MCPR_NVM_COMMAND_DONE) { | ||
10541 | val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); | ||
10542 | /* we read nvram data in cpu order, | ||
10543 | * but ethtool sees it as an array of bytes; | ||
10544 | * converting to big-endian will do the work */ | ||
10545 | *ret_val = cpu_to_be32(val); | ||
10546 | rc = 0; | ||
10547 | break; | ||
10548 | } | ||
10549 | } | ||
10550 | |||
10551 | return rc; | ||
10552 | } | ||
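The cpu_to_be32() at the end of bnx2x_nvram_read_dword() is what makes the comment above hold: ethtool hands the buffer to userspace as a byte array, so the word read from the NVRAM data register must land in memory in big-endian (flash) byte order on any host. In this userspace sketch htonl() stands in for cpu_to_be32():

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
        unsigned int val = 0x11223344;  /* dword as the CPU register holds it */
        unsigned int be  = htonl(val);  /* cpu_to_be32() equivalent */
        unsigned char buf[4];

        memcpy(buf, &be, sizeof(buf));
        printf("%02x %02x %02x %02x\n",
               buf[0], buf[1], buf[2], buf[3]); /* 11 22 33 44 on any host */
        return 0;
}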
10553 | |||
10554 | static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, | ||
10555 | int buf_size) | ||
10556 | { | ||
10557 | int rc; | ||
10558 | u32 cmd_flags; | ||
10559 | __be32 val; | ||
10560 | |||
10561 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { | ||
10562 | DP(BNX2X_MSG_NVM, | ||
10563 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", | ||
10564 | offset, buf_size); | ||
10565 | return -EINVAL; | ||
10566 | } | ||
10567 | |||
10568 | if (offset + buf_size > bp->common.flash_size) { | ||
10569 | DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" | ||
10570 | " buf_size (0x%x) > flash_size (0x%x)\n", | ||
10571 | offset, buf_size, bp->common.flash_size); | ||
10572 | return -EINVAL; | ||
10573 | } | ||
10574 | |||
10575 | /* request access to nvram interface */ | ||
10576 | rc = bnx2x_acquire_nvram_lock(bp); | ||
10577 | if (rc) | ||
10578 | return rc; | ||
10579 | |||
10580 | /* enable access to nvram interface */ | ||
10581 | bnx2x_enable_nvram_access(bp); | ||
10582 | |||
10583 | /* read the first word(s) */ | ||
10584 | cmd_flags = MCPR_NVM_COMMAND_FIRST; | ||
10585 | while ((buf_size > sizeof(u32)) && (rc == 0)) { | ||
10586 | rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); | ||
10587 | memcpy(ret_buf, &val, 4); | ||
10588 | |||
10589 | /* advance to the next dword */ | ||
10590 | offset += sizeof(u32); | ||
10591 | ret_buf += sizeof(u32); | ||
10592 | buf_size -= sizeof(u32); | ||
10593 | cmd_flags = 0; | ||
10594 | } | ||
10595 | |||
10596 | if (rc == 0) { | ||
10597 | cmd_flags |= MCPR_NVM_COMMAND_LAST; | ||
10598 | rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); | ||
10599 | memcpy(ret_buf, &val, 4); | ||
10600 | } | ||
10601 | |||
10602 | /* disable access to nvram interface */ | ||
10603 | bnx2x_disable_nvram_access(bp); | ||
10604 | bnx2x_release_nvram_lock(bp); | ||
10605 | |||
10606 | return rc; | ||
10607 | } | ||
10608 | |||
10609 | static int bnx2x_get_eeprom(struct net_device *dev, | ||
10610 | struct ethtool_eeprom *eeprom, u8 *eebuf) | ||
10611 | { | ||
10612 | struct bnx2x *bp = netdev_priv(dev); | ||
10613 | int rc; | ||
10614 | |||
10615 | if (!netif_running(dev)) | ||
10616 | return -EAGAIN; | ||
10617 | |||
10618 | DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" | ||
10619 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", | ||
10620 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, | ||
10621 | eeprom->len, eeprom->len); | ||
10622 | |||
10623 | /* parameters already validated in ethtool_get_eeprom */ | ||
10624 | |||
10625 | rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); | ||
10626 | |||
10627 | return rc; | ||
10628 | } | ||
10629 | |||
10630 | static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, | ||
10631 | u32 cmd_flags) | ||
10632 | { | ||
10633 | int count, i, rc; | ||
10634 | |||
10635 | /* build the command word */ | ||
10636 | cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR; | ||
10637 | |||
10638 | /* need to clear DONE bit separately */ | ||
10639 | REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); | ||
10640 | |||
10641 | /* write the data */ | ||
10642 | REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val); | ||
10643 | |||
10644 | /* address of the NVRAM to write to */ | ||
10645 | REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, | ||
10646 | (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); | ||
10647 | |||
10648 | /* issue the write command */ | ||
10649 | REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); | ||
10650 | |||
10651 | /* adjust timeout for emulation/FPGA */ | ||
10652 | count = NVRAM_TIMEOUT_COUNT; | ||
10653 | if (CHIP_REV_IS_SLOW(bp)) | ||
10654 | count *= 100; | ||
10655 | |||
10656 | /* wait for completion */ | ||
10657 | rc = -EBUSY; | ||
10658 | for (i = 0; i < count; i++) { | ||
10659 | udelay(5); | ||
10660 | val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); | ||
10661 | if (val & MCPR_NVM_COMMAND_DONE) { | ||
10662 | rc = 0; | ||
10663 | break; | ||
10664 | } | ||
10665 | } | ||
10666 | |||
10667 | return rc; | ||
10668 | } | ||
10669 | |||
10670 | #define BYTE_OFFSET(offset) (8 * (offset & 0x03)) | ||
10671 | |||
10672 | static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, | ||
10673 | int buf_size) | ||
10674 | { | ||
10675 | int rc; | ||
10676 | u32 cmd_flags; | ||
10677 | u32 align_offset; | ||
10678 | __be32 val; | ||
10679 | |||
10680 | if (offset + buf_size > bp->common.flash_size) { | ||
10681 | DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" | ||
10682 | " buf_size (0x%x) > flash_size (0x%x)\n", | ||
10683 | offset, buf_size, bp->common.flash_size); | ||
10684 | return -EINVAL; | ||
10685 | } | ||
10686 | |||
10687 | /* request access to nvram interface */ | ||
10688 | rc = bnx2x_acquire_nvram_lock(bp); | ||
10689 | if (rc) | ||
10690 | return rc; | ||
10691 | |||
10692 | /* enable access to nvram interface */ | ||
10693 | bnx2x_enable_nvram_access(bp); | ||
10694 | |||
10695 | cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); | ||
10696 | align_offset = (offset & ~0x03); | ||
10697 | rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags); | ||
10698 | |||
10699 | if (rc == 0) { | ||
10700 | val &= ~(0xff << BYTE_OFFSET(offset)); | ||
10701 | val |= (*data_buf << BYTE_OFFSET(offset)); | ||
10702 | |||
10703 | /* nvram data is returned as an array of bytes | ||
10704 | * convert it back to cpu order */ | ||
10705 | val = be32_to_cpu(val); | ||
10706 | |||
10707 | rc = bnx2x_nvram_write_dword(bp, align_offset, val, | ||
10708 | cmd_flags); | ||
10709 | } | ||
10710 | |||
10711 | /* disable access to nvram interface */ | ||
10712 | bnx2x_disable_nvram_access(bp); | ||
10713 | bnx2x_release_nvram_lock(bp); | ||
10714 | |||
10715 | return rc; | ||
10716 | } | ||
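The single-byte path above is a read-modify-write on the aligned dword: BYTE_OFFSET(offset) = 8 * (offset & 3) selects the byte lane, which is masked out and replaced before the dword is written back. The mask-and-insert arithmetic in isolation; the sample values are illustrative.

#include <stdio.h>

#define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))

int main(void)
{
        unsigned int dword = 0xaabbccdd;        /* aligned dword read back */
        unsigned int offset = 2;                /* unaligned byte to patch */
        unsigned char data = 0x5e;

        dword &= ~(0xffu << BYTE_OFFSET(offset));           /* clear the byte lane */
        dword |= (unsigned int)data << BYTE_OFFSET(offset); /* insert new byte */

        printf("0x%08x\n", dword);              /* prints 0xaa5eccdd */
        return 0;
}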
10717 | |||
10718 | static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, | ||
10719 | int buf_size) | ||
10720 | { | ||
10721 | int rc; | ||
10722 | u32 cmd_flags; | ||
10723 | u32 val; | ||
10724 | u32 written_so_far; | ||
10725 | |||
10726 | if (buf_size == 1) /* ethtool */ | ||
10727 | return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); | ||
10728 | |||
10729 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { | ||
10730 | DP(BNX2X_MSG_NVM, | ||
10731 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", | ||
10732 | offset, buf_size); | ||
10733 | return -EINVAL; | ||
10734 | } | ||
10735 | |||
10736 | if (offset + buf_size > bp->common.flash_size) { | ||
10737 | DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" | ||
10738 | " buf_size (0x%x) > flash_size (0x%x)\n", | ||
10739 | offset, buf_size, bp->common.flash_size); | ||
10740 | return -EINVAL; | ||
10741 | } | ||
10742 | |||
10743 | /* request access to nvram interface */ | ||
10744 | rc = bnx2x_acquire_nvram_lock(bp); | ||
10745 | if (rc) | ||
10746 | return rc; | ||
10747 | |||
10748 | /* enable access to nvram interface */ | ||
10749 | bnx2x_enable_nvram_access(bp); | ||
10750 | |||
10751 | written_so_far = 0; | ||
10752 | cmd_flags = MCPR_NVM_COMMAND_FIRST; | ||
10753 | while ((written_so_far < buf_size) && (rc == 0)) { | ||
10754 | if (written_so_far == (buf_size - sizeof(u32))) | ||
10755 | cmd_flags |= MCPR_NVM_COMMAND_LAST; | ||
10756 | else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) | ||
10757 | cmd_flags |= MCPR_NVM_COMMAND_LAST; | ||
10758 | else if ((offset % NVRAM_PAGE_SIZE) == 0) | ||
10759 | cmd_flags |= MCPR_NVM_COMMAND_FIRST; | ||
10760 | |||
10761 | memcpy(&val, data_buf, 4); | ||
10762 | |||
10763 | rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); | ||
10764 | |||
10765 | /* advance to the next dword */ | ||
10766 | offset += sizeof(u32); | ||
10767 | data_buf += sizeof(u32); | ||
10768 | written_so_far += sizeof(u32); | ||
10769 | cmd_flags = 0; | ||
10770 | } | ||
10771 | |||
10772 | /* disable access to nvram interface */ | ||
10773 | bnx2x_disable_nvram_access(bp); | ||
10774 | bnx2x_release_nvram_lock(bp); | ||
10775 | |||
10776 | return rc; | ||
10777 | } | ||
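The flag juggling in the write loop above exists because the flash is paged: LAST must be set on the final dword of the buffer and on the last dword of each NVRAM page, and FIRST again on the first dword of the following page. A sketch that traces the flags for a 16-byte write straddling a page boundary; the flag values and page size are illustrative.

#include <stdio.h>

#define CMD_FIRST       1
#define CMD_LAST        2
#define PAGE_SIZE_      256     /* stand-in for NVRAM_PAGE_SIZE */

int main(void)
{
        int buf_size = 16, offset = 248, written = 0;
        int cmd_flags = CMD_FIRST;

        while (written < buf_size) {
                if (written == buf_size - 4)
                        cmd_flags |= CMD_LAST;          /* end of the buffer */
                else if (((offset + 4) % PAGE_SIZE_) == 0)
                        cmd_flags |= CMD_LAST;          /* end of a flash page */
                else if ((offset % PAGE_SIZE_) == 0)
                        cmd_flags |= CMD_FIRST;         /* start of a new page */

                printf("offset %3d flags %d\n", offset, cmd_flags);
                offset += 4;
                written += 4;
                cmd_flags = 0;
        }
        return 0;       /* prints flags 1, 2, 1, 2 across the page boundary */
}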
10778 | |||
10779 | static int bnx2x_set_eeprom(struct net_device *dev, | ||
10780 | struct ethtool_eeprom *eeprom, u8 *eebuf) | ||
10781 | { | ||
10782 | struct bnx2x *bp = netdev_priv(dev); | ||
10783 | int port = BP_PORT(bp); | ||
10784 | int rc = 0; | ||
10785 | |||
10786 | if (!netif_running(dev)) | ||
10787 | return -EAGAIN; | ||
10788 | |||
10789 | DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" | ||
10790 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", | ||
10791 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, | ||
10792 | eeprom->len, eeprom->len); | ||
10793 | |||
10794 | /* parameters already validated in ethtool_set_eeprom */ | ||
10795 | |||
10796 | /* PHY eeprom can be accessed only by the PMF */ | ||
10797 | if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) && | ||
10798 | !bp->port.pmf) | ||
10799 | return -EINVAL; | ||
10800 | |||
10801 | if (eeprom->magic == 0x50485950) { | ||
10802 | /* 'PHYP' (0x50485950): prepare phy for FW upgrade */ | ||
10803 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
10804 | |||
10805 | bnx2x_acquire_phy_lock(bp); | ||
10806 | rc |= bnx2x_link_reset(&bp->link_params, | ||
10807 | &bp->link_vars, 0); | ||
10808 | if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == | ||
10809 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) | ||
10810 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, | ||
10811 | MISC_REGISTERS_GPIO_HIGH, port); | ||
10812 | bnx2x_release_phy_lock(bp); | ||
10813 | bnx2x_link_report(bp); | ||
10814 | |||
10815 | } else if (eeprom->magic == 0x50485952) { | ||
10816 | /* 'PHYR' (0x50485952): re-init link after FW upgrade */ | ||
10817 | if (bp->state == BNX2X_STATE_OPEN) { | ||
10818 | bnx2x_acquire_phy_lock(bp); | ||
10819 | rc |= bnx2x_link_reset(&bp->link_params, | ||
10820 | &bp->link_vars, 1); | ||
10821 | |||
10822 | rc |= bnx2x_phy_init(&bp->link_params, | ||
10823 | &bp->link_vars); | ||
10824 | bnx2x_release_phy_lock(bp); | ||
10825 | bnx2x_calc_fc_adv(bp); | ||
10826 | } | ||
10827 | } else if (eeprom->magic == 0x53985943) { | ||
10828 | /* 'PHYC' (0x53985943): PHY FW upgrade completed */ | ||
10829 | if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == | ||
10830 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) { | ||
10831 | u8 ext_phy_addr = | ||
10832 | XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config); | ||
10833 | |||
10834 | /* DSP Remove Download Mode */ | ||
10835 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, | ||
10836 | MISC_REGISTERS_GPIO_LOW, port); | ||
10837 | |||
10838 | bnx2x_acquire_phy_lock(bp); | ||
10839 | |||
10840 | bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); | ||
10841 | |||
10842 | /* wait 0.5 sec to allow it to run */ | ||
10843 | msleep(500); | ||
10844 | bnx2x_ext_phy_hw_reset(bp, port); | ||
10845 | msleep(500); | ||
10846 | bnx2x_release_phy_lock(bp); | ||
10847 | } | ||
10848 | } else | ||
10849 | rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); | ||
10850 | |||
10851 | return rc; | ||
10852 | } | ||
10853 | |||
10854 | static int bnx2x_get_coalesce(struct net_device *dev, | ||
10855 | struct ethtool_coalesce *coal) | ||
10856 | { | ||
10857 | struct bnx2x *bp = netdev_priv(dev); | ||
10858 | |||
10859 | memset(coal, 0, sizeof(struct ethtool_coalesce)); | ||
10860 | |||
10861 | coal->rx_coalesce_usecs = bp->rx_ticks; | ||
10862 | coal->tx_coalesce_usecs = bp->tx_ticks; | ||
10863 | |||
10864 | return 0; | ||
10865 | } | ||
10866 | |||
10867 | static int bnx2x_set_coalesce(struct net_device *dev, | ||
10868 | struct ethtool_coalesce *coal) | ||
10869 | { | ||
10870 | struct bnx2x *bp = netdev_priv(dev); | ||
10871 | |||
10872 | bp->rx_ticks = (u16)coal->rx_coalesce_usecs; | ||
10873 | if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT) | ||
10874 | bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT; | ||
10875 | |||
10876 | bp->tx_ticks = (u16)coal->tx_coalesce_usecs; | ||
10877 | if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT) | ||
10878 | bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT; | ||
10879 | |||
10880 | if (netif_running(dev)) | ||
10881 | bnx2x_update_coalesce(bp); | ||
10882 | |||
10883 | return 0; | ||
10884 | } | ||
10885 | |||
10886 | static void bnx2x_get_ringparam(struct net_device *dev, | ||
10887 | struct ethtool_ringparam *ering) | ||
10888 | { | ||
10889 | struct bnx2x *bp = netdev_priv(dev); | ||
10890 | |||
10891 | ering->rx_max_pending = MAX_RX_AVAIL; | ||
10892 | ering->rx_mini_max_pending = 0; | ||
10893 | ering->rx_jumbo_max_pending = 0; | ||
10894 | |||
10895 | ering->rx_pending = bp->rx_ring_size; | ||
10896 | ering->rx_mini_pending = 0; | ||
10897 | ering->rx_jumbo_pending = 0; | ||
10898 | |||
10899 | ering->tx_max_pending = MAX_TX_AVAIL; | ||
10900 | ering->tx_pending = bp->tx_ring_size; | ||
10901 | } | ||
10902 | |||
10903 | static int bnx2x_set_ringparam(struct net_device *dev, | ||
10904 | struct ethtool_ringparam *ering) | ||
10905 | { | ||
10906 | struct bnx2x *bp = netdev_priv(dev); | ||
10907 | int rc = 0; | ||
10908 | |||
10909 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
10910 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
10911 | return -EAGAIN; | ||
10912 | } | ||
10913 | |||
10914 | if ((ering->rx_pending > MAX_RX_AVAIL) || | ||
10915 | (ering->tx_pending > MAX_TX_AVAIL) || | ||
10916 | (ering->tx_pending <= MAX_SKB_FRAGS + 4)) | ||
10917 | return -EINVAL; | ||
10918 | |||
10919 | bp->rx_ring_size = ering->rx_pending; | ||
10920 | bp->tx_ring_size = ering->tx_pending; | ||
10921 | |||
10922 | if (netif_running(dev)) { | ||
10923 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
10924 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); | ||
10925 | } | ||
10926 | |||
10927 | return rc; | ||
10928 | } | ||
10929 | |||
10930 | static void bnx2x_get_pauseparam(struct net_device *dev, | ||
10931 | struct ethtool_pauseparam *epause) | ||
10932 | { | ||
10933 | struct bnx2x *bp = netdev_priv(dev); | ||
10934 | |||
10935 | epause->autoneg = (bp->link_params.req_flow_ctrl == | ||
10936 | BNX2X_FLOW_CTRL_AUTO) && | ||
10937 | (bp->link_params.req_line_speed == SPEED_AUTO_NEG); | ||
10938 | |||
10939 | epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) == | ||
10940 | BNX2X_FLOW_CTRL_RX); | ||
10941 | epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) == | ||
10942 | BNX2X_FLOW_CTRL_TX); | ||
10943 | |||
10944 | DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" | ||
10945 | DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", | ||
10946 | epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); | ||
10947 | } | ||
10948 | |||
10949 | static int bnx2x_set_pauseparam(struct net_device *dev, | ||
10950 | struct ethtool_pauseparam *epause) | ||
10951 | { | ||
10952 | struct bnx2x *bp = netdev_priv(dev); | ||
10953 | |||
10954 | if (IS_E1HMF(bp)) | ||
10955 | return 0; | ||
10956 | |||
10957 | DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" | ||
10958 | DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", | ||
10959 | epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); | ||
10960 | |||
10961 | bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; | ||
10962 | |||
10963 | if (epause->rx_pause) | ||
10964 | bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX; | ||
10965 | |||
10966 | if (epause->tx_pause) | ||
10967 | bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX; | ||
10968 | |||
10969 | if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) | ||
10970 | bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; | ||
10971 | |||
10972 | if (epause->autoneg) { | ||
10973 | if (!(bp->port.supported & SUPPORTED_Autoneg)) { | ||
10974 | DP(NETIF_MSG_LINK, "autoneg not supported\n"); | ||
10975 | return -EINVAL; | ||
10976 | } | ||
10977 | |||
10978 | if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) | ||
10979 | bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; | ||
10980 | } | ||
10981 | |||
10982 | DP(NETIF_MSG_LINK, | ||
10983 | "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl); | ||
10984 | |||
10985 | if (netif_running(dev)) { | ||
10986 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
10987 | bnx2x_link_set(bp); | ||
10988 | } | ||
10989 | |||
10990 | return 0; | ||
10991 | } | ||
10992 | |||
10993 | static int bnx2x_set_flags(struct net_device *dev, u32 data) | ||
10994 | { | ||
10995 | struct bnx2x *bp = netdev_priv(dev); | ||
10996 | int changed = 0; | ||
10997 | int rc = 0; | ||
10998 | |||
10999 | if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH)) | ||
11000 | return -EINVAL; | ||
11001 | |||
11002 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
11003 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
11004 | return -EAGAIN; | ||
11005 | } | ||
11006 | |||
11007 | /* TPA requires Rx CSUM offloading */ | ||
11008 | if ((data & ETH_FLAG_LRO) && bp->rx_csum) { | ||
11009 | if (!disable_tpa) { | ||
11010 | if (!(dev->features & NETIF_F_LRO)) { | ||
11011 | dev->features |= NETIF_F_LRO; | ||
11012 | bp->flags |= TPA_ENABLE_FLAG; | ||
11013 | changed = 1; | ||
11014 | } | ||
11015 | } else | ||
11016 | rc = -EINVAL; | ||
11017 | } else if (dev->features & NETIF_F_LRO) { | ||
11018 | dev->features &= ~NETIF_F_LRO; | ||
11019 | bp->flags &= ~TPA_ENABLE_FLAG; | ||
11020 | changed = 1; | ||
11021 | } | ||
11022 | |||
11023 | if (data & ETH_FLAG_RXHASH) | ||
11024 | dev->features |= NETIF_F_RXHASH; | ||
11025 | else | ||
11026 | dev->features &= ~NETIF_F_RXHASH; | ||
11027 | |||
11028 | if (changed && netif_running(dev)) { | ||
11029 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
11030 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); | ||
11031 | } | ||
11032 | |||
11033 | return rc; | ||
11034 | } | ||
11035 | |||
11036 | static u32 bnx2x_get_rx_csum(struct net_device *dev) | ||
11037 | { | ||
11038 | struct bnx2x *bp = netdev_priv(dev); | ||
11039 | |||
11040 | return bp->rx_csum; | ||
11041 | } | ||
11042 | |||
11043 | static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) | ||
11044 | { | ||
11045 | struct bnx2x *bp = netdev_priv(dev); | ||
11046 | int rc = 0; | ||
11047 | |||
11048 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
11049 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
11050 | return -EAGAIN; | ||
11051 | } | ||
11052 | |||
11053 | bp->rx_csum = data; | ||
11054 | |||
11055 | /* Disable TPA when Rx CSUM is disabled; otherwise all | ||
11056 | TPA'ed packets will be discarded due to a wrong TCP CSUM */ | ||
11057 | if (!data) { | ||
11058 | u32 flags = ethtool_op_get_flags(dev); | ||
11059 | |||
11060 | rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO)); | ||
11061 | } | ||
11062 | |||
11063 | return rc; | ||
11064 | } | ||
11065 | |||
11066 | static int bnx2x_set_tso(struct net_device *dev, u32 data) | ||
11067 | { | ||
11068 | if (data) { | ||
11069 | dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); | ||
11070 | dev->features |= NETIF_F_TSO6; | ||
11071 | } else { | ||
11072 | dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN); | ||
11073 | dev->features &= ~NETIF_F_TSO6; | ||
11074 | } | ||
11075 | |||
11076 | return 0; | ||
11077 | } | ||
11078 | |||
11079 | static const struct { | ||
11080 | char string[ETH_GSTRING_LEN]; | ||
11081 | } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { | ||
11082 | { "register_test (offline)" }, | ||
11083 | { "memory_test (offline)" }, | ||
11084 | { "loopback_test (offline)" }, | ||
11085 | { "nvram_test (online)" }, | ||
11086 | { "interrupt_test (online)" }, | ||
11087 | { "link_test (online)" }, | ||
11088 | { "idle check (online)" } | ||
11089 | }; | ||
11090 | |||
11091 | static int bnx2x_test_registers(struct bnx2x *bp) | ||
11092 | { | ||
11093 | int idx, i, rc = -ENODEV; | ||
11094 | u32 wr_val = 0; | ||
11095 | int port = BP_PORT(bp); | ||
11096 | static const struct { | ||
11097 | u32 offset0; | ||
11098 | u32 offset1; | ||
11099 | u32 mask; | ||
11100 | } reg_tbl[] = { | ||
11101 | /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, | ||
11102 | { DORQ_REG_DB_ADDR0, 4, 0xffffffff }, | ||
11103 | { HC_REG_AGG_INT_0, 4, 0x000003ff }, | ||
11104 | { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, | ||
11105 | { PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, | ||
11106 | { PRS_REG_CID_PORT_0, 4, 0x00ffffff }, | ||
11107 | { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, | ||
11108 | { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, | ||
11109 | { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, | ||
11110 | { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, | ||
11111 | /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, | ||
11112 | { QM_REG_CONNNUM_0, 4, 0x000fffff }, | ||
11113 | { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, | ||
11114 | { SRC_REG_KEYRSS0_0, 40, 0xffffffff }, | ||
11115 | { SRC_REG_KEYRSS0_7, 40, 0xffffffff }, | ||
11116 | { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, | ||
11117 | { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, | ||
11118 | { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, | ||
11119 | { NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, | ||
11120 | { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, | ||
11121 | /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, | ||
11122 | { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, | ||
11123 | { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, | ||
11124 | { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, | ||
11125 | { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, | ||
11126 | { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, | ||
11127 | { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, | ||
11128 | { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, | ||
11129 | { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, | ||
11130 | { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, | ||
11131 | /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, | ||
11132 | { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, | ||
11133 | { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, | ||
11134 | { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 }, | ||
11135 | { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, | ||
11136 | { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, | ||
11137 | { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, | ||
11138 | |||
11139 | { 0xffffffff, 0, 0x00000000 } | ||
11140 | }; | ||
11141 | |||
11142 | if (!netif_running(bp->dev)) | ||
11143 | return rc; | ||
11144 | |||
11145 | /* Run the test twice: | ||
11146 | first by writing 0x00000000, then by writing 0xffffffff */ | ||
11147 | for (idx = 0; idx < 2; idx++) { | ||
11148 | |||
11149 | switch (idx) { | ||
11150 | case 0: | ||
11151 | wr_val = 0; | ||
11152 | break; | ||
11153 | case 1: | ||
11154 | wr_val = 0xffffffff; | ||
11155 | break; | ||
11156 | } | ||
11157 | |||
11158 | for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { | ||
11159 | u32 offset, mask, save_val, val; | ||
11160 | |||
11161 | offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; | ||
11162 | mask = reg_tbl[i].mask; | ||
11163 | |||
11164 | save_val = REG_RD(bp, offset); | ||
11165 | |||
11166 | REG_WR(bp, offset, (wr_val & mask)); | ||
11167 | val = REG_RD(bp, offset); | ||
11168 | |||
11169 | /* Restore the original register's value */ | ||
11170 | REG_WR(bp, offset, save_val); | ||
11171 | |||
11172 | /* verify value is as expected */ | ||
11173 | if ((val & mask) != (wr_val & mask)) { | ||
11174 | DP(NETIF_MSG_PROBE, | ||
11175 | "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", | ||
11176 | offset, val, wr_val, mask); | ||
11177 | goto test_reg_exit; | ||
11178 | } | ||
11179 | } | ||
11180 | } | ||
11181 | |||
11182 | rc = 0; | ||
11183 | |||
11184 | test_reg_exit: | ||
11185 | return rc; | ||
11186 | } | ||
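
Each reg_tbl entry above pairs a base offset with a per-port stride (offset1) and a mask of implemented bits; the loop then applies a write/read-back/restore probe per register. That probe, extracted as a sketch (REG_RD/REG_WR are the driver's own accessors):

/* Sketch: one masked write/read-back/restore probe, as performed for
 * every reg_tbl entry with both the 0x00000000 and 0xffffffff patterns. */
static bool reg_probe_ok(struct bnx2x *bp, u32 offset, u32 mask, u32 pattern)
{
	u32 saved = REG_RD(bp, offset);
	u32 val;

	REG_WR(bp, offset, pattern & mask);
	val = REG_RD(bp, offset);
	REG_WR(bp, offset, saved);	/* always restore the old value */

	return (val & mask) == (pattern & mask);
}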
11187 | |||
11188 | static int bnx2x_test_memory(struct bnx2x *bp) | ||
11189 | { | ||
11190 | int i, j, rc = -ENODEV; | ||
11191 | u32 val; | ||
11192 | static const struct { | ||
11193 | u32 offset; | ||
11194 | int size; | ||
11195 | } mem_tbl[] = { | ||
11196 | { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE }, | ||
11197 | { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE }, | ||
11198 | { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE }, | ||
11199 | { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE }, | ||
11200 | { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE }, | ||
11201 | { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE }, | ||
11202 | { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE }, | ||
11203 | |||
11204 | { 0xffffffff, 0 } | ||
11205 | }; | ||
11206 | static const struct { | ||
11207 | char *name; | ||
11208 | u32 offset; | ||
11209 | u32 e1_mask; | ||
11210 | u32 e1h_mask; | ||
11211 | } prty_tbl[] = { | ||
11212 | { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 }, | ||
11213 | { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 }, | ||
11214 | { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 }, | ||
11215 | { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 }, | ||
11216 | { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 }, | ||
11217 | { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 }, | ||
11218 | |||
11219 | { NULL, 0xffffffff, 0, 0 } | ||
11220 | }; | ||
11221 | |||
11222 | if (!netif_running(bp->dev)) | ||
11223 | return rc; | ||
11224 | |||
11225 | /* Go through all the memories */ | ||
11226 | for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) | ||
11227 | for (j = 0; j < mem_tbl[i].size; j++) | ||
11228 | REG_RD(bp, mem_tbl[i].offset + j*4); | ||
11229 | |||
11230 | /* Check the parity status */ | ||
11231 | for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { | ||
11232 | val = REG_RD(bp, prty_tbl[i].offset); | ||
11233 | if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || | ||
11234 | (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) { | ||
11235 | DP(NETIF_MSG_HW, | ||
11236 | "%s is 0x%x\n", prty_tbl[i].name, val); | ||
11237 | goto test_mem_exit; | ||
11238 | } | ||
11239 | } | ||
11240 | |||
11241 | rc = 0; | ||
11242 | |||
11243 | test_mem_exit: | ||
11244 | return rc; | ||
11245 | } | ||
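
The memory test only faults on parity bits outside the per-chip masks; e1_mask/e1h_mask whitelist bits known to latch benignly on that silicon. The pass condition, in isolation:

/* Sketch: a parity status register is clean if nothing is set
 * outside the benign (whitelisted) bits for this chip. */
static bool parity_status_ok(u32 status, u32 benign_mask)
{
	return (status & ~benign_mask) == 0;
}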
11246 | |||
11247 | static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up) | ||
11248 | { | ||
11249 | int cnt = 1000; | ||
11250 | |||
11251 | if (link_up) | ||
11252 | while (bnx2x_link_test(bp) && cnt--) | ||
11253 | msleep(10); | ||
11254 | } | ||
11255 | |||
11256 | static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | ||
11257 | { | ||
11258 | unsigned int pkt_size, num_pkts, i; | ||
11259 | struct sk_buff *skb; | ||
11260 | unsigned char *packet; | ||
11261 | struct bnx2x_fastpath *fp_rx = &bp->fp[0]; | ||
11262 | struct bnx2x_fastpath *fp_tx = &bp->fp[0]; | ||
11263 | u16 tx_start_idx, tx_idx; | ||
11264 | u16 rx_start_idx, rx_idx; | ||
11265 | u16 pkt_prod, bd_prod; | ||
11266 | struct sw_tx_bd *tx_buf; | ||
11267 | struct eth_tx_start_bd *tx_start_bd; | ||
11268 | struct eth_tx_parse_bd *pbd = NULL; | ||
11269 | dma_addr_t mapping; | ||
11270 | union eth_rx_cqe *cqe; | ||
11271 | u8 cqe_fp_flags; | ||
11272 | struct sw_rx_bd *rx_buf; | ||
11273 | u16 len; | ||
11274 | int rc = -ENODEV; | ||
11275 | |||
11276 | /* check the loopback mode */ | ||
11277 | switch (loopback_mode) { | ||
11278 | case BNX2X_PHY_LOOPBACK: | ||
11279 | if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10) | ||
11280 | return -EINVAL; | ||
11281 | break; | ||
11282 | case BNX2X_MAC_LOOPBACK: | ||
11283 | bp->link_params.loopback_mode = LOOPBACK_BMAC; | ||
11284 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | ||
11285 | break; | ||
11286 | default: | ||
11287 | return -EINVAL; | ||
11288 | } | ||
11289 | |||
11290 | /* prepare the loopback packet */ | ||
11291 | pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? | ||
11292 | bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); | ||
11293 | skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); | ||
11294 | if (!skb) { | ||
11295 | rc = -ENOMEM; | ||
11296 | goto test_loopback_exit; | ||
11297 | } | ||
11298 | packet = skb_put(skb, pkt_size); | ||
11299 | memcpy(packet, bp->dev->dev_addr, ETH_ALEN); | ||
11300 | memset(packet + ETH_ALEN, 0, ETH_ALEN); | ||
11301 | memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); | ||
11302 | for (i = ETH_HLEN; i < pkt_size; i++) | ||
11303 | packet[i] = (unsigned char) (i & 0xff); | ||
11304 | |||
11305 | /* send the loopback packet */ | ||
11306 | num_pkts = 0; | ||
11307 | tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb); | ||
11308 | rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb); | ||
11309 | |||
11310 | pkt_prod = fp_tx->tx_pkt_prod++; | ||
11311 | tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)]; | ||
11312 | tx_buf->first_bd = fp_tx->tx_bd_prod; | ||
11313 | tx_buf->skb = skb; | ||
11314 | tx_buf->flags = 0; | ||
11315 | |||
11316 | bd_prod = TX_BD(fp_tx->tx_bd_prod); | ||
11317 | tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; | ||
11318 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
11319 | skb_headlen(skb), DMA_TO_DEVICE); | ||
11320 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
11321 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
11322 | tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ | ||
11323 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); | ||
11324 | tx_start_bd->vlan = cpu_to_le16(pkt_prod); | ||
11325 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; | ||
11326 | tx_start_bd->general_data = ((UNICAST_ADDRESS << | ||
11327 | ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1); | ||
11328 | |||
11329 | /* turn on parsing and get a BD */ | ||
11330 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
11331 | pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd; | ||
11332 | |||
11333 | memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); | ||
11334 | |||
11335 | wmb(); | ||
11336 | |||
11337 | fp_tx->tx_db.data.prod += 2; | ||
11338 | barrier(); | ||
11339 | DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw); | ||
11340 | |||
11341 | mmiowb(); | ||
11342 | |||
11343 | num_pkts++; | ||
11344 | fp_tx->tx_bd_prod += 2; /* start + pbd */ | ||
11345 | |||
11346 | udelay(100); | ||
11347 | |||
11348 | tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb); | ||
11349 | if (tx_idx != tx_start_idx + num_pkts) | ||
11350 | goto test_loopback_exit; | ||
11351 | |||
11352 | rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); | ||
11353 | if (rx_idx != rx_start_idx + num_pkts) | ||
11354 | goto test_loopback_exit; | ||
11355 | |||
11356 | cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; | ||
11357 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; | ||
11358 | if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) | ||
11359 | goto test_loopback_rx_exit; | ||
11360 | |||
11361 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); | ||
11362 | if (len != pkt_size) | ||
11363 | goto test_loopback_rx_exit; | ||
11364 | |||
11365 | rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; | ||
11366 | skb = rx_buf->skb; | ||
11367 | skb_reserve(skb, cqe->fast_path_cqe.placement_offset); | ||
11368 | for (i = ETH_HLEN; i < pkt_size; i++) | ||
11369 | if (*(skb->data + i) != (unsigned char) (i & 0xff)) | ||
11370 | goto test_loopback_rx_exit; | ||
11371 | |||
11372 | rc = 0; | ||
11373 | |||
11374 | test_loopback_rx_exit: | ||
11375 | |||
11376 | fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons); | ||
11377 | fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod); | ||
11378 | fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons); | ||
11379 | fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod); | ||
11380 | |||
11381 | /* Update producers */ | ||
11382 | bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod, | ||
11383 | fp_rx->rx_sge_prod); | ||
11384 | |||
11385 | test_loopback_exit: | ||
11386 | bp->link_params.loopback_mode = LOOPBACK_NONE; | ||
11387 | |||
11388 | return rc; | ||
11389 | } | ||
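
The loopback frame needs no shared state between the send and receive halves: the destination MAC is the device's own address, the source is zeroed, the rest of the header is 0x77 filler, and payload byte i is simply i & 0xff. Pattern generation and verification as a pair of hypothetical helpers over a plain buffer:

/* Sketch: fill and check the i & 0xff payload pattern used above. */
static void fill_test_pattern(unsigned char *buf, unsigned int from,
			      unsigned int to)
{
	unsigned int i;

	for (i = from; i < to; i++)
		buf[i] = (unsigned char)(i & 0xff);
}

static bool check_test_pattern(const unsigned char *buf, unsigned int from,
			       unsigned int to)
{
	unsigned int i;

	for (i = from; i < to; i++)
		if (buf[i] != (unsigned char)(i & 0xff))
			return false;
	return true;
}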
11390 | |||
11391 | static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up) | ||
11392 | { | ||
11393 | int rc = 0, res; | ||
11394 | |||
11395 | if (BP_NOMCP(bp)) | ||
11396 | return rc; | ||
11397 | |||
11398 | if (!netif_running(bp->dev)) | ||
11399 | return BNX2X_LOOPBACK_FAILED; | ||
11400 | |||
11401 | bnx2x_netif_stop(bp, 1); | ||
11402 | bnx2x_acquire_phy_lock(bp); | ||
11403 | |||
11404 | res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up); | ||
11405 | if (res) { | ||
11406 | DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); | ||
11407 | rc |= BNX2X_PHY_LOOPBACK_FAILED; | ||
11408 | } | ||
11409 | |||
11410 | res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up); | ||
11411 | if (res) { | ||
11412 | DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res); | ||
11413 | rc |= BNX2X_MAC_LOOPBACK_FAILED; | ||
11414 | } | ||
11415 | |||
11416 | bnx2x_release_phy_lock(bp); | ||
11417 | bnx2x_netif_start(bp); | ||
11418 | |||
11419 | return rc; | ||
11420 | } | ||
11421 | |||
11422 | #define CRC32_RESIDUAL 0xdebb20e3 | ||
11423 | |||
11424 | static int bnx2x_test_nvram(struct bnx2x *bp) | ||
11425 | { | ||
11426 | static const struct { | ||
11427 | int offset; | ||
11428 | int size; | ||
11429 | } nvram_tbl[] = { | ||
11430 | { 0, 0x14 }, /* bootstrap */ | ||
11431 | { 0x14, 0xec }, /* dir */ | ||
11432 | { 0x100, 0x350 }, /* manuf_info */ | ||
11433 | { 0x450, 0xf0 }, /* feature_info */ | ||
11434 | { 0x640, 0x64 }, /* upgrade_key_info */ | ||
11435 | { 0x6a4, 0x64 }, | ||
11436 | { 0x708, 0x70 }, /* manuf_key_info */ | ||
11437 | { 0x778, 0x70 }, | ||
11438 | { 0, 0 } | ||
11439 | }; | ||
11440 | __be32 buf[0x350 / 4]; | ||
11441 | u8 *data = (u8 *)buf; | ||
11442 | int i, rc; | ||
11443 | u32 magic, crc; | ||
11444 | |||
11445 | if (BP_NOMCP(bp)) | ||
11446 | return 0; | ||
11447 | |||
11448 | rc = bnx2x_nvram_read(bp, 0, data, 4); | ||
11449 | if (rc) { | ||
11450 | DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc); | ||
11451 | goto test_nvram_exit; | ||
11452 | } | ||
11453 | |||
11454 | magic = be32_to_cpu(buf[0]); | ||
11455 | if (magic != 0x669955aa) { | ||
11456 | DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic); | ||
11457 | rc = -ENODEV; | ||
11458 | goto test_nvram_exit; | ||
11459 | } | ||
11460 | |||
11461 | for (i = 0; nvram_tbl[i].size; i++) { | ||
11462 | |||
11463 | rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data, | ||
11464 | nvram_tbl[i].size); | ||
11465 | if (rc) { | ||
11466 | DP(NETIF_MSG_PROBE, | ||
11467 | "nvram_tbl[%d] read data (rc %d)\n", i, rc); | ||
11468 | goto test_nvram_exit; | ||
11469 | } | ||
11470 | |||
11471 | crc = ether_crc_le(nvram_tbl[i].size, data); | ||
11472 | if (crc != CRC32_RESIDUAL) { | ||
11473 | DP(NETIF_MSG_PROBE, | ||
11474 | "nvram_tbl[%d] crc value (0x%08x)\n", i, crc); | ||
11475 | rc = -ENODEV; | ||
11476 | goto test_nvram_exit; | ||
11477 | } | ||
11478 | } | ||
11479 | |||
11480 | test_nvram_exit: | ||
11481 | return rc; | ||
11482 | } | ||
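
Each NVRAM region listed in nvram_tbl stores its own CRC-32 in its final bytes, so running ether_crc_le() across the whole region, stored CRC included, must land on the constant residual 0xdebb20e3 (the CRC-32 residue for init 0xffffffff with no final XOR). A standalone sketch of that computation, assuming the standard reflected polynomial 0xedb88320:

/* Sketch: reflected CRC-32, init 0xffffffff, no final XOR (the
 * convention ether_crc_le() uses); a buffer ending in its own
 * little-endian CRC always yields CRC32_RESIDUAL (0xdebb20e3). */
static u32 crc32_le_residual(const u8 *buf, size_t len)
{
	u32 crc = 0xffffffff;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}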
11483 | |||
11484 | static int bnx2x_test_intr(struct bnx2x *bp) | ||
11485 | { | ||
11486 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); | ||
11487 | int i, rc; | ||
11488 | |||
11489 | if (!netif_running(bp->dev)) | ||
11490 | return -ENODEV; | ||
11491 | |||
11492 | config->hdr.length = 0; | ||
11493 | if (CHIP_IS_E1(bp)) | ||
11494 | /* use last unicast entries */ | ||
11495 | config->hdr.offset = (BP_PORT(bp) ? 63 : 31); | ||
11496 | else | ||
11497 | config->hdr.offset = BP_FUNC(bp); | ||
11498 | config->hdr.client_id = bp->fp->cl_id; | ||
11499 | config->hdr.reserved1 = 0; | ||
11500 | |||
11501 | bp->set_mac_pending++; | ||
11502 | smp_wmb(); | ||
11503 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | ||
11504 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | ||
11505 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); | ||
11506 | if (rc == 0) { | ||
11507 | for (i = 0; i < 10; i++) { | ||
11508 | if (!bp->set_mac_pending) | ||
11509 | break; | ||
11510 | smp_rmb(); | ||
11511 | msleep_interruptible(10); | ||
11512 | } | ||
11513 | if (i == 10) | ||
11514 | rc = -ENODEV; | ||
11515 | } | ||
11516 | |||
11517 | return rc; | ||
11518 | } | ||
11519 | |||
11520 | static void bnx2x_self_test(struct net_device *dev, | ||
11521 | struct ethtool_test *etest, u64 *buf) | ||
11522 | { | ||
11523 | struct bnx2x *bp = netdev_priv(dev); | ||
11524 | |||
11525 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
11526 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
11527 | etest->flags |= ETH_TEST_FL_FAILED; | ||
11528 | return; | ||
11529 | } | ||
11530 | |||
11531 | memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); | ||
11532 | |||
11533 | if (!netif_running(dev)) | ||
11534 | return; | ||
11535 | |||
11536 | /* offline tests are not supported in MF mode */ | ||
11537 | if (IS_E1HMF(bp)) | ||
11538 | etest->flags &= ~ETH_TEST_FL_OFFLINE; | ||
11539 | |||
11540 | if (etest->flags & ETH_TEST_FL_OFFLINE) { | ||
11541 | int port = BP_PORT(bp); | ||
11542 | u32 val; | ||
11543 | u8 link_up; | ||
11544 | |||
11545 | /* save current value of input enable for TX port IF */ | ||
11546 | val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4); | ||
11547 | /* disable input for TX port IF */ | ||
11548 | REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); | ||
11549 | |||
11550 | link_up = (bnx2x_link_test(bp) == 0); | ||
11551 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
11552 | bnx2x_nic_load(bp, LOAD_DIAG); | ||
11553 | /* wait until link state is restored */ | ||
11554 | bnx2x_wait_for_link(bp, link_up); | ||
11555 | |||
11556 | if (bnx2x_test_registers(bp) != 0) { | ||
11557 | buf[0] = 1; | ||
11558 | etest->flags |= ETH_TEST_FL_FAILED; | ||
11559 | } | ||
11560 | if (bnx2x_test_memory(bp) != 0) { | ||
11561 | buf[1] = 1; | ||
11562 | etest->flags |= ETH_TEST_FL_FAILED; | ||
11563 | } | ||
11564 | buf[2] = bnx2x_test_loopback(bp, link_up); | ||
11565 | if (buf[2] != 0) | ||
11566 | etest->flags |= ETH_TEST_FL_FAILED; | ||
11567 | |||
11568 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
11569 | |||
11570 | /* restore input for TX port IF */ | ||
11571 | REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); | ||
11572 | |||
11573 | bnx2x_nic_load(bp, LOAD_NORMAL); | ||
11574 | /* wait until link state is restored */ | ||
11575 | bnx2x_wait_for_link(bp, link_up); | ||
11576 | } | ||
11577 | if (bnx2x_test_nvram(bp) != 0) { | ||
11578 | buf[3] = 1; | ||
11579 | etest->flags |= ETH_TEST_FL_FAILED; | ||
11580 | } | ||
11581 | if (bnx2x_test_intr(bp) != 0) { | ||
11582 | buf[4] = 1; | ||
11583 | etest->flags |= ETH_TEST_FL_FAILED; | ||
11584 | } | ||
11585 | if (bp->port.pmf) | ||
11586 | if (bnx2x_link_test(bp) != 0) { | ||
11587 | buf[5] = 1; | ||
11588 | etest->flags |= ETH_TEST_FL_FAILED; | ||
11589 | } | ||
11590 | |||
11591 | #ifdef BNX2X_EXTRA_DEBUG | ||
11592 | bnx2x_panic_dump(bp); | ||
11593 | #endif | ||
11594 | } | ||
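
The whole routine is reached from userspace through the ETHTOOL_TEST ioctl (ethtool -t). A hedged sketch of invoking it; the seven result slots mirror bnx2x_tests_str_arr (the count of 7 is an assumption read off that table), and failure is signalled both per-slot and via ETH_TEST_FL_FAILED:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Sketch: run the self-test and report the raw result slots. */
static int run_self_test(const char *ifname)
{
	struct {
		struct ethtool_test hdr;
		__u64 data[7];	/* assumption: BNX2X_NUM_TESTS == 7 */
	} test;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int rc, i;

	if (fd < 0)
		return -1;
	memset(&test, 0, sizeof(test));
	test.hdr.cmd = ETHTOOL_TEST;
	test.hdr.flags = ETH_TEST_FL_OFFLINE;	/* driver clears this in MF mode */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&test;

	rc = ioctl(fd, SIOCETHTOOL, &ifr);
	if (rc == 0)
		for (i = 0; i < 7; i++)
			printf("test %d: %llu\n", i,
			       (unsigned long long)test.data[i]);
	close(fd);
	return rc ? rc : !!(test.hdr.flags & ETH_TEST_FL_FAILED);
}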
11595 | |||
11596 | static const struct { | ||
11597 | long offset; | ||
11598 | int size; | ||
11599 | u8 string[ETH_GSTRING_LEN]; | ||
11600 | } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = { | ||
11601 | /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" }, | ||
11602 | { Q_STATS_OFFSET32(error_bytes_received_hi), | ||
11603 | 8, "[%d]: rx_error_bytes" }, | ||
11604 | { Q_STATS_OFFSET32(total_unicast_packets_received_hi), | ||
11605 | 8, "[%d]: rx_ucast_packets" }, | ||
11606 | { Q_STATS_OFFSET32(total_multicast_packets_received_hi), | ||
11607 | 8, "[%d]: rx_mcast_packets" }, | ||
11608 | { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), | ||
11609 | 8, "[%d]: rx_bcast_packets" }, | ||
11610 | { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" }, | ||
11611 | { Q_STATS_OFFSET32(rx_err_discard_pkt), | ||
11612 | 4, "[%d]: rx_phy_ip_err_discards"}, | ||
11613 | { Q_STATS_OFFSET32(rx_skb_alloc_failed), | ||
11614 | 4, "[%d]: rx_skb_alloc_discard" }, | ||
11615 | { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" }, | ||
11616 | |||
11617 | /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" }, | ||
11618 | { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), | ||
11619 | 8, "[%d]: tx_ucast_packets" }, | ||
11620 | { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), | ||
11621 | 8, "[%d]: tx_mcast_packets" }, | ||
11622 | { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), | ||
11623 | 8, "[%d]: tx_bcast_packets" } | ||
11624 | }; | ||
11625 | |||
11626 | static const struct { | ||
11627 | long offset; | ||
11628 | int size; | ||
11629 | u32 flags; | ||
11630 | #define STATS_FLAGS_PORT 1 | ||
11631 | #define STATS_FLAGS_FUNC 2 | ||
11632 | #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) | ||
11633 | u8 string[ETH_GSTRING_LEN]; | ||
11634 | } bnx2x_stats_arr[BNX2X_NUM_STATS] = { | ||
11635 | /* 1 */ { STATS_OFFSET32(total_bytes_received_hi), | ||
11636 | 8, STATS_FLAGS_BOTH, "rx_bytes" }, | ||
11637 | { STATS_OFFSET32(error_bytes_received_hi), | ||
11638 | 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, | ||
11639 | { STATS_OFFSET32(total_unicast_packets_received_hi), | ||
11640 | 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, | ||
11641 | { STATS_OFFSET32(total_multicast_packets_received_hi), | ||
11642 | 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, | ||
11643 | { STATS_OFFSET32(total_broadcast_packets_received_hi), | ||
11644 | 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, | ||
11645 | { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), | ||
11646 | 8, STATS_FLAGS_PORT, "rx_crc_errors" }, | ||
11647 | { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), | ||
11648 | 8, STATS_FLAGS_PORT, "rx_align_errors" }, | ||
11649 | { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), | ||
11650 | 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, | ||
11651 | { STATS_OFFSET32(etherstatsoverrsizepkts_hi), | ||
11652 | 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, | ||
11653 | /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), | ||
11654 | 8, STATS_FLAGS_PORT, "rx_fragments" }, | ||
11655 | { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), | ||
11656 | 8, STATS_FLAGS_PORT, "rx_jabbers" }, | ||
11657 | { STATS_OFFSET32(no_buff_discard_hi), | ||
11658 | 8, STATS_FLAGS_BOTH, "rx_discards" }, | ||
11659 | { STATS_OFFSET32(mac_filter_discard), | ||
11660 | 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, | ||
11661 | { STATS_OFFSET32(xxoverflow_discard), | ||
11662 | 4, STATS_FLAGS_PORT, "rx_fw_discards" }, | ||
11663 | { STATS_OFFSET32(brb_drop_hi), | ||
11664 | 8, STATS_FLAGS_PORT, "rx_brb_discard" }, | ||
11665 | { STATS_OFFSET32(brb_truncate_hi), | ||
11666 | 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, | ||
11667 | { STATS_OFFSET32(pause_frames_received_hi), | ||
11668 | 8, STATS_FLAGS_PORT, "rx_pause_frames" }, | ||
11669 | { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), | ||
11670 | 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, | ||
11671 | { STATS_OFFSET32(nig_timer_max), | ||
11672 | 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, | ||
11673 | /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt), | ||
11674 | 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"}, | ||
11675 | { STATS_OFFSET32(rx_skb_alloc_failed), | ||
11676 | 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" }, | ||
11677 | { STATS_OFFSET32(hw_csum_err), | ||
11678 | 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" }, | ||
11679 | |||
11680 | { STATS_OFFSET32(total_bytes_transmitted_hi), | ||
11681 | 8, STATS_FLAGS_BOTH, "tx_bytes" }, | ||
11682 | { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), | ||
11683 | 8, STATS_FLAGS_PORT, "tx_error_bytes" }, | ||
11684 | { STATS_OFFSET32(total_unicast_packets_transmitted_hi), | ||
11685 | 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, | ||
11686 | { STATS_OFFSET32(total_multicast_packets_transmitted_hi), | ||
11687 | 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, | ||
11688 | { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), | ||
11689 | 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, | ||
11690 | { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), | ||
11691 | 8, STATS_FLAGS_PORT, "tx_mac_errors" }, | ||
11692 | { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), | ||
11693 | 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, | ||
11694 | /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), | ||
11695 | 8, STATS_FLAGS_PORT, "tx_single_collisions" }, | ||
11696 | { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), | ||
11697 | 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, | ||
11698 | { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), | ||
11699 | 8, STATS_FLAGS_PORT, "tx_deferred" }, | ||
11700 | { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), | ||
11701 | 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, | ||
11702 | { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), | ||
11703 | 8, STATS_FLAGS_PORT, "tx_late_collisions" }, | ||
11704 | { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), | ||
11705 | 8, STATS_FLAGS_PORT, "tx_total_collisions" }, | ||
11706 | { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), | ||
11707 | 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, | ||
11708 | { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), | ||
11709 | 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, | ||
11710 | { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), | ||
11711 | 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, | ||
11712 | { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), | ||
11713 | 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, | ||
11714 | /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), | ||
11715 | 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, | ||
11716 | { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), | ||
11717 | 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, | ||
11718 | { STATS_OFFSET32(etherstatspktsover1522octets_hi), | ||
11719 | 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, | ||
11720 | { STATS_OFFSET32(pause_frames_sent_hi), | ||
11721 | 8, STATS_FLAGS_PORT, "tx_pause_frames" } | ||
11722 | }; | ||
11723 | |||
11724 | #define IS_PORT_STAT(i) \ | ||
11725 | ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) | ||
11726 | #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) | ||
11727 | #define IS_E1HMF_MODE_STAT(bp) \ | ||
11728 | (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) | ||
11729 | |||
11730 | static int bnx2x_get_sset_count(struct net_device *dev, int stringset) | ||
11731 | { | ||
11732 | struct bnx2x *bp = netdev_priv(dev); | ||
11733 | int i, num_stats; | ||
11734 | |||
11735 | switch (stringset) { | ||
11736 | case ETH_SS_STATS: | ||
11737 | if (is_multi(bp)) { | ||
11738 | num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; | ||
11739 | if (!IS_E1HMF_MODE_STAT(bp)) | ||
11740 | num_stats += BNX2X_NUM_STATS; | ||
11741 | } else { | ||
11742 | if (IS_E1HMF_MODE_STAT(bp)) { | ||
11743 | num_stats = 0; | ||
11744 | for (i = 0; i < BNX2X_NUM_STATS; i++) | ||
11745 | if (IS_FUNC_STAT(i)) | ||
11746 | num_stats++; | ||
11747 | } else | ||
11748 | num_stats = BNX2X_NUM_STATS; | ||
11749 | } | ||
11750 | return num_stats; | ||
11751 | |||
11752 | case ETH_SS_TEST: | ||
11753 | return BNX2X_NUM_TESTS; | ||
11754 | |||
11755 | default: | ||
11756 | return -EINVAL; | ||
11757 | } | ||
11758 | } | ||
11759 | |||
11760 | static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | ||
11761 | { | ||
11762 | struct bnx2x *bp = netdev_priv(dev); | ||
11763 | int i, j, k; | ||
11764 | |||
11765 | switch (stringset) { | ||
11766 | case ETH_SS_STATS: | ||
11767 | if (is_multi(bp)) { | ||
11768 | k = 0; | ||
11769 | for_each_queue(bp, i) { | ||
11770 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) | ||
11771 | sprintf(buf + (k + j)*ETH_GSTRING_LEN, | ||
11772 | bnx2x_q_stats_arr[j].string, i); | ||
11773 | k += BNX2X_NUM_Q_STATS; | ||
11774 | } | ||
11775 | if (IS_E1HMF_MODE_STAT(bp)) | ||
11776 | break; | ||
11777 | for (j = 0; j < BNX2X_NUM_STATS; j++) | ||
11778 | strcpy(buf + (k + j)*ETH_GSTRING_LEN, | ||
11779 | bnx2x_stats_arr[j].string); | ||
11780 | } else { | ||
11781 | for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { | ||
11782 | if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i)) | ||
11783 | continue; | ||
11784 | strcpy(buf + j*ETH_GSTRING_LEN, | ||
11785 | bnx2x_stats_arr[i].string); | ||
11786 | j++; | ||
11787 | } | ||
11788 | } | ||
11789 | break; | ||
11790 | |||
11791 | case ETH_SS_TEST: | ||
11792 | memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr)); | ||
11793 | break; | ||
11794 | } | ||
11795 | } | ||
11796 | |||
11797 | static void bnx2x_get_ethtool_stats(struct net_device *dev, | ||
11798 | struct ethtool_stats *stats, u64 *buf) | ||
11799 | { | ||
11800 | struct bnx2x *bp = netdev_priv(dev); | ||
11801 | u32 *hw_stats, *offset; | ||
11802 | int i, j, k; | ||
11803 | |||
11804 | if (is_multi(bp)) { | ||
11805 | k = 0; | ||
11806 | for_each_queue(bp, i) { | ||
11807 | hw_stats = (u32 *)&bp->fp[i].eth_q_stats; | ||
11808 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { | ||
11809 | if (bnx2x_q_stats_arr[j].size == 0) { | ||
11810 | /* skip this counter */ | ||
11811 | buf[k + j] = 0; | ||
11812 | continue; | ||
11813 | } | ||
11814 | offset = (hw_stats + | ||
11815 | bnx2x_q_stats_arr[j].offset); | ||
11816 | if (bnx2x_q_stats_arr[j].size == 4) { | ||
11817 | /* 4-byte counter */ | ||
11818 | buf[k + j] = (u64) *offset; | ||
11819 | continue; | ||
11820 | } | ||
11821 | /* 8-byte counter */ | ||
11822 | buf[k + j] = HILO_U64(*offset, *(offset + 1)); | ||
11823 | } | ||
11824 | k += BNX2X_NUM_Q_STATS; | ||
11825 | } | ||
11826 | if (IS_E1HMF_MODE_STAT(bp)) | ||
11827 | return; | ||
11828 | hw_stats = (u32 *)&bp->eth_stats; | ||
11829 | for (j = 0; j < BNX2X_NUM_STATS; j++) { | ||
11830 | if (bnx2x_stats_arr[j].size == 0) { | ||
11831 | /* skip this counter */ | ||
11832 | buf[k + j] = 0; | ||
11833 | continue; | ||
11834 | } | ||
11835 | offset = (hw_stats + bnx2x_stats_arr[j].offset); | ||
11836 | if (bnx2x_stats_arr[j].size == 4) { | ||
11837 | /* 4-byte counter */ | ||
11838 | buf[k + j] = (u64) *offset; | ||
11839 | continue; | ||
11840 | } | ||
11841 | /* 8-byte counter */ | ||
11842 | buf[k + j] = HILO_U64(*offset, *(offset + 1)); | ||
11843 | } | ||
11844 | } else { | ||
11845 | hw_stats = (u32 *)&bp->eth_stats; | ||
11846 | for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { | ||
11847 | if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i)) | ||
11848 | continue; | ||
11849 | if (bnx2x_stats_arr[i].size == 0) { | ||
11850 | /* skip this counter */ | ||
11851 | buf[j] = 0; | ||
11852 | j++; | ||
11853 | continue; | ||
11854 | } | ||
11855 | offset = (hw_stats + bnx2x_stats_arr[i].offset); | ||
11856 | if (bnx2x_stats_arr[i].size == 4) { | ||
11857 | /* 4-byte counter */ | ||
11858 | buf[j] = (u64) *offset; | ||
11859 | j++; | ||
11860 | continue; | ||
11861 | } | ||
11862 | /* 8-byte counter */ | ||
11863 | buf[j] = HILO_U64(*offset, *(offset + 1)); | ||
11864 | j++; | ||
11865 | } | ||
11866 | } | ||
11867 | } | ||
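
Each 8-byte statistic above is stored as two 32-bit words with the high word first (hence the *_hi field names). HILO_U64 presumably just splices the halves back together; a sketch of that composition:

/* Sketch: rebuild a 64-bit counter from its hi/lo 32-bit words. */
static inline u64 hilo_u64_sketch(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}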
11868 | |||
11869 | static int bnx2x_phys_id(struct net_device *dev, u32 data) | ||
11870 | { | ||
11871 | struct bnx2x *bp = netdev_priv(dev); | ||
11872 | int i; | ||
11873 | |||
11874 | if (!netif_running(dev)) | ||
11875 | return 0; | ||
11876 | |||
11877 | if (!bp->port.pmf) | ||
11878 | return 0; | ||
11879 | |||
11880 | if (data == 0) | ||
11881 | data = 2; | ||
11882 | |||
11883 | for (i = 0; i < (data * 2); i++) { | ||
11884 | if ((i % 2) == 0) | ||
11885 | bnx2x_set_led(&bp->link_params, LED_MODE_OPER, | ||
11886 | SPEED_1000); | ||
11887 | else | ||
11888 | bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0); | ||
11889 | |||
11890 | msleep_interruptible(500); | ||
11891 | if (signal_pending(current)) | ||
11892 | break; | ||
11893 | } | ||
11894 | |||
11895 | if (bp->link_vars.link_up) | ||
11896 | bnx2x_set_led(&bp->link_params, LED_MODE_OPER, | ||
11897 | bp->link_vars.line_speed); | ||
11898 | |||
11899 | return 0; | ||
11900 | } | ||
11901 | |||
11902 | static const struct ethtool_ops bnx2x_ethtool_ops = { | ||
11903 | .get_settings = bnx2x_get_settings, | ||
11904 | .set_settings = bnx2x_set_settings, | ||
11905 | .get_drvinfo = bnx2x_get_drvinfo, | ||
11906 | .get_regs_len = bnx2x_get_regs_len, | ||
11907 | .get_regs = bnx2x_get_regs, | ||
11908 | .get_wol = bnx2x_get_wol, | ||
11909 | .set_wol = bnx2x_set_wol, | ||
11910 | .get_msglevel = bnx2x_get_msglevel, | ||
11911 | .set_msglevel = bnx2x_set_msglevel, | ||
11912 | .nway_reset = bnx2x_nway_reset, | ||
11913 | .get_link = bnx2x_get_link, | ||
11914 | .get_eeprom_len = bnx2x_get_eeprom_len, | ||
11915 | .get_eeprom = bnx2x_get_eeprom, | ||
11916 | .set_eeprom = bnx2x_set_eeprom, | ||
11917 | .get_coalesce = bnx2x_get_coalesce, | ||
11918 | .set_coalesce = bnx2x_set_coalesce, | ||
11919 | .get_ringparam = bnx2x_get_ringparam, | ||
11920 | .set_ringparam = bnx2x_set_ringparam, | ||
11921 | .get_pauseparam = bnx2x_get_pauseparam, | ||
11922 | .set_pauseparam = bnx2x_set_pauseparam, | ||
11923 | .get_rx_csum = bnx2x_get_rx_csum, | ||
11924 | .set_rx_csum = bnx2x_set_rx_csum, | ||
11925 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
11926 | .set_tx_csum = ethtool_op_set_tx_hw_csum, | ||
11927 | .set_flags = bnx2x_set_flags, | ||
11928 | .get_flags = ethtool_op_get_flags, | ||
11929 | .get_sg = ethtool_op_get_sg, | ||
11930 | .set_sg = ethtool_op_set_sg, | ||
11931 | .get_tso = ethtool_op_get_tso, | ||
11932 | .set_tso = bnx2x_set_tso, | ||
11933 | .self_test = bnx2x_self_test, | ||
11934 | .get_sset_count = bnx2x_get_sset_count, | ||
11935 | .get_strings = bnx2x_get_strings, | ||
11936 | .phys_id = bnx2x_phys_id, | ||
11937 | .get_ethtool_stats = bnx2x_get_ethtool_stats, | ||
11938 | }; | ||
11939 | |||
11940 | /* end of ethtool_ops */ | ||
11941 | 6788 | ||
11942 | /**************************************************************************** | 6789 | /**************************************************************************** |
11943 | * General service functions | 6790 | * General service functions |
11944 | ****************************************************************************/ | 6791 | ****************************************************************************/ |
11945 | 6792 | ||
11946 | static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) | ||
11947 | { | ||
11948 | u16 pmcsr; | ||
11949 | |||
11950 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); | ||
11951 | |||
11952 | switch (state) { | ||
11953 | case PCI_D0: | ||
11954 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, | ||
11955 | ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | | ||
11956 | PCI_PM_CTRL_PME_STATUS)); | ||
11957 | |||
11958 | if (pmcsr & PCI_PM_CTRL_STATE_MASK) | ||
11959 | /* delay required during transition out of D3hot */ | ||
11960 | msleep(20); | ||
11961 | break; | ||
11962 | |||
11963 | case PCI_D3hot: | ||
11964 | /* If there are other clients above, don't | ||
11965 | shut down the power */ | ||
11966 | if (atomic_read(&bp->pdev->enable_cnt) != 1) | ||
11967 | return 0; | ||
11968 | /* Don't shut down the power for emulation and FPGA */ | ||
11969 | if (CHIP_REV_IS_SLOW(bp)) | ||
11970 | return 0; | ||
11971 | |||
11972 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | ||
11973 | pmcsr |= 3; | ||
11974 | |||
11975 | if (bp->wol) | ||
11976 | pmcsr |= PCI_PM_CTRL_PME_ENABLE; | ||
11977 | |||
11978 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, | ||
11979 | pmcsr); | ||
11980 | |||
11981 | /* No more memory access after this point until | ||
11982 | * device is brought back to D0. | ||
11983 | */ | ||
11984 | break; | ||
11985 | |||
11986 | default: | ||
11987 | return -EINVAL; | ||
11988 | } | ||
11989 | return 0; | ||
11990 | } | ||
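
The bare "pmcsr |= 3" leans on the PCI power-management spec: the low two bits of the PM_CTRL register (PCI_PM_CTRL_STATE_MASK, 0x0003) encode the D-state, with 0 meaning D0 and 3 meaning D3hot. The same update spelled out as a sketch:

/* Sketch: PM_CTRL bits 1:0 select the device power state. */
static u16 pmcsr_set_state(u16 pmcsr, int d3hot)
{
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	if (d3hot)
		pmcsr |= 3;	/* D3hot; 0 selects D0 */
	return pmcsr;
}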
11991 | |||
11992 | static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) | ||
11993 | { | ||
11994 | u16 rx_cons_sb; | ||
11995 | |||
11996 | /* Tell compiler that status block fields can change */ | ||
11997 | barrier(); | ||
11998 | rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); | ||
11999 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | ||
12000 | rx_cons_sb++; | ||
12001 | return (fp->rx_comp_cons != rx_cons_sb); | ||
12002 | } | ||
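
The odd-looking bump of rx_cons_sb hides a ring-layout rule: the last slot of each completion-queue page is a next-page pointer rather than a real entry, so a status-block index that lands on it must be advanced one step before it is compared with the driver's consumer (an assumption about the layout, consistent with the MAX_RCQ_DESC_CNT test above):

/* Sketch: normalize a status-block index past the per-page
 * next-page slot before comparing it with the consumer index. */
static inline u16 rcq_normalize_idx(u16 sb_idx)
{
	if ((sb_idx & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		sb_idx++;
	return sb_idx;
}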
12003 | |||
12004 | /* | ||
12005 | * net_device service functions | ||
12006 | */ | ||
12007 | |||
12008 | static int bnx2x_poll(struct napi_struct *napi, int budget) | ||
12009 | { | ||
12010 | int work_done = 0; | ||
12011 | struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, | ||
12012 | napi); | ||
12013 | struct bnx2x *bp = fp->bp; | ||
12014 | |||
12015 | while (1) { | ||
12016 | #ifdef BNX2X_STOP_ON_ERROR | ||
12017 | if (unlikely(bp->panic)) { | ||
12018 | napi_complete(napi); | ||
12019 | return 0; | ||
12020 | } | ||
12021 | #endif | ||
12022 | |||
12023 | if (bnx2x_has_tx_work(fp)) | ||
12024 | bnx2x_tx_int(fp); | ||
12025 | |||
12026 | if (bnx2x_has_rx_work(fp)) { | ||
12027 | work_done += bnx2x_rx_int(fp, budget - work_done); | ||
12028 | |||
12029 | /* must not complete if we consumed full budget */ | ||
12030 | if (work_done >= budget) | ||
12031 | break; | ||
12032 | } | ||
12033 | |||
12034 | /* Fall out from the NAPI loop if needed */ | ||
12035 | if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { | ||
12036 | bnx2x_update_fpsb_idx(fp); | ||
12037 | /* bnx2x_has_rx_work() reads the status block, so we must | ||
12038 | * make sure the status block indices have actually been read | ||
12039 | * (bnx2x_update_fpsb_idx) before this check | ||
12040 | * (bnx2x_has_rx_work); otherwise we could write a "newer" | ||
12041 | * status block value to the IGU: if a DMA arrived right | ||
12042 | * after bnx2x_has_rx_work and there were no rmb, the memory | ||
12043 | * read (bnx2x_update_fpsb_idx) could be postponed until just | ||
12044 | * before bnx2x_ack_sb. Then no further interrupt would | ||
12045 | * arrive until the next status block update, even though | ||
12046 | * there is still unhandled work. | ||
12047 | */ | ||
12048 | rmb(); | ||
12049 | |||
12050 | if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { | ||
12051 | napi_complete(napi); | ||
12052 | /* Re-enable interrupts */ | ||
12053 | bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, | ||
12054 | le16_to_cpu(fp->fp_c_idx), | ||
12055 | IGU_INT_NOP, 1); | ||
12056 | bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, | ||
12057 | le16_to_cpu(fp->fp_u_idx), | ||
12058 | IGU_INT_ENABLE, 1); | ||
12059 | break; | ||
12060 | } | ||
12061 | } | ||
12062 | } | ||
12063 | |||
12064 | return work_done; | ||
12065 | } | ||
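
The loop above is the standard NAPI contract with one driver-specific wrinkle, the rmb() discussed in the comment. Distilled to its skeleton (the example_* names are hypothetical stand-ins for the fastpath helpers, not driver API):

/* Sketch of the poll loop's shape: drain, re-check under a read
 * barrier, and only complete + re-enable IRQs when truly idle. */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_fp *fp = container_of(napi, struct example_fp, napi);
	int work_done = 0;

	for (;;) {
		if (example_has_tx_work(fp))
			example_tx_int(fp);

		if (example_has_rx_work(fp)) {
			work_done += example_rx_int(fp, budget - work_done);
			if (work_done >= budget)
				break;	/* stay scheduled; NAPI will repoll */
		}

		example_update_sb_idx(fp);	/* refresh cached indices */
		rmb();				/* indices before the re-check */
		if (!example_has_rx_work(fp) && !example_has_tx_work(fp)) {
			napi_complete(napi);
			example_enable_irq(fp);	/* ack + unmask, as above */
			break;
		}
	}
	return work_done;
}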
12066 | |||
12067 | |||
12068 | /* We split the first BD into header and data BDs | ||
12069 | * to ease the pain of our fellow microcode engineers; | ||
12070 | * we use one mapping for both BDs. | ||
12071 | * So far this has only been observed to happen | ||
12072 | * in Other Operating Systems(TM). | ||
12073 | */ | ||
12074 | static noinline u16 bnx2x_tx_split(struct bnx2x *bp, | ||
12075 | struct bnx2x_fastpath *fp, | ||
12076 | struct sw_tx_bd *tx_buf, | ||
12077 | struct eth_tx_start_bd **tx_bd, u16 hlen, | ||
12078 | u16 bd_prod, int nbd) | ||
12079 | { | ||
12080 | struct eth_tx_start_bd *h_tx_bd = *tx_bd; | ||
12081 | struct eth_tx_bd *d_tx_bd; | ||
12082 | dma_addr_t mapping; | ||
12083 | int old_len = le16_to_cpu(h_tx_bd->nbytes); | ||
12084 | |||
12085 | /* first fix first BD */ | ||
12086 | h_tx_bd->nbd = cpu_to_le16(nbd); | ||
12087 | h_tx_bd->nbytes = cpu_to_le16(hlen); | ||
12088 | |||
12089 | DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d " | ||
12090 | "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi, | ||
12091 | h_tx_bd->addr_lo, h_tx_bd->nbd); | ||
12092 | |||
12093 | /* now get a new data BD | ||
12094 | * (after the pbd) and fill it */ | ||
12095 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
12096 | d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd; | ||
12097 | |||
12098 | mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), | ||
12099 | le32_to_cpu(h_tx_bd->addr_lo)) + hlen; | ||
12100 | |||
12101 | d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
12102 | d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
12103 | d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); | ||
12104 | |||
12105 | /* this marks the BD as one that has no individual mapping */ | ||
12106 | tx_buf->flags |= BNX2X_TSO_SPLIT_BD; | ||
12107 | |||
12108 | DP(NETIF_MSG_TX_QUEUED, | ||
12109 | "TSO split data size is %d (%x:%x)\n", | ||
12110 | d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); | ||
12111 | |||
12112 | /* update tx_bd */ | ||
12113 | *tx_bd = (struct eth_tx_start_bd *)d_tx_bd; | ||
12114 | |||
12115 | return bd_prod; | ||
12116 | } | ||
12117 | |||
12118 | static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) | ||
12119 | { | ||
12120 | if (fix > 0) | ||
12121 | csum = (u16) ~csum_fold(csum_sub(csum, | ||
12122 | csum_partial(t_header - fix, fix, 0))); | ||
12123 | |||
12124 | else if (fix < 0) | ||
12125 | csum = (u16) ~csum_fold(csum_add(csum, | ||
12126 | csum_partial(t_header, -fix, 0))); | ||
12127 | |||
12128 | return swab16(csum); | ||
12129 | } | ||
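
What the adjustment amounts to, concretely: the stack's CHECKSUM_PARTIAL sum was accumulated starting at skb->csum_start, but the parse BD wants one taken from the transport header, so the bytes in between are removed or added back before folding. The two branches as a worked statement (csum_partial/csum_sub/csum_add/csum_fold are the kernel's one's-complement helpers):

/* fix > 0: the stack summed 'fix' extra bytes before t_header, so
 *	csum' = csum_sub(csum, csum_partial(t_header - fix, fix, 0));
 * fix < 0: the stack started -fix bytes after t_header, so
 *	csum' = csum_add(csum, csum_partial(t_header, -fix, 0));
 * ~csum_fold(csum') then folds the 32-bit sum into the final 16-bit
 * one's-complement checksum, and swab16() gives the byte order the
 * parse BD expects. */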
12130 | |||
12131 | static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) | ||
12132 | { | ||
12133 | u32 rc; | ||
12134 | |||
12135 | if (skb->ip_summed != CHECKSUM_PARTIAL) | ||
12136 | rc = XMIT_PLAIN; | ||
12137 | |||
12138 | else { | ||
12139 | if (skb->protocol == htons(ETH_P_IPV6)) { | ||
12140 | rc = XMIT_CSUM_V6; | ||
12141 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | ||
12142 | rc |= XMIT_CSUM_TCP; | ||
12143 | |||
12144 | } else { | ||
12145 | rc = XMIT_CSUM_V4; | ||
12146 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | ||
12147 | rc |= XMIT_CSUM_TCP; | ||
12148 | } | ||
12149 | } | ||
12150 | |||
12151 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) | ||
12152 | rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); | ||
12153 | |||
12154 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | ||
12155 | rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6); | ||
12156 | |||
12157 | return rc; | ||
12158 | } | ||
12159 | |||
12160 | #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) | ||
12161 | /* check if the packet requires linearization (packet is too fragmented); | ||
12162 | no need to check fragmentation if the page size is > 8K (there will be | ||
12163 | no violation of the FW restrictions) */ | ||
12164 | static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, | ||
12165 | u32 xmit_type) | ||
12166 | { | ||
12167 | int to_copy = 0; | ||
12168 | int hlen = 0; | ||
12169 | int first_bd_sz = 0; | ||
12170 | |||
12171 | /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ | ||
12172 | if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) { | ||
12173 | |||
12174 | if (xmit_type & XMIT_GSO) { | ||
12175 | unsigned short lso_mss = skb_shinfo(skb)->gso_size; | ||
12176 | /* Check if LSO packet needs to be copied: | ||
12177 | 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ | ||
12178 | int wnd_size = MAX_FETCH_BD - 3; | ||
12179 | /* Number of windows to check */ | ||
12180 | int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; | ||
12181 | int wnd_idx = 0; | ||
12182 | int frag_idx = 0; | ||
12183 | u32 wnd_sum = 0; | ||
12184 | |||
12185 | /* Headers length */ | ||
12186 | hlen = (int)(skb_transport_header(skb) - skb->data) + | ||
12187 | tcp_hdrlen(skb); | ||
12188 | |||
12189 | /* Amount of data (w/o headers) in the linear part of the SKB */ | ||
12190 | first_bd_sz = skb_headlen(skb) - hlen; | ||
12191 | |||
12192 | wnd_sum = first_bd_sz; | ||
12193 | |||
12194 | /* Calculate the first sum - it's special */ | ||
12195 | for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) | ||
12196 | wnd_sum += | ||
12197 | skb_shinfo(skb)->frags[frag_idx].size; | ||
12198 | |||
12199 | /* If there is data in the linear part of the skb, check it */ | ||
12200 | if (first_bd_sz > 0) { | ||
12201 | if (unlikely(wnd_sum < lso_mss)) { | ||
12202 | to_copy = 1; | ||
12203 | goto exit_lbl; | ||
12204 | } | ||
12205 | |||
12206 | wnd_sum -= first_bd_sz; | ||
12207 | } | ||
12208 | |||
12209 | /* Others are easier: run through the frag list and | ||
12210 | check all windows */ | ||
12211 | for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { | ||
12212 | wnd_sum += | ||
12213 | skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size; | ||
12214 | |||
12215 | if (unlikely(wnd_sum < lso_mss)) { | ||
12216 | to_copy = 1; | ||
12217 | break; | ||
12218 | } | ||
12219 | wnd_sum -= | ||
12220 | skb_shinfo(skb)->frags[wnd_idx].size; | ||
12221 | } | ||
12222 | } else { | ||
12223 | /* a non-LSO packet that is too fragmented should | ||
12224 | always be linearized */ | ||
12225 | to_copy = 1; | ||
12226 | } | ||
12227 | } | ||
12228 | |||
12229 | exit_lbl: | ||
12230 | if (unlikely(to_copy)) | ||
12231 | DP(NETIF_MSG_TX_QUEUED, | ||
12232 | "Linearization IS REQUIRED for %s packet. " | ||
12233 | "num_frags %d hlen %d first_bd_sz %d\n", | ||
12234 | (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", | ||
12235 | skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); | ||
12236 | |||
12237 | return to_copy; | ||
12238 | } | ||
12239 | #endif | ||
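
The check above slides a window of (MAX_FETCH_BD - 3) consecutive BDs over the fragment list and demands that every window carry at least one full MSS of payload; otherwise the FW could need more BDs for a single LSO segment than it can fetch, and the skb must be linearized. The same invariant over a plain array of fragment sizes (an illustrative helper, not driver code; the driver's first window additionally folds in the linear part):

/* Sketch: every run of wnd_size consecutive fragments must sum to
 * at least one MSS, or the packet needs linearization. */
static bool lso_windows_ok(const unsigned int *frag_sz, int nfrags,
			   int wnd_size, unsigned int mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		wnd_sum += frag_sz[i];
		if (i >= wnd_size)
			wnd_sum -= frag_sz[i - wnd_size];	/* slide */
		if (i >= wnd_size - 1 && wnd_sum < mss)
			return false;	/* this window can't fill one MSS */
	}
	return true;
}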
12240 | |||
12241 | /* called with netif_tx_lock | ||
12242 | * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call | ||
12243 | * netif_wake_queue() | ||
12244 | */ | ||
12245 | static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
12246 | { | ||
12247 | struct bnx2x *bp = netdev_priv(dev); | ||
12248 | struct bnx2x_fastpath *fp; | ||
12249 | struct netdev_queue *txq; | ||
12250 | struct sw_tx_bd *tx_buf; | ||
12251 | struct eth_tx_start_bd *tx_start_bd; | ||
12252 | struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; | ||
12253 | struct eth_tx_parse_bd *pbd = NULL; | ||
12254 | u16 pkt_prod, bd_prod; | ||
12255 | int nbd, fp_index; | ||
12256 | dma_addr_t mapping; | ||
12257 | u32 xmit_type = bnx2x_xmit_type(bp, skb); | ||
12258 | int i; | ||
12259 | u8 hlen = 0; | ||
12260 | __le16 pkt_size = 0; | ||
12261 | struct ethhdr *eth; | ||
12262 | u8 mac_type = UNICAST_ADDRESS; | ||
12263 | |||
12264 | #ifdef BNX2X_STOP_ON_ERROR | ||
12265 | if (unlikely(bp->panic)) | ||
12266 | return NETDEV_TX_BUSY; | ||
12267 | #endif | ||
12268 | |||
12269 | fp_index = skb_get_queue_mapping(skb); | ||
12270 | txq = netdev_get_tx_queue(dev, fp_index); | ||
12271 | |||
12272 | fp = &bp->fp[fp_index]; | ||
12273 | |||
12274 | if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { | ||
12275 | fp->eth_q_stats.driver_xoff++; | ||
12276 | netif_tx_stop_queue(txq); | ||
12277 | BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); | ||
12278 | return NETDEV_TX_BUSY; | ||
12279 | } | ||
12280 | |||
12281 | DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)" | ||
12282 | " gso type %x xmit_type %x\n", | ||
12283 | skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, | ||
12284 | ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); | ||
12285 | |||
12286 | eth = (struct ethhdr *)skb->data; | ||
12287 | |||
12288 | /* set the flag according to packet type (UNICAST_ADDRESS is the default) */ | ||
12289 | if (unlikely(is_multicast_ether_addr(eth->h_dest))) { | ||
12290 | if (is_broadcast_ether_addr(eth->h_dest)) | ||
12291 | mac_type = BROADCAST_ADDRESS; | ||
12292 | else | ||
12293 | mac_type = MULTICAST_ADDRESS; | ||
12294 | } | ||
12295 | |||
12296 | #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) | ||
12297 | /* First, check if we need to linearize the skb (due to FW | ||
12298 | restrictions). No need to check fragmentation if the page size is > 8K | ||
12299 | (there will be no violation of the FW restrictions) */ | ||
12300 | if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { | ||
12301 | /* Statistics of linearization */ | ||
12302 | bp->lin_cnt++; | ||
12303 | if (skb_linearize(skb) != 0) { | ||
12304 | DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " | ||
12305 | "silently dropping this SKB\n"); | ||
12306 | dev_kfree_skb_any(skb); | ||
12307 | return NETDEV_TX_OK; | ||
12308 | } | ||
12309 | } | ||
12310 | #endif | ||
12311 | |||
12312 | /* | ||
12313 | Please read carefully. First we use one BD which we mark as start, | ||
12314 | then we have a parsing info BD (used for TSO or xsum), | ||
12315 | and only then we have the rest of the TSO BDs. | ||
12316 | (don't forget to mark the last one as last, | ||
12317 | and to unmap only AFTER you write to the BD ...) | ||
12318 | And above all, all PBD sizes are in words - NOT DWORDS! | ||
12319 | */ | ||
12320 | |||
12321 | pkt_prod = fp->tx_pkt_prod++; | ||
12322 | bd_prod = TX_BD(fp->tx_bd_prod); | ||
12323 | |||
12324 | /* get a tx_buf and first BD */ | ||
12325 | tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; | ||
12326 | tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; | ||
12327 | |||
12328 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; | ||
12329 | tx_start_bd->general_data = (mac_type << | ||
12330 | ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); | ||
12331 | /* header nbd */ | ||
12332 | tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); | ||
12333 | |||
12334 | /* remember the first BD of the packet */ | ||
12335 | tx_buf->first_bd = fp->tx_bd_prod; | ||
12336 | tx_buf->skb = skb; | ||
12337 | tx_buf->flags = 0; | ||
12338 | |||
12339 | DP(NETIF_MSG_TX_QUEUED, | ||
12340 | "sending pkt %u @%p next_idx %u bd %u @%p\n", | ||
12341 | pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); | ||
12342 | |||
12343 | #ifdef BCM_VLAN | ||
12344 | if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && | ||
12345 | (bp->flags & HW_VLAN_TX_FLAG)) { | ||
12346 | tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); | ||
12347 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; | ||
12348 | } else | ||
12349 | #endif | ||
12350 | tx_start_bd->vlan = cpu_to_le16(pkt_prod); | ||
12351 | |||
12352 | /* turn on parsing and get a BD */ | ||
12353 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
12354 | pbd = &fp->tx_desc_ring[bd_prod].parse_bd; | ||
12355 | |||
12356 | memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); | ||
12357 | |||
12358 | if (xmit_type & XMIT_CSUM) { | ||
12359 | hlen = (skb_network_header(skb) - skb->data) / 2; | ||
12360 | |||
12361 | /* for now NS flag is not used in Linux */ | ||
12362 | pbd->global_data = | ||
12363 | (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << | ||
12364 | ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT)); | ||
12365 | |||
12366 | pbd->ip_hlen = (skb_transport_header(skb) - | ||
12367 | skb_network_header(skb)) / 2; | ||
12368 | |||
12369 | hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2; | ||
12370 | |||
12371 | pbd->total_hlen = cpu_to_le16(hlen); | ||
12372 | hlen = hlen*2; | ||
12373 | |||
12374 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; | ||
12375 | |||
12376 | if (xmit_type & XMIT_CSUM_V4) | ||
12377 | tx_start_bd->bd_flags.as_bitfield |= | ||
12378 | ETH_TX_BD_FLAGS_IP_CSUM; | ||
12379 | else | ||
12380 | tx_start_bd->bd_flags.as_bitfield |= | ||
12381 | ETH_TX_BD_FLAGS_IPV6; | ||
12382 | |||
12383 | if (xmit_type & XMIT_CSUM_TCP) { | ||
12384 | pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); | ||
12385 | |||
12386 | } else { | ||
12387 | s8 fix = SKB_CS_OFF(skb); /* signed! */ | ||
12388 | |||
12389 | pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG; | ||
12390 | |||
12391 | DP(NETIF_MSG_TX_QUEUED, | ||
12392 | "hlen %d fix %d csum before fix %x\n", | ||
12393 | le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb)); | ||
12394 | |||
12395 | /* HW bug: fixup the CSUM */ | ||
12396 | pbd->tcp_pseudo_csum = | ||
12397 | bnx2x_csum_fix(skb_transport_header(skb), | ||
12398 | SKB_CS(skb), fix); | ||
12399 | |||
12400 | DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", | ||
12401 | pbd->tcp_pseudo_csum); | ||
12402 | } | ||
12403 | } | ||
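
The parsing BD stores all header lengths in 16-bit words rather than bytes, which is why the code above divides by 2 on the way in and multiplies by 2 (hlen = hlen*2) on the way out. A standalone worked example for a plain Ethernet/IPv4/TCP frame without options (illustrative values, not driver code):

#include <assert.h>

int main(void)
{
	int eth_hlen = 14, ip_hlen = 20, tcp_hlen = 20;	/* header sizes, bytes */

	int hlen = eth_hlen / 2;		/* 7 words, as (network - data) / 2 */
	int ip_words = ip_hlen / 2;		/* 10 words -> pbd->ip_hlen */
	hlen += ip_words + tcp_hlen / 2;	/* 27 words -> pbd->total_hlen */
	assert(hlen == 27);
	assert(hlen * 2 == 54);			/* back to bytes, as hlen = hlen*2 */
	return 0;
}
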
12404 | |||
12405 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
12406 | skb_headlen(skb), DMA_TO_DEVICE); | ||
12407 | |||
12408 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
12409 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
12410 | nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */ | ||
12411 | tx_start_bd->nbd = cpu_to_le16(nbd); | ||
12412 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); | ||
12413 | pkt_size = tx_start_bd->nbytes; | ||
12414 | |||
12415 | DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" | ||
12416 | " nbytes %d flags %x vlan %x\n", | ||
12417 | tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, | ||
12418 | le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), | ||
12419 | tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan)); | ||
12420 | |||
12421 | if (xmit_type & XMIT_GSO) { | ||
12422 | |||
12423 | DP(NETIF_MSG_TX_QUEUED, | ||
12424 | "TSO packet len %d hlen %d total len %d tso size %d\n", | ||
12425 | skb->len, hlen, skb_headlen(skb), | ||
12426 | skb_shinfo(skb)->gso_size); | ||
12427 | |||
12428 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; | ||
12429 | |||
12430 | if (unlikely(skb_headlen(skb) > hlen)) | ||
12431 | bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, | ||
12432 | hlen, bd_prod, ++nbd); | ||
12433 | |||
12434 | pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | ||
12435 | pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); | ||
12436 | pbd->tcp_flags = pbd_tcp_flags(skb); | ||
12437 | |||
12438 | if (xmit_type & XMIT_GSO_V4) { | ||
12439 | pbd->ip_id = swab16(ip_hdr(skb)->id); | ||
12440 | pbd->tcp_pseudo_csum = | ||
12441 | swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, | ||
12442 | ip_hdr(skb)->daddr, | ||
12443 | 0, IPPROTO_TCP, 0)); | ||
12444 | |||
12445 | } else | ||
12446 | pbd->tcp_pseudo_csum = | ||
12447 | swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
12448 | &ipv6_hdr(skb)->daddr, | ||
12449 | 0, IPPROTO_TCP, 0)); | ||
12450 | |||
12451 | pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN; | ||
12452 | } | ||
12453 | tx_data_bd = (struct eth_tx_bd *)tx_start_bd; | ||
12454 | |||
12455 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
12456 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
12457 | |||
12458 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
12459 | tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd; | ||
12460 | if (total_pkt_bd == NULL) | ||
12461 | total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; | ||
12462 | |||
12463 | mapping = dma_map_page(&bp->pdev->dev, frag->page, | ||
12464 | frag->page_offset, | ||
12465 | frag->size, DMA_TO_DEVICE); | ||
12466 | |||
12467 | tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
12468 | tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
12469 | tx_data_bd->nbytes = cpu_to_le16(frag->size); | ||
12470 | le16_add_cpu(&pkt_size, frag->size); | ||
12471 | |||
12472 | DP(NETIF_MSG_TX_QUEUED, | ||
12473 | "frag %d bd @%p addr (%x:%x) nbytes %d\n", | ||
12474 | i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, | ||
12475 | le16_to_cpu(tx_data_bd->nbytes)); | ||
12476 | } | ||
12477 | |||
12478 | DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); | ||
12479 | |||
12480 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
12481 | |||
12482 | /* now send a tx doorbell, counting the next-page BD | ||
12483 | * if the packet contains or ends with it | ||
12484 | */ | ||
12485 | if (TX_BD_POFF(bd_prod) < nbd) | ||
12486 | nbd++; | ||
12487 | |||
12488 | if (total_pkt_bd != NULL) | ||
12489 | total_pkt_bd->total_pkt_bytes = pkt_size; | ||
12490 | |||
12491 | if (pbd) | ||
12492 | DP(NETIF_MSG_TX_QUEUED, | ||
12493 | "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" | ||
12494 | " tcp_flags %x xsum %x seq %u hlen %u\n", | ||
12495 | pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id, | ||
12496 | pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum, | ||
12497 | pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen)); | ||
12498 | |||
12499 | DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); | ||
12500 | |||
12501 | /* | ||
12502 | * Make sure that the BD data is updated before updating the producer | ||
12503 | * since FW might read the BD right after the producer is updated. | ||
12504 | * This is only applicable for weak-ordered memory model archs such | ||
12505 | * as IA-64. The following barrier is also mandatory since the FW | ||
12506 | * assumes packets must have BDs. | ||
12507 | */ | ||
12508 | wmb(); | ||
12509 | |||
12510 | fp->tx_db.data.prod += nbd; | ||
12511 | barrier(); | ||
12512 | DOORBELL(bp, fp->index, fp->tx_db.raw); | ||
12513 | |||
12514 | mmiowb(); | ||
12515 | |||
12516 | fp->tx_bd_prod += nbd; | ||
12517 | |||
12518 | if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { | ||
12519 | netif_tx_stop_queue(txq); | ||
12520 | |||
12521 | /* paired memory barrier is in bnx2x_tx_int(), we have to keep | ||
12522 | * ordering of set_bit() in netif_tx_stop_queue() and read of | ||
12523 | * fp->tx_bd_cons */ | ||
12524 | smp_mb(); | ||
12525 | |||
12526 | fp->eth_q_stats.driver_xoff++; | ||
12527 | if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) | ||
12528 | netif_tx_wake_queue(txq); | ||
12529 | } | ||
12530 | fp->tx_pkt++; | ||
12531 | |||
12532 | return NETDEV_TX_OK; | ||
12533 | } | ||
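
The wmb()/barrier() pair above implements the usual publish pattern: the BD contents must be globally visible before the producer index that announces them, and the doorbell write must not be reordered before the producer update. A minimal userspace C11 analogue using release ordering (illustrative only; C11 atomics are not interchangeable with the kernel primitives in driver code):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

struct ring {
	uint32_t bd[16];		/* descriptor payloads */
	_Atomic uint16_t prod;		/* producer index the peer polls */
};

/* Fill the BD first, then publish with a release store so a consumer
 * doing an acquire load of prod also observes the BD contents --
 * the same ordering wmb() enforces before the doorbell above. */
static void post_bd(struct ring *r, uint16_t idx, uint32_t val)
{
	r->bd[idx % 16] = val;
	atomic_store_explicit(&r->prod, idx + 1, memory_order_release);
}

int main(void)
{
	struct ring r = { .prod = 0 };

	post_bd(&r, 0, 0xabcd);
	assert(atomic_load_explicit(&r.prod, memory_order_acquire) == 1);
	assert(r.bd[0] == 0xabcd);
	return 0;
}
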
12534 | |||
12535 | /* called with rtnl_lock */ | 6793 | /* called with rtnl_lock */ |
12536 | static int bnx2x_open(struct net_device *dev) | 6794 | static int bnx2x_open(struct net_device *dev) |
12537 | { | 6795 | { |
@@ -12591,7 +6849,7 @@ static int bnx2x_close(struct net_device *dev) | |||
12591 | } | 6849 | } |
12592 | 6850 | ||
12593 | /* called with netif_tx_lock from dev_mcast.c */ | 6851 | /* called with netif_tx_lock from dev_mcast.c */ |
12594 | static void bnx2x_set_rx_mode(struct net_device *dev) | 6852 | void bnx2x_set_rx_mode(struct net_device *dev) |
12595 | { | 6853 | { |
12596 | struct bnx2x *bp = netdev_priv(dev); | 6854 | struct bnx2x *bp = netdev_priv(dev); |
12597 | u32 rx_mode = BNX2X_RX_MODE_NORMAL; | 6855 | u32 rx_mode = BNX2X_RX_MODE_NORMAL; |
@@ -12711,25 +6969,6 @@ static void bnx2x_set_rx_mode(struct net_device *dev) | |||
12711 | bnx2x_set_storm_rx_mode(bp); | 6969 | bnx2x_set_storm_rx_mode(bp); |
12712 | } | 6970 | } |
12713 | 6971 | ||
12714 | /* called with rtnl_lock */ | ||
12715 | static int bnx2x_change_mac_addr(struct net_device *dev, void *p) | ||
12716 | { | ||
12717 | struct sockaddr *addr = p; | ||
12718 | struct bnx2x *bp = netdev_priv(dev); | ||
12719 | |||
12720 | if (!is_valid_ether_addr((u8 *)(addr->sa_data))) | ||
12721 | return -EINVAL; | ||
12722 | |||
12723 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
12724 | if (netif_running(dev)) { | ||
12725 | if (CHIP_IS_E1(bp)) | ||
12726 | bnx2x_set_eth_mac_addr_e1(bp, 1); | ||
12727 | else | ||
12728 | bnx2x_set_eth_mac_addr_e1h(bp, 1); | ||
12729 | } | ||
12730 | |||
12731 | return 0; | ||
12732 | } | ||
12733 | 6972 | ||
12734 | /* called with rtnl_lock */ | 6973 | /* called with rtnl_lock */ |
12735 | static int bnx2x_mdio_read(struct net_device *netdev, int prtad, | 6974 | static int bnx2x_mdio_read(struct net_device *netdev, int prtad, |
@@ -12805,71 +7044,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
12805 | return mdio_mii_ioctl(&bp->mdio, mdio, cmd); | 7044 | return mdio_mii_ioctl(&bp->mdio, mdio, cmd); |
12806 | } | 7045 | } |
12807 | 7046 | ||
12808 | /* called with rtnl_lock */ | ||
12809 | static int bnx2x_change_mtu(struct net_device *dev, int new_mtu) | ||
12810 | { | ||
12811 | struct bnx2x *bp = netdev_priv(dev); | ||
12812 | int rc = 0; | ||
12813 | |||
12814 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
12815 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
12816 | return -EAGAIN; | ||
12817 | } | ||
12818 | |||
12819 | if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || | ||
12820 | ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) | ||
12821 | return -EINVAL; | ||
12822 | |||
12823 | /* This does not race with packet allocation | ||
12824 | * because the actual alloc size is | ||
12825 | * only updated as part of load | ||
12826 | */ | ||
12827 | dev->mtu = new_mtu; | ||
12828 | |||
12829 | if (netif_running(dev)) { | ||
12830 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
12831 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); | ||
12832 | } | ||
12833 | |||
12834 | return rc; | ||
12835 | } | ||
12836 | |||
12837 | static void bnx2x_tx_timeout(struct net_device *dev) | ||
12838 | { | ||
12839 | struct bnx2x *bp = netdev_priv(dev); | ||
12840 | |||
12841 | #ifdef BNX2X_STOP_ON_ERROR | ||
12842 | if (!bp->panic) | ||
12843 | bnx2x_panic(); | ||
12844 | #endif | ||
12845 | /* This allows the netif to be shutdown gracefully before resetting */ | ||
12846 | schedule_delayed_work(&bp->reset_task, 0); | ||
12847 | } | ||
12848 | |||
12849 | #ifdef BCM_VLAN | ||
12850 | /* called with rtnl_lock */ | ||
12851 | static void bnx2x_vlan_rx_register(struct net_device *dev, | ||
12852 | struct vlan_group *vlgrp) | ||
12853 | { | ||
12854 | struct bnx2x *bp = netdev_priv(dev); | ||
12855 | |||
12856 | bp->vlgrp = vlgrp; | ||
12857 | |||
12858 | /* Set flags according to the required capabilities */ | ||
12859 | bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG); | ||
12860 | |||
12861 | if (dev->features & NETIF_F_HW_VLAN_TX) | ||
12862 | bp->flags |= HW_VLAN_TX_FLAG; | ||
12863 | |||
12864 | if (dev->features & NETIF_F_HW_VLAN_RX) | ||
12865 | bp->flags |= HW_VLAN_RX_FLAG; | ||
12866 | |||
12867 | if (netif_running(dev)) | ||
12868 | bnx2x_set_client_config(bp); | ||
12869 | } | ||
12870 | |||
12871 | #endif | ||
12872 | |||
12873 | #ifdef CONFIG_NET_POLL_CONTROLLER | 7047 | #ifdef CONFIG_NET_POLL_CONTROLLER |
12874 | static void poll_bnx2x(struct net_device *dev) | 7048 | static void poll_bnx2x(struct net_device *dev) |
12875 | { | 7049 | { |
@@ -13018,7 +7192,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | |||
13018 | dev->watchdog_timeo = TX_TIMEOUT; | 7192 | dev->watchdog_timeo = TX_TIMEOUT; |
13019 | 7193 | ||
13020 | dev->netdev_ops = &bnx2x_netdev_ops; | 7194 | dev->netdev_ops = &bnx2x_netdev_ops; |
13021 | dev->ethtool_ops = &bnx2x_ethtool_ops; | 7195 | bnx2x_set_ethtool_ops(dev); |
13022 | dev->features |= NETIF_F_SG; | 7196 | dev->features |= NETIF_F_SG; |
13023 | dev->features |= NETIF_F_HW_CSUM; | 7197 | dev->features |= NETIF_F_HW_CSUM; |
13024 | if (bp->flags & USING_DAC_FLAG) | 7198 | if (bp->flags & USING_DAC_FLAG) |
@@ -13371,73 +7545,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | |||
13371 | pci_set_drvdata(pdev, NULL); | 7545 | pci_set_drvdata(pdev, NULL); |
13372 | } | 7546 | } |
13373 | 7547 | ||
13374 | static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) | ||
13375 | { | ||
13376 | struct net_device *dev = pci_get_drvdata(pdev); | ||
13377 | struct bnx2x *bp; | ||
13378 | |||
13379 | if (!dev) { | ||
13380 | dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); | ||
13381 | return -ENODEV; | ||
13382 | } | ||
13383 | bp = netdev_priv(dev); | ||
13384 | |||
13385 | rtnl_lock(); | ||
13386 | |||
13387 | pci_save_state(pdev); | ||
13388 | |||
13389 | if (!netif_running(dev)) { | ||
13390 | rtnl_unlock(); | ||
13391 | return 0; | ||
13392 | } | ||
13393 | |||
13394 | netif_device_detach(dev); | ||
13395 | |||
13396 | bnx2x_nic_unload(bp, UNLOAD_CLOSE); | ||
13397 | |||
13398 | bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); | ||
13399 | |||
13400 | rtnl_unlock(); | ||
13401 | |||
13402 | return 0; | ||
13403 | } | ||
13404 | |||
13405 | static int bnx2x_resume(struct pci_dev *pdev) | ||
13406 | { | ||
13407 | struct net_device *dev = pci_get_drvdata(pdev); | ||
13408 | struct bnx2x *bp; | ||
13409 | int rc; | ||
13410 | |||
13411 | if (!dev) { | ||
13412 | dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); | ||
13413 | return -ENODEV; | ||
13414 | } | ||
13415 | bp = netdev_priv(dev); | ||
13416 | |||
13417 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
13418 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
13419 | return -EAGAIN; | ||
13420 | } | ||
13421 | |||
13422 | rtnl_lock(); | ||
13423 | |||
13424 | pci_restore_state(pdev); | ||
13425 | |||
13426 | if (!netif_running(dev)) { | ||
13427 | rtnl_unlock(); | ||
13428 | return 0; | ||
13429 | } | ||
13430 | |||
13431 | bnx2x_set_power_state(bp, PCI_D0); | ||
13432 | netif_device_attach(dev); | ||
13433 | |||
13434 | rc = bnx2x_nic_load(bp, LOAD_OPEN); | ||
13435 | |||
13436 | rtnl_unlock(); | ||
13437 | |||
13438 | return rc; | ||
13439 | } | ||
13440 | |||
13441 | static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | 7548 | static int bnx2x_eeh_nic_unload(struct bnx2x *bp) |
13442 | { | 7549 | { |
13443 | int i; | 7550 | int i; |
@@ -13759,7 +7866,7 @@ static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) | |||
13759 | /* | 7866 | /* |
13760 | * for commands that have no data | 7867 | * for commands that have no data |
13761 | */ | 7868 | */ |
13762 | static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) | 7869 | int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) |
13763 | { | 7870 | { |
13764 | struct cnic_ctl_info ctl = {0}; | 7871 | struct cnic_ctl_info ctl = {0}; |
13765 | 7872 | ||
@@ -13827,7 +7934,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) | |||
13827 | return rc; | 7934 | return rc; |
13828 | } | 7935 | } |
13829 | 7936 | ||
13830 | static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) | 7937 | void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) |
13831 | { | 7938 | { |
13832 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; | 7939 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; |
13833 | 7940 | ||
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index a1f3bf0cd630..a1f3bf0cd630 100644 --- a/drivers/net/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h | |||
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c new file mode 100644 index 000000000000..c74724461020 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_stats.c | |||
@@ -0,0 +1,1411 @@ | |||
1 | /* bnx2x_stats.c: Broadcom Everest network driver. | ||
2 | * | ||
3 | * Copyright (c) 2007-2010 Broadcom Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> | ||
10 | * Written by: Eliezer Tamir | ||
11 | * Based on code from Michael Chan's bnx2 driver | ||
12 | * UDP CSUM errata workaround by Arik Gendelman | ||
13 | * Slowpath and fastpath rework by Vladislav Zolotarov | ||
14 | * Statistics and Link management by Yitchak Gertner | ||
15 | * | ||
16 | */ | ||
17 | #include "bnx2x_cmn.h" | ||
18 | #include "bnx2x_stats.h" | ||
19 | |||
20 | /* Statistics */ | ||
21 | |||
22 | /**************************************************************************** | ||
23 | * Macros | ||
24 | ****************************************************************************/ | ||
25 | |||
26 | /* sum[hi:lo] += add[hi:lo] */ | ||
27 | #define ADD_64(s_hi, a_hi, s_lo, a_lo) \ | ||
28 | do { \ | ||
29 | s_lo += a_lo; \ | ||
30 | s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ | ||
31 | } while (0) | ||
32 | |||
33 | /* difference = minuend - subtrahend */ | ||
34 | #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ | ||
35 | do { \ | ||
36 | if (m_lo < s_lo) { \ | ||
37 | /* underflow */ \ | ||
38 | d_hi = m_hi - s_hi; \ | ||
39 | if (d_hi > 0) { \ | ||
40 | /* we can 'borrow' 1 */ \ | ||
41 | d_hi--; \ | ||
42 | d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ | ||
43 | } else { \ | ||
44 | /* m_hi <= s_hi */ \ | ||
45 | d_hi = 0; \ | ||
46 | d_lo = 0; \ | ||
47 | } \ | ||
48 | } else { \ | ||
49 | /* m_lo >= s_lo */ \ | ||
50 | if (m_hi < s_hi) { \ | ||
51 | d_hi = 0; \ | ||
52 | d_lo = 0; \ | ||
53 | } else { \ | ||
54 | /* m_hi >= s_hi */ \ | ||
55 | d_hi = m_hi - s_hi; \ | ||
56 | d_lo = m_lo - s_lo; \ | ||
57 | } \ | ||
58 | } \ | ||
59 | } while (0) | ||
60 | |||
61 | #define UPDATE_STAT64(s, t) \ | ||
62 | do { \ | ||
63 | DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ | ||
64 | diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ | ||
65 | pstats->mac_stx[0].t##_hi = new->s##_hi; \ | ||
66 | pstats->mac_stx[0].t##_lo = new->s##_lo; \ | ||
67 | ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ | ||
68 | pstats->mac_stx[1].t##_lo, diff.lo); \ | ||
69 | } while (0) | ||
70 | |||
71 | #define UPDATE_STAT64_NIG(s, t) \ | ||
72 | do { \ | ||
73 | DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ | ||
74 | diff.lo, new->s##_lo, old->s##_lo); \ | ||
75 | ADD_64(estats->t##_hi, diff.hi, \ | ||
76 | estats->t##_lo, diff.lo); \ | ||
77 | } while (0) | ||
78 | |||
79 | /* sum[hi:lo] += add */ | ||
80 | #define ADD_EXTEND_64(s_hi, s_lo, a) \ | ||
81 | do { \ | ||
82 | s_lo += a; \ | ||
83 | s_hi += (s_lo < a) ? 1 : 0; \ | ||
84 | } while (0) | ||
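
ADD_64 and DIFF_64 keep each 64-bit statistic as two u32 halves; a carry (or borrow) is detected purely with unsigned compares on the low half. A standalone sketch of the carry case (illustrative, not driver code):

#include <assert.h>
#include <stdint.h>

/* same shape as ADD_64: carry out of the low half when it wraps */
static void add_64(uint32_t *s_hi, uint32_t a_hi,
		   uint32_t *s_lo, uint32_t a_lo)
{
	*s_lo += a_lo;
	*s_hi += a_hi + (*s_lo < a_lo ? 1 : 0);
}

int main(void)
{
	uint32_t hi = 0, lo = 0xffffffffu;

	add_64(&hi, 0, &lo, 1);		/* low half wraps -> carry into hi */
	assert(hi == 1 && lo == 0);
	assert((((uint64_t)hi << 32) | lo) == 0x100000000ull);
	return 0;
}
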
85 | |||
86 | #define UPDATE_EXTEND_STAT(s) \ | ||
87 | do { \ | ||
88 | ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ | ||
89 | pstats->mac_stx[1].s##_lo, \ | ||
90 | new->s); \ | ||
91 | } while (0) | ||
92 | |||
93 | #define UPDATE_EXTEND_TSTAT(s, t) \ | ||
94 | do { \ | ||
95 | diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ | ||
96 | old_tclient->s = tclient->s; \ | ||
97 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
98 | } while (0) | ||
99 | |||
100 | #define UPDATE_EXTEND_USTAT(s, t) \ | ||
101 | do { \ | ||
102 | diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ | ||
103 | old_uclient->s = uclient->s; \ | ||
104 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
105 | } while (0) | ||
106 | |||
107 | #define UPDATE_EXTEND_XSTAT(s, t) \ | ||
108 | do { \ | ||
109 | diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ | ||
110 | old_xclient->s = xclient->s; \ | ||
111 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
112 | } while (0) | ||
113 | |||
114 | /* minuend -= subtrahend */ | ||
115 | #define SUB_64(m_hi, s_hi, m_lo, s_lo) \ | ||
116 | do { \ | ||
117 | DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ | ||
118 | } while (0) | ||
119 | |||
120 | /* minuend[hi:lo] -= subtrahend */ | ||
121 | #define SUB_EXTEND_64(m_hi, m_lo, s) \ | ||
122 | do { \ | ||
123 | SUB_64(m_hi, 0, m_lo, s); \ | ||
124 | } while (0) | ||
125 | |||
126 | #define SUB_EXTEND_USTAT(s, t) \ | ||
127 | do { \ | ||
128 | diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ | ||
129 | SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
130 | } while (0) | ||
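
The UPDATE_EXTEND_* / SUB_EXTEND_* macros rely on the firmware counters being free-running 32-bit values: the unsigned difference from the previous snapshot stays correct across a single wrap, and the delta is then folded into the (hi:lo) accumulator. A standalone sketch (illustrative names, not driver code):

#include <assert.h>
#include <stdint.h>

static void add_extend_64(uint32_t *s_hi, uint32_t *s_lo, uint32_t a)
{
	*s_lo += a;
	*s_hi += (*s_lo < a) ? 1 : 0;
}

int main(void)
{
	uint32_t prev = 0xfffffff0u;	/* last snapshot of a FW counter */
	uint32_t curr = 0x00000010u;	/* counter wrapped since then */
	uint32_t hi = 0, lo = 0;

	uint32_t diff = curr - prev;	/* unsigned wraparound gives 0x20 */
	assert(diff == 0x20);

	add_extend_64(&hi, &lo, diff);	/* fold the delta into hi:lo */
	assert(hi == 0 && lo == 0x20);
	return 0;
}
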
131 | |||
132 | /* | ||
133 | * General service functions | ||
134 | */ | ||
135 | |||
136 | static inline long bnx2x_hilo(u32 *hiref) | ||
137 | { | ||
138 | u32 lo = *(hiref + 1); | ||
139 | #if (BITS_PER_LONG == 64) | ||
140 | u32 hi = *hiref; | ||
141 | |||
142 | return HILO_U64(hi, lo); | ||
143 | #else | ||
144 | return lo; | ||
145 | #endif | ||
146 | } | ||
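
bnx2x_hilo() folds the (hi, lo) pair back into a native long, so on 32-bit kernels only the low half is reported. Assuming HILO_U64 has the usual ((u64)hi << 32) + lo shape (its definition is not part of this file), the composition looks like:

#include <assert.h>
#include <stdint.h>

/* assumed shape of HILO_U64 -- not taken from this patch */
static uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) + lo;
}

int main(void)
{
	/* stats are laid out hi-first, as *hiref / *(hiref + 1) above */
	uint32_t pair[2] = { 0x2, 0x80000000u };

	assert(hilo_u64(pair[0], pair[1]) == 0x280000000ull);
	/* with BITS_PER_LONG == 32 the driver would return just pair[1] */
	return 0;
}
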
147 | |||
148 | /* | ||
149 | * Init service functions | ||
150 | */ | ||
151 | |||
152 | |||
153 | static void bnx2x_storm_stats_post(struct bnx2x *bp) | ||
154 | { | ||
155 | if (!bp->stats_pending) { | ||
156 | struct eth_query_ramrod_data ramrod_data = {0}; | ||
157 | int i, rc; | ||
158 | |||
159 | spin_lock_bh(&bp->stats_lock); | ||
160 | |||
161 | ramrod_data.drv_counter = bp->stats_counter++; | ||
162 | ramrod_data.collect_port = bp->port.pmf ? 1 : 0; | ||
163 | for_each_queue(bp, i) | ||
164 | ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); | ||
165 | |||
166 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, | ||
167 | ((u32 *)&ramrod_data)[1], | ||
168 | ((u32 *)&ramrod_data)[0], 0); | ||
169 | if (rc == 0) { | ||
170 | /* stats ramrod has its own slot on the spq */ | ||
171 | bp->spq_left++; | ||
172 | bp->stats_pending = 1; | ||
173 | } | ||
174 | |||
175 | spin_unlock_bh(&bp->stats_lock); | ||
176 | } | ||
177 | } | ||
178 | |||
179 | static void bnx2x_hw_stats_post(struct bnx2x *bp) | ||
180 | { | ||
181 | struct dmae_command *dmae = &bp->stats_dmae; | ||
182 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
183 | |||
184 | *stats_comp = DMAE_COMP_VAL; | ||
185 | if (CHIP_REV_IS_SLOW(bp)) | ||
186 | return; | ||
187 | |||
188 | /* loader */ | ||
189 | if (bp->executer_idx) { | ||
190 | int loader_idx = PMF_DMAE_C(bp); | ||
191 | |||
192 | memset(dmae, 0, sizeof(struct dmae_command)); | ||
193 | |||
194 | dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | | ||
195 | DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | | ||
196 | DMAE_CMD_DST_RESET | | ||
197 | #ifdef __BIG_ENDIAN | ||
198 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
199 | #else | ||
200 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
201 | #endif | ||
202 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : | ||
203 | DMAE_CMD_PORT_0) | | ||
204 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
205 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); | ||
206 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); | ||
207 | dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + | ||
208 | sizeof(struct dmae_command) * | ||
209 | (loader_idx + 1)) >> 2; | ||
210 | dmae->dst_addr_hi = 0; | ||
211 | dmae->len = sizeof(struct dmae_command) >> 2; | ||
212 | if (CHIP_IS_E1(bp)) | ||
213 | dmae->len--; | ||
214 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; | ||
215 | dmae->comp_addr_hi = 0; | ||
216 | dmae->comp_val = 1; | ||
217 | |||
218 | *stats_comp = 0; | ||
219 | bnx2x_post_dmae(bp, dmae, loader_idx); | ||
220 | |||
221 | } else if (bp->func_stx) { | ||
222 | *stats_comp = 0; | ||
223 | bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); | ||
224 | } | ||
225 | } | ||
226 | |||
227 | static int bnx2x_stats_comp(struct bnx2x *bp) | ||
228 | { | ||
229 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
230 | int cnt = 10; | ||
231 | |||
232 | might_sleep(); | ||
233 | while (*stats_comp != DMAE_COMP_VAL) { | ||
234 | if (!cnt) { | ||
235 | BNX2X_ERR("timeout waiting for stats to finish\n"); | ||
236 | break; | ||
237 | } | ||
238 | cnt--; | ||
239 | msleep(1); | ||
240 | } | ||
241 | return 1; | ||
242 | } | ||
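
bnx2x_stats_comp() is a bounded poll: the DMAE engine writes DMAE_COMP_VAL to the completion word, and the driver sleeps 1 ms per retry for at most 10 tries before logging a timeout (note it returns 1 either way). A userspace analogue of the bounded-wait shape (illustrative only):

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define COMP_VAL 1u

/* poll a completion flag written by another agent, giving up
 * after `tries` 1 ms sleeps -- same shape as bnx2x_stats_comp() */
static int wait_comp(_Atomic unsigned *comp, int tries)
{
	while (atomic_load(comp) != COMP_VAL) {
		if (!tries--) {
			fprintf(stderr, "timeout waiting for completion\n");
			return -1;
		}
		usleep(1000);			/* ~msleep(1) */
	}
	return 0;
}

int main(void)
{
	_Atomic unsigned comp = COMP_VAL;	/* already completed */

	return wait_comp(&comp, 10) ? 1 : 0;
}
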
243 | |||
244 | /* | ||
245 | * Statistics service functions | ||
246 | */ | ||
247 | |||
248 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
249 | { | ||
250 | struct dmae_command *dmae; | ||
251 | u32 opcode; | ||
252 | int loader_idx = PMF_DMAE_C(bp); | ||
253 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
254 | |||
255 | /* sanity */ | ||
256 | if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) { | ||
257 | BNX2X_ERR("BUG!\n"); | ||
258 | return; | ||
259 | } | ||
260 | |||
261 | bp->executer_idx = 0; | ||
262 | |||
263 | opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | | ||
264 | DMAE_CMD_C_ENABLE | | ||
265 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
266 | #ifdef __BIG_ENDIAN | ||
267 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
268 | #else | ||
269 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
270 | #endif | ||
271 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
272 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
273 | |||
274 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
275 | dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); | ||
276 | dmae->src_addr_lo = bp->port.port_stx >> 2; | ||
277 | dmae->src_addr_hi = 0; | ||
278 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); | ||
279 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); | ||
280 | dmae->len = DMAE_LEN32_RD_MAX; | ||
281 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
282 | dmae->comp_addr_hi = 0; | ||
283 | dmae->comp_val = 1; | ||
284 | |||
285 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
286 | dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); | ||
287 | dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; | ||
288 | dmae->src_addr_hi = 0; | ||
289 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + | ||
290 | DMAE_LEN32_RD_MAX * 4); | ||
291 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + | ||
292 | DMAE_LEN32_RD_MAX * 4); | ||
293 | dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX; | ||
294 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
295 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
296 | dmae->comp_val = DMAE_COMP_VAL; | ||
297 | |||
298 | *stats_comp = 0; | ||
299 | bnx2x_hw_stats_post(bp); | ||
300 | bnx2x_stats_comp(bp); | ||
301 | } | ||
302 | |||
303 | static void bnx2x_port_stats_init(struct bnx2x *bp) | ||
304 | { | ||
305 | struct dmae_command *dmae; | ||
306 | int port = BP_PORT(bp); | ||
307 | int vn = BP_E1HVN(bp); | ||
308 | u32 opcode; | ||
309 | int loader_idx = PMF_DMAE_C(bp); | ||
310 | u32 mac_addr; | ||
311 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
312 | |||
313 | /* sanity */ | ||
314 | if (!bp->link_vars.link_up || !bp->port.pmf) { | ||
315 | BNX2X_ERR("BUG!\n"); | ||
316 | return; | ||
317 | } | ||
318 | |||
319 | bp->executer_idx = 0; | ||
320 | |||
321 | /* MCP */ | ||
322 | opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | | ||
323 | DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | | ||
324 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
325 | #ifdef __BIG_ENDIAN | ||
326 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
327 | #else | ||
328 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
329 | #endif | ||
330 | (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
331 | (vn << DMAE_CMD_E1HVN_SHIFT)); | ||
332 | |||
333 | if (bp->port.port_stx) { | ||
334 | |||
335 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
336 | dmae->opcode = opcode; | ||
337 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); | ||
338 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); | ||
339 | dmae->dst_addr_lo = bp->port.port_stx >> 2; | ||
340 | dmae->dst_addr_hi = 0; | ||
341 | dmae->len = sizeof(struct host_port_stats) >> 2; | ||
342 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
343 | dmae->comp_addr_hi = 0; | ||
344 | dmae->comp_val = 1; | ||
345 | } | ||
346 | |||
347 | if (bp->func_stx) { | ||
348 | |||
349 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
350 | dmae->opcode = opcode; | ||
351 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); | ||
352 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); | ||
353 | dmae->dst_addr_lo = bp->func_stx >> 2; | ||
354 | dmae->dst_addr_hi = 0; | ||
355 | dmae->len = sizeof(struct host_func_stats) >> 2; | ||
356 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
357 | dmae->comp_addr_hi = 0; | ||
358 | dmae->comp_val = 1; | ||
359 | } | ||
360 | |||
361 | /* MAC */ | ||
362 | opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | | ||
363 | DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | | ||
364 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
365 | #ifdef __BIG_ENDIAN | ||
366 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
367 | #else | ||
368 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
369 | #endif | ||
370 | (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
371 | (vn << DMAE_CMD_E1HVN_SHIFT)); | ||
372 | |||
373 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { | ||
374 | |||
375 | mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : | ||
376 | NIG_REG_INGRESS_BMAC0_MEM); | ||
377 | |||
378 | /* BIGMAC_REGISTER_TX_STAT_GTPKT .. | ||
379 | BIGMAC_REGISTER_TX_STAT_GTBYT */ | ||
380 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
381 | dmae->opcode = opcode; | ||
382 | dmae->src_addr_lo = (mac_addr + | ||
383 | BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; | ||
384 | dmae->src_addr_hi = 0; | ||
385 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); | ||
386 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); | ||
387 | dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - | ||
388 | BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; | ||
389 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
390 | dmae->comp_addr_hi = 0; | ||
391 | dmae->comp_val = 1; | ||
392 | |||
393 | /* BIGMAC_REGISTER_RX_STAT_GR64 .. | ||
394 | BIGMAC_REGISTER_RX_STAT_GRIPJ */ | ||
395 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
396 | dmae->opcode = opcode; | ||
397 | dmae->src_addr_lo = (mac_addr + | ||
398 | BIGMAC_REGISTER_RX_STAT_GR64) >> 2; | ||
399 | dmae->src_addr_hi = 0; | ||
400 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + | ||
401 | offsetof(struct bmac_stats, rx_stat_gr64_lo)); | ||
402 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + | ||
403 | offsetof(struct bmac_stats, rx_stat_gr64_lo)); | ||
404 | dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - | ||
405 | BIGMAC_REGISTER_RX_STAT_GR64) >> 2; | ||
406 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
407 | dmae->comp_addr_hi = 0; | ||
408 | dmae->comp_val = 1; | ||
409 | |||
410 | } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { | ||
411 | |||
412 | mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); | ||
413 | |||
414 | /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ | ||
415 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
416 | dmae->opcode = opcode; | ||
417 | dmae->src_addr_lo = (mac_addr + | ||
418 | EMAC_REG_EMAC_RX_STAT_AC) >> 2; | ||
419 | dmae->src_addr_hi = 0; | ||
420 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); | ||
421 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); | ||
422 | dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; | ||
423 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
424 | dmae->comp_addr_hi = 0; | ||
425 | dmae->comp_val = 1; | ||
426 | |||
427 | /* EMAC_REG_EMAC_RX_STAT_AC_28 */ | ||
428 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
429 | dmae->opcode = opcode; | ||
430 | dmae->src_addr_lo = (mac_addr + | ||
431 | EMAC_REG_EMAC_RX_STAT_AC_28) >> 2; | ||
432 | dmae->src_addr_hi = 0; | ||
433 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + | ||
434 | offsetof(struct emac_stats, rx_stat_falsecarriererrors)); | ||
435 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + | ||
436 | offsetof(struct emac_stats, rx_stat_falsecarriererrors)); | ||
437 | dmae->len = 1; | ||
438 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
439 | dmae->comp_addr_hi = 0; | ||
440 | dmae->comp_val = 1; | ||
441 | |||
442 | /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ | ||
443 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
444 | dmae->opcode = opcode; | ||
445 | dmae->src_addr_lo = (mac_addr + | ||
446 | EMAC_REG_EMAC_TX_STAT_AC) >> 2; | ||
447 | dmae->src_addr_hi = 0; | ||
448 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + | ||
449 | offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); | ||
450 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + | ||
451 | offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); | ||
452 | dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; | ||
453 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
454 | dmae->comp_addr_hi = 0; | ||
455 | dmae->comp_val = 1; | ||
456 | } | ||
457 | |||
458 | /* NIG */ | ||
459 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
460 | dmae->opcode = opcode; | ||
461 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD : | ||
462 | NIG_REG_STAT0_BRB_DISCARD) >> 2; | ||
463 | dmae->src_addr_hi = 0; | ||
464 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); | ||
465 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); | ||
466 | dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; | ||
467 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
468 | dmae->comp_addr_hi = 0; | ||
469 | dmae->comp_val = 1; | ||
470 | |||
471 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
472 | dmae->opcode = opcode; | ||
473 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : | ||
474 | NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; | ||
475 | dmae->src_addr_hi = 0; | ||
476 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + | ||
477 | offsetof(struct nig_stats, egress_mac_pkt0_lo)); | ||
478 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + | ||
479 | offsetof(struct nig_stats, egress_mac_pkt0_lo)); | ||
480 | dmae->len = (2*sizeof(u32)) >> 2; | ||
481 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
482 | dmae->comp_addr_hi = 0; | ||
483 | dmae->comp_val = 1; | ||
484 | |||
485 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
486 | dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | | ||
487 | DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | | ||
488 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
489 | #ifdef __BIG_ENDIAN | ||
490 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
491 | #else | ||
492 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
493 | #endif | ||
494 | (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
495 | (vn << DMAE_CMD_E1HVN_SHIFT)); | ||
496 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : | ||
497 | NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; | ||
498 | dmae->src_addr_hi = 0; | ||
499 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + | ||
500 | offsetof(struct nig_stats, egress_mac_pkt1_lo)); | ||
501 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + | ||
502 | offsetof(struct nig_stats, egress_mac_pkt1_lo)); | ||
503 | dmae->len = (2*sizeof(u32)) >> 2; | ||
504 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
505 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
506 | dmae->comp_val = DMAE_COMP_VAL; | ||
507 | |||
508 | *stats_comp = 0; | ||
509 | } | ||
510 | |||
511 | static void bnx2x_func_stats_init(struct bnx2x *bp) | ||
512 | { | ||
513 | struct dmae_command *dmae = &bp->stats_dmae; | ||
514 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
515 | |||
516 | /* sanity */ | ||
517 | if (!bp->func_stx) { | ||
518 | BNX2X_ERR("BUG!\n"); | ||
519 | return; | ||
520 | } | ||
521 | |||
522 | bp->executer_idx = 0; | ||
523 | memset(dmae, 0, sizeof(struct dmae_command)); | ||
524 | |||
525 | dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | | ||
526 | DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | | ||
527 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
528 | #ifdef __BIG_ENDIAN | ||
529 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
530 | #else | ||
531 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
532 | #endif | ||
533 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
534 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
535 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); | ||
536 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); | ||
537 | dmae->dst_addr_lo = bp->func_stx >> 2; | ||
538 | dmae->dst_addr_hi = 0; | ||
539 | dmae->len = sizeof(struct host_func_stats) >> 2; | ||
540 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
541 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
542 | dmae->comp_val = DMAE_COMP_VAL; | ||
543 | |||
544 | *stats_comp = 0; | ||
545 | } | ||
546 | |||
547 | static void bnx2x_stats_start(struct bnx2x *bp) | ||
548 | { | ||
549 | if (bp->port.pmf) | ||
550 | bnx2x_port_stats_init(bp); | ||
551 | |||
552 | else if (bp->func_stx) | ||
553 | bnx2x_func_stats_init(bp); | ||
554 | |||
555 | bnx2x_hw_stats_post(bp); | ||
556 | bnx2x_storm_stats_post(bp); | ||
557 | } | ||
558 | |||
559 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) | ||
560 | { | ||
561 | bnx2x_stats_comp(bp); | ||
562 | bnx2x_stats_pmf_update(bp); | ||
563 | bnx2x_stats_start(bp); | ||
564 | } | ||
565 | |||
566 | static void bnx2x_stats_restart(struct bnx2x *bp) | ||
567 | { | ||
568 | bnx2x_stats_comp(bp); | ||
569 | bnx2x_stats_start(bp); | ||
570 | } | ||
571 | |||
572 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) | ||
573 | { | ||
574 | struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats); | ||
575 | struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); | ||
576 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
577 | struct { | ||
578 | u32 lo; | ||
579 | u32 hi; | ||
580 | } diff; | ||
581 | |||
582 | UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); | ||
583 | UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); | ||
584 | UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); | ||
585 | UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); | ||
586 | UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); | ||
587 | UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); | ||
588 | UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); | ||
589 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); | ||
590 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); | ||
591 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); | ||
592 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); | ||
593 | UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); | ||
594 | UPDATE_STAT64(tx_stat_gt127, | ||
595 | tx_stat_etherstatspkts65octetsto127octets); | ||
596 | UPDATE_STAT64(tx_stat_gt255, | ||
597 | tx_stat_etherstatspkts128octetsto255octets); | ||
598 | UPDATE_STAT64(tx_stat_gt511, | ||
599 | tx_stat_etherstatspkts256octetsto511octets); | ||
600 | UPDATE_STAT64(tx_stat_gt1023, | ||
601 | tx_stat_etherstatspkts512octetsto1023octets); | ||
602 | UPDATE_STAT64(tx_stat_gt1518, | ||
603 | tx_stat_etherstatspkts1024octetsto1522octets); | ||
604 | UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); | ||
605 | UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); | ||
606 | UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); | ||
607 | UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); | ||
608 | UPDATE_STAT64(tx_stat_gterr, | ||
609 | tx_stat_dot3statsinternalmactransmiterrors); | ||
610 | UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); | ||
611 | |||
612 | estats->pause_frames_received_hi = | ||
613 | pstats->mac_stx[1].rx_stat_bmac_xpf_hi; | ||
614 | estats->pause_frames_received_lo = | ||
615 | pstats->mac_stx[1].rx_stat_bmac_xpf_lo; | ||
616 | |||
617 | estats->pause_frames_sent_hi = | ||
618 | pstats->mac_stx[1].tx_stat_outxoffsent_hi; | ||
619 | estats->pause_frames_sent_lo = | ||
620 | pstats->mac_stx[1].tx_stat_outxoffsent_lo; | ||
621 | } | ||
622 | |||
623 | static void bnx2x_emac_stats_update(struct bnx2x *bp) | ||
624 | { | ||
625 | struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); | ||
626 | struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); | ||
627 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
628 | |||
629 | UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); | ||
630 | UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); | ||
631 | UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); | ||
632 | UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); | ||
633 | UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); | ||
634 | UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); | ||
635 | UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); | ||
636 | UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); | ||
637 | UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); | ||
638 | UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); | ||
639 | UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); | ||
640 | UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); | ||
641 | UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); | ||
642 | UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); | ||
643 | UPDATE_EXTEND_STAT(tx_stat_outxonsent); | ||
644 | UPDATE_EXTEND_STAT(tx_stat_outxoffsent); | ||
645 | UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); | ||
646 | UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); | ||
647 | UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); | ||
648 | UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); | ||
649 | UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); | ||
650 | UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); | ||
651 | UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); | ||
652 | UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); | ||
653 | UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); | ||
654 | UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); | ||
655 | UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); | ||
656 | UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); | ||
657 | UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); | ||
658 | UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); | ||
659 | UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); | ||
660 | |||
661 | estats->pause_frames_received_hi = | ||
662 | pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; | ||
663 | estats->pause_frames_received_lo = | ||
664 | pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; | ||
665 | ADD_64(estats->pause_frames_received_hi, | ||
666 | pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, | ||
667 | estats->pause_frames_received_lo, | ||
668 | pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); | ||
669 | |||
670 | estats->pause_frames_sent_hi = | ||
671 | pstats->mac_stx[1].tx_stat_outxonsent_hi; | ||
672 | estats->pause_frames_sent_lo = | ||
673 | pstats->mac_stx[1].tx_stat_outxonsent_lo; | ||
674 | ADD_64(estats->pause_frames_sent_hi, | ||
675 | pstats->mac_stx[1].tx_stat_outxoffsent_hi, | ||
676 | estats->pause_frames_sent_lo, | ||
677 | pstats->mac_stx[1].tx_stat_outxoffsent_lo); | ||
678 | } | ||
679 | |||
680 | static int bnx2x_hw_stats_update(struct bnx2x *bp) | ||
681 | { | ||
682 | struct nig_stats *new = bnx2x_sp(bp, nig_stats); | ||
683 | struct nig_stats *old = &(bp->port.old_nig_stats); | ||
684 | struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); | ||
685 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
686 | struct { | ||
687 | u32 lo; | ||
688 | u32 hi; | ||
689 | } diff; | ||
690 | |||
691 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) | ||
692 | bnx2x_bmac_stats_update(bp); | ||
693 | |||
694 | else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) | ||
695 | bnx2x_emac_stats_update(bp); | ||
696 | |||
697 | else { /* unreached */ | ||
698 | BNX2X_ERR("stats updated by DMAE but no MAC active\n"); | ||
699 | return -1; | ||
700 | } | ||
701 | |||
702 | ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, | ||
703 | new->brb_discard - old->brb_discard); | ||
704 | ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, | ||
705 | new->brb_truncate - old->brb_truncate); | ||
706 | |||
707 | UPDATE_STAT64_NIG(egress_mac_pkt0, | ||
708 | etherstatspkts1024octetsto1522octets); | ||
709 | UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets); | ||
710 | |||
711 | memcpy(old, new, sizeof(struct nig_stats)); | ||
712 | |||
713 | memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), | ||
714 | sizeof(struct mac_stx)); | ||
715 | estats->brb_drop_hi = pstats->brb_drop_hi; | ||
716 | estats->brb_drop_lo = pstats->brb_drop_lo; | ||
717 | |||
718 | pstats->host_port_stats_start = ++pstats->host_port_stats_end; | ||
719 | |||
720 | if (!BP_NOMCP(bp)) { | ||
721 | u32 nig_timer_max = | ||
722 | SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); | ||
723 | if (nig_timer_max != estats->nig_timer_max) { | ||
724 | estats->nig_timer_max = nig_timer_max; | ||
725 | BNX2X_ERR("NIG timer max (%u)\n", | ||
726 | estats->nig_timer_max); | ||
727 | } | ||
728 | } | ||
729 | |||
730 | return 0; | ||
731 | } | ||
732 | |||
733 | static int bnx2x_storm_stats_update(struct bnx2x *bp) | ||
734 | { | ||
735 | struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats); | ||
736 | struct tstorm_per_port_stats *tport = | ||
737 | &stats->tstorm_common.port_statistics; | ||
738 | struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); | ||
739 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
740 | int i; | ||
741 | u16 cur_stats_counter; | ||
742 | |||
743 | /* Make sure we use the value of the counter | ||
744 | * used for sending the last stats ramrod. | ||
745 | */ | ||
746 | spin_lock_bh(&bp->stats_lock); | ||
747 | cur_stats_counter = bp->stats_counter - 1; | ||
748 | spin_unlock_bh(&bp->stats_lock); | ||
749 | |||
750 | memcpy(&(fstats->total_bytes_received_hi), | ||
751 | &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), | ||
752 | sizeof(struct host_func_stats) - 2*sizeof(u32)); | ||
753 | estats->error_bytes_received_hi = 0; | ||
754 | estats->error_bytes_received_lo = 0; | ||
755 | estats->etherstatsoverrsizepkts_hi = 0; | ||
756 | estats->etherstatsoverrsizepkts_lo = 0; | ||
757 | estats->no_buff_discard_hi = 0; | ||
758 | estats->no_buff_discard_lo = 0; | ||
759 | |||
760 | for_each_queue(bp, i) { | ||
761 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
762 | int cl_id = fp->cl_id; | ||
763 | struct tstorm_per_client_stats *tclient = | ||
764 | &stats->tstorm_common.client_statistics[cl_id]; | ||
765 | struct tstorm_per_client_stats *old_tclient = &fp->old_tclient; | ||
766 | struct ustorm_per_client_stats *uclient = | ||
767 | &stats->ustorm_common.client_statistics[cl_id]; | ||
768 | struct ustorm_per_client_stats *old_uclient = &fp->old_uclient; | ||
769 | struct xstorm_per_client_stats *xclient = | ||
770 | &stats->xstorm_common.client_statistics[cl_id]; | ||
771 | struct xstorm_per_client_stats *old_xclient = &fp->old_xclient; | ||
772 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; | ||
773 | u32 diff; | ||
774 | |||
775 | /* are storm stats valid? */ | ||
776 | if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) { | ||
777 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm" | ||
778 | " xstorm counter (0x%x) != stats_counter (0x%x)\n", | ||
779 | i, xclient->stats_counter, cur_stats_counter + 1); | ||
780 | return -1; | ||
781 | } | ||
782 | if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) { | ||
783 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm" | ||
784 | " tstorm counter (0x%x) != stats_counter (0x%x)\n", | ||
785 | i, tclient->stats_counter, cur_stats_counter + 1); | ||
786 | return -2; | ||
787 | } | ||
788 | if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) { | ||
789 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm" | ||
790 | " ustorm counter (0x%x) != stats_counter (0x%x)\n", | ||
791 | i, uclient->stats_counter, cur_stats_counter + 1); | ||
792 | return -4; | ||
793 | } | ||
794 | |||
795 | qstats->total_bytes_received_hi = | ||
796 | le32_to_cpu(tclient->rcv_broadcast_bytes.hi); | ||
797 | qstats->total_bytes_received_lo = | ||
798 | le32_to_cpu(tclient->rcv_broadcast_bytes.lo); | ||
799 | |||
800 | ADD_64(qstats->total_bytes_received_hi, | ||
801 | le32_to_cpu(tclient->rcv_multicast_bytes.hi), | ||
802 | qstats->total_bytes_received_lo, | ||
803 | le32_to_cpu(tclient->rcv_multicast_bytes.lo)); | ||
804 | |||
805 | ADD_64(qstats->total_bytes_received_hi, | ||
806 | le32_to_cpu(tclient->rcv_unicast_bytes.hi), | ||
807 | qstats->total_bytes_received_lo, | ||
808 | le32_to_cpu(tclient->rcv_unicast_bytes.lo)); | ||
809 | |||
810 | SUB_64(qstats->total_bytes_received_hi, | ||
811 | le32_to_cpu(uclient->bcast_no_buff_bytes.hi), | ||
812 | qstats->total_bytes_received_lo, | ||
813 | le32_to_cpu(uclient->bcast_no_buff_bytes.lo)); | ||
814 | |||
815 | SUB_64(qstats->total_bytes_received_hi, | ||
816 | le32_to_cpu(uclient->mcast_no_buff_bytes.hi), | ||
817 | qstats->total_bytes_received_lo, | ||
818 | le32_to_cpu(uclient->mcast_no_buff_bytes.lo)); | ||
819 | |||
820 | SUB_64(qstats->total_bytes_received_hi, | ||
821 | le32_to_cpu(uclient->ucast_no_buff_bytes.hi), | ||
822 | qstats->total_bytes_received_lo, | ||
823 | le32_to_cpu(uclient->ucast_no_buff_bytes.lo)); | ||
824 | |||
825 | qstats->valid_bytes_received_hi = | ||
826 | qstats->total_bytes_received_hi; | ||
827 | qstats->valid_bytes_received_lo = | ||
828 | qstats->total_bytes_received_lo; | ||
829 | |||
830 | qstats->error_bytes_received_hi = | ||
831 | le32_to_cpu(tclient->rcv_error_bytes.hi); | ||
832 | qstats->error_bytes_received_lo = | ||
833 | le32_to_cpu(tclient->rcv_error_bytes.lo); | ||
834 | |||
835 | ADD_64(qstats->total_bytes_received_hi, | ||
836 | qstats->error_bytes_received_hi, | ||
837 | qstats->total_bytes_received_lo, | ||
838 | qstats->error_bytes_received_lo); | ||
839 | |||
840 | UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, | ||
841 | total_unicast_packets_received); | ||
842 | UPDATE_EXTEND_TSTAT(rcv_multicast_pkts, | ||
843 | total_multicast_packets_received); | ||
844 | UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts, | ||
845 | total_broadcast_packets_received); | ||
846 | UPDATE_EXTEND_TSTAT(packets_too_big_discard, | ||
847 | etherstatsoverrsizepkts); | ||
848 | UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); | ||
849 | |||
850 | SUB_EXTEND_USTAT(ucast_no_buff_pkts, | ||
851 | total_unicast_packets_received); | ||
852 | SUB_EXTEND_USTAT(mcast_no_buff_pkts, | ||
853 | total_multicast_packets_received); | ||
854 | SUB_EXTEND_USTAT(bcast_no_buff_pkts, | ||
855 | total_broadcast_packets_received); | ||
856 | UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard); | ||
857 | UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); | ||
858 | UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); | ||
859 | |||
860 | qstats->total_bytes_transmitted_hi = | ||
861 | le32_to_cpu(xclient->unicast_bytes_sent.hi); | ||
862 | qstats->total_bytes_transmitted_lo = | ||
863 | le32_to_cpu(xclient->unicast_bytes_sent.lo); | ||
864 | |||
865 | ADD_64(qstats->total_bytes_transmitted_hi, | ||
866 | le32_to_cpu(xclient->multicast_bytes_sent.hi), | ||
867 | qstats->total_bytes_transmitted_lo, | ||
868 | le32_to_cpu(xclient->multicast_bytes_sent.lo)); | ||
869 | |||
870 | ADD_64(qstats->total_bytes_transmitted_hi, | ||
871 | le32_to_cpu(xclient->broadcast_bytes_sent.hi), | ||
872 | qstats->total_bytes_transmitted_lo, | ||
873 | le32_to_cpu(xclient->broadcast_bytes_sent.lo)); | ||
874 | |||
875 | UPDATE_EXTEND_XSTAT(unicast_pkts_sent, | ||
876 | total_unicast_packets_transmitted); | ||
877 | UPDATE_EXTEND_XSTAT(multicast_pkts_sent, | ||
878 | total_multicast_packets_transmitted); | ||
879 | UPDATE_EXTEND_XSTAT(broadcast_pkts_sent, | ||
880 | total_broadcast_packets_transmitted); | ||
881 | |||
882 | old_tclient->checksum_discard = tclient->checksum_discard; | ||
883 | old_tclient->ttl0_discard = tclient->ttl0_discard; | ||
884 | |||
885 | ADD_64(fstats->total_bytes_received_hi, | ||
886 | qstats->total_bytes_received_hi, | ||
887 | fstats->total_bytes_received_lo, | ||
888 | qstats->total_bytes_received_lo); | ||
889 | ADD_64(fstats->total_bytes_transmitted_hi, | ||
890 | qstats->total_bytes_transmitted_hi, | ||
891 | fstats->total_bytes_transmitted_lo, | ||
892 | qstats->total_bytes_transmitted_lo); | ||
893 | ADD_64(fstats->total_unicast_packets_received_hi, | ||
894 | qstats->total_unicast_packets_received_hi, | ||
895 | fstats->total_unicast_packets_received_lo, | ||
896 | qstats->total_unicast_packets_received_lo); | ||
897 | ADD_64(fstats->total_multicast_packets_received_hi, | ||
898 | qstats->total_multicast_packets_received_hi, | ||
899 | fstats->total_multicast_packets_received_lo, | ||
900 | qstats->total_multicast_packets_received_lo); | ||
901 | ADD_64(fstats->total_broadcast_packets_received_hi, | ||
902 | qstats->total_broadcast_packets_received_hi, | ||
903 | fstats->total_broadcast_packets_received_lo, | ||
904 | qstats->total_broadcast_packets_received_lo); | ||
905 | ADD_64(fstats->total_unicast_packets_transmitted_hi, | ||
906 | qstats->total_unicast_packets_transmitted_hi, | ||
907 | fstats->total_unicast_packets_transmitted_lo, | ||
908 | qstats->total_unicast_packets_transmitted_lo); | ||
909 | ADD_64(fstats->total_multicast_packets_transmitted_hi, | ||
910 | qstats->total_multicast_packets_transmitted_hi, | ||
911 | fstats->total_multicast_packets_transmitted_lo, | ||
912 | qstats->total_multicast_packets_transmitted_lo); | ||
913 | ADD_64(fstats->total_broadcast_packets_transmitted_hi, | ||
914 | qstats->total_broadcast_packets_transmitted_hi, | ||
915 | fstats->total_broadcast_packets_transmitted_lo, | ||
916 | qstats->total_broadcast_packets_transmitted_lo); | ||
917 | ADD_64(fstats->valid_bytes_received_hi, | ||
918 | qstats->valid_bytes_received_hi, | ||
919 | fstats->valid_bytes_received_lo, | ||
920 | qstats->valid_bytes_received_lo); | ||
921 | |||
922 | ADD_64(estats->error_bytes_received_hi, | ||
923 | qstats->error_bytes_received_hi, | ||
924 | estats->error_bytes_received_lo, | ||
925 | qstats->error_bytes_received_lo); | ||
926 | ADD_64(estats->etherstatsoverrsizepkts_hi, | ||
927 | qstats->etherstatsoverrsizepkts_hi, | ||
928 | estats->etherstatsoverrsizepkts_lo, | ||
929 | qstats->etherstatsoverrsizepkts_lo); | ||
930 | ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi, | ||
931 | estats->no_buff_discard_lo, qstats->no_buff_discard_lo); | ||
932 | } | ||
933 | |||
934 | ADD_64(fstats->total_bytes_received_hi, | ||
935 | estats->rx_stat_ifhcinbadoctets_hi, | ||
936 | fstats->total_bytes_received_lo, | ||
937 | estats->rx_stat_ifhcinbadoctets_lo); | ||
938 | |||
939 | memcpy(estats, &(fstats->total_bytes_received_hi), | ||
940 | sizeof(struct host_func_stats) - 2*sizeof(u32)); | ||
941 | |||
942 | ADD_64(estats->etherstatsoverrsizepkts_hi, | ||
943 | estats->rx_stat_dot3statsframestoolong_hi, | ||
944 | estats->etherstatsoverrsizepkts_lo, | ||
945 | estats->rx_stat_dot3statsframestoolong_lo); | ||
946 | ADD_64(estats->error_bytes_received_hi, | ||
947 | estats->rx_stat_ifhcinbadoctets_hi, | ||
948 | estats->error_bytes_received_lo, | ||
949 | estats->rx_stat_ifhcinbadoctets_lo); | ||
950 | |||
951 | if (bp->port.pmf) { | ||
952 | estats->mac_filter_discard = | ||
953 | le32_to_cpu(tport->mac_filter_discard); | ||
954 | estats->xxoverflow_discard = | ||
955 | le32_to_cpu(tport->xxoverflow_discard); | ||
956 | estats->brb_truncate_discard = | ||
957 | le32_to_cpu(tport->brb_truncate_discard); | ||
958 | estats->mac_discard = le32_to_cpu(tport->mac_discard); | ||
959 | } | ||
960 | |||
961 | fstats->host_func_stats_start = ++fstats->host_func_stats_end; | ||
962 | |||
963 | bp->stats_pending = 0; | ||
964 | |||
965 | return 0; | ||
966 | } | ||
967 | |||
968 | static void bnx2x_net_stats_update(struct bnx2x *bp) | ||
969 | { | ||
970 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
971 | struct net_device_stats *nstats = &bp->dev->stats; | ||
972 | int i; | ||
973 | |||
974 | nstats->rx_packets = | ||
975 | bnx2x_hilo(&estats->total_unicast_packets_received_hi) + | ||
976 | bnx2x_hilo(&estats->total_multicast_packets_received_hi) + | ||
977 | bnx2x_hilo(&estats->total_broadcast_packets_received_hi); | ||
978 | |||
979 | nstats->tx_packets = | ||
980 | bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) + | ||
981 | bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) + | ||
982 | bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi); | ||
983 | |||
984 | nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); | ||
985 | |||
986 | nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); | ||
987 | |||
988 | nstats->rx_dropped = estats->mac_discard; | ||
989 | for_each_queue(bp, i) | ||
990 | nstats->rx_dropped += | ||
991 | le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); | ||
992 | |||
993 | nstats->tx_dropped = 0; | ||
994 | |||
995 | nstats->multicast = | ||
996 | bnx2x_hilo(&estats->total_multicast_packets_received_hi); | ||
997 | |||
998 | nstats->collisions = | ||
999 | bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi); | ||
1000 | |||
1001 | nstats->rx_length_errors = | ||
1002 | bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) + | ||
1003 | bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi); | ||
1004 | nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) + | ||
1005 | bnx2x_hilo(&estats->brb_truncate_hi); | ||
1006 | nstats->rx_crc_errors = | ||
1007 | bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi); | ||
1008 | nstats->rx_frame_errors = | ||
1009 | bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); | ||
1010 | nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); | ||
1011 | nstats->rx_missed_errors = estats->xxoverflow_discard; | ||
1012 | |||
1013 | nstats->rx_errors = nstats->rx_length_errors + | ||
1014 | nstats->rx_over_errors + | ||
1015 | nstats->rx_crc_errors + | ||
1016 | nstats->rx_frame_errors + | ||
1017 | nstats->rx_fifo_errors + | ||
1018 | nstats->rx_missed_errors; | ||
1019 | |||
1020 | nstats->tx_aborted_errors = | ||
1021 | bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) + | ||
1022 | bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi); | ||
1023 | nstats->tx_carrier_errors = | ||
1024 | bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi); | ||
1025 | nstats->tx_fifo_errors = 0; | ||
1026 | nstats->tx_heartbeat_errors = 0; | ||
1027 | nstats->tx_window_errors = 0; | ||
1028 | |||
1029 | nstats->tx_errors = nstats->tx_aborted_errors + | ||
1030 | nstats->tx_carrier_errors + | ||
1031 | bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi); | ||
1032 | } | ||
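bnx2x_net_stats_update() collapses each hi/lo pair into a single counter through bnx2x_hilo(), whose definition is outside this hunk. A minimal sketch, assuming the field layout places each _hi word immediately before its _lo word (as the struct definitions later in this patch suggest):

	/* Hypothetical sketch of bnx2x_hilo(): combine a {hi, lo} u32 pair
	 * into one value; on 32-bit hosts only the low word fits into
	 * unsigned long anyway. */
	static inline unsigned long bnx2x_hilo(u32 *hilo)
	{
		u32 lo = *(hilo + 1);
	#if (BITS_PER_LONG == 64)
		return ((u64)(*hilo) << 32) + lo;
	#else
		return lo;
	#endif
	}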
1033 | |||
1034 | static void bnx2x_drv_stats_update(struct bnx2x *bp) | ||
1035 | { | ||
1036 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
1037 | int i; | ||
1038 | |||
1039 | estats->driver_xoff = 0; | ||
1040 | estats->rx_err_discard_pkt = 0; | ||
1041 | estats->rx_skb_alloc_failed = 0; | ||
1042 | estats->hw_csum_err = 0; | ||
1043 | for_each_queue(bp, i) { | ||
1044 | struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; | ||
1045 | |||
1046 | estats->driver_xoff += qstats->driver_xoff; | ||
1047 | estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt; | ||
1048 | estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed; | ||
1049 | estats->hw_csum_err += qstats->hw_csum_err; | ||
1050 | } | ||
1051 | } | ||
1052 | |||
1053 | static void bnx2x_stats_update(struct bnx2x *bp) | ||
1054 | { | ||
1055 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
1056 | |||
1057 | if (*stats_comp != DMAE_COMP_VAL) | ||
1058 | return; | ||
1059 | |||
1060 | if (bp->port.pmf) | ||
1061 | bnx2x_hw_stats_update(bp); | ||
1062 | |||
1063 | if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) { | ||
1064 | BNX2X_ERR("storm stats were not updated for 3 consecutive polls\n"); | ||
1065 | bnx2x_panic(); | ||
1066 | return; | ||
1067 | } | ||
1068 | |||
1069 | bnx2x_net_stats_update(bp); | ||
1070 | bnx2x_drv_stats_update(bp); | ||
1071 | |||
1072 | if (netif_msg_timer(bp)) { | ||
1073 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
1074 | int i; | ||
1075 | |||
1076 | printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n", | ||
1077 | bp->dev->name, | ||
1078 | estats->brb_drop_lo, estats->brb_truncate_lo); | ||
1079 | |||
1080 | for_each_queue(bp, i) { | ||
1081 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
1082 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; | ||
1083 | |||
1084 | printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)" | ||
1085 | " rx pkt(%lu) rx calls(%lu %lu)\n", | ||
1086 | fp->name, (le16_to_cpu(*fp->rx_cons_sb) - | ||
1087 | fp->rx_comp_cons), | ||
1088 | le16_to_cpu(*fp->rx_cons_sb), | ||
1089 | bnx2x_hilo(&qstats-> | ||
1090 | total_unicast_packets_received_hi), | ||
1091 | fp->rx_calls, fp->rx_pkt); | ||
1092 | } | ||
1093 | |||
1094 | for_each_queue(bp, i) { | ||
1095 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
1096 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; | ||
1097 | struct netdev_queue *txq = | ||
1098 | netdev_get_tx_queue(bp->dev, i); | ||
1099 | |||
1100 | printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)" | ||
1101 | " tx pkt(%lu) tx calls (%lu)" | ||
1102 | " %s (Xoff events %u)\n", | ||
1103 | fp->name, bnx2x_tx_avail(fp), | ||
1104 | le16_to_cpu(*fp->tx_cons_sb), | ||
1105 | bnx2x_hilo(&qstats-> | ||
1106 | total_unicast_packets_transmitted_hi), | ||
1107 | fp->tx_pkt, | ||
1108 | (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"), | ||
1109 | qstats->driver_xoff); | ||
1110 | } | ||
1111 | } | ||
1112 | |||
1113 | bnx2x_hw_stats_post(bp); | ||
1114 | bnx2x_storm_stats_post(bp); | ||
1115 | } | ||
1116 | |||
1117 | static void bnx2x_port_stats_stop(struct bnx2x *bp) | ||
1118 | { | ||
1119 | struct dmae_command *dmae; | ||
1120 | u32 opcode; | ||
1121 | int loader_idx = PMF_DMAE_C(bp); | ||
1122 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
1123 | |||
1124 | bp->executer_idx = 0; | ||
1125 | |||
1126 | opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | | ||
1127 | DMAE_CMD_C_ENABLE | | ||
1128 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
1129 | #ifdef __BIG_ENDIAN | ||
1130 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
1131 | #else | ||
1132 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
1133 | #endif | ||
1134 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
1135 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
1136 | |||
1137 | if (bp->port.port_stx) { | ||
1138 | |||
1139 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
1140 | if (bp->func_stx) | ||
1141 | dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); | ||
1142 | else | ||
1143 | dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); | ||
1144 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); | ||
1145 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); | ||
1146 | dmae->dst_addr_lo = bp->port.port_stx >> 2; | ||
1147 | dmae->dst_addr_hi = 0; | ||
1148 | dmae->len = sizeof(struct host_port_stats) >> 2; | ||
1149 | if (bp->func_stx) { | ||
1150 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
1151 | dmae->comp_addr_hi = 0; | ||
1152 | dmae->comp_val = 1; | ||
1153 | } else { | ||
1154 | dmae->comp_addr_lo = | ||
1155 | U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
1156 | dmae->comp_addr_hi = | ||
1157 | U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
1158 | dmae->comp_val = DMAE_COMP_VAL; | ||
1159 | |||
1160 | *stats_comp = 0; | ||
1161 | } | ||
1162 | } | ||
1163 | |||
1164 | if (bp->func_stx) { | ||
1165 | |||
1166 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
1167 | dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); | ||
1168 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); | ||
1169 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); | ||
1170 | dmae->dst_addr_lo = bp->func_stx >> 2; | ||
1171 | dmae->dst_addr_hi = 0; | ||
1172 | dmae->len = sizeof(struct host_func_stats) >> 2; | ||
1173 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
1174 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
1175 | dmae->comp_val = DMAE_COMP_VAL; | ||
1176 | |||
1177 | *stats_comp = 0; | ||
1178 | } | ||
1179 | } | ||
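Both DMAE descriptors above express GRC destinations and transfer lengths in 32-bit words, which is why every byte quantity is shifted right by two. A worked example with hypothetical numbers (the real offsets come from the shmem port_stx/func_stx values):

	/* Illustration only: a 0x180-byte stats block at GRC byte offset
	 * 0x3000 would be programmed as word offset 0xc00, word length 0x60. */
	dmae->dst_addr_lo = 0x3000 >> 2;	/* 0x0c00 dwords */
	dmae->len = 0x180 >> 2;			/* 0x0060 dwords */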
1180 | |||
1181 | static void bnx2x_stats_stop(struct bnx2x *bp) | ||
1182 | { | ||
1183 | int update = 0; | ||
1184 | |||
1185 | bnx2x_stats_comp(bp); | ||
1186 | |||
1187 | if (bp->port.pmf) | ||
1188 | update = (bnx2x_hw_stats_update(bp) == 0); | ||
1189 | |||
1190 | update |= (bnx2x_storm_stats_update(bp) == 0); | ||
1191 | |||
1192 | if (update) { | ||
1193 | bnx2x_net_stats_update(bp); | ||
1194 | |||
1195 | if (bp->port.pmf) | ||
1196 | bnx2x_port_stats_stop(bp); | ||
1197 | |||
1198 | bnx2x_hw_stats_post(bp); | ||
1199 | bnx2x_stats_comp(bp); | ||
1200 | } | ||
1201 | } | ||
1202 | |||
1203 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) | ||
1204 | { | ||
1205 | } | ||
1206 | |||
1207 | static const struct { | ||
1208 | void (*action)(struct bnx2x *bp); | ||
1209 | enum bnx2x_stats_state next_state; | ||
1210 | } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = { | ||
1211 | /* state event */ | ||
1212 | { | ||
1213 | /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED}, | ||
1214 | /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED}, | ||
1215 | /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}, | ||
1216 | /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED} | ||
1217 | }, | ||
1218 | { | ||
1219 | /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED}, | ||
1220 | /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED}, | ||
1221 | /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED}, | ||
1222 | /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED} | ||
1223 | } | ||
1224 | }; | ||
1225 | |||
1226 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | ||
1227 | { | ||
1228 | enum bnx2x_stats_state state; | ||
1229 | |||
1230 | if (unlikely(bp->panic)) | ||
1231 | return; | ||
1232 | |||
1233 | /* Protect a state change flow */ | ||
1234 | spin_lock_bh(&bp->stats_lock); | ||
1235 | state = bp->stats_state; | ||
1236 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | ||
1237 | spin_unlock_bh(&bp->stats_lock); | ||
1238 | |||
1239 | bnx2x_stats_stm[state][event].action(bp); | ||
1240 | |||
1241 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | ||
1242 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | ||
1243 | state, event, bp->stats_state); | ||
1244 | } | ||
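bnx2x_stats_stm[][] is indexed by the current state and the incoming event; bnx2x_stats_handle() commits the transition under stats_lock and then runs the action outside the lock. Hypothetical call sites, for illustration only (the actual callers sit elsewhere in the driver):

	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); /* DISABLED -> ENABLED, runs bnx2x_stats_start  */
	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);  /* ENABLED  -> ENABLED, runs bnx2x_stats_update */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);    /* ENABLED  -> DISABLED, runs bnx2x_stats_stop  */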
1245 | |||
1246 | static void bnx2x_port_stats_base_init(struct bnx2x *bp) | ||
1247 | { | ||
1248 | struct dmae_command *dmae; | ||
1249 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
1250 | |||
1251 | /* sanity */ | ||
1252 | if (!bp->port.pmf || !bp->port.port_stx) { | ||
1253 | BNX2X_ERR("BUG!\n"); | ||
1254 | return; | ||
1255 | } | ||
1256 | |||
1257 | bp->executer_idx = 0; | ||
1258 | |||
1259 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
1260 | dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | | ||
1261 | DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | | ||
1262 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
1263 | #ifdef __BIG_ENDIAN | ||
1264 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
1265 | #else | ||
1266 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
1267 | #endif | ||
1268 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
1269 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
1270 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); | ||
1271 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); | ||
1272 | dmae->dst_addr_lo = bp->port.port_stx >> 2; | ||
1273 | dmae->dst_addr_hi = 0; | ||
1274 | dmae->len = sizeof(struct host_port_stats) >> 2; | ||
1275 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
1276 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
1277 | dmae->comp_val = DMAE_COMP_VAL; | ||
1278 | |||
1279 | *stats_comp = 0; | ||
1280 | bnx2x_hw_stats_post(bp); | ||
1281 | bnx2x_stats_comp(bp); | ||
1282 | } | ||
1283 | |||
1284 | static void bnx2x_func_stats_base_init(struct bnx2x *bp) | ||
1285 | { | ||
1286 | int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX; | ||
1287 | int port = BP_PORT(bp); | ||
1288 | int func; | ||
1289 | u32 func_stx; | ||
1290 | |||
1291 | /* sanity */ | ||
1292 | if (!bp->port.pmf || !bp->func_stx) { | ||
1293 | BNX2X_ERR("BUG!\n"); | ||
1294 | return; | ||
1295 | } | ||
1296 | |||
1297 | /* save our func_stx */ | ||
1298 | func_stx = bp->func_stx; | ||
1299 | |||
1300 | for (vn = VN_0; vn < vn_max; vn++) { | ||
1301 | func = 2*vn + port; | ||
1302 | |||
1303 | bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); | ||
1304 | bnx2x_func_stats_init(bp); | ||
1305 | bnx2x_hw_stats_post(bp); | ||
1306 | bnx2x_stats_comp(bp); | ||
1307 | } | ||
1308 | |||
1309 | /* restore our func_stx */ | ||
1310 | bp->func_stx = func_stx; | ||
1311 | } | ||
1312 | |||
1313 | static void bnx2x_func_stats_base_update(struct bnx2x *bp) | ||
1314 | { | ||
1315 | struct dmae_command *dmae = &bp->stats_dmae; | ||
1316 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | ||
1317 | |||
1318 | /* sanity */ | ||
1319 | if (!bp->func_stx) { | ||
1320 | BNX2X_ERR("BUG!\n"); | ||
1321 | return; | ||
1322 | } | ||
1323 | |||
1324 | bp->executer_idx = 0; | ||
1325 | memset(dmae, 0, sizeof(struct dmae_command)); | ||
1326 | |||
1327 | dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | | ||
1328 | DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | | ||
1329 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | ||
1330 | #ifdef __BIG_ENDIAN | ||
1331 | DMAE_CMD_ENDIANITY_B_DW_SWAP | | ||
1332 | #else | ||
1333 | DMAE_CMD_ENDIANITY_DW_SWAP | | ||
1334 | #endif | ||
1335 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | | ||
1336 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
1337 | dmae->src_addr_lo = bp->func_stx >> 2; | ||
1338 | dmae->src_addr_hi = 0; | ||
1339 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base)); | ||
1340 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base)); | ||
1341 | dmae->len = sizeof(struct host_func_stats) >> 2; | ||
1342 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | ||
1343 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | ||
1344 | dmae->comp_val = DMAE_COMP_VAL; | ||
1345 | |||
1346 | *stats_comp = 0; | ||
1347 | bnx2x_hw_stats_post(bp); | ||
1348 | bnx2x_stats_comp(bp); | ||
1349 | } | ||
1350 | |||
1351 | void bnx2x_stats_init(struct bnx2x *bp) | ||
1352 | { | ||
1353 | int port = BP_PORT(bp); | ||
1354 | int func = BP_FUNC(bp); | ||
1355 | int i; | ||
1356 | |||
1357 | bp->stats_pending = 0; | ||
1358 | bp->executer_idx = 0; | ||
1359 | bp->stats_counter = 0; | ||
1360 | |||
1361 | /* port and func stats for management */ | ||
1362 | if (!BP_NOMCP(bp)) { | ||
1363 | bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); | ||
1364 | bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); | ||
1365 | |||
1366 | } else { | ||
1367 | bp->port.port_stx = 0; | ||
1368 | bp->func_stx = 0; | ||
1369 | } | ||
1370 | DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", | ||
1371 | bp->port.port_stx, bp->func_stx); | ||
1372 | |||
1373 | /* port stats */ | ||
1374 | memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); | ||
1375 | bp->port.old_nig_stats.brb_discard = | ||
1376 | REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); | ||
1377 | bp->port.old_nig_stats.brb_truncate = | ||
1378 | REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); | ||
1379 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, | ||
1380 | &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); | ||
1381 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, | ||
1382 | &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); | ||
1383 | |||
1384 | /* function stats */ | ||
1385 | for_each_queue(bp, i) { | ||
1386 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
1387 | |||
1388 | memset(&fp->old_tclient, 0, | ||
1389 | sizeof(struct tstorm_per_client_stats)); | ||
1390 | memset(&fp->old_uclient, 0, | ||
1391 | sizeof(struct ustorm_per_client_stats)); | ||
1392 | memset(&fp->old_xclient, 0, | ||
1393 | sizeof(struct xstorm_per_client_stats)); | ||
1394 | memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats)); | ||
1395 | } | ||
1396 | |||
1397 | memset(&bp->dev->stats, 0, sizeof(struct net_device_stats)); | ||
1398 | memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats)); | ||
1399 | |||
1400 | bp->stats_state = STATS_STATE_DISABLED; | ||
1401 | |||
1402 | if (bp->port.pmf) { | ||
1403 | if (bp->port.port_stx) | ||
1404 | bnx2x_port_stats_base_init(bp); | ||
1405 | |||
1406 | if (bp->func_stx) | ||
1407 | bnx2x_func_stats_base_init(bp); | ||
1408 | |||
1409 | } else if (bp->func_stx) | ||
1410 | bnx2x_func_stats_base_update(bp); | ||
1411 | } | ||
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h new file mode 100644 index 000000000000..38a4e908f4fb --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_stats.h | |||
@@ -0,0 +1,239 @@ | |||
1 | /* bnx2x_stats.h: Broadcom Everest network driver. | ||
2 | * | ||
3 | * Copyright (c) 2007-2010 Broadcom Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> | ||
10 | * Written by: Eliezer Tamir | ||
11 | * Based on code from Michael Chan's bnx2 driver | ||
12 | */ | ||
13 | |||
14 | #ifndef BNX2X_STATS_H | ||
15 | #define BNX2X_STATS_H | ||
16 | |||
17 | #include <linux/types.h> | ||
18 | |||
19 | struct bnx2x_eth_q_stats { | ||
20 | u32 total_bytes_received_hi; | ||
21 | u32 total_bytes_received_lo; | ||
22 | u32 total_bytes_transmitted_hi; | ||
23 | u32 total_bytes_transmitted_lo; | ||
24 | u32 total_unicast_packets_received_hi; | ||
25 | u32 total_unicast_packets_received_lo; | ||
26 | u32 total_multicast_packets_received_hi; | ||
27 | u32 total_multicast_packets_received_lo; | ||
28 | u32 total_broadcast_packets_received_hi; | ||
29 | u32 total_broadcast_packets_received_lo; | ||
30 | u32 total_unicast_packets_transmitted_hi; | ||
31 | u32 total_unicast_packets_transmitted_lo; | ||
32 | u32 total_multicast_packets_transmitted_hi; | ||
33 | u32 total_multicast_packets_transmitted_lo; | ||
34 | u32 total_broadcast_packets_transmitted_hi; | ||
35 | u32 total_broadcast_packets_transmitted_lo; | ||
36 | u32 valid_bytes_received_hi; | ||
37 | u32 valid_bytes_received_lo; | ||
38 | |||
39 | u32 error_bytes_received_hi; | ||
40 | u32 error_bytes_received_lo; | ||
41 | u32 etherstatsoverrsizepkts_hi; | ||
42 | u32 etherstatsoverrsizepkts_lo; | ||
43 | u32 no_buff_discard_hi; | ||
44 | u32 no_buff_discard_lo; | ||
45 | |||
46 | u32 driver_xoff; | ||
47 | u32 rx_err_discard_pkt; | ||
48 | u32 rx_skb_alloc_failed; | ||
49 | u32 hw_csum_err; | ||
50 | }; | ||
51 | |||
52 | #define BNX2X_NUM_Q_STATS 13 | ||
53 | #define Q_STATS_OFFSET32(stat_name) \ | ||
54 | (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) | ||
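Q_STATS_OFFSET32() turns a field name into its index when the stats structure is viewed as an array of u32 words; callers outside this hunk (e.g. the ethtool strings/values tables) use such indices to walk the block generically. A minimal usage sketch:

	/* Illustrative only: fetch driver_xoff by word index instead of by
	 * direct field access. */
	u32 *words = (u32 *)&fp->eth_q_stats;
	u32 xoff = words[Q_STATS_OFFSET32(driver_xoff)];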
55 | |||
56 | struct nig_stats { | ||
57 | u32 brb_discard; | ||
58 | u32 brb_packet; | ||
59 | u32 brb_truncate; | ||
60 | u32 flow_ctrl_discard; | ||
61 | u32 flow_ctrl_octets; | ||
62 | u32 flow_ctrl_packet; | ||
63 | u32 mng_discard; | ||
64 | u32 mng_octet_inp; | ||
65 | u32 mng_octet_out; | ||
66 | u32 mng_packet_inp; | ||
67 | u32 mng_packet_out; | ||
68 | u32 pbf_octets; | ||
69 | u32 pbf_packet; | ||
70 | u32 safc_inp; | ||
71 | u32 egress_mac_pkt0_lo; | ||
72 | u32 egress_mac_pkt0_hi; | ||
73 | u32 egress_mac_pkt1_lo; | ||
74 | u32 egress_mac_pkt1_hi; | ||
75 | }; | ||
76 | |||
77 | |||
78 | enum bnx2x_stats_event { | ||
79 | STATS_EVENT_PMF = 0, | ||
80 | STATS_EVENT_LINK_UP, | ||
81 | STATS_EVENT_UPDATE, | ||
82 | STATS_EVENT_STOP, | ||
83 | STATS_EVENT_MAX | ||
84 | }; | ||
85 | |||
86 | enum bnx2x_stats_state { | ||
87 | STATS_STATE_DISABLED = 0, | ||
88 | STATS_STATE_ENABLED, | ||
89 | STATS_STATE_MAX | ||
90 | }; | ||
91 | |||
92 | struct bnx2x_eth_stats { | ||
93 | u32 total_bytes_received_hi; | ||
94 | u32 total_bytes_received_lo; | ||
95 | u32 total_bytes_transmitted_hi; | ||
96 | u32 total_bytes_transmitted_lo; | ||
97 | u32 total_unicast_packets_received_hi; | ||
98 | u32 total_unicast_packets_received_lo; | ||
99 | u32 total_multicast_packets_received_hi; | ||
100 | u32 total_multicast_packets_received_lo; | ||
101 | u32 total_broadcast_packets_received_hi; | ||
102 | u32 total_broadcast_packets_received_lo; | ||
103 | u32 total_unicast_packets_transmitted_hi; | ||
104 | u32 total_unicast_packets_transmitted_lo; | ||
105 | u32 total_multicast_packets_transmitted_hi; | ||
106 | u32 total_multicast_packets_transmitted_lo; | ||
107 | u32 total_broadcast_packets_transmitted_hi; | ||
108 | u32 total_broadcast_packets_transmitted_lo; | ||
109 | u32 valid_bytes_received_hi; | ||
110 | u32 valid_bytes_received_lo; | ||
111 | |||
112 | u32 error_bytes_received_hi; | ||
113 | u32 error_bytes_received_lo; | ||
114 | u32 etherstatsoverrsizepkts_hi; | ||
115 | u32 etherstatsoverrsizepkts_lo; | ||
116 | u32 no_buff_discard_hi; | ||
117 | u32 no_buff_discard_lo; | ||
118 | |||
119 | u32 rx_stat_ifhcinbadoctets_hi; | ||
120 | u32 rx_stat_ifhcinbadoctets_lo; | ||
121 | u32 tx_stat_ifhcoutbadoctets_hi; | ||
122 | u32 tx_stat_ifhcoutbadoctets_lo; | ||
123 | u32 rx_stat_dot3statsfcserrors_hi; | ||
124 | u32 rx_stat_dot3statsfcserrors_lo; | ||
125 | u32 rx_stat_dot3statsalignmenterrors_hi; | ||
126 | u32 rx_stat_dot3statsalignmenterrors_lo; | ||
127 | u32 rx_stat_dot3statscarriersenseerrors_hi; | ||
128 | u32 rx_stat_dot3statscarriersenseerrors_lo; | ||
129 | u32 rx_stat_falsecarriererrors_hi; | ||
130 | u32 rx_stat_falsecarriererrors_lo; | ||
131 | u32 rx_stat_etherstatsundersizepkts_hi; | ||
132 | u32 rx_stat_etherstatsundersizepkts_lo; | ||
133 | u32 rx_stat_dot3statsframestoolong_hi; | ||
134 | u32 rx_stat_dot3statsframestoolong_lo; | ||
135 | u32 rx_stat_etherstatsfragments_hi; | ||
136 | u32 rx_stat_etherstatsfragments_lo; | ||
137 | u32 rx_stat_etherstatsjabbers_hi; | ||
138 | u32 rx_stat_etherstatsjabbers_lo; | ||
139 | u32 rx_stat_maccontrolframesreceived_hi; | ||
140 | u32 rx_stat_maccontrolframesreceived_lo; | ||
141 | u32 rx_stat_bmac_xpf_hi; | ||
142 | u32 rx_stat_bmac_xpf_lo; | ||
143 | u32 rx_stat_bmac_xcf_hi; | ||
144 | u32 rx_stat_bmac_xcf_lo; | ||
145 | u32 rx_stat_xoffstateentered_hi; | ||
146 | u32 rx_stat_xoffstateentered_lo; | ||
147 | u32 rx_stat_xonpauseframesreceived_hi; | ||
148 | u32 rx_stat_xonpauseframesreceived_lo; | ||
149 | u32 rx_stat_xoffpauseframesreceived_hi; | ||
150 | u32 rx_stat_xoffpauseframesreceived_lo; | ||
151 | u32 tx_stat_outxonsent_hi; | ||
152 | u32 tx_stat_outxonsent_lo; | ||
153 | u32 tx_stat_outxoffsent_hi; | ||
154 | u32 tx_stat_outxoffsent_lo; | ||
155 | u32 tx_stat_flowcontroldone_hi; | ||
156 | u32 tx_stat_flowcontroldone_lo; | ||
157 | u32 tx_stat_etherstatscollisions_hi; | ||
158 | u32 tx_stat_etherstatscollisions_lo; | ||
159 | u32 tx_stat_dot3statssinglecollisionframes_hi; | ||
160 | u32 tx_stat_dot3statssinglecollisionframes_lo; | ||
161 | u32 tx_stat_dot3statsmultiplecollisionframes_hi; | ||
162 | u32 tx_stat_dot3statsmultiplecollisionframes_lo; | ||
163 | u32 tx_stat_dot3statsdeferredtransmissions_hi; | ||
164 | u32 tx_stat_dot3statsdeferredtransmissions_lo; | ||
165 | u32 tx_stat_dot3statsexcessivecollisions_hi; | ||
166 | u32 tx_stat_dot3statsexcessivecollisions_lo; | ||
167 | u32 tx_stat_dot3statslatecollisions_hi; | ||
168 | u32 tx_stat_dot3statslatecollisions_lo; | ||
169 | u32 tx_stat_etherstatspkts64octets_hi; | ||
170 | u32 tx_stat_etherstatspkts64octets_lo; | ||
171 | u32 tx_stat_etherstatspkts65octetsto127octets_hi; | ||
172 | u32 tx_stat_etherstatspkts65octetsto127octets_lo; | ||
173 | u32 tx_stat_etherstatspkts128octetsto255octets_hi; | ||
174 | u32 tx_stat_etherstatspkts128octetsto255octets_lo; | ||
175 | u32 tx_stat_etherstatspkts256octetsto511octets_hi; | ||
176 | u32 tx_stat_etherstatspkts256octetsto511octets_lo; | ||
177 | u32 tx_stat_etherstatspkts512octetsto1023octets_hi; | ||
178 | u32 tx_stat_etherstatspkts512octetsto1023octets_lo; | ||
179 | u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; | ||
180 | u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; | ||
181 | u32 tx_stat_etherstatspktsover1522octets_hi; | ||
182 | u32 tx_stat_etherstatspktsover1522octets_lo; | ||
183 | u32 tx_stat_bmac_2047_hi; | ||
184 | u32 tx_stat_bmac_2047_lo; | ||
185 | u32 tx_stat_bmac_4095_hi; | ||
186 | u32 tx_stat_bmac_4095_lo; | ||
187 | u32 tx_stat_bmac_9216_hi; | ||
188 | u32 tx_stat_bmac_9216_lo; | ||
189 | u32 tx_stat_bmac_16383_hi; | ||
190 | u32 tx_stat_bmac_16383_lo; | ||
191 | u32 tx_stat_dot3statsinternalmactransmiterrors_hi; | ||
192 | u32 tx_stat_dot3statsinternalmactransmiterrors_lo; | ||
193 | u32 tx_stat_bmac_ufl_hi; | ||
194 | u32 tx_stat_bmac_ufl_lo; | ||
195 | |||
196 | u32 pause_frames_received_hi; | ||
197 | u32 pause_frames_received_lo; | ||
198 | u32 pause_frames_sent_hi; | ||
199 | u32 pause_frames_sent_lo; | ||
200 | |||
201 | u32 etherstatspkts1024octetsto1522octets_hi; | ||
202 | u32 etherstatspkts1024octetsto1522octets_lo; | ||
203 | u32 etherstatspktsover1522octets_hi; | ||
204 | u32 etherstatspktsover1522octets_lo; | ||
205 | |||
206 | u32 brb_drop_hi; | ||
207 | u32 brb_drop_lo; | ||
208 | u32 brb_truncate_hi; | ||
209 | u32 brb_truncate_lo; | ||
210 | |||
211 | u32 mac_filter_discard; | ||
212 | u32 xxoverflow_discard; | ||
213 | u32 brb_truncate_discard; | ||
214 | u32 mac_discard; | ||
215 | |||
216 | u32 driver_xoff; | ||
217 | u32 rx_err_discard_pkt; | ||
218 | u32 rx_skb_alloc_failed; | ||
219 | u32 hw_csum_err; | ||
220 | |||
221 | u32 nig_timer_max; | ||
222 | }; | ||
223 | |||
224 | #define BNX2X_NUM_STATS 43 | ||
225 | #define STATS_OFFSET32(stat_name) \ | ||
226 | (offsetof(struct bnx2x_eth_stats, stat_name) / 4) | ||
227 | |||
228 | /* Forward declaration */ | ||
229 | struct bnx2x; | ||
230 | |||
231 | |||
232 | void bnx2x_stats_init(struct bnx2x *bp); | ||
233 | |||
234 | extern const u32 dmae_reg_go_c[]; | ||
235 | extern int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | ||
236 | u32 data_hi, u32 data_lo, int common); | ||
237 | |||
238 | |||
239 | #endif /* BNX2X_STATS_H */ | ||
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 3662d6e446a9..c746b331771d 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -682,7 +682,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon | |||
682 | client_info->ntt = 0; | 682 | client_info->ntt = 0; |
683 | } | 683 | } |
684 | 684 | ||
685 | if (!list_empty(&bond->vlan_list)) { | 685 | if (bond->vlgrp) { |
686 | if (!vlan_get_tag(skb, &client_info->vlan_id)) | 686 | if (!vlan_get_tag(skb, &client_info->vlan_id)) |
687 | client_info->tag = 1; | 687 | client_info->tag = 1; |
688 | } | 688 | } |
@@ -815,7 +815,7 @@ static int rlb_initialize(struct bonding *bond) | |||
815 | 815 | ||
816 | /*initialize packet type*/ | 816 | /*initialize packet type*/ |
817 | pk_type->type = cpu_to_be16(ETH_P_ARP); | 817 | pk_type->type = cpu_to_be16(ETH_P_ARP); |
818 | pk_type->dev = NULL; | 818 | pk_type->dev = bond->dev; |
819 | pk_type->func = rlb_arp_recv; | 819 | pk_type->func = rlb_arp_recv; |
820 | 820 | ||
821 | /* register to receive ARPs */ | 821 | /* register to receive ARPs */ |
@@ -904,7 +904,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) | |||
904 | skb->priority = TC_PRIO_CONTROL; | 904 | skb->priority = TC_PRIO_CONTROL; |
905 | skb->dev = slave->dev; | 905 | skb->dev = slave->dev; |
906 | 906 | ||
907 | if (!list_empty(&bond->vlan_list)) { | 907 | if (bond->vlgrp) { |
908 | struct vlan_entry *vlan; | 908 | struct vlan_entry *vlan; |
909 | 909 | ||
910 | vlan = bond_next_vlan(bond, | 910 | vlan = bond_next_vlan(bond, |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 20f45cbf961a..2cc4cfc31892 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -424,6 +424,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, | |||
424 | { | 424 | { |
425 | unsigned short uninitialized_var(vlan_id); | 425 | unsigned short uninitialized_var(vlan_id); |
426 | 426 | ||
427 | /* Test vlan_list not vlgrp to catch and handle 802.1p tags */ | ||
427 | if (!list_empty(&bond->vlan_list) && | 428 | if (!list_empty(&bond->vlan_list) && |
428 | !(slave_dev->features & NETIF_F_HW_VLAN_TX) && | 429 | !(slave_dev->features & NETIF_F_HW_VLAN_TX) && |
429 | vlan_get_tag(skb, &vlan_id) == 0) { | 430 | vlan_get_tag(skb, &vlan_id) == 0) { |
@@ -487,7 +488,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev, | |||
487 | struct slave *slave; | 488 | struct slave *slave; |
488 | int i; | 489 | int i; |
489 | 490 | ||
491 | write_lock(&bond->lock); | ||
490 | bond->vlgrp = grp; | 492 | bond->vlgrp = grp; |
493 | write_unlock(&bond->lock); | ||
491 | 494 | ||
492 | bond_for_each_slave(bond, slave, i) { | 495 | bond_for_each_slave(bond, slave, i) { |
493 | struct net_device *slave_dev = slave->dev; | 496 | struct net_device *slave_dev = slave->dev; |
@@ -567,10 +570,8 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla | |||
567 | struct vlan_entry *vlan; | 570 | struct vlan_entry *vlan; |
568 | const struct net_device_ops *slave_ops = slave_dev->netdev_ops; | 571 | const struct net_device_ops *slave_ops = slave_dev->netdev_ops; |
569 | 572 | ||
570 | write_lock_bh(&bond->lock); | 573 | if (!bond->vlgrp) |
571 | 574 | return; | |
572 | if (list_empty(&bond->vlan_list)) | ||
573 | goto out; | ||
574 | 575 | ||
575 | if ((slave_dev->features & NETIF_F_HW_VLAN_RX) && | 576 | if ((slave_dev->features & NETIF_F_HW_VLAN_RX) && |
576 | slave_ops->ndo_vlan_rx_register) | 577 | slave_ops->ndo_vlan_rx_register) |
@@ -578,13 +579,10 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla | |||
578 | 579 | ||
579 | if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || | 580 | if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || |
580 | !(slave_ops->ndo_vlan_rx_add_vid)) | 581 | !(slave_ops->ndo_vlan_rx_add_vid)) |
581 | goto out; | 582 | return; |
582 | 583 | ||
583 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) | 584 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) |
584 | slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id); | 585 | slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id); |
585 | |||
586 | out: | ||
587 | write_unlock_bh(&bond->lock); | ||
588 | } | 586 | } |
589 | 587 | ||
590 | static void bond_del_vlans_from_slave(struct bonding *bond, | 588 | static void bond_del_vlans_from_slave(struct bonding *bond, |
@@ -594,16 +592,16 @@ static void bond_del_vlans_from_slave(struct bonding *bond, | |||
594 | struct vlan_entry *vlan; | 592 | struct vlan_entry *vlan; |
595 | struct net_device *vlan_dev; | 593 | struct net_device *vlan_dev; |
596 | 594 | ||
597 | write_lock_bh(&bond->lock); | 595 | if (!bond->vlgrp) |
598 | 596 | return; | |
599 | if (list_empty(&bond->vlan_list)) | ||
600 | goto out; | ||
601 | 597 | ||
602 | if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || | 598 | if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || |
603 | !(slave_ops->ndo_vlan_rx_kill_vid)) | 599 | !(slave_ops->ndo_vlan_rx_kill_vid)) |
604 | goto unreg; | 600 | goto unreg; |
605 | 601 | ||
606 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { | 602 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { |
603 | if (!vlan->vlan_id) | ||
604 | continue; | ||
607 | /* Save and then restore vlan_dev in the grp array, | 605 | /* Save and then restore vlan_dev in the grp array, |
608 | * since the slave's driver might clear it. | 606 | * since the slave's driver might clear it. |
609 | */ | 607 | */ |
@@ -616,9 +614,6 @@ unreg: | |||
616 | if ((slave_dev->features & NETIF_F_HW_VLAN_RX) && | 614 | if ((slave_dev->features & NETIF_F_HW_VLAN_RX) && |
617 | slave_ops->ndo_vlan_rx_register) | 615 | slave_ops->ndo_vlan_rx_register) |
618 | slave_ops->ndo_vlan_rx_register(slave_dev, NULL); | 616 | slave_ops->ndo_vlan_rx_register(slave_dev, NULL); |
619 | |||
620 | out: | ||
621 | write_unlock_bh(&bond->lock); | ||
622 | } | 617 | } |
623 | 618 | ||
624 | /*------------------------------- Link status -------------------------------*/ | 619 | /*------------------------------- Link status -------------------------------*/ |
@@ -1443,7 +1438,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1443 | /* no need to lock since we're protected by rtnl_lock */ | 1438 | /* no need to lock since we're protected by rtnl_lock */ |
1444 | if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { | 1439 | if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { |
1445 | pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); | 1440 | pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); |
1446 | if (!list_empty(&bond->vlan_list)) { | 1441 | if (bond->vlgrp) { |
1447 | pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n", | 1442 | pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n", |
1448 | bond_dev->name, slave_dev->name, bond_dev->name); | 1443 | bond_dev->name, slave_dev->name, bond_dev->name); |
1449 | return -EPERM; | 1444 | return -EPERM; |
@@ -1942,7 +1937,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1942 | */ | 1937 | */ |
1943 | memset(bond_dev->dev_addr, 0, bond_dev->addr_len); | 1938 | memset(bond_dev->dev_addr, 0, bond_dev->addr_len); |
1944 | 1939 | ||
1945 | if (list_empty(&bond->vlan_list)) { | 1940 | if (!bond->vlgrp) { |
1946 | bond_dev->features |= NETIF_F_VLAN_CHALLENGED; | 1941 | bond_dev->features |= NETIF_F_VLAN_CHALLENGED; |
1947 | } else { | 1942 | } else { |
1948 | pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", | 1943 | pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", |
@@ -2134,9 +2129,9 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2134 | */ | 2129 | */ |
2135 | memset(bond_dev->dev_addr, 0, bond_dev->addr_len); | 2130 | memset(bond_dev->dev_addr, 0, bond_dev->addr_len); |
2136 | 2131 | ||
2137 | if (list_empty(&bond->vlan_list)) | 2132 | if (!bond->vlgrp) { |
2138 | bond_dev->features |= NETIF_F_VLAN_CHALLENGED; | 2133 | bond_dev->features |= NETIF_F_VLAN_CHALLENGED; |
2139 | else { | 2134 | } else { |
2140 | pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", | 2135 | pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", |
2141 | bond_dev->name, bond_dev->name); | 2136 | bond_dev->name, bond_dev->name); |
2142 | pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", | 2137 | pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", |
@@ -2569,7 +2564,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) | |||
2569 | if (!targets[i]) | 2564 | if (!targets[i]) |
2570 | break; | 2565 | break; |
2571 | pr_debug("basa: target %x\n", targets[i]); | 2566 | pr_debug("basa: target %x\n", targets[i]); |
2572 | if (list_empty(&bond->vlan_list)) { | 2567 | if (!bond->vlgrp) { |
2573 | pr_debug("basa: empty vlan: arp_send\n"); | 2568 | pr_debug("basa: empty vlan: arp_send\n"); |
2574 | bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], | 2569 | bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], |
2575 | bond->master_ip, 0); | 2570 | bond->master_ip, 0); |
@@ -2658,6 +2653,9 @@ static void bond_send_gratuitous_arp(struct bonding *bond) | |||
2658 | bond->master_ip, 0); | 2653 | bond->master_ip, 0); |
2659 | } | 2654 | } |
2660 | 2655 | ||
2656 | if (!bond->vlgrp) | ||
2657 | return; | ||
2658 | |||
2661 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { | 2659 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { |
2662 | vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); | 2660 | vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); |
2663 | if (vlan->vlan_ip) { | 2661 | if (vlan->vlan_ip) { |
@@ -3590,6 +3588,8 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
3590 | } | 3588 | } |
3591 | 3589 | ||
3592 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { | 3590 | list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { |
3591 | if (!bond->vlgrp) | ||
3592 | continue; | ||
3593 | vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); | 3593 | vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); |
3594 | if (vlan_dev == event_dev) { | 3594 | if (vlan_dev == event_dev) { |
3595 | switch (event) { | 3595 | switch (event) { |
@@ -4686,6 +4686,7 @@ static void bond_work_cancel_all(struct bonding *bond) | |||
4686 | static void bond_uninit(struct net_device *bond_dev) | 4686 | static void bond_uninit(struct net_device *bond_dev) |
4687 | { | 4687 | { |
4688 | struct bonding *bond = netdev_priv(bond_dev); | 4688 | struct bonding *bond = netdev_priv(bond_dev); |
4689 | struct vlan_entry *vlan, *tmp; | ||
4689 | 4690 | ||
4690 | bond_netpoll_cleanup(bond_dev); | 4691 | bond_netpoll_cleanup(bond_dev); |
4691 | 4692 | ||
@@ -4699,6 +4700,11 @@ static void bond_uninit(struct net_device *bond_dev) | |||
4699 | bond_remove_proc_entry(bond); | 4700 | bond_remove_proc_entry(bond); |
4700 | 4701 | ||
4701 | __hw_addr_flush(&bond->mc_list); | 4702 | __hw_addr_flush(&bond->mc_list); |
4703 | |||
4704 | list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { | ||
4705 | list_del(&vlan->vlan_list); | ||
4706 | kfree(vlan); | ||
4707 | } | ||
4702 | } | 4708 | } |
4703 | 4709 | ||
4704 | /*------------------------- Module initialization ---------------------------*/ | 4710 | /*------------------------- Module initialization ---------------------------*/ |
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 6c948037fc78..f5058ff2b210 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c | |||
@@ -165,6 +165,9 @@ static ssize_t dbgfs_state(struct file *file, char __user *user_buf, | |||
165 | len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), | 165 | len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), |
166 | "Next RX len: %d\n", cfspi->rx_npck_len); | 166 | "Next RX len: %d\n", cfspi->rx_npck_len); |
167 | 167 | ||
168 | if (len > DEBUGFS_BUF_SIZE) | ||
169 | len = DEBUGFS_BUF_SIZE; | ||
170 | |||
168 | size = simple_read_from_buffer(user_buf, count, ppos, buf, len); | 171 | size = simple_read_from_buffer(user_buf, count, ppos, buf, len); |
169 | kfree(buf); | 172 | kfree(buf); |
170 | 173 | ||
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 2c5227c02fa0..9d9e45394433 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig | |||
@@ -73,6 +73,15 @@ config CAN_JANZ_ICAN3 | |||
73 | This driver can also be built as a module. If so, the module will be | 73 | This driver can also be built as a module. If so, the module will be |
74 | called janz-ican3.ko. | 74 | called janz-ican3.ko. |
75 | 75 | ||
76 | config HAVE_CAN_FLEXCAN | ||
77 | bool | ||
78 | |||
79 | config CAN_FLEXCAN | ||
80 | tristate "Support for Freescale FLEXCAN based chips" | ||
81 | depends on CAN_DEV && HAVE_CAN_FLEXCAN | ||
82 | ---help--- | ||
83 | Say Y here if you want support for Freescale FlexCAN. | ||
84 | |||
76 | source "drivers/net/can/mscan/Kconfig" | 85 | source "drivers/net/can/mscan/Kconfig" |
77 | 86 | ||
78 | source "drivers/net/can/sja1000/Kconfig" | 87 | source "drivers/net/can/sja1000/Kconfig" |
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 9047cd066fea..00575373bbd0 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile | |||
@@ -16,5 +16,6 @@ obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o | |||
16 | obj-$(CONFIG_CAN_MCP251X) += mcp251x.o | 16 | obj-$(CONFIG_CAN_MCP251X) += mcp251x.o |
17 | obj-$(CONFIG_CAN_BFIN) += bfin_can.o | 17 | obj-$(CONFIG_CAN_BFIN) += bfin_can.o |
18 | obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o | 18 | obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o |
19 | obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o | ||
19 | 20 | ||
20 | ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG | 21 | ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c new file mode 100644 index 000000000000..ef443a090ba7 --- /dev/null +++ b/drivers/net/can/flexcan.c | |||
@@ -0,0 +1,1030 @@ | |||
1 | /* | ||
2 | * flexcan.c - FLEXCAN CAN controller driver | ||
3 | * | ||
4 | * Copyright (c) 2005-2006 Varma Electronics Oy | ||
5 | * Copyright (c) 2009 Sascha Hauer, Pengutronix | ||
6 | * Copyright (c) 2010 Marc Kleine-Budde, Pengutronix | ||
7 | * | ||
8 | * Based on code originally by Andrey Volkov <avolkov@varma-el.com> | ||
9 | * | ||
10 | * LICENCE: | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License as | ||
13 | * published by the Free Software Foundation version 2. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/netdevice.h> | ||
23 | #include <linux/can.h> | ||
24 | #include <linux/can/dev.h> | ||
25 | #include <linux/can/error.h> | ||
26 | #include <linux/can/platform/flexcan.h> | ||
27 | #include <linux/clk.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/if_arp.h> | ||
30 | #include <linux/if_ether.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/io.h> | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/list.h> | ||
35 | #include <linux/module.h> | ||
36 | #include <linux/platform_device.h> | ||
37 | |||
38 | #include <mach/clock.h> | ||
39 | |||
40 | #define DRV_NAME "flexcan" | ||
41 | |||
42 | /* 8 for RX fifo and 2 for error handling */ | ||
43 | #define FLEXCAN_NAPI_WEIGHT (8 + 2) | ||
44 | |||
45 | /* FLEXCAN module configuration register (CANMCR) bits */ | ||
46 | #define FLEXCAN_MCR_MDIS BIT(31) | ||
47 | #define FLEXCAN_MCR_FRZ BIT(30) | ||
48 | #define FLEXCAN_MCR_FEN BIT(29) | ||
49 | #define FLEXCAN_MCR_HALT BIT(28) | ||
50 | #define FLEXCAN_MCR_NOT_RDY BIT(27) | ||
51 | #define FLEXCAN_MCR_WAK_MSK BIT(26) | ||
52 | #define FLEXCAN_MCR_SOFTRST BIT(25) | ||
53 | #define FLEXCAN_MCR_FRZ_ACK BIT(24) | ||
54 | #define FLEXCAN_MCR_SUPV BIT(23) | ||
55 | #define FLEXCAN_MCR_SLF_WAK BIT(22) | ||
56 | #define FLEXCAN_MCR_WRN_EN BIT(21) | ||
57 | #define FLEXCAN_MCR_LPM_ACK BIT(20) | ||
58 | #define FLEXCAN_MCR_WAK_SRC BIT(19) | ||
59 | #define FLEXCAN_MCR_DOZE BIT(18) | ||
60 | #define FLEXCAN_MCR_SRX_DIS BIT(17) | ||
61 | #define FLEXCAN_MCR_BCC BIT(16) | ||
62 | #define FLEXCAN_MCR_LPRIO_EN BIT(13) | ||
63 | #define FLEXCAN_MCR_AEN BIT(12) | ||
64 | #define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf) | ||
65 | #define FLEXCAN_MCR_IDAM_A (0 << 8) | ||
66 | #define FLEXCAN_MCR_IDAM_B (1 << 8) | ||
67 | #define FLEXCAN_MCR_IDAM_C (2 << 8) | ||
68 | #define FLEXCAN_MCR_IDAM_D (3 << 8) | ||
69 | |||
70 | /* FLEXCAN control register (CANCTRL) bits */ | ||
71 | #define FLEXCAN_CTRL_PRESDIV(x) (((x) & 0xff) << 24) | ||
72 | #define FLEXCAN_CTRL_RJW(x) (((x) & 0x03) << 22) | ||
73 | #define FLEXCAN_CTRL_PSEG1(x) (((x) & 0x07) << 19) | ||
74 | #define FLEXCAN_CTRL_PSEG2(x) (((x) & 0x07) << 16) | ||
75 | #define FLEXCAN_CTRL_BOFF_MSK BIT(15) | ||
76 | #define FLEXCAN_CTRL_ERR_MSK BIT(14) | ||
77 | #define FLEXCAN_CTRL_CLK_SRC BIT(13) | ||
78 | #define FLEXCAN_CTRL_LPB BIT(12) | ||
79 | #define FLEXCAN_CTRL_TWRN_MSK BIT(11) | ||
80 | #define FLEXCAN_CTRL_RWRN_MSK BIT(10) | ||
81 | #define FLEXCAN_CTRL_SMP BIT(7) | ||
82 | #define FLEXCAN_CTRL_BOFF_REC BIT(6) | ||
83 | #define FLEXCAN_CTRL_TSYN BIT(5) | ||
84 | #define FLEXCAN_CTRL_LBUF BIT(4) | ||
85 | #define FLEXCAN_CTRL_LOM BIT(3) | ||
86 | #define FLEXCAN_CTRL_PROPSEG(x) ((x) & 0x07) | ||
87 | #define FLEXCAN_CTRL_ERR_BUS (FLEXCAN_CTRL_ERR_MSK) | ||
88 | #define FLEXCAN_CTRL_ERR_STATE \ | ||
89 | (FLEXCAN_CTRL_TWRN_MSK | FLEXCAN_CTRL_RWRN_MSK | \ | ||
90 | FLEXCAN_CTRL_BOFF_MSK) | ||
91 | #define FLEXCAN_CTRL_ERR_ALL \ | ||
92 | (FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE) | ||
93 | |||
94 | /* FLEXCAN error and status register (ESR) bits */ | ||
95 | #define FLEXCAN_ESR_TWRN_INT BIT(17) | ||
96 | #define FLEXCAN_ESR_RWRN_INT BIT(16) | ||
97 | #define FLEXCAN_ESR_BIT1_ERR BIT(15) | ||
98 | #define FLEXCAN_ESR_BIT0_ERR BIT(14) | ||
99 | #define FLEXCAN_ESR_ACK_ERR BIT(13) | ||
100 | #define FLEXCAN_ESR_CRC_ERR BIT(12) | ||
101 | #define FLEXCAN_ESR_FRM_ERR BIT(11) | ||
102 | #define FLEXCAN_ESR_STF_ERR BIT(10) | ||
103 | #define FLEXCAN_ESR_TX_WRN BIT(9) | ||
104 | #define FLEXCAN_ESR_RX_WRN BIT(8) | ||
105 | #define FLEXCAN_ESR_IDLE BIT(7) | ||
106 | #define FLEXCAN_ESR_TXRX BIT(6) | ||
107 | #define FLEXCAN_ESR_FLT_CONF_SHIFT (4) | ||
108 | #define FLEXCAN_ESR_FLT_CONF_MASK (0x3 << FLEXCAN_ESR_FLT_CONF_SHIFT) | ||
109 | #define FLEXCAN_ESR_FLT_CONF_ACTIVE (0x0 << FLEXCAN_ESR_FLT_CONF_SHIFT) | ||
110 | #define FLEXCAN_ESR_FLT_CONF_PASSIVE (0x1 << FLEXCAN_ESR_FLT_CONF_SHIFT) | ||
111 | #define FLEXCAN_ESR_BOFF_INT BIT(2) | ||
112 | #define FLEXCAN_ESR_ERR_INT BIT(1) | ||
113 | #define FLEXCAN_ESR_WAK_INT BIT(0) | ||
114 | #define FLEXCAN_ESR_ERR_BUS \ | ||
115 | (FLEXCAN_ESR_BIT1_ERR | FLEXCAN_ESR_BIT0_ERR | \ | ||
116 | FLEXCAN_ESR_ACK_ERR | FLEXCAN_ESR_CRC_ERR | \ | ||
117 | FLEXCAN_ESR_FRM_ERR | FLEXCAN_ESR_STF_ERR) | ||
118 | #define FLEXCAN_ESR_ERR_STATE \ | ||
119 | (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT) | ||
120 | #define FLEXCAN_ESR_ERR_ALL \ | ||
121 | (FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE) | ||
122 | |||
123 | /* FLEXCAN interrupt flag register (IFLAG) bits */ | ||
124 | #define FLEXCAN_TX_BUF_ID 8 | ||
125 | #define FLEXCAN_IFLAG_BUF(x) BIT(x) | ||
126 | #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) | ||
127 | #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) | ||
128 | #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) | ||
129 | #define FLEXCAN_IFLAG_DEFAULT \ | ||
130 | (FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | \ | ||
131 | FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID)) | ||
132 | |||
133 | /* FLEXCAN message buffers */ | ||
134 | #define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24) | ||
135 | #define FLEXCAN_MB_CNT_SRR BIT(22) | ||
136 | #define FLEXCAN_MB_CNT_IDE BIT(21) | ||
137 | #define FLEXCAN_MB_CNT_RTR BIT(20) | ||
138 | #define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16) | ||
139 | #define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff) | ||
140 | |||
141 | #define FLEXCAN_MB_CODE_MASK (0xf0ffffff) | ||
142 | |||
143 | /* Structure of the message buffer */ | ||
144 | struct flexcan_mb { | ||
145 | u32 can_ctrl; | ||
146 | u32 can_id; | ||
147 | u32 data[2]; | ||
148 | }; | ||
149 | |||
150 | /* Structure of the hardware registers */ | ||
151 | struct flexcan_regs { | ||
152 | u32 mcr; /* 0x00 */ | ||
153 | u32 ctrl; /* 0x04 */ | ||
154 | u32 timer; /* 0x08 */ | ||
155 | u32 _reserved1; /* 0x0c */ | ||
156 | u32 rxgmask; /* 0x10 */ | ||
157 | u32 rx14mask; /* 0x14 */ | ||
158 | u32 rx15mask; /* 0x18 */ | ||
159 | u32 ecr; /* 0x1c */ | ||
160 | u32 esr; /* 0x20 */ | ||
161 | u32 imask2; /* 0x24 */ | ||
162 | u32 imask1; /* 0x28 */ | ||
163 | u32 iflag2; /* 0x2c */ | ||
164 | u32 iflag1; /* 0x30 */ | ||
165 | u32 _reserved2[19]; | ||
166 | struct flexcan_mb cantxfg[64]; | ||
167 | }; | ||
168 | |||
169 | struct flexcan_priv { | ||
170 | struct can_priv can; | ||
171 | struct net_device *dev; | ||
172 | struct napi_struct napi; | ||
173 | |||
174 | void __iomem *base; | ||
175 | u32 reg_esr; | ||
176 | u32 reg_ctrl_default; | ||
177 | |||
178 | struct clk *clk; | ||
179 | struct flexcan_platform_data *pdata; | ||
180 | }; | ||
181 | |||
182 | static struct can_bittiming_const flexcan_bittiming_const = { | ||
183 | .name = DRV_NAME, | ||
184 | .tseg1_min = 4, | ||
185 | .tseg1_max = 16, | ||
186 | .tseg2_min = 2, | ||
187 | .tseg2_max = 8, | ||
188 | .sjw_max = 4, | ||
189 | .brp_min = 1, | ||
190 | .brp_max = 256, | ||
191 | .brp_inc = 1, | ||
192 | }; | ||
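flexcan_bittiming_const advertises the timing-segment ranges the hardware accepts; the CAN core computes a struct can_bittiming within these bounds. A sketch of how the resulting values would map onto the CTRL fields defined above, assuming the usual minus-one register encoding (the driver's real set_bittiming function is outside this hunk):

	/* Hypothetical composition of the CTRL timing fields from a
	 * struct can_bittiming *bt supplied by the CAN core. */
	reg = FLEXCAN_CTRL_PRESDIV(bt->brp - 1) |
	      FLEXCAN_CTRL_RJW(bt->sjw - 1) |
	      FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) |
	      FLEXCAN_CTRL_PSEG2(bt->phase_seg2 - 1) |
	      FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1);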
193 | |||
194 | /* | ||
195 | * Switch transceiver on or off | ||
196 | */ | ||
197 | static void flexcan_transceiver_switch(const struct flexcan_priv *priv, int on) | ||
198 | { | ||
199 | if (priv->pdata && priv->pdata->transceiver_switch) | ||
200 | priv->pdata->transceiver_switch(on); | ||
201 | } | ||
202 | |||
203 | static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, | ||
204 | u32 reg_esr) | ||
205 | { | ||
206 | return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && | ||
207 | (reg_esr & FLEXCAN_ESR_ERR_BUS); | ||
208 | } | ||
209 | |||
210 | static inline void flexcan_chip_enable(struct flexcan_priv *priv) | ||
211 | { | ||
212 | struct flexcan_regs __iomem *regs = priv->base; | ||
213 | u32 reg; | ||
214 | |||
215 | reg = readl(&regs->mcr); | ||
216 | reg &= ~FLEXCAN_MCR_MDIS; | ||
217 | writel(reg, &regs->mcr); | ||
218 | |||
219 | udelay(10); | ||
220 | } | ||
221 | |||
222 | static inline void flexcan_chip_disable(struct flexcan_priv *priv) | ||
223 | { | ||
224 | struct flexcan_regs __iomem *regs = priv->base; | ||
225 | u32 reg; | ||
226 | |||
227 | reg = readl(&regs->mcr); | ||
228 | reg |= FLEXCAN_MCR_MDIS; | ||
229 | writel(reg, &regs->mcr); | ||
230 | } | ||
231 | |||
232 | static int flexcan_get_berr_counter(const struct net_device *dev, | ||
233 | struct can_berr_counter *bec) | ||
234 | { | ||
235 | const struct flexcan_priv *priv = netdev_priv(dev); | ||
236 | struct flexcan_regs __iomem *regs = priv->base; | ||
237 | u32 reg = readl(&regs->ecr); | ||
238 | |||
239 | bec->txerr = (reg >> 0) & 0xff; | ||
240 | bec->rxerr = (reg >> 8) & 0xff; | ||
241 | |||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
246 | { | ||
247 | const struct flexcan_priv *priv = netdev_priv(dev); | ||
248 | struct net_device_stats *stats = &dev->stats; | ||
249 | struct flexcan_regs __iomem *regs = priv->base; | ||
250 | struct can_frame *cf = (struct can_frame *)skb->data; | ||
251 | u32 can_id; | ||
252 | u32 ctrl = FLEXCAN_MB_CNT_CODE(0xc) | (cf->can_dlc << 16); | ||
253 | |||
254 | if (can_dropped_invalid_skb(dev, skb)) | ||
255 | return NETDEV_TX_OK; | ||
256 | |||
257 | netif_stop_queue(dev); | ||
258 | |||
259 | if (cf->can_id & CAN_EFF_FLAG) { | ||
260 | can_id = cf->can_id & CAN_EFF_MASK; | ||
261 | ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR; | ||
262 | } else { | ||
263 | can_id = (cf->can_id & CAN_SFF_MASK) << 18; | ||
264 | } | ||
265 | |||
266 | if (cf->can_id & CAN_RTR_FLAG) | ||
267 | ctrl |= FLEXCAN_MB_CNT_RTR; | ||
268 | |||
269 | if (cf->can_dlc > 0) { | ||
270 | u32 data = be32_to_cpup((__be32 *)&cf->data[0]); | ||
271 | writel(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[0]); | ||
272 | } | ||
273 | if (cf->can_dlc > 3) { | ||
274 | u32 data = be32_to_cpup((__be32 *)&cf->data[4]); | ||
275 | writel(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[1]); | ||
276 | } | ||
277 | |||
278 | writel(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id); | ||
279 | writel(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); | ||
280 | |||
281 | kfree_skb(skb); | ||
282 | |||
283 | /* tx_packets is incremented in flexcan_irq */ | ||
284 | stats->tx_bytes += cf->can_dlc; | ||
285 | |||
286 | return NETDEV_TX_OK; | ||
287 | } | ||
288 | |||
289 | static void do_bus_err(struct net_device *dev, | ||
290 | struct can_frame *cf, u32 reg_esr) | ||
291 | { | ||
292 | struct flexcan_priv *priv = netdev_priv(dev); | ||
293 | int rx_errors = 0, tx_errors = 0; | ||
294 | |||
295 | cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; | ||
296 | |||
297 | if (reg_esr & FLEXCAN_ESR_BIT1_ERR) { | ||
298 | dev_dbg(dev->dev.parent, "BIT1_ERR irq\n"); | ||
299 | cf->data[2] |= CAN_ERR_PROT_BIT1; | ||
300 | tx_errors = 1; | ||
301 | } | ||
302 | if (reg_esr & FLEXCAN_ESR_BIT0_ERR) { | ||
303 | dev_dbg(dev->dev.parent, "BIT0_ERR irq\n"); | ||
304 | cf->data[2] |= CAN_ERR_PROT_BIT0; | ||
305 | tx_errors = 1; | ||
306 | } | ||
307 | if (reg_esr & FLEXCAN_ESR_ACK_ERR) { | ||
308 | dev_dbg(dev->dev.parent, "ACK_ERR irq\n"); | ||
309 | cf->can_id |= CAN_ERR_ACK; | ||
310 | cf->data[3] |= CAN_ERR_PROT_LOC_ACK; | ||
311 | tx_errors = 1; | ||
312 | } | ||
313 | if (reg_esr & FLEXCAN_ESR_CRC_ERR) { | ||
314 | dev_dbg(dev->dev.parent, "CRC_ERR irq\n"); | ||
315 | cf->data[2] |= CAN_ERR_PROT_BIT; | ||
316 | cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; | ||
317 | rx_errors = 1; | ||
318 | } | ||
319 | if (reg_esr & FLEXCAN_ESR_FRM_ERR) { | ||
320 | dev_dbg(dev->dev.parent, "FRM_ERR irq\n"); | ||
321 | cf->data[2] |= CAN_ERR_PROT_FORM; | ||
322 | rx_errors = 1; | ||
323 | } | ||
324 | if (reg_esr & FLEXCAN_ESR_STF_ERR) { | ||
325 | dev_dbg(dev->dev.parent, "STF_ERR irq\n"); | ||
326 | cf->data[2] |= CAN_ERR_PROT_STUFF; | ||
327 | rx_errors = 1; | ||
328 | } | ||
329 | |||
330 | priv->can.can_stats.bus_error++; | ||
331 | if (rx_errors) | ||
332 | dev->stats.rx_errors++; | ||
333 | if (tx_errors) | ||
334 | dev->stats.tx_errors++; | ||
335 | } | ||
336 | |||
337 | static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) | ||
338 | { | ||
339 | struct sk_buff *skb; | ||
340 | struct can_frame *cf; | ||
341 | |||
342 | skb = alloc_can_err_skb(dev, &cf); | ||
343 | if (unlikely(!skb)) | ||
344 | return 0; | ||
345 | |||
346 | do_bus_err(dev, cf, reg_esr); | ||
347 | netif_receive_skb(skb); | ||
348 | |||
349 | dev->stats.rx_packets++; | ||
350 | dev->stats.rx_bytes += cf->can_dlc; | ||
351 | |||
352 | return 1; | ||
353 | } | ||
354 | |||
355 | static void do_state(struct net_device *dev, | ||
356 | struct can_frame *cf, enum can_state new_state) | ||
357 | { | ||
358 | struct flexcan_priv *priv = netdev_priv(dev); | ||
359 | struct can_berr_counter bec; | ||
360 | |||
361 | flexcan_get_berr_counter(dev, &bec); | ||
362 | |||
363 | switch (priv->can.state) { | ||
364 | case CAN_STATE_ERROR_ACTIVE: | ||
365 | /* | ||
366 | * from: ERROR_ACTIVE | ||
367 | * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF | ||
368 | * => : there was a warning int | ||
369 | */ | ||
370 | if (new_state >= CAN_STATE_ERROR_WARNING && | ||
371 | new_state <= CAN_STATE_BUS_OFF) { | ||
372 | dev_dbg(dev->dev.parent, "Error Warning IRQ\n"); | ||
373 | priv->can.can_stats.error_warning++; | ||
374 | |||
375 | cf->can_id |= CAN_ERR_CRTL; | ||
376 | cf->data[1] = (bec.txerr > bec.rxerr) ? | ||
377 | CAN_ERR_CRTL_TX_WARNING : | ||
378 | CAN_ERR_CRTL_RX_WARNING; | ||
379 | } | ||
380 | case CAN_STATE_ERROR_WARNING: /* fallthrough */ | ||
381 | /* | ||
382 | * from: ERROR_ACTIVE, ERROR_WARNING | ||
383 | * to : ERROR_PASSIVE, BUS_OFF | ||
384 | * => : error passive int | ||
385 | */ | ||
386 | if (new_state >= CAN_STATE_ERROR_PASSIVE && | ||
387 | new_state <= CAN_STATE_BUS_OFF) { | ||
388 | dev_dbg(dev->dev.parent, "Error Passive IRQ\n"); | ||
389 | priv->can.can_stats.error_passive++; | ||
390 | |||
391 | cf->can_id |= CAN_ERR_CRTL; | ||
392 | cf->data[1] = (bec.txerr > bec.rxerr) ? | ||
393 | CAN_ERR_CRTL_TX_PASSIVE : | ||
394 | CAN_ERR_CRTL_RX_PASSIVE; | ||
395 | } | ||
396 | break; | ||
397 | case CAN_STATE_BUS_OFF: | ||
398 | dev_err(dev->dev.parent, | ||
399 | "BUG! hardware recovered automatically from BUS_OFF\n"); | ||
400 | break; | ||
401 | default: | ||
402 | break; | ||
403 | } | ||
404 | |||
405 | /* process state changes depending on the new state */ | ||
406 | switch (new_state) { | ||
407 | case CAN_STATE_ERROR_ACTIVE: | ||
408 | dev_dbg(dev->dev.parent, "Error Active\n"); | ||
409 | cf->can_id |= CAN_ERR_PROT; | ||
410 | cf->data[2] = CAN_ERR_PROT_ACTIVE; | ||
411 | break; | ||
412 | case CAN_STATE_BUS_OFF: | ||
413 | cf->can_id |= CAN_ERR_BUSOFF; | ||
414 | can_bus_off(dev); | ||
415 | break; | ||
416 | default: | ||
417 | break; | ||
418 | } | ||
419 | } | ||
420 | |||
421 | static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) | ||
422 | { | ||
423 | struct flexcan_priv *priv = netdev_priv(dev); | ||
424 | struct sk_buff *skb; | ||
425 | struct can_frame *cf; | ||
426 | enum can_state new_state; | ||
427 | int flt; | ||
428 | |||
429 | flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; | ||
430 | if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { | ||
431 | if (likely(!(reg_esr & (FLEXCAN_ESR_TX_WRN | | ||
432 | FLEXCAN_ESR_RX_WRN)))) | ||
433 | new_state = CAN_STATE_ERROR_ACTIVE; | ||
434 | else | ||
435 | new_state = CAN_STATE_ERROR_WARNING; | ||
436 | } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) | ||
437 | new_state = CAN_STATE_ERROR_PASSIVE; | ||
438 | else | ||
439 | new_state = CAN_STATE_BUS_OFF; | ||
440 | |||
441 | /* state hasn't changed */ | ||
442 | if (likely(new_state == priv->can.state)) | ||
443 | return 0; | ||
444 | |||
445 | skb = alloc_can_err_skb(dev, &cf); | ||
446 | if (unlikely(!skb)) | ||
447 | return 0; | ||
448 | |||
449 | do_state(dev, cf, new_state); | ||
450 | priv->can.state = new_state; | ||
451 | netif_receive_skb(skb); | ||
452 | |||
453 | dev->stats.rx_packets++; | ||
454 | dev->stats.rx_bytes += cf->can_dlc; | ||
455 | |||
456 | return 1; | ||
457 | } | ||
458 | |||
459 | static void flexcan_read_fifo(const struct net_device *dev, | ||
460 | struct can_frame *cf) | ||
461 | { | ||
462 | const struct flexcan_priv *priv = netdev_priv(dev); | ||
463 | struct flexcan_regs __iomem *regs = priv->base; | ||
464 | struct flexcan_mb __iomem *mb = &regs->cantxfg[0]; | ||
465 | u32 reg_ctrl, reg_id; | ||
466 | |||
467 | reg_ctrl = readl(&mb->can_ctrl); | ||
468 | reg_id = readl(&mb->can_id); | ||
469 | if (reg_ctrl & FLEXCAN_MB_CNT_IDE) | ||
470 | cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; | ||
471 | else | ||
472 | cf->can_id = (reg_id >> 18) & CAN_SFF_MASK; | ||
473 | |||
474 | if (reg_ctrl & FLEXCAN_MB_CNT_RTR) | ||
475 | cf->can_id |= CAN_RTR_FLAG; | ||
476 | cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf); | ||
477 | |||
478 | *(__be32 *)(cf->data + 0) = cpu_to_be32(readl(&mb->data[0])); | ||
479 | *(__be32 *)(cf->data + 4) = cpu_to_be32(readl(&mb->data[1])); | ||
480 | |||
481 | /* mark as read */ | ||
482 | writel(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1); | ||
483 | readl(&regs->timer); | ||
484 | } | ||
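
For reference, the shifts above follow from the mailbox ID register layout: a standard 11-bit identifier sits in bits 28..18 and an extended 29-bit identifier in bits 28..0 (layout per Freescale's FlexCAN documentation; treat this as an assumption to verify against the target SoC's manual). A restatement of the extraction done by flexcan_read_fifo():

	/* sketch of the ID decode above, kernel context assumed */
	u32 std_id = (reg_id >> 18) & CAN_SFF_MASK;	/* 11 bits */
	u32 ext_id = (reg_id >> 0) & CAN_EFF_MASK;	/* 29 bits */
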
485 | |||
486 | static int flexcan_read_frame(struct net_device *dev) | ||
487 | { | ||
488 | struct net_device_stats *stats = &dev->stats; | ||
489 | struct can_frame *cf; | ||
490 | struct sk_buff *skb; | ||
491 | |||
492 | skb = alloc_can_skb(dev, &cf); | ||
493 | if (unlikely(!skb)) { | ||
494 | stats->rx_dropped++; | ||
495 | return 0; | ||
496 | } | ||
497 | |||
498 | flexcan_read_fifo(dev, cf); | ||
499 | netif_receive_skb(skb); | ||
500 | |||
501 | stats->rx_packets++; | ||
502 | stats->rx_bytes += cf->can_dlc; | ||
503 | |||
504 | return 1; | ||
505 | } | ||
506 | |||
507 | static int flexcan_poll(struct napi_struct *napi, int quota) | ||
508 | { | ||
509 | struct net_device *dev = napi->dev; | ||
510 | const struct flexcan_priv *priv = netdev_priv(dev); | ||
511 | struct flexcan_regs __iomem *regs = priv->base; | ||
512 | u32 reg_iflag1, reg_esr; | ||
513 | int work_done = 0; | ||
514 | |||
515 | /* | ||
516 | * The error bits are cleared on read, | ||
517 | * use saved value from irq handler. | ||
518 | */ | ||
519 | reg_esr = readl(&regs->esr) | priv->reg_esr; | ||
520 | |||
521 | /* handle state changes */ | ||
522 | work_done += flexcan_poll_state(dev, reg_esr); | ||
523 | |||
524 | /* handle RX-FIFO */ | ||
525 | reg_iflag1 = readl(&regs->iflag1); | ||
526 | while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE && | ||
527 | work_done < quota) { | ||
528 | work_done += flexcan_read_frame(dev); | ||
529 | reg_iflag1 = readl(&regs->iflag1); | ||
530 | } | ||
531 | |||
532 | /* report bus errors */ | ||
533 | if (flexcan_has_and_handle_berr(priv, reg_esr) && work_done < quota) | ||
534 | work_done += flexcan_poll_bus_err(dev, reg_esr); | ||
535 | |||
536 | if (work_done < quota) { | ||
537 | napi_complete(napi); | ||
538 | /* enable IRQs */ | ||
539 | writel(FLEXCAN_IFLAG_DEFAULT, &regs->imask1); | ||
540 | writel(priv->reg_ctrl_default, &regs->ctrl); | ||
541 | } | ||
542 | |||
543 | return work_done; | ||
544 | } | ||
545 | |||
546 | static irqreturn_t flexcan_irq(int irq, void *dev_id) | ||
547 | { | ||
548 | struct net_device *dev = dev_id; | ||
549 | struct net_device_stats *stats = &dev->stats; | ||
550 | struct flexcan_priv *priv = netdev_priv(dev); | ||
551 | struct flexcan_regs __iomem *regs = priv->base; | ||
552 | u32 reg_iflag1, reg_esr; | ||
553 | |||
554 | reg_iflag1 = readl(&regs->iflag1); | ||
555 | reg_esr = readl(&regs->esr); | ||
556 | writel(FLEXCAN_ESR_ERR_INT, &regs->esr); /* ACK err IRQ */ | ||
557 | |||
558 | /* | ||
559 | * schedule NAPI in case of: | ||
560 | * - rx IRQ | ||
561 | * - state change IRQ | ||
562 | * - bus error IRQ and bus error reporting is activated | ||
563 | */ | ||
564 | if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) || | ||
565 | (reg_esr & FLEXCAN_ESR_ERR_STATE) || | ||
566 | flexcan_has_and_handle_berr(priv, reg_esr)) { | ||
567 | /* | ||
568 | * The error bits are cleared on read, | ||
569 | * save them for later use. | ||
570 | */ | ||
571 | priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS; | ||
572 | writel(FLEXCAN_IFLAG_DEFAULT & ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, | ||
573 | &regs->imask1); | ||
574 | writel(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, | ||
575 | &regs->ctrl); | ||
576 | napi_schedule(&priv->napi); | ||
577 | } | ||
578 | |||
579 | /* FIFO overflow */ | ||
580 | if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) { | ||
581 | writel(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1); | ||
582 | dev->stats.rx_over_errors++; | ||
583 | dev->stats.rx_errors++; | ||
584 | } | ||
585 | |||
586 | /* transmission complete interrupt */ | ||
587 | if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) { | ||
588 | /* tx_bytes is incremented in flexcan_start_xmit */ | ||
589 | stats->tx_packets++; | ||
590 | writel((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1); | ||
591 | netif_wake_queue(dev); | ||
592 | } | ||
593 | |||
594 | return IRQ_HANDLED; | ||
595 | } | ||
596 | |||
597 | static void flexcan_set_bittiming(struct net_device *dev) | ||
598 | { | ||
599 | const struct flexcan_priv *priv = netdev_priv(dev); | ||
600 | const struct can_bittiming *bt = &priv->can.bittiming; | ||
601 | struct flexcan_regs __iomem *regs = priv->base; | ||
602 | u32 reg; | ||
603 | |||
604 | reg = readl(&regs->ctrl); | ||
605 | reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) | | ||
606 | FLEXCAN_CTRL_RJW(0x3) | | ||
607 | FLEXCAN_CTRL_PSEG1(0x7) | | ||
608 | FLEXCAN_CTRL_PSEG2(0x7) | | ||
609 | FLEXCAN_CTRL_PROPSEG(0x7) | | ||
610 | FLEXCAN_CTRL_LPB | | ||
611 | FLEXCAN_CTRL_SMP | | ||
612 | FLEXCAN_CTRL_LOM); | ||
613 | |||
614 | reg |= FLEXCAN_CTRL_PRESDIV(bt->brp - 1) | | ||
615 | FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) | | ||
616 | FLEXCAN_CTRL_PSEG2(bt->phase_seg2 - 1) | | ||
617 | FLEXCAN_CTRL_RJW(bt->sjw - 1) | | ||
618 | FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1); | ||
619 | |||
620 | if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) | ||
621 | reg |= FLEXCAN_CTRL_LPB; | ||
622 | if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) | ||
623 | reg |= FLEXCAN_CTRL_LOM; | ||
624 | if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) | ||
625 | reg |= FLEXCAN_CTRL_SMP; | ||
626 | |||
627 | dev_info(dev->dev.parent, "writing ctrl=0x%08x\n", reg); | ||
628 | writel(reg, &regs->ctrl); | ||
629 | |||
630 | /* print chip status */ | ||
631 | dev_dbg(dev->dev.parent, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__, | ||
632 | readl(&regs->mcr), readl(&regs->ctrl)); | ||
633 | } | ||
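
The register fields above encode the classic CAN bit-time relation, bitrate = f_clk / (brp * (1 + prop_seg + phase_seg1 + phase_seg2)). A worked example follows; the 24 MHz peripheral clock is an assumption, and in practice these numbers come from the CAN core's fit against flexcan_bittiming_const rather than being hand-picked.

	/*
	 * Hedged worked example: 500 kbit/s at an assumed 24 MHz clock,
	 * mirroring the bt-> names used in flexcan_set_bittiming() above.
	 */
	u32 f_clk = 24000000;				/* Hz, assumed */
	u32 brp = 3;					/* -> PRESDIV = 2 */
	u32 prop_seg = 7, phase_seg1 = 4, phase_seg2 = 4;
	u32 tq_per_bit = 1 + prop_seg + phase_seg1 + phase_seg2;  /* 16 */
	u32 bitrate = f_clk / (brp * tq_per_bit);	/* = 500000 */
	/* sample point = (1 + prop_seg + phase_seg1) / 16 = 75% */
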
634 | |||
635 | /* | ||
636 | * flexcan_chip_start | ||
637 | * | ||
638 | * this function is entered with clocks enabled | ||
639 | * | ||
640 | */ | ||
641 | static int flexcan_chip_start(struct net_device *dev) | ||
642 | { | ||
643 | struct flexcan_priv *priv = netdev_priv(dev); | ||
644 | struct flexcan_regs __iomem *regs = priv->base; | ||
645 | unsigned int i; | ||
646 | int err; | ||
647 | u32 reg_mcr, reg_ctrl; | ||
648 | |||
649 | /* enable module */ | ||
650 | flexcan_chip_enable(priv); | ||
651 | |||
652 | /* soft reset */ | ||
653 | writel(FLEXCAN_MCR_SOFTRST, &regs->mcr); | ||
654 | udelay(10); | ||
655 | |||
656 | reg_mcr = readl(&regs->mcr); | ||
657 | if (reg_mcr & FLEXCAN_MCR_SOFTRST) { | ||
658 | dev_err(dev->dev.parent, | ||
659 | "Failed to softreset can module (mcr=0x%08x)\n", | ||
660 | reg_mcr); | ||
661 | err = -ENODEV; | ||
662 | goto out; | ||
663 | } | ||
664 | |||
665 | flexcan_set_bittiming(dev); | ||
666 | |||
667 | /* | ||
668 | * MCR | ||
669 | * | ||
670 | * enable freeze | ||
671 | * enable fifo | ||
672 | * halt now | ||
673 | * only supervisor access | ||
674 | * enable warning int | ||
675 | * choose format C | ||
676 | * | ||
677 | */ | ||
678 | reg_mcr = readl(&regs->mcr); | ||
679 | reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT | | ||
680 | FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | | ||
681 | FLEXCAN_MCR_IDAM_C; | ||
682 | dev_dbg(dev->dev.parent, "%s: writing mcr=0x%08x", __func__, reg_mcr); | ||
683 | writel(reg_mcr, &regs->mcr); | ||
684 | |||
685 | /* | ||
686 | * CTRL | ||
687 | * | ||
688 | * disable timer sync feature | ||
689 | * | ||
690 | * disable auto busoff recovery | ||
691 | * transmit lowest buffer first | ||
692 | * | ||
693 | * enable tx and rx warning interrupt | ||
694 | * enable bus off interrupt | ||
695 | * (== FLEXCAN_CTRL_ERR_STATE) | ||
696 | * | ||
697 | * _note_: we enable the "error interrupt" | ||
698 | * (FLEXCAN_CTRL_ERR_MSK), too. Otherwise we don't get any | ||
699 | * warning or bus passive interrupts. | ||
700 | */ | ||
701 | reg_ctrl = readl(&regs->ctrl); | ||
702 | reg_ctrl &= ~FLEXCAN_CTRL_TSYN; | ||
703 | reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF | | ||
704 | FLEXCAN_CTRL_ERR_STATE | FLEXCAN_CTRL_ERR_MSK; | ||
705 | |||
706 | /* save for later use */ | ||
707 | priv->reg_ctrl_default = reg_ctrl; | ||
708 | dev_dbg(dev->dev.parent, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); | ||
709 | writel(reg_ctrl, &regs->ctrl); | ||
710 | |||
711 | for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) { | ||
712 | writel(0, &regs->cantxfg[i].can_ctrl); | ||
713 | writel(0, &regs->cantxfg[i].can_id); | ||
714 | writel(0, &regs->cantxfg[i].data[0]); | ||
715 | writel(0, &regs->cantxfg[i].data[1]); | ||
716 | |||
717 | /* put MB into rx queue */ | ||
718 | writel(FLEXCAN_MB_CNT_CODE(0x4), &regs->cantxfg[i].can_ctrl); | ||
719 | } | ||
720 | |||
721 | /* acceptance mask/acceptance code (accept everything) */ | ||
722 | writel(0x0, &regs->rxgmask); | ||
723 | writel(0x0, &regs->rx14mask); | ||
724 | writel(0x0, &regs->rx15mask); | ||
725 | |||
726 | flexcan_transceiver_switch(priv, 1); | ||
727 | |||
728 | /* synchronize with the can bus */ | ||
729 | reg_mcr = readl(&regs->mcr); | ||
730 | reg_mcr &= ~FLEXCAN_MCR_HALT; | ||
731 | writel(reg_mcr, &regs->mcr); | ||
732 | |||
733 | priv->can.state = CAN_STATE_ERROR_ACTIVE; | ||
734 | |||
735 | /* enable FIFO interrupts */ | ||
736 | writel(FLEXCAN_IFLAG_DEFAULT, &regs->imask1); | ||
737 | |||
738 | /* print chip status */ | ||
739 | dev_dbg(dev->dev.parent, "%s: reading mcr=0x%08x ctrl=0x%08x\n", | ||
740 | __func__, readl(&regs->mcr), readl(&regs->ctrl)); | ||
741 | |||
742 | return 0; | ||
743 | |||
744 | out: | ||
745 | flexcan_chip_disable(priv); | ||
746 | return err; | ||
747 | } | ||
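
A reference for the magic CODE value 0x4 written into each mailbox above. These encodings are taken from Freescale's FlexCAN reference manuals; treat them as an assumption to verify against the target SoC's datasheet.

	/* hedged reference for FLEXCAN_MB_CNT_CODE(0x4) above */
	enum flexcan_rx_mb_code {
		FLEXCAN_MB_CODE_RX_INACTIVE	= 0x0,	/* MB not used */
		FLEXCAN_MB_CODE_RX_FULL		= 0x2,	/* frame stored */
		FLEXCAN_MB_CODE_RX_EMPTY	= 0x4,	/* active, awaiting rx */
		FLEXCAN_MB_CODE_RX_OVERRUN	= 0x6,	/* stored over a full MB */
	};
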
748 | |||
749 | /* | ||
750 | * flexcan_chip_stop | ||
751 | * | ||
752 | * this function is entered with clocks enabled | ||
753 | * | ||
754 | */ | ||
755 | static void flexcan_chip_stop(struct net_device *dev) | ||
756 | { | ||
757 | struct flexcan_priv *priv = netdev_priv(dev); | ||
758 | struct flexcan_regs __iomem *regs = priv->base; | ||
759 | u32 reg; | ||
760 | |||
761 | /* Disable all interrupts */ | ||
762 | writel(0, &regs->imask1); | ||
763 | |||
764 | /* Disable + halt module */ | ||
765 | reg = readl(&regs->mcr); | ||
766 | reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT; | ||
767 | writel(reg, &regs->mcr); | ||
768 | |||
769 | flexcan_transceiver_switch(priv, 0); | ||
770 | priv->can.state = CAN_STATE_STOPPED; | ||
771 | |||
772 | return; | ||
773 | } | ||
774 | |||
775 | static int flexcan_open(struct net_device *dev) | ||
776 | { | ||
777 | struct flexcan_priv *priv = netdev_priv(dev); | ||
778 | int err; | ||
779 | |||
780 | clk_enable(priv->clk); | ||
781 | |||
782 | err = open_candev(dev); | ||
783 | if (err) | ||
784 | goto out; | ||
785 | |||
786 | err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); | ||
787 | if (err) | ||
788 | goto out_close; | ||
789 | |||
790 | /* start chip and queuing */ | ||
791 | err = flexcan_chip_start(dev); | ||
792 | if (err) | ||
793 | goto out_close; | ||
794 | napi_enable(&priv->napi); | ||
795 | netif_start_queue(dev); | ||
796 | |||
797 | return 0; | ||
798 | |||
799 | out_close: | ||
800 | close_candev(dev); | ||
801 | out: | ||
802 | clk_disable(priv->clk); | ||
803 | |||
804 | return err; | ||
805 | } | ||
806 | |||
807 | static int flexcan_close(struct net_device *dev) | ||
808 | { | ||
809 | struct flexcan_priv *priv = netdev_priv(dev); | ||
810 | |||
811 | netif_stop_queue(dev); | ||
812 | napi_disable(&priv->napi); | ||
813 | flexcan_chip_stop(dev); | ||
814 | |||
815 | free_irq(dev->irq, dev); | ||
816 | clk_disable(priv->clk); | ||
817 | |||
818 | close_candev(dev); | ||
819 | |||
820 | return 0; | ||
821 | } | ||
822 | |||
823 | static int flexcan_set_mode(struct net_device *dev, enum can_mode mode) | ||
824 | { | ||
825 | int err; | ||
826 | |||
827 | switch (mode) { | ||
828 | case CAN_MODE_START: | ||
829 | err = flexcan_chip_start(dev); | ||
830 | if (err) | ||
831 | return err; | ||
832 | |||
833 | netif_wake_queue(dev); | ||
834 | break; | ||
835 | |||
836 | default: | ||
837 | return -EOPNOTSUPP; | ||
838 | } | ||
839 | |||
840 | return 0; | ||
841 | } | ||
842 | |||
843 | static const struct net_device_ops flexcan_netdev_ops = { | ||
844 | .ndo_open = flexcan_open, | ||
845 | .ndo_stop = flexcan_close, | ||
846 | .ndo_start_xmit = flexcan_start_xmit, | ||
847 | }; | ||
848 | |||
849 | static int __devinit register_flexcandev(struct net_device *dev) | ||
850 | { | ||
851 | struct flexcan_priv *priv = netdev_priv(dev); | ||
852 | struct flexcan_regs __iomem *regs = priv->base; | ||
853 | u32 reg; int err; | ||
854 | |||
855 | clk_enable(priv->clk); | ||
856 | |||
857 | /* select "bus clock", chip must be disabled */ | ||
858 | flexcan_chip_disable(priv); | ||
859 | reg = readl(&regs->ctrl); | ||
860 | reg |= FLEXCAN_CTRL_CLK_SRC; | ||
861 | writel(reg, &regs->ctrl); | ||
862 | |||
863 | flexcan_chip_enable(priv); | ||
864 | |||
865 | /* set freeze, halt and activate FIFO, restrict register access */ | ||
866 | reg = readl(&regs->mcr); | ||
867 | reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | | ||
868 | FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; | ||
869 | writel(reg, &regs->mcr); | ||
870 | |||
871 | /* | ||
872 | * Currently we only support newer versions of this core | ||
873 | * featuring an RX FIFO. Older cores found on some ColdFire | ||
874 | * derivatives are not yet supported. | ||
875 | */ | ||
876 | reg = readl(&regs->mcr); | ||
877 | if (!(reg & FLEXCAN_MCR_FEN)) { | ||
878 | dev_err(dev->dev.parent, | ||
879 | "Could not enable RX FIFO, unsupported core\n"); | ||
880 | err = -ENODEV; | ||
881 | goto out; | ||
882 | } | ||
883 | |||
884 | err = register_candev(dev); | ||
885 | |||
886 | out: | ||
887 | /* disable core and turn off clocks */ | ||
888 | flexcan_chip_disable(priv); | ||
889 | clk_disable(priv->clk); | ||
890 | |||
891 | return err; | ||
892 | } | ||
893 | |||
894 | static void __devexit unregister_flexcandev(struct net_device *dev) | ||
895 | { | ||
896 | unregister_candev(dev); | ||
897 | } | ||
898 | |||
899 | static int __devinit flexcan_probe(struct platform_device *pdev) | ||
900 | { | ||
901 | struct net_device *dev; | ||
902 | struct flexcan_priv *priv; | ||
903 | struct resource *mem; | ||
904 | struct clk *clk; | ||
905 | void __iomem *base; | ||
906 | resource_size_t mem_size; | ||
907 | int err, irq; | ||
908 | |||
909 | clk = clk_get(&pdev->dev, NULL); | ||
910 | if (IS_ERR(clk)) { | ||
911 | dev_err(&pdev->dev, "no clock defined\n"); | ||
912 | err = PTR_ERR(clk); | ||
913 | goto failed_clock; | ||
914 | } | ||
915 | |||
916 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
917 | irq = platform_get_irq(pdev, 0); | ||
918 | if (!mem || irq <= 0) { | ||
919 | err = -ENODEV; | ||
920 | goto failed_get; | ||
921 | } | ||
922 | |||
923 | mem_size = resource_size(mem); | ||
924 | if (!request_mem_region(mem->start, mem_size, pdev->name)) { | ||
925 | err = -EBUSY; | ||
926 | goto failed_req; | ||
927 | } | ||
928 | |||
929 | base = ioremap(mem->start, mem_size); | ||
930 | if (!base) { | ||
931 | err = -ENOMEM; | ||
932 | goto failed_map; | ||
933 | } | ||
934 | |||
935 | dev = alloc_candev(sizeof(struct flexcan_priv), 0); | ||
936 | if (!dev) { | ||
937 | err = -ENOMEM; | ||
938 | goto failed_alloc; | ||
939 | } | ||
940 | |||
941 | dev->netdev_ops = &flexcan_netdev_ops; | ||
942 | dev->irq = irq; | ||
943 | dev->flags |= IFF_ECHO; /* we support local echo in hardware */ | ||
944 | |||
945 | priv = netdev_priv(dev); | ||
946 | priv->can.clock.freq = clk_get_rate(clk); | ||
947 | priv->can.bittiming_const = &flexcan_bittiming_const; | ||
948 | priv->can.do_set_mode = flexcan_set_mode; | ||
949 | priv->can.do_get_berr_counter = flexcan_get_berr_counter; | ||
950 | priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | | ||
951 | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | | ||
952 | CAN_CTRLMODE_BERR_REPORTING; | ||
953 | priv->base = base; | ||
954 | priv->dev = dev; | ||
955 | priv->clk = clk; | ||
956 | priv->pdata = pdev->dev.platform_data; | ||
957 | |||
958 | netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); | ||
959 | |||
960 | dev_set_drvdata(&pdev->dev, dev); | ||
961 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
962 | |||
963 | err = register_flexcandev(dev); | ||
964 | if (err) { | ||
965 | dev_err(&pdev->dev, "registering netdev failed\n"); | ||
966 | goto failed_register; | ||
967 | } | ||
968 | |||
969 | dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", | ||
970 | priv->base, dev->irq); | ||
971 | |||
972 | return 0; | ||
973 | |||
974 | failed_register: | ||
975 | free_candev(dev); | ||
976 | failed_alloc: | ||
977 | iounmap(base); | ||
978 | failed_map: | ||
979 | release_mem_region(mem->start, mem_size); | ||
980 | failed_req: | ||
981 | clk_put(clk); | ||
982 | failed_get: | ||
983 | failed_clock: | ||
984 | return err; | ||
985 | } | ||
986 | |||
987 | static int __devexit flexcan_remove(struct platform_device *pdev) | ||
988 | { | ||
989 | struct net_device *dev = platform_get_drvdata(pdev); | ||
990 | struct flexcan_priv *priv = netdev_priv(dev); | ||
991 | struct resource *mem; | ||
992 | |||
993 | unregister_flexcandev(dev); | ||
994 | platform_set_drvdata(pdev, NULL); | ||
995 | free_candev(dev); | ||
996 | iounmap(priv->base); | ||
997 | |||
998 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
999 | release_mem_region(mem->start, resource_size(mem)); | ||
1000 | |||
1001 | clk_put(priv->clk); | ||
1002 | |||
1003 | return 0; | ||
1004 | } | ||
1005 | |||
1006 | static struct platform_driver flexcan_driver = { | ||
1007 | .driver.name = DRV_NAME, | ||
1008 | .probe = flexcan_probe, | ||
1009 | .remove = __devexit_p(flexcan_remove), | ||
1010 | }; | ||
1011 | |||
1012 | static int __init flexcan_init(void) | ||
1013 | { | ||
1014 | pr_info("%s netdevice driver\n", DRV_NAME); | ||
1015 | return platform_driver_register(&flexcan_driver); | ||
1016 | } | ||
1017 | |||
1018 | static void __exit flexcan_exit(void) | ||
1019 | { | ||
1020 | platform_driver_unregister(&flexcan_driver); | ||
1021 | pr_info("%s: driver removed\n", DRV_NAME); | ||
1022 | } | ||
1023 | |||
1024 | module_init(flexcan_init); | ||
1025 | module_exit(flexcan_exit); | ||
1026 | |||
1027 | MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, " | ||
1028 | "Marc Kleine-Budde <kernel@pengutronix.de>"); | ||
1029 | MODULE_LICENSE("GPL v2"); | ||
1030 | MODULE_DESCRIPTION("CAN port driver for flexcan-based chips"); | ||
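
Transmitting through the registered candev is the mirror image of the error-frame sketch after flexcan_poll_state() further up; the bound raw socket s and the chosen identifier below are assumptions carried over from that sketch, not driver code.

	/*
	 * Hedged sketch: a write() on a bound PF_CAN/SOCK_RAW socket ends
	 * up in flexcan_start_xmit(); IFF_ECHO set in flexcan_probe()
	 * means the hardware loopback supplies the TX echo.
	 */
	struct can_frame tx = {
		.can_id  = 0x123,		/* standard 11-bit id, assumed */
		.can_dlc = 2,
		.data    = { 0xde, 0xad },
	};
	write(s, &tx, sizeof(tx));
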
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 5ecf0bcf372d..09610323a948 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -40,9 +40,9 @@ | |||
40 | 40 | ||
41 | #include "cnic_if.h" | 41 | #include "cnic_if.h" |
42 | #include "bnx2.h" | 42 | #include "bnx2.h" |
43 | #include "bnx2x_reg.h" | 43 | #include "bnx2x/bnx2x_reg.h" |
44 | #include "bnx2x_fw_defs.h" | 44 | #include "bnx2x/bnx2x_fw_defs.h" |
45 | #include "bnx2x_hsi.h" | 45 | #include "bnx2x/bnx2x_hsi.h" |
46 | #include "../scsi/bnx2i/57xx_iscsi_constants.h" | 46 | #include "../scsi/bnx2i/57xx_iscsi_constants.h" |
47 | #include "../scsi/bnx2i/57xx_iscsi_hsi.h" | 47 | #include "../scsi/bnx2i/57xx_iscsi_hsi.h" |
48 | #include "cnic.h" | 48 | #include "cnic.h" |
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index 95a8ba0759f1..427c451be1a7 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c | |||
@@ -679,14 +679,6 @@ int t3_seeprom_wp(struct adapter *adapter, int enable) | |||
679 | return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); | 679 | return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); |
680 | } | 680 | } |
681 | 681 | ||
682 | /* | ||
683 | * Convert a character holding a hex digit to a number. | ||
684 | */ | ||
685 | static unsigned int hex2int(unsigned char c) | ||
686 | { | ||
687 | return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10; | ||
688 | } | ||
689 | |||
690 | /** | 682 | /** |
691 | * get_vpd_params - read VPD parameters from VPD EEPROM | 683 | * get_vpd_params - read VPD parameters from VPD EEPROM |
692 | * @adapter: adapter to read | 684 | * @adapter: adapter to read |
@@ -727,15 +719,15 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) | |||
727 | p->port_type[0] = uses_xaui(adapter) ? 1 : 2; | 719 | p->port_type[0] = uses_xaui(adapter) ? 1 : 2; |
728 | p->port_type[1] = uses_xaui(adapter) ? 6 : 2; | 720 | p->port_type[1] = uses_xaui(adapter) ? 6 : 2; |
729 | } else { | 721 | } else { |
730 | p->port_type[0] = hex2int(vpd.port0_data[0]); | 722 | p->port_type[0] = hex_to_bin(vpd.port0_data[0]); |
731 | p->port_type[1] = hex2int(vpd.port1_data[0]); | 723 | p->port_type[1] = hex_to_bin(vpd.port1_data[0]); |
732 | p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16); | 724 | p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16); |
733 | p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); | 725 | p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); |
734 | } | 726 | } |
735 | 727 | ||
736 | for (i = 0; i < 6; i++) | 728 | for (i = 0; i < 6; i++) |
737 | p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 + | 729 | p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 + |
738 | hex2int(vpd.na_data[2 * i + 1]); | 730 | hex_to_bin(vpd.na_data[2 * i + 1]); |
739 | return 0; | 731 | return 0; |
740 | } | 732 | } |
741 | 733 | ||
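
One semantic difference worth noting in this conversion: unlike the removed hex2int(), hex_to_bin() (lib/hexdump.c) returns -1 for a character that is not a hex digit. A sketch of the byte assembly above with that case checked follows; the check itself is an addition for illustration, not something get_vpd_params() does.

	/* hedged sketch: pairing VPD hex digits with the error case handled */
	int hi = hex_to_bin(vpd.na_data[2 * i]);
	int lo = hex_to_bin(vpd.na_data[2 * i + 1]);

	if (hi < 0 || lo < 0)
		return -EINVAL;
	p->eth_base[i] = (hi << 4) | lo;	/* same as hi * 16 + lo */
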
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 65298a6d9af7..99288b95aead 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h | |||
@@ -324,18 +324,20 @@ enum e1000_state_t { | |||
324 | extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); | 324 | extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); |
325 | #define e_dbg(format, arg...) \ | 325 | #define e_dbg(format, arg...) \ |
326 | netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) | 326 | netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) |
327 | #define e_err(format, arg...) \ | 327 | #define e_err(msglvl, format, arg...) \ |
328 | netdev_err(adapter->netdev, format, ## arg) | 328 | netif_err(adapter, msglvl, adapter->netdev, format, ## arg) |
329 | #define e_info(format, arg...) \ | 329 | #define e_info(msglvl, format, arg...) \ |
330 | netdev_info(adapter->netdev, format, ## arg) | 330 | netif_info(adapter, msglvl, adapter->netdev, format, ## arg) |
331 | #define e_warn(format, arg...) \ | 331 | #define e_warn(msglvl, format, arg...) \ |
332 | netdev_warn(adapter->netdev, format, ## arg) | 332 | netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) |
333 | #define e_notice(format, arg...) \ | 333 | #define e_notice(msglvl, format, arg...) \ |
334 | netdev_notice(adapter->netdev, format, ## arg) | 334 | netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) |
335 | #define e_dev_info(format, arg...) \ | 335 | #define e_dev_info(format, arg...) \ |
336 | dev_info(&adapter->pdev->dev, format, ## arg) | 336 | dev_info(&adapter->pdev->dev, format, ## arg) |
337 | #define e_dev_warn(format, arg...) \ | 337 | #define e_dev_warn(format, arg...) \ |
338 | dev_warn(&adapter->pdev->dev, format, ## arg) | 338 | dev_warn(&adapter->pdev->dev, format, ## arg) |
339 | #define e_dev_err(format, arg...) \ | ||
340 | dev_err(&adapter->pdev->dev, format, ## arg) | ||
339 | 341 | ||
340 | extern char e1000_driver_name[]; | 342 | extern char e1000_driver_name[]; |
341 | extern const char e1000_driver_version[]; | 343 | extern const char e1000_driver_version[]; |
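
The new msglvl argument routes each message through netif_err()/netif_info()/etc., which only print when the matching NETIF_MSG_* bit is set in adapter->msg_enable; the e_dev_*() helpers stay ungated. A rough sketch of what e_err(probe, ...) now expands to:

	/* hedged sketch of the gating added by the msglvl argument */
	if (adapter->msg_enable & NETIF_MSG_PROBE)
		netdev_err(adapter->netdev, "EEPROM initialization failed\n");

msg_enable is conventionally seeded with netif_msg_init(debug, DEFAULT_MSG_ENABLE) and can be changed at runtime via "ethtool -s ethX msglvl <mask>", so each message class can be silenced or enabled per device.
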
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index d5ff029aa7b2..f4d0922ec65b 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c | |||
@@ -346,7 +346,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data) | |||
346 | 346 | ||
347 | netdev->features &= ~NETIF_F_TSO6; | 347 | netdev->features &= ~NETIF_F_TSO6; |
348 | 348 | ||
349 | e_info("TSO is %s\n", data ? "Enabled" : "Disabled"); | 349 | e_info(probe, "TSO is %s\n", data ? "Enabled" : "Disabled"); |
350 | adapter->tso_force = true; | 350 | adapter->tso_force = true; |
351 | return 0; | 351 | return 0; |
352 | } | 352 | } |
@@ -714,9 +714,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, | |||
714 | writel(write & test[i], address); | 714 | writel(write & test[i], address); |
715 | read = readl(address); | 715 | read = readl(address); |
716 | if (read != (write & test[i] & mask)) { | 716 | if (read != (write & test[i] & mask)) { |
717 | e_info("pattern test reg %04X failed: " | 717 | e_err(drv, "pattern test reg %04X failed: " |
718 | "got 0x%08X expected 0x%08X\n", | 718 | "got 0x%08X expected 0x%08X\n", |
719 | reg, read, (write & test[i] & mask)); | 719 | reg, read, (write & test[i] & mask)); |
720 | *data = reg; | 720 | *data = reg; |
721 | return true; | 721 | return true; |
722 | } | 722 | } |
@@ -734,7 +734,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, | |||
734 | writel(write & mask, address); | 734 | writel(write & mask, address); |
735 | read = readl(address); | 735 | read = readl(address); |
736 | if ((read & mask) != (write & mask)) { | 736 | if ((read & mask) != (write & mask)) { |
737 | e_err("set/check reg %04X test failed: " | 737 | e_err(drv, "set/check reg %04X test failed: " |
738 | "got 0x%08X expected 0x%08X\n", | 738 | "got 0x%08X expected 0x%08X\n", |
739 | reg, (read & mask), (write & mask)); | 739 | reg, (read & mask), (write & mask)); |
740 | *data = reg; | 740 | *data = reg; |
@@ -779,7 +779,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | |||
779 | ew32(STATUS, toggle); | 779 | ew32(STATUS, toggle); |
780 | after = er32(STATUS) & toggle; | 780 | after = er32(STATUS) & toggle; |
781 | if (value != after) { | 781 | if (value != after) { |
782 | e_err("failed STATUS register test got: " | 782 | e_err(drv, "failed STATUS register test got: " |
783 | "0x%08X expected: 0x%08X\n", after, value); | 783 | "0x%08X expected: 0x%08X\n", after, value); |
784 | *data = 1; | 784 | *data = 1; |
785 | return 1; | 785 | return 1; |
@@ -894,7 +894,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
894 | *data = 1; | 894 | *data = 1; |
895 | return -1; | 895 | return -1; |
896 | } | 896 | } |
897 | e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); | 897 | e_info(hw, "testing %s interrupt\n", (shared_int ? |
898 | "shared" : "unshared")); | ||
898 | 899 | ||
899 | /* Disable all the interrupts */ | 900 | /* Disable all the interrupts */ |
900 | ew32(IMC, 0xFFFFFFFF); | 901 | ew32(IMC, 0xFFFFFFFF); |
@@ -1561,7 +1562,7 @@ static void e1000_diag_test(struct net_device *netdev, | |||
1561 | u8 forced_speed_duplex = hw->forced_speed_duplex; | 1562 | u8 forced_speed_duplex = hw->forced_speed_duplex; |
1562 | u8 autoneg = hw->autoneg; | 1563 | u8 autoneg = hw->autoneg; |
1563 | 1564 | ||
1564 | e_info("offline testing starting\n"); | 1565 | e_info(hw, "offline testing starting\n"); |
1565 | 1566 | ||
1566 | /* Link test performed before hardware reset so autoneg doesn't | 1567 | /* Link test performed before hardware reset so autoneg doesn't |
1567 | * interfere with test result */ | 1568 | * interfere with test result */ |
@@ -1601,7 +1602,7 @@ static void e1000_diag_test(struct net_device *netdev, | |||
1601 | if (if_running) | 1602 | if (if_running) |
1602 | dev_open(netdev); | 1603 | dev_open(netdev); |
1603 | } else { | 1604 | } else { |
1604 | e_info("online testing starting\n"); | 1605 | e_info(hw, "online testing starting\n"); |
1605 | /* Online tests */ | 1606 | /* Online tests */ |
1606 | if (e1000_link_test(adapter, &data[4])) | 1607 | if (e1000_link_test(adapter, &data[4])) |
1607 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1608 | eth_test->flags |= ETH_TEST_FL_FAILED; |
@@ -1694,8 +1695,8 @@ static void e1000_get_wol(struct net_device *netdev, | |||
1694 | wol->supported &= ~WAKE_UCAST; | 1695 | wol->supported &= ~WAKE_UCAST; |
1695 | 1696 | ||
1696 | if (adapter->wol & E1000_WUFC_EX) | 1697 | if (adapter->wol & E1000_WUFC_EX) |
1697 | e_err("Interface does not support " | 1698 | e_err(drv, "Interface does not support directed " |
1698 | "directed (unicast) frame wake-up packets\n"); | 1699 | "(unicast) frame wake-up packets\n"); |
1699 | break; | 1700 | break; |
1700 | default: | 1701 | default: |
1701 | break; | 1702 | break; |
@@ -1726,8 +1727,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1726 | switch (hw->device_id) { | 1727 | switch (hw->device_id) { |
1727 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | 1728 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: |
1728 | if (wol->wolopts & WAKE_UCAST) { | 1729 | if (wol->wolopts & WAKE_UCAST) { |
1729 | e_err("Interface does not support " | 1730 | e_err(drv, "Interface does not support directed " |
1730 | "directed (unicast) frame wake-up packets\n"); | 1731 | "(unicast) frame wake-up packets\n"); |
1731 | return -EOPNOTSUPP; | 1732 | return -EOPNOTSUPP; |
1732 | } | 1733 | } |
1733 | break; | 1734 | break; |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 68a80893dce1..02833af8a0b1 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -275,7 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) | |||
275 | err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, | 275 | err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, |
276 | netdev); | 276 | netdev); |
277 | if (err) { | 277 | if (err) { |
278 | e_err("Unable to allocate interrupt Error: %d\n", err); | 278 | e_err(probe, "Unable to allocate interrupt Error: %d\n", err); |
279 | } | 279 | } |
280 | 280 | ||
281 | return err; | 281 | return err; |
@@ -657,7 +657,7 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
657 | ew32(WUC, 0); | 657 | ew32(WUC, 0); |
658 | 658 | ||
659 | if (e1000_init_hw(hw)) | 659 | if (e1000_init_hw(hw)) |
660 | e_err("Hardware Error\n"); | 660 | e_dev_err("Hardware Error\n"); |
661 | e1000_update_mng_vlan(adapter); | 661 | e1000_update_mng_vlan(adapter); |
662 | 662 | ||
663 | /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ | 663 | /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ |
@@ -925,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
925 | 925 | ||
926 | /* initialize eeprom parameters */ | 926 | /* initialize eeprom parameters */ |
927 | if (e1000_init_eeprom_params(hw)) { | 927 | if (e1000_init_eeprom_params(hw)) { |
928 | e_err("EEPROM initialization failed\n"); | 928 | e_err(probe, "EEPROM initialization failed\n"); |
929 | goto err_eeprom; | 929 | goto err_eeprom; |
930 | } | 930 | } |
931 | 931 | ||
@@ -936,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
936 | 936 | ||
937 | /* make sure the EEPROM is good */ | 937 | /* make sure the EEPROM is good */ |
938 | if (e1000_validate_eeprom_checksum(hw) < 0) { | 938 | if (e1000_validate_eeprom_checksum(hw) < 0) { |
939 | e_err("The EEPROM Checksum Is Not Valid\n"); | 939 | e_err(probe, "The EEPROM Checksum Is Not Valid\n"); |
940 | e1000_dump_eeprom(adapter); | 940 | e1000_dump_eeprom(adapter); |
941 | /* | 941 | /* |
942 | * set MAC address to all zeroes to invalidate and temporary | 942 | * set MAC address to all zeroes to invalidate and temporary |
@@ -950,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
950 | } else { | 950 | } else { |
951 | /* copy the MAC address out of the EEPROM */ | 951 | /* copy the MAC address out of the EEPROM */ |
952 | if (e1000_read_mac_addr(hw)) | 952 | if (e1000_read_mac_addr(hw)) |
953 | e_err("EEPROM Read Error\n"); | 953 | e_err(probe, "EEPROM Read Error\n"); |
954 | } | 954 | } |
955 | /* don't block initialization here due to bad MAC address */ | 955 | /* don't block initialization here due to bad MAC address */ |
956 | memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); | 956 | memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); |
957 | memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); | 957 | memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); |
958 | 958 | ||
959 | if (!is_valid_ether_addr(netdev->perm_addr)) | 959 | if (!is_valid_ether_addr(netdev->perm_addr)) |
960 | e_err("Invalid MAC Address\n"); | 960 | e_err(probe, "Invalid MAC Address\n"); |
961 | 961 | ||
962 | e1000_get_bus_info(hw); | 962 | e1000_get_bus_info(hw); |
963 | 963 | ||
@@ -1047,7 +1047,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
1047 | goto err_register; | 1047 | goto err_register; |
1048 | 1048 | ||
1049 | /* print bus type/speed/width info */ | 1049 | /* print bus type/speed/width info */ |
1050 | e_info("(PCI%s:%dMHz:%d-bit) %pM\n", | 1050 | e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", |
1051 | ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), | 1051 | ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), |
1052 | ((hw->bus_speed == e1000_bus_speed_133) ? 133 : | 1052 | ((hw->bus_speed == e1000_bus_speed_133) ? 133 : |
1053 | (hw->bus_speed == e1000_bus_speed_120) ? 120 : | 1053 | (hw->bus_speed == e1000_bus_speed_120) ? 120 : |
@@ -1059,7 +1059,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
1059 | /* carrier off reporting is important to ethtool even BEFORE open */ | 1059 | /* carrier off reporting is important to ethtool even BEFORE open */ |
1060 | netif_carrier_off(netdev); | 1060 | netif_carrier_off(netdev); |
1061 | 1061 | ||
1062 | e_info("Intel(R) PRO/1000 Network Connection\n"); | 1062 | e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); |
1063 | 1063 | ||
1064 | cards_found++; | 1064 | cards_found++; |
1065 | return 0; | 1065 | return 0; |
@@ -1159,7 +1159,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) | |||
1159 | /* identify the MAC */ | 1159 | /* identify the MAC */ |
1160 | 1160 | ||
1161 | if (e1000_set_mac_type(hw)) { | 1161 | if (e1000_set_mac_type(hw)) { |
1162 | e_err("Unknown MAC Type\n"); | 1162 | e_err(probe, "Unknown MAC Type\n"); |
1163 | return -EIO; | 1163 | return -EIO; |
1164 | } | 1164 | } |
1165 | 1165 | ||
@@ -1192,7 +1192,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) | |||
1192 | adapter->num_rx_queues = 1; | 1192 | adapter->num_rx_queues = 1; |
1193 | 1193 | ||
1194 | if (e1000_alloc_queues(adapter)) { | 1194 | if (e1000_alloc_queues(adapter)) { |
1195 | e_err("Unable to allocate memory for queues\n"); | 1195 | e_err(probe, "Unable to allocate memory for queues\n"); |
1196 | return -ENOMEM; | 1196 | return -ENOMEM; |
1197 | } | 1197 | } |
1198 | 1198 | ||
@@ -1386,7 +1386,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, | |||
1386 | size = sizeof(struct e1000_buffer) * txdr->count; | 1386 | size = sizeof(struct e1000_buffer) * txdr->count; |
1387 | txdr->buffer_info = vmalloc(size); | 1387 | txdr->buffer_info = vmalloc(size); |
1388 | if (!txdr->buffer_info) { | 1388 | if (!txdr->buffer_info) { |
1389 | e_err("Unable to allocate memory for the Tx descriptor ring\n"); | 1389 | e_err(probe, "Unable to allocate memory for the Tx descriptor " |
1390 | "ring\n"); | ||
1390 | return -ENOMEM; | 1391 | return -ENOMEM; |
1391 | } | 1392 | } |
1392 | memset(txdr->buffer_info, 0, size); | 1393 | memset(txdr->buffer_info, 0, size); |
@@ -1401,7 +1402,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, | |||
1401 | if (!txdr->desc) { | 1402 | if (!txdr->desc) { |
1402 | setup_tx_desc_die: | 1403 | setup_tx_desc_die: |
1403 | vfree(txdr->buffer_info); | 1404 | vfree(txdr->buffer_info); |
1404 | e_err("Unable to allocate memory for the Tx descriptor ring\n"); | 1405 | e_err(probe, "Unable to allocate memory for the Tx descriptor " |
1406 | "ring\n"); | ||
1405 | return -ENOMEM; | 1407 | return -ENOMEM; |
1406 | } | 1408 | } |
1407 | 1409 | ||
@@ -1409,7 +1411,7 @@ setup_tx_desc_die: | |||
1409 | if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { | 1411 | if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { |
1410 | void *olddesc = txdr->desc; | 1412 | void *olddesc = txdr->desc; |
1411 | dma_addr_t olddma = txdr->dma; | 1413 | dma_addr_t olddma = txdr->dma; |
1412 | e_err("txdr align check failed: %u bytes at %p\n", | 1414 | e_err(tx_err, "txdr align check failed: %u bytes at %p\n", |
1413 | txdr->size, txdr->desc); | 1415 | txdr->size, txdr->desc); |
1414 | /* Try again, without freeing the previous */ | 1416 | /* Try again, without freeing the previous */ |
1415 | txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, | 1417 | txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, |
@@ -1427,7 +1429,7 @@ setup_tx_desc_die: | |||
1427 | txdr->dma); | 1429 | txdr->dma); |
1428 | dma_free_coherent(&pdev->dev, txdr->size, olddesc, | 1430 | dma_free_coherent(&pdev->dev, txdr->size, olddesc, |
1429 | olddma); | 1431 | olddma); |
1430 | e_err("Unable to allocate aligned memory " | 1432 | e_err(probe, "Unable to allocate aligned memory " |
1431 | "for the transmit descriptor ring\n"); | 1433 | "for the transmit descriptor ring\n"); |
1432 | vfree(txdr->buffer_info); | 1434 | vfree(txdr->buffer_info); |
1433 | return -ENOMEM; | 1435 | return -ENOMEM; |
@@ -1460,7 +1462,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) | |||
1460 | for (i = 0; i < adapter->num_tx_queues; i++) { | 1462 | for (i = 0; i < adapter->num_tx_queues; i++) { |
1461 | err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); | 1463 | err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); |
1462 | if (err) { | 1464 | if (err) { |
1463 | e_err("Allocation for Tx Queue %u failed\n", i); | 1465 | e_err(probe, "Allocation for Tx Queue %u failed\n", i); |
1464 | for (i-- ; i >= 0; i--) | 1466 | for (i-- ; i >= 0; i--) |
1465 | e1000_free_tx_resources(adapter, | 1467 | e1000_free_tx_resources(adapter, |
1466 | &adapter->tx_ring[i]); | 1468 | &adapter->tx_ring[i]); |
@@ -1580,7 +1582,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, | |||
1580 | size = sizeof(struct e1000_buffer) * rxdr->count; | 1582 | size = sizeof(struct e1000_buffer) * rxdr->count; |
1581 | rxdr->buffer_info = vmalloc(size); | 1583 | rxdr->buffer_info = vmalloc(size); |
1582 | if (!rxdr->buffer_info) { | 1584 | if (!rxdr->buffer_info) { |
1583 | e_err("Unable to allocate memory for the Rx descriptor ring\n"); | 1585 | e_err(probe, "Unable to allocate memory for the Rx descriptor " |
1586 | "ring\n"); | ||
1584 | return -ENOMEM; | 1587 | return -ENOMEM; |
1585 | } | 1588 | } |
1586 | memset(rxdr->buffer_info, 0, size); | 1589 | memset(rxdr->buffer_info, 0, size); |
@@ -1596,7 +1599,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, | |||
1596 | GFP_KERNEL); | 1599 | GFP_KERNEL); |
1597 | 1600 | ||
1598 | if (!rxdr->desc) { | 1601 | if (!rxdr->desc) { |
1599 | e_err("Unable to allocate memory for the Rx descriptor ring\n"); | 1602 | e_err(probe, "Unable to allocate memory for the Rx descriptor " |
1603 | "ring\n"); | ||
1600 | setup_rx_desc_die: | 1604 | setup_rx_desc_die: |
1601 | vfree(rxdr->buffer_info); | 1605 | vfree(rxdr->buffer_info); |
1602 | return -ENOMEM; | 1606 | return -ENOMEM; |
@@ -1606,7 +1610,7 @@ setup_rx_desc_die: | |||
1606 | if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { | 1610 | if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { |
1607 | void *olddesc = rxdr->desc; | 1611 | void *olddesc = rxdr->desc; |
1608 | dma_addr_t olddma = rxdr->dma; | 1612 | dma_addr_t olddma = rxdr->dma; |
1609 | e_err("rxdr align check failed: %u bytes at %p\n", | 1613 | e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", |
1610 | rxdr->size, rxdr->desc); | 1614 | rxdr->size, rxdr->desc); |
1611 | /* Try again, without freeing the previous */ | 1615 | /* Try again, without freeing the previous */ |
1612 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, | 1616 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, |
@@ -1615,8 +1619,8 @@ setup_rx_desc_die: | |||
1615 | if (!rxdr->desc) { | 1619 | if (!rxdr->desc) { |
1616 | dma_free_coherent(&pdev->dev, rxdr->size, olddesc, | 1620 | dma_free_coherent(&pdev->dev, rxdr->size, olddesc, |
1617 | olddma); | 1621 | olddma); |
1618 | e_err("Unable to allocate memory for the Rx descriptor " | 1622 | e_err(probe, "Unable to allocate memory for the Rx " |
1619 | "ring\n"); | 1623 | "descriptor ring\n"); |
1620 | goto setup_rx_desc_die; | 1624 | goto setup_rx_desc_die; |
1621 | } | 1625 | } |
1622 | 1626 | ||
@@ -1626,8 +1630,8 @@ setup_rx_desc_die: | |||
1626 | rxdr->dma); | 1630 | rxdr->dma); |
1627 | dma_free_coherent(&pdev->dev, rxdr->size, olddesc, | 1631 | dma_free_coherent(&pdev->dev, rxdr->size, olddesc, |
1628 | olddma); | 1632 | olddma); |
1629 | e_err("Unable to allocate aligned memory for the Rx " | 1633 | e_err(probe, "Unable to allocate aligned memory for " |
1630 | "descriptor ring\n"); | 1634 | "the Rx descriptor ring\n"); |
1631 | goto setup_rx_desc_die; | 1635 | goto setup_rx_desc_die; |
1632 | } else { | 1636 | } else { |
1633 | /* Free old allocation, new allocation was successful */ | 1637 | /* Free old allocation, new allocation was successful */ |
@@ -1659,7 +1663,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) | |||
1659 | for (i = 0; i < adapter->num_rx_queues; i++) { | 1663 | for (i = 0; i < adapter->num_rx_queues; i++) { |
1660 | err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); | 1664 | err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); |
1661 | if (err) { | 1665 | if (err) { |
1662 | e_err("Allocation for Rx Queue %u failed\n", i); | 1666 | e_err(probe, "Allocation for Rx Queue %u failed\n", i); |
1663 | for (i-- ; i >= 0; i--) | 1667 | for (i-- ; i >= 0; i--) |
1664 | e1000_free_rx_resources(adapter, | 1668 | e1000_free_rx_resources(adapter, |
1665 | &adapter->rx_ring[i]); | 1669 | &adapter->rx_ring[i]); |
@@ -2110,7 +2114,7 @@ static void e1000_set_rx_mode(struct net_device *netdev) | |||
2110 | u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); | 2114 | u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); |
2111 | 2115 | ||
2112 | if (!mcarray) { | 2116 | if (!mcarray) { |
2113 | e_err("memory allocation failed\n"); | 2117 | e_err(probe, "memory allocation failed\n"); |
2114 | return; | 2118 | return; |
2115 | } | 2119 | } |
2116 | 2120 | ||
@@ -2648,7 +2652,8 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, | |||
2648 | break; | 2652 | break; |
2649 | default: | 2653 | default: |
2650 | if (unlikely(net_ratelimit())) | 2654 | if (unlikely(net_ratelimit())) |
2651 | e_warn("checksum_partial proto=%x!\n", skb->protocol); | 2655 | e_warn(drv, "checksum_partial proto=%x!\n", |
2656 | skb->protocol); | ||
2652 | break; | 2657 | break; |
2653 | } | 2658 | } |
2654 | 2659 | ||
@@ -2992,7 +2997,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
2992 | /* fall through */ | 2997 | /* fall through */ |
2993 | pull_size = min((unsigned int)4, skb->data_len); | 2998 | pull_size = min((unsigned int)4, skb->data_len); |
2994 | if (!__pskb_pull_tail(skb, pull_size)) { | 2999 | if (!__pskb_pull_tail(skb, pull_size)) { |
2995 | e_err("__pskb_pull_tail failed.\n"); | 3000 | e_err(drv, "__pskb_pull_tail " |
3001 | "failed.\n"); | ||
2996 | dev_kfree_skb_any(skb); | 3002 | dev_kfree_skb_any(skb); |
2997 | return NETDEV_TX_OK; | 3003 | return NETDEV_TX_OK; |
2998 | } | 3004 | } |
@@ -3140,7 +3146,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3140 | 3146 | ||
3141 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || | 3147 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || |
3142 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | 3148 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { |
3143 | e_err("Invalid MTU setting\n"); | 3149 | e_err(probe, "Invalid MTU setting\n"); |
3144 | return -EINVAL; | 3150 | return -EINVAL; |
3145 | } | 3151 | } |
3146 | 3152 | ||
@@ -3148,7 +3154,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3148 | switch (hw->mac_type) { | 3154 | switch (hw->mac_type) { |
3149 | case e1000_undefined ... e1000_82542_rev2_1: | 3155 | case e1000_undefined ... e1000_82542_rev2_1: |
3150 | if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { | 3156 | if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { |
3151 | e_err("Jumbo Frames not supported.\n"); | 3157 | e_err(probe, "Jumbo Frames not supported.\n"); |
3152 | return -EINVAL; | 3158 | return -EINVAL; |
3153 | } | 3159 | } |
3154 | break; | 3160 | break; |
@@ -3500,7 +3506,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3500 | !(er32(STATUS) & E1000_STATUS_TXOFF)) { | 3506 | !(er32(STATUS) & E1000_STATUS_TXOFF)) { |
3501 | 3507 | ||
3502 | /* detected Tx unit hang */ | 3508 | /* detected Tx unit hang */ |
3503 | e_err("Detected Tx Unit Hang\n" | 3509 | e_err(drv, "Detected Tx Unit Hang\n" |
3504 | " Tx Queue <%lu>\n" | 3510 | " Tx Queue <%lu>\n" |
3505 | " TDH <%x>\n" | 3511 | " TDH <%x>\n" |
3506 | " TDT <%x>\n" | 3512 | " TDT <%x>\n" |
@@ -3749,7 +3755,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, | |||
3749 | 3755 | ||
3750 | /* eth type trans needs skb->data to point to something */ | 3756 | /* eth type trans needs skb->data to point to something */ |
3751 | if (!pskb_may_pull(skb, ETH_HLEN)) { | 3757 | if (!pskb_may_pull(skb, ETH_HLEN)) { |
3752 | e_err("pskb_may_pull failed.\n"); | 3758 | e_err(drv, "pskb_may_pull failed.\n"); |
3753 | dev_kfree_skb(skb); | 3759 | dev_kfree_skb(skb); |
3754 | goto next_desc; | 3760 | goto next_desc; |
3755 | } | 3761 | } |
@@ -3874,7 +3880,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3874 | 3880 | ||
3875 | if (adapter->discarding) { | 3881 | if (adapter->discarding) { |
3876 | /* All receives must fit into a single buffer */ | 3882 | /* All receives must fit into a single buffer */ |
3877 | e_info("Receive packet consumed multiple buffers\n"); | 3883 | e_dbg("Receive packet consumed multiple buffers\n"); |
3878 | /* recycle */ | 3884 | /* recycle */ |
3879 | buffer_info->skb = skb; | 3885 | buffer_info->skb = skb; |
3880 | if (status & E1000_RXD_STAT_EOP) | 3886 | if (status & E1000_RXD_STAT_EOP) |
@@ -3986,8 +3992,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, | |||
3986 | /* Fix for errata 23, can't cross 64kB boundary */ | 3992 | /* Fix for errata 23, can't cross 64kB boundary */ |
3987 | if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { | 3993 | if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { |
3988 | struct sk_buff *oldskb = skb; | 3994 | struct sk_buff *oldskb = skb; |
3989 | e_err("skb align check failed: %u bytes at %p\n", | 3995 | e_err(rx_err, "skb align check failed: %u bytes at " |
3990 | bufsz, skb->data); | 3996 | "%p\n", bufsz, skb->data); |
3991 | /* Try again, without freeing the previous */ | 3997 | /* Try again, without freeing the previous */ |
3992 | skb = netdev_alloc_skb_ip_align(netdev, bufsz); | 3998 | skb = netdev_alloc_skb_ip_align(netdev, bufsz); |
3993 | /* Failed allocation, critical failure */ | 3999 | /* Failed allocation, critical failure */ |
@@ -4095,8 +4101,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
4095 | /* Fix for errata 23, can't cross 64kB boundary */ | 4101 | /* Fix for errata 23, can't cross 64kB boundary */ |
4096 | if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { | 4102 | if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { |
4097 | struct sk_buff *oldskb = skb; | 4103 | struct sk_buff *oldskb = skb; |
4098 | e_err("skb align check failed: %u bytes at %p\n", | 4104 | e_err(rx_err, "skb align check failed: %u bytes at " |
4099 | bufsz, skb->data); | 4105 | "%p\n", bufsz, skb->data); |
4100 | /* Try again, without freeing the previous */ | 4106 | /* Try again, without freeing the previous */ |
4101 | skb = netdev_alloc_skb_ip_align(netdev, bufsz); | 4107 | skb = netdev_alloc_skb_ip_align(netdev, bufsz); |
4102 | /* Failed allocation, critical failure */ | 4108 | /* Failed allocation, critical failure */ |
@@ -4141,8 +4147,8 @@ map_skb: | |||
4141 | if (!e1000_check_64k_bound(adapter, | 4147 | if (!e1000_check_64k_bound(adapter, |
4142 | (void *)(unsigned long)buffer_info->dma, | 4148 | (void *)(unsigned long)buffer_info->dma, |
4143 | adapter->rx_buffer_len)) { | 4149 | adapter->rx_buffer_len)) { |
4144 | e_err("dma align check failed: %u bytes at %p\n", | 4150 | e_err(rx_err, "dma align check failed: %u bytes at " |
4145 | adapter->rx_buffer_len, | 4151 | "%p\n", adapter->rx_buffer_len, |
4146 | (void *)(unsigned long)buffer_info->dma); | 4152 | (void *)(unsigned long)buffer_info->dma); |
4147 | dev_kfree_skb(skb); | 4153 | dev_kfree_skb(skb); |
4148 | buffer_info->skb = NULL; | 4154 | buffer_info->skb = NULL; |
@@ -4355,7 +4361,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw) | |||
4355 | int ret_val = pci_set_mwi(adapter->pdev); | 4361 | int ret_val = pci_set_mwi(adapter->pdev); |
4356 | 4362 | ||
4357 | if (ret_val) | 4363 | if (ret_val) |
4358 | e_err("Error in setting MWI\n"); | 4364 | e_err(probe, "Error in setting MWI\n"); |
4359 | } | 4365 | } |
4360 | 4366 | ||
4361 | void e1000_pci_clear_mwi(struct e1000_hw *hw) | 4367 | void e1000_pci_clear_mwi(struct e1000_hw *hw) |
@@ -4486,7 +4492,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | |||
4486 | /* Fiber NICs only allow 1 Gbps Full duplex */ | 4492 | /* Fiber NICs only allow 1 Gbps Full duplex */ |
4487 | if ((hw->media_type == e1000_media_type_fiber) && | 4493 | if ((hw->media_type == e1000_media_type_fiber) && |
4488 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { | 4494 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { |
4489 | e_err("Unsupported Speed/Duplex configuration\n"); | 4495 | e_err(probe, "Unsupported Speed/Duplex configuration\n"); |
4490 | return -EINVAL; | 4496 | return -EINVAL; |
4491 | } | 4497 | } |
4492 | 4498 | ||
@@ -4509,7 +4515,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | |||
4509 | break; | 4515 | break; |
4510 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ | 4516 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ |
4511 | default: | 4517 | default: |
4512 | e_err("Unsupported Speed/Duplex configuration\n"); | 4518 | e_err(probe, "Unsupported Speed/Duplex configuration\n"); |
4513 | return -EINVAL; | 4519 | return -EINVAL; |
4514 | } | 4520 | } |
4515 | return 0; | 4521 | return 0; |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 6aa795a6160b..afd01295fbec 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -5650,8 +5650,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
5650 | if (err) | 5650 | if (err) |
5651 | goto err_sw_init; | 5651 | goto err_sw_init; |
5652 | 5652 | ||
5653 | err = -EIO; | ||
5654 | |||
5655 | memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); | 5653 | memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); |
5656 | memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); | 5654 | memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); |
5657 | memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); | 5655 | memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); |
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c index 38c282e6565b..6d653c459c1f 100644 --- a/drivers/net/ethoc.c +++ b/drivers/net/ethoc.c | |||
@@ -632,7 +632,7 @@ static void ethoc_mdio_poll(struct net_device *dev) | |||
632 | { | 632 | { |
633 | } | 633 | } |
634 | 634 | ||
635 | static int ethoc_mdio_probe(struct net_device *dev) | 635 | static int __devinit ethoc_mdio_probe(struct net_device *dev) |
636 | { | 636 | { |
637 | struct ethoc *priv = netdev_priv(dev); | 637 | struct ethoc *priv = netdev_priv(dev); |
638 | struct phy_device *phy; | 638 | struct phy_device *phy; |
@@ -871,7 +871,7 @@ static const struct net_device_ops ethoc_netdev_ops = { | |||
871 | * ethoc_probe() - initialize OpenCores ethernet MAC | 871 | * ethoc_probe() - initialize OpenCores ethernet MAC |
872 | * pdev: platform device | 872 | * pdev: platform device |
873 | */ | 873 | */ |
874 | static int ethoc_probe(struct platform_device *pdev) | 874 | static int __devinit ethoc_probe(struct platform_device *pdev) |
875 | { | 875 | { |
876 | struct net_device *netdev = NULL; | 876 | struct net_device *netdev = NULL; |
877 | struct resource *res = NULL; | 877 | struct resource *res = NULL; |
@@ -1080,7 +1080,7 @@ out: | |||
1080 | * ethoc_remove() - shutdown OpenCores ethernet MAC | 1080 | * ethoc_remove() - shutdown OpenCores ethernet MAC |
1081 | * @pdev: platform device | 1081 | * @pdev: platform device |
1082 | */ | 1082 | */ |
1083 | static int ethoc_remove(struct platform_device *pdev) | 1083 | static int __devexit ethoc_remove(struct platform_device *pdev) |
1084 | { | 1084 | { |
1085 | struct net_device *netdev = platform_get_drvdata(pdev); | 1085 | struct net_device *netdev = platform_get_drvdata(pdev); |
1086 | struct ethoc *priv = netdev_priv(netdev); | 1086 | struct ethoc *priv = netdev_priv(netdev); |
@@ -1121,7 +1121,7 @@ static int ethoc_resume(struct platform_device *pdev) | |||
1121 | 1121 | ||
1122 | static struct platform_driver ethoc_driver = { | 1122 | static struct platform_driver ethoc_driver = { |
1123 | .probe = ethoc_probe, | 1123 | .probe = ethoc_probe, |
1124 | .remove = ethoc_remove, | 1124 | .remove = __devexit_p(ethoc_remove), |
1125 | .suspend = ethoc_suspend, | 1125 | .suspend = ethoc_suspend, |
1126 | .resume = ethoc_resume, | 1126 | .resume = ethoc_resume, |
1127 | .driver = { | 1127 | .driver = { |
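
The annotations added here let probe/remove code be discarded when it can never run, with __devexit_p() yielding a safe function pointer either way. Roughly, per include/linux/init.h of this era (a sketch, not a verbatim quote):

	/* hedged sketch of the __devexit_p() mechanism */
	#if defined(MODULE) || defined(CONFIG_HOTPLUG)
	#define __devexit_p(x)	x
	#else
	#define __devexit_p(x)	NULL	/* function body was discarded */
	#endif
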
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 391a553a3add..768b840aeb6b 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -118,6 +118,8 @@ static unsigned char fec_mac_default[] = { | |||
118 | #define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ | 118 | #define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ |
119 | #define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ | 119 | #define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ |
120 | 120 | ||
121 | #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) | ||
122 | |||
121 | /* The FEC stores dest/src/type, data, and checksum for receive packets. | 123 | /* The FEC stores dest/src/type, data, and checksum for receive packets. |
122 | */ | 124 | */ |
123 | #define PKT_MAXBUF_SIZE 1518 | 125 | #define PKT_MAXBUF_SIZE 1518 |
@@ -1213,8 +1215,7 @@ fec_restart(struct net_device *dev, int duplex) | |||
1213 | writel(0, fep->hwp + FEC_R_DES_ACTIVE); | 1215 | writel(0, fep->hwp + FEC_R_DES_ACTIVE); |
1214 | 1216 | ||
1215 | /* Enable interrupts we wish to service */ | 1217 | /* Enable interrupts we wish to service */ |
1216 | writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII, | 1218 | writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); |
1217 | fep->hwp + FEC_IMASK); | ||
1218 | } | 1219 | } |
1219 | 1220 | ||
1220 | static void | 1221 | static void |
@@ -1233,8 +1234,8 @@ fec_stop(struct net_device *dev) | |||
1233 | /* Whack a reset. We should wait for this. */ | 1234 | /* Whack a reset. We should wait for this. */ |
1234 | writel(1, fep->hwp + FEC_ECNTRL); | 1235 | writel(1, fep->hwp + FEC_ECNTRL); |
1235 | udelay(10); | 1236 | udelay(10); |
1236 | |||
1237 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); | 1237 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); |
1238 | writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); | ||
1238 | } | 1239 | } |
1239 | 1240 | ||
1240 | static int __devinit | 1241 | static int __devinit |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 9ef6a9d5fbcb..4da05b1b445c 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -89,8 +89,10 @@ | |||
89 | #define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */ | 89 | #define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */ |
90 | #define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */ | 90 | #define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */ |
91 | #define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */ | 91 | #define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */ |
92 | #define DEV_HAS_STATISTICS_V2 0x0000600 /* device supports hw statistics version 2 */ | 92 | #define DEV_HAS_STATISTICS_V2 0x0000400 /* device supports hw statistics version 2 */ |
93 | #define DEV_HAS_STATISTICS_V3 0x0000e00 /* device supports hw statistics version 3 */ | 93 | #define DEV_HAS_STATISTICS_V3 0x0000800 /* device supports hw statistics version 3 */ |
94 | #define DEV_HAS_STATISTICS_V12 0x0000600 /* device supports hw statistics version 1 and 2 */ | ||
95 | #define DEV_HAS_STATISTICS_V123 0x0000e00 /* device supports hw statistics version 1, 2, and 3 */ | ||
94 | #define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */ | 96 | #define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */ |
95 | #define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */ | 97 | #define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */ |
96 | #define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */ | 98 | #define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */ |
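
An illustration of why these values changed: the old V2/V3 defines were overlapping masks rather than distinct bits, so a bitwise feature test could misfire. The names below are hypothetical, chosen only to show the arithmetic.

	#define OLD_STATISTICS_V1	0x200
	#define OLD_STATISTICS_V2	0x600	/* included the V1 bit */
	/* (OLD_STATISTICS_V1 & OLD_STATISTICS_V2) != 0, so a V1-only
	 * device passed an "& DEV_HAS_STATISTICS_V2" check. */

	#define NEW_STATISTICS_V1	0x200
	#define NEW_STATISTICS_V2	0x400
	#define NEW_STATISTICS_V12	(NEW_STATISTICS_V1 | NEW_STATISTICS_V2)
	/* distinct bits make version tests unambiguous; the V12/V123
	 * combinations are spelled out where both really apply. */
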
@@ -6067,111 +6069,111 @@ static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { | |||
6067 | }, | 6069 | }, |
6068 | { /* MCP55 Ethernet Controller */ | 6070 | { /* MCP55 Ethernet Controller */ |
6069 | PCI_DEVICE(0x10DE, 0x0372), | 6071 | PCI_DEVICE(0x10DE, 0x0372), |
6070 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, | 6072 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, |
6071 | }, | 6073 | }, |
6072 | { /* MCP55 Ethernet Controller */ | 6074 | { /* MCP55 Ethernet Controller */ |
6073 | PCI_DEVICE(0x10DE, 0x0373), | 6075 | PCI_DEVICE(0x10DE, 0x0373), |
6074 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, | 6076 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, |
6075 | }, | 6077 | }, |
6076 | { /* MCP61 Ethernet Controller */ | 6078 | { /* MCP61 Ethernet Controller */ |
6077 | PCI_DEVICE(0x10DE, 0x03E5), | 6079 | PCI_DEVICE(0x10DE, 0x03E5), |
6078 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, | 6080 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, |
6079 | }, | 6081 | }, |
6080 | { /* MCP61 Ethernet Controller */ | 6082 | { /* MCP61 Ethernet Controller */ |
6081 | PCI_DEVICE(0x10DE, 0x03E6), | 6083 | PCI_DEVICE(0x10DE, 0x03E6), |
6082 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, | 6084 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, |
6083 | }, | 6085 | }, |
6084 | { /* MCP61 Ethernet Controller */ | 6086 | { /* MCP61 Ethernet Controller */ |
6085 | PCI_DEVICE(0x10DE, 0x03EE), | 6087 | PCI_DEVICE(0x10DE, 0x03EE), |
6086 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, | 6088 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, |
6087 | }, | 6089 | }, |
6088 | { /* MCP61 Ethernet Controller */ | 6090 | { /* MCP61 Ethernet Controller */ |
6089 | PCI_DEVICE(0x10DE, 0x03EF), | 6091 | PCI_DEVICE(0x10DE, 0x03EF), |
6090 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, | 6092 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, |
6091 | }, | 6093 | }, |
6092 | { /* MCP65 Ethernet Controller */ | 6094 | { /* MCP65 Ethernet Controller */ |
6093 | PCI_DEVICE(0x10DE, 0x0450), | 6095 | PCI_DEVICE(0x10DE, 0x0450), |
6094 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, | 6096 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6095 | }, | 6097 | }, |
6096 | { /* MCP65 Ethernet Controller */ | 6098 | { /* MCP65 Ethernet Controller */ |
6097 | PCI_DEVICE(0x10DE, 0x0451), | 6099 | PCI_DEVICE(0x10DE, 0x0451), |
6098 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, | 6100 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6099 | }, | 6101 | }, |
6100 | { /* MCP65 Ethernet Controller */ | 6102 | { /* MCP65 Ethernet Controller */ |
6101 | PCI_DEVICE(0x10DE, 0x0452), | 6103 | PCI_DEVICE(0x10DE, 0x0452), |
6102 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, | 6104 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6103 | }, | 6105 | }, |
6104 | { /* MCP65 Ethernet Controller */ | 6106 | { /* MCP65 Ethernet Controller */ |
6105 | PCI_DEVICE(0x10DE, 0x0453), | 6107 | PCI_DEVICE(0x10DE, 0x0453), |
6106 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, | 6108 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6107 | }, | 6109 | }, |
6108 | { /* MCP67 Ethernet Controller */ | 6110 | { /* MCP67 Ethernet Controller */ |
6109 | PCI_DEVICE(0x10DE, 0x054C), | 6111 | PCI_DEVICE(0x10DE, 0x054C), |
6110 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, | 6112 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6111 | }, | 6113 | }, |
6112 | { /* MCP67 Ethernet Controller */ | 6114 | { /* MCP67 Ethernet Controller */ |
6113 | PCI_DEVICE(0x10DE, 0x054D), | 6115 | PCI_DEVICE(0x10DE, 0x054D), |
6114 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, | 6116 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6115 | }, | 6117 | }, |
6116 | { /* MCP67 Ethernet Controller */ | 6118 | { /* MCP67 Ethernet Controller */ |
6117 | PCI_DEVICE(0x10DE, 0x054E), | 6119 | PCI_DEVICE(0x10DE, 0x054E), |
6118 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, | 6120 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6119 | }, | 6121 | }, |
6120 | { /* MCP67 Ethernet Controller */ | 6122 | { /* MCP67 Ethernet Controller */ |
6121 | PCI_DEVICE(0x10DE, 0x054F), | 6123 | PCI_DEVICE(0x10DE, 0x054F), |
6122 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, | 6124 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6123 | }, | 6125 | }, |
6124 | { /* MCP73 Ethernet Controller */ | 6126 | { /* MCP73 Ethernet Controller */ |
6125 | PCI_DEVICE(0x10DE, 0x07DC), | 6127 | PCI_DEVICE(0x10DE, 0x07DC), |
6126 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, | 6128 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6127 | }, | 6129 | }, |
6128 | { /* MCP73 Ethernet Controller */ | 6130 | { /* MCP73 Ethernet Controller */ |
6129 | PCI_DEVICE(0x10DE, 0x07DD), | 6131 | PCI_DEVICE(0x10DE, 0x07DD), |
6130 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, | 6132 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6131 | }, | 6133 | }, |
6132 | { /* MCP73 Ethernet Controller */ | 6134 | { /* MCP73 Ethernet Controller */ |
6133 | PCI_DEVICE(0x10DE, 0x07DE), | 6135 | PCI_DEVICE(0x10DE, 0x07DE), |
6134 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, | 6136 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6135 | }, | 6137 | }, |
6136 | { /* MCP73 Ethernet Controller */ | 6138 | { /* MCP73 Ethernet Controller */ |
6137 | PCI_DEVICE(0x10DE, 0x07DF), | 6139 | PCI_DEVICE(0x10DE, 0x07DF), |
6138 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, | 6140 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6139 | }, | 6141 | }, |
6140 | { /* MCP77 Ethernet Controller */ | 6142 | { /* MCP77 Ethernet Controller */ |
6141 | PCI_DEVICE(0x10DE, 0x0760), | 6143 | PCI_DEVICE(0x10DE, 0x0760), |
6142 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, | 6144 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6143 | }, | 6145 | }, |
6144 | { /* MCP77 Ethernet Controller */ | 6146 | { /* MCP77 Ethernet Controller */ |
6145 | PCI_DEVICE(0x10DE, 0x0761), | 6147 | PCI_DEVICE(0x10DE, 0x0761), |
6146 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, | 6148 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6147 | }, | 6149 | }, |
6148 | { /* MCP77 Ethernet Controller */ | 6150 | { /* MCP77 Ethernet Controller */ |
6149 | PCI_DEVICE(0x10DE, 0x0762), | 6151 | PCI_DEVICE(0x10DE, 0x0762), |
6150 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, | 6152 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6151 | }, | 6153 | }, |
6152 | { /* MCP77 Ethernet Controller */ | 6154 | { /* MCP77 Ethernet Controller */ |
6153 | PCI_DEVICE(0x10DE, 0x0763), | 6155 | PCI_DEVICE(0x10DE, 0x0763), |
6154 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, | 6156 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6155 | }, | 6157 | }, |
6156 | { /* MCP79 Ethernet Controller */ | 6158 | { /* MCP79 Ethernet Controller */ |
6157 | PCI_DEVICE(0x10DE, 0x0AB0), | 6159 | PCI_DEVICE(0x10DE, 0x0AB0), |
6158 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, | 6160 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6159 | }, | 6161 | }, |
6160 | { /* MCP79 Ethernet Controller */ | 6162 | { /* MCP79 Ethernet Controller */ |
6161 | PCI_DEVICE(0x10DE, 0x0AB1), | 6163 | PCI_DEVICE(0x10DE, 0x0AB1), |
6162 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, | 6164 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6163 | }, | 6165 | }, |
6164 | { /* MCP79 Ethernet Controller */ | 6166 | { /* MCP79 Ethernet Controller */ |
6165 | PCI_DEVICE(0x10DE, 0x0AB2), | 6167 | PCI_DEVICE(0x10DE, 0x0AB2), |
6166 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, | 6168 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6167 | }, | 6169 | }, |
6168 | { /* MCP79 Ethernet Controller */ | 6170 | { /* MCP79 Ethernet Controller */ |
6169 | PCI_DEVICE(0x10DE, 0x0AB3), | 6171 | PCI_DEVICE(0x10DE, 0x0AB3), |
6170 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, | 6172 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6171 | }, | 6173 | }, |
6172 | { /* MCP89 Ethernet Controller */ | 6174 | { /* MCP89 Ethernet Controller */ |
6173 | PCI_DEVICE(0x10DE, 0x0D7D), | 6175 | PCI_DEVICE(0x10DE, 0x0D7D), |
6174 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX, | 6176 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX, |
6175 | }, | 6177 | }, |
6176 | {0,}, | 6178 | {0,}, |
6177 | }; | 6179 | }; |
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index 06251a9e9f1b..cc58227af424 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c | |||
@@ -70,6 +70,35 @@ static const u16 e1000_82580_rxpbs_table[] = | |||
70 | #define E1000_82580_RXPBS_TABLE_SIZE \ | 70 | #define E1000_82580_RXPBS_TABLE_SIZE \ |
71 | (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) | 71 | (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) |
72 | 72 | ||
73 | /** | ||
74 | * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO | ||
75 | * @hw: pointer to the HW structure | ||
76 | * | ||
77 | * Called to determine if the I2C pins are being used for I2C or as an | ||
78 | * external MDIO interface since the two options are mutually exclusive. | ||
79 | **/ | ||
80 | static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) | ||
81 | { | ||
82 | u32 reg = 0; | ||
83 | bool ext_mdio = false; | ||
84 | |||
85 | switch (hw->mac.type) { | ||
86 | case e1000_82575: | ||
87 | case e1000_82576: | ||
88 | reg = rd32(E1000_MDIC); | ||
89 | ext_mdio = !!(reg & E1000_MDIC_DEST); | ||
90 | break; | ||
91 | case e1000_82580: | ||
92 | case e1000_i350: | ||
93 | reg = rd32(E1000_MDICNFG); | ||
94 | ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); | ||
95 | break; | ||
96 | default: | ||
97 | break; | ||
98 | } | ||
99 | return ext_mdio; | ||
100 | } | ||
101 | |||
73 | static s32 igb_get_invariants_82575(struct e1000_hw *hw) | 102 | static s32 igb_get_invariants_82575(struct e1000_hw *hw) |
74 | { | 103 | { |
75 | struct e1000_phy_info *phy = &hw->phy; | 104 | struct e1000_phy_info *phy = &hw->phy; |
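The new helper answers a single question -- are the shared pins routed as an external MDIO bus rather than I2C? -- but must consult a different register per MAC generation: MDIC.DEST on 82575/82576 versus MDICNFG.EXT_MDIO on 82580/i350 (both bits are defined later in this patch). The double negation normalizes the masked bit into a strict bool:

	reg = rd32(E1000_MDIC);                  /* 82575/82576 case         */
	ext_mdio = !!(reg & E1000_MDIC_DEST);    /* 0x80000000 -> true/false */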
@@ -144,13 +173,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) | |||
144 | 173 | ||
145 | wr32(E1000_CTRL_EXT, ctrl_ext); | 174 | wr32(E1000_CTRL_EXT, ctrl_ext); |
146 | 175 | ||
147 | /* | ||
148 | * if using i2c make certain the MDICNFG register is cleared to prevent | ||
149 | * communications from being misrouted to the mdic registers | ||
150 | */ | ||
151 | if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580)) | ||
152 | wr32(E1000_MDICNFG, 0); | ||
153 | |||
154 | /* Set mta register count */ | 176 | /* Set mta register count */ |
155 | mac->mta_reg_count = 128; | 177 | mac->mta_reg_count = 128; |
156 | /* Set rar entry count */ | 178 | /* Set rar entry count */ |
@@ -229,18 +251,20 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) | |||
229 | phy->reset_delay_us = 100; | 251 | phy->reset_delay_us = 100; |
230 | 252 | ||
231 | /* PHY function pointers */ | 253 | /* PHY function pointers */ |
232 | if (igb_sgmii_active_82575(hw)) { | 254 | if (igb_sgmii_active_82575(hw)) |
233 | phy->ops.reset = igb_phy_hw_reset_sgmii_82575; | 255 | phy->ops.reset = igb_phy_hw_reset_sgmii_82575; |
234 | phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; | 256 | else |
235 | phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; | 257 | phy->ops.reset = igb_phy_hw_reset; |
258 | |||
259 | if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { | ||
260 | phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; | ||
261 | phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; | ||
236 | } else if (hw->mac.type >= e1000_82580) { | 262 | } else if (hw->mac.type >= e1000_82580) { |
237 | phy->ops.reset = igb_phy_hw_reset; | 263 | phy->ops.read_reg = igb_read_phy_reg_82580; |
238 | phy->ops.read_reg = igb_read_phy_reg_82580; | 264 | phy->ops.write_reg = igb_write_phy_reg_82580; |
239 | phy->ops.write_reg = igb_write_phy_reg_82580; | ||
240 | } else { | 265 | } else { |
241 | phy->ops.reset = igb_phy_hw_reset; | 266 | phy->ops.read_reg = igb_read_phy_reg_igp; |
242 | phy->ops.read_reg = igb_read_phy_reg_igp; | 267 | phy->ops.write_reg = igb_write_phy_reg_igp; |
243 | phy->ops.write_reg = igb_write_phy_reg_igp; | ||
244 | } | 268 | } |
245 | 269 | ||
246 | /* set lan id */ | 270 | /* set lan id */ |
@@ -400,6 +424,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) | |||
400 | s32 ret_val = 0; | 424 | s32 ret_val = 0; |
401 | u16 phy_id; | 425 | u16 phy_id; |
402 | u32 ctrl_ext; | 426 | u32 ctrl_ext; |
427 | u32 mdic; | ||
403 | 428 | ||
404 | /* | 429 | /* |
405 | * For SGMII PHYs, we try the list of possible addresses until | 430 | * For SGMII PHYs, we try the list of possible addresses until |
@@ -414,6 +439,29 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) | |||
414 | goto out; | 439 | goto out; |
415 | } | 440 | } |
416 | 441 | ||
442 | if (igb_sgmii_uses_mdio_82575(hw)) { | ||
443 | switch (hw->mac.type) { | ||
444 | case e1000_82575: | ||
445 | case e1000_82576: | ||
446 | mdic = rd32(E1000_MDIC); | ||
447 | mdic &= E1000_MDIC_PHY_MASK; | ||
448 | phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; | ||
449 | break; | ||
450 | case e1000_82580: | ||
451 | case e1000_i350: | ||
452 | mdic = rd32(E1000_MDICNFG); | ||
453 | mdic &= E1000_MDICNFG_PHY_MASK; | ||
454 | phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; | ||
455 | break; | ||
456 | default: | ||
457 | ret_val = -E1000_ERR_PHY; | ||
458 | goto out; | ||
459 | break; | ||
460 | } | ||
461 | ret_val = igb_get_phy_id(hw); | ||
462 | goto out; | ||
463 | } | ||
464 | |||
417 | /* Power on sgmii phy if it is disabled */ | 465 | /* Power on sgmii phy if it is disabled */ |
418 | ctrl_ext = rd32(E1000_CTRL_EXT); | 466 | ctrl_ext = rd32(E1000_CTRL_EXT); |
419 | wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); | 467 | wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); |
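When external-MDIO routing is in effect, the PHY address is no longer probed from a candidate list but read straight out of the routing register. Per the defines added further down, the field occupies bits 25:21 (mask 0x03E00000, shift 21) of both MDIC and MDICNFG -- a standard 5-bit MDIO address. The extraction, spelled out:

	mdic = rd32(E1000_MDICNFG);                  /* 82580/i350 case */
	phy->addr = (mdic & E1000_MDICNFG_PHY_MASK)  /* keep bits 25:21 */
			>> E1000_MDICNFG_PHY_SHIFT;  /* -> 0..31        */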
@@ -1501,6 +1549,43 @@ out: | |||
1501 | } | 1549 | } |
1502 | 1550 | ||
1503 | /** | 1551 | /** |
1552 | * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits | ||
1553 | * @hw: pointer to the HW structure | ||
1554 | * | ||
1555 | * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on | ||
1556 | * the values found in the EEPROM. This addresses an issue in which these | ||
1557 | * bits are not restored from EEPROM after reset. | ||
1558 | **/ | ||
1559 | static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) | ||
1560 | { | ||
1561 | s32 ret_val = 0; | ||
1562 | u32 mdicnfg; | ||
1563 | u16 nvm_data; | ||
1564 | |||
1565 | if (hw->mac.type != e1000_82580) | ||
1566 | goto out; | ||
1567 | if (!igb_sgmii_active_82575(hw)) | ||
1568 | goto out; | ||
1569 | |||
1570 | ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + | ||
1571 | NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, | ||
1572 | &nvm_data); | ||
1573 | if (ret_val) { | ||
1574 | hw_dbg("NVM Read Error\n"); | ||
1575 | goto out; | ||
1576 | } | ||
1577 | |||
1578 | mdicnfg = rd32(E1000_MDICNFG); | ||
1579 | if (nvm_data & NVM_WORD24_EXT_MDIO) | ||
1580 | mdicnfg |= E1000_MDICNFG_EXT_MDIO; | ||
1581 | if (nvm_data & NVM_WORD24_COM_MDIO) | ||
1582 | mdicnfg |= E1000_MDICNFG_COM_MDIO; | ||
1583 | wr32(E1000_MDICNFG, mdicnfg); | ||
1584 | out: | ||
1585 | return ret_val; | ||
1586 | } | ||
1587 | |||
1588 | /** | ||
1504 | * igb_reset_hw_82580 - Reset hardware | 1589 | * igb_reset_hw_82580 - Reset hardware |
1505 | * @hw: pointer to the HW structure | 1590 | * @hw: pointer to the HW structure |
1506 | * | 1591 | * |
@@ -1575,6 +1660,10 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw) | |||
1575 | wr32(E1000_IMC, 0xffffffff); | 1660 | wr32(E1000_IMC, 0xffffffff); |
1576 | icr = rd32(E1000_ICR); | 1661 | icr = rd32(E1000_ICR); |
1577 | 1662 | ||
1663 | ret_val = igb_reset_mdicnfg_82580(hw); | ||
1664 | if (ret_val) | ||
1665 | hw_dbg("Could not reset MDICNFG based on EEPROM\n"); | ||
1666 | |||
1578 | /* Install any alternate MAC address into RAR0 */ | 1667 | /* Install any alternate MAC address into RAR0 */ |
1579 | ret_val = igb_check_alt_mac_addr(hw); | 1668 | ret_val = igb_check_alt_mac_addr(hw); |
1580 | 1669 | ||
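igb_reset_mdicnfg_82580() exists because the 82580 does not reload the MDIO-routing bits from EEPROM after a reset; note it is called from igb_reset_hw_82580() only after the reset completes, since an earlier call would be wiped out by the reset itself. The mapping it restores, per the defines added below:

	/* EEPROM word 0x24 (per port)           MDICNFG register
	 *   bit 2  NVM_WORD24_EXT_MDIO  --->  bit 31  E1000_MDICNFG_EXT_MDIO
	 *   bit 3  NVM_WORD24_COM_MDIO  --->  bit 30  E1000_MDICNFG_COM_MDIO */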
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h index 90bc29d7e182..bbd2ec308eb0 100644 --- a/drivers/net/igb/e1000_defines.h +++ b/drivers/net/igb/e1000_defines.h | |||
@@ -468,6 +468,11 @@ | |||
468 | 468 | ||
469 | #define E1000_TIMINCA_16NS_SHIFT 24 | 469 | #define E1000_TIMINCA_16NS_SHIFT 24 |
470 | 470 | ||
471 | #define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ | ||
472 | #define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ | ||
473 | #define E1000_MDICNFG_PHY_MASK 0x03E00000 | ||
474 | #define E1000_MDICNFG_PHY_SHIFT 21 | ||
475 | |||
471 | /* PCI Express Control */ | 476 | /* PCI Express Control */ |
472 | #define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 | 477 | #define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 |
473 | #define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 | 478 | #define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 |
@@ -565,6 +570,10 @@ | |||
565 | 570 | ||
566 | #define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) | 571 | #define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) |
567 | 572 | ||
573 | /* Mask bits for fields in Word 0x24 of the NVM */ | ||
574 | #define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ | ||
575 | #define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ | ||
576 | |||
568 | /* Mask bits for fields in Word 0x0f of the NVM */ | 577 | /* Mask bits for fields in Word 0x0f of the NVM */ |
569 | #define NVM_WORD0F_PAUSE_MASK 0x3000 | 578 | #define NVM_WORD0F_PAUSE_MASK 0x3000 |
570 | #define NVM_WORD0F_ASM_DIR 0x2000 | 579 | #define NVM_WORD0F_ASM_DIR 0x2000 |
@@ -698,12 +707,17 @@ | |||
698 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 | 707 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 |
699 | 708 | ||
700 | /* MDI Control */ | 709 | /* MDI Control */ |
710 | #define E1000_MDIC_DATA_MASK 0x0000FFFF | ||
711 | #define E1000_MDIC_REG_MASK 0x001F0000 | ||
701 | #define E1000_MDIC_REG_SHIFT 16 | 712 | #define E1000_MDIC_REG_SHIFT 16 |
713 | #define E1000_MDIC_PHY_MASK 0x03E00000 | ||
702 | #define E1000_MDIC_PHY_SHIFT 21 | 714 | #define E1000_MDIC_PHY_SHIFT 21 |
703 | #define E1000_MDIC_OP_WRITE 0x04000000 | 715 | #define E1000_MDIC_OP_WRITE 0x04000000 |
704 | #define E1000_MDIC_OP_READ 0x08000000 | 716 | #define E1000_MDIC_OP_READ 0x08000000 |
705 | #define E1000_MDIC_READY 0x10000000 | 717 | #define E1000_MDIC_READY 0x10000000 |
718 | #define E1000_MDIC_INT_EN 0x20000000 | ||
706 | #define E1000_MDIC_ERROR 0x40000000 | 719 | #define E1000_MDIC_ERROR 0x40000000 |
720 | #define E1000_MDIC_DEST 0x80000000 | ||
707 | 721 | ||
708 | /* SerDes Control */ | 722 | /* SerDes Control */ |
709 | #define E1000_GEN_CTL_READY 0x80000000 | 723 | #define E1000_GEN_CTL_READY 0x80000000 |
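With the MDI Control fields now fully named, the layout of an MDIC command word is visible: DATA in bits 15:0, REG in 20:16, PHY in 25:21, then the OP/READY/INT_EN/ERROR/DEST flags above. An illustrative sketch of the access sequence these fields support -- the patch itself does not touch the accessors:

	u32 mdic = (reg_addr << E1000_MDIC_REG_SHIFT) |
		   (phy_addr << E1000_MDIC_PHY_SHIFT) |
		   E1000_MDIC_OP_READ;
	wr32(E1000_MDIC, mdic);
	do {
		mdic = rd32(E1000_MDIC);        /* poll for completion */
	} while (!(mdic & E1000_MDIC_READY));
	if (!(mdic & E1000_MDIC_ERROR))
		data = mdic & E1000_MDIC_DATA_MASK;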
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 94656179441d..667b527b0312 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -1722,6 +1722,15 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1722 | u16 eeprom_apme_mask = IGB_EEPROM_APME; | 1722 | u16 eeprom_apme_mask = IGB_EEPROM_APME; |
1723 | u32 part_num; | 1723 | u32 part_num; |
1724 | 1724 | ||
1725 | /* Catch broken hardware that put the wrong VF device ID in | ||
1726 | * the PCIe SR-IOV capability. | ||
1727 | */ | ||
1728 | if (pdev->is_virtfn) { | ||
1729 | WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", | ||
1730 | pci_name(pdev), pdev->vendor, pdev->device); | ||
1731 | return -EINVAL; | ||
1732 | } | ||
1733 | |||
1725 | err = pci_enable_device_mem(pdev); | 1734 | err = pci_enable_device_mem(pdev); |
1726 | if (err) | 1735 | if (err) |
1727 | return err; | 1736 | return err; |
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 5e2b2a8c56c6..048595bc79ad 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c | |||
@@ -2751,7 +2751,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev, | |||
2751 | dev_info(&pdev->dev, | 2751 | dev_info(&pdev->dev, |
2752 | "PF still in reset state, assigning new address." | 2752 | "PF still in reset state, assigning new address." |
2753 | " Is the PF interface up?\n"); | 2753 | " Is the PF interface up?\n"); |
2754 | random_ether_addr(hw->mac.addr); | 2754 | dev_hw_addr_random(adapter->netdev, hw->mac.addr); |
2755 | } else { | 2755 | } else { |
2756 | err = hw->mac.ops.read_mac_addr(hw); | 2756 | err = hw->mac.ops.read_mac_addr(hw); |
2757 | if (err) { | 2757 | if (err) { |
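This and the matching ixgbevf hunk below swap random_ether_addr() for dev_hw_addr_random(). Assuming the then-current etherdevice.h helper, the practical difference is one extra side effect -- the netdev is tagged so userspace can tell the address was generated, not burned in:

	static inline void dev_hw_addr_random(struct net_device *dev,
					      u8 *hwaddr)
	{
		dev->addr_assign_type |= NET_ADDR_RANDOM; /* visible to userspace */
		random_ether_addr(hwaddr); /* locally administered, unicast */
	}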
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index d67e48418e55..850ca1c5ee19 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c | |||
@@ -2848,9 +2848,7 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, | |||
2848 | unsigned short ss_device = 0x0000; | 2848 | unsigned short ss_device = 0x0000; |
2849 | int ret = 0; | 2849 | int ret = 0; |
2850 | 2850 | ||
2851 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | 2851 | for_each_pci_dev(dev) { |
2852 | |||
2853 | while (dev != NULL) { | ||
2854 | struct smsc_ircc_subsystem_configuration *conf; | 2852 | struct smsc_ircc_subsystem_configuration *conf; |
2855 | 2853 | ||
2856 | /* | 2854 | /* |
@@ -2899,7 +2897,6 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, | |||
2899 | ret = -ENODEV; | 2897 | ret = -ENODEV; |
2900 | } | 2898 | } |
2901 | } | 2899 | } |
2902 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | ||
2903 | } | 2900 | } |
2904 | 2901 | ||
2905 | return ret; | 2902 | return ret; |
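The smsc-ircc2 hunk is pure refactoring: for_each_pci_dev() wraps exactly the pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev) get-first/get-next pattern it replaces, reference counting included (each call drops the previous device's reference and takes the next one's). Its definition in <linux/pci.h> is essentially:

	#define for_each_pci_dev(d) \
		while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)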
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index da54b38bb480..dcebc82c6f4d 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -54,14 +54,14 @@ struct ixgbe_stats { | |||
54 | sizeof(((struct ixgbe_adapter *)0)->m), \ | 54 | sizeof(((struct ixgbe_adapter *)0)->m), \ |
55 | offsetof(struct ixgbe_adapter, m) | 55 | offsetof(struct ixgbe_adapter, m) |
56 | #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ | 56 | #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ |
57 | sizeof(((struct net_device *)0)->m), \ | 57 | sizeof(((struct rtnl_link_stats64 *)0)->m), \ |
58 | offsetof(struct net_device, m) - offsetof(struct net_device, stats) | 58 | offsetof(struct rtnl_link_stats64, m) |
59 | 59 | ||
60 | static struct ixgbe_stats ixgbe_gstrings_stats[] = { | 60 | static struct ixgbe_stats ixgbe_gstrings_stats[] = { |
61 | {"rx_packets", IXGBE_NETDEV_STAT(stats.rx_packets)}, | 61 | {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, |
62 | {"tx_packets", IXGBE_NETDEV_STAT(stats.tx_packets)}, | 62 | {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, |
63 | {"rx_bytes", IXGBE_NETDEV_STAT(stats.rx_bytes)}, | 63 | {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, |
64 | {"tx_bytes", IXGBE_NETDEV_STAT(stats.tx_bytes)}, | 64 | {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, |
65 | {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, | 65 | {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, |
66 | {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, | 66 | {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, |
67 | {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, | 67 | {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, |
@@ -69,27 +69,27 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { | |||
69 | {"lsc_int", IXGBE_STAT(lsc_int)}, | 69 | {"lsc_int", IXGBE_STAT(lsc_int)}, |
70 | {"tx_busy", IXGBE_STAT(tx_busy)}, | 70 | {"tx_busy", IXGBE_STAT(tx_busy)}, |
71 | {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, | 71 | {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, |
72 | {"rx_errors", IXGBE_NETDEV_STAT(stats.rx_errors)}, | 72 | {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, |
73 | {"tx_errors", IXGBE_NETDEV_STAT(stats.tx_errors)}, | 73 | {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, |
74 | {"rx_dropped", IXGBE_NETDEV_STAT(stats.rx_dropped)}, | 74 | {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, |
75 | {"tx_dropped", IXGBE_NETDEV_STAT(stats.tx_dropped)}, | 75 | {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, |
76 | {"multicast", IXGBE_NETDEV_STAT(stats.multicast)}, | 76 | {"multicast", IXGBE_NETDEV_STAT(multicast)}, |
77 | {"broadcast", IXGBE_STAT(stats.bprc)}, | 77 | {"broadcast", IXGBE_STAT(stats.bprc)}, |
78 | {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, | 78 | {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, |
79 | {"collisions", IXGBE_NETDEV_STAT(stats.collisions)}, | 79 | {"collisions", IXGBE_NETDEV_STAT(collisions)}, |
80 | {"rx_over_errors", IXGBE_NETDEV_STAT(stats.rx_over_errors)}, | 80 | {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, |
81 | {"rx_crc_errors", IXGBE_NETDEV_STAT(stats.rx_crc_errors)}, | 81 | {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, |
82 | {"rx_frame_errors", IXGBE_NETDEV_STAT(stats.rx_frame_errors)}, | 82 | {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, |
83 | {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, | 83 | {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, |
84 | {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, | 84 | {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, |
85 | {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, | 85 | {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, |
86 | {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, | 86 | {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, |
87 | {"rx_fifo_errors", IXGBE_NETDEV_STAT(stats.rx_fifo_errors)}, | 87 | {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, |
88 | {"rx_missed_errors", IXGBE_NETDEV_STAT(stats.rx_missed_errors)}, | 88 | {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, |
89 | {"tx_aborted_errors", IXGBE_NETDEV_STAT(stats.tx_aborted_errors)}, | 89 | {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, |
90 | {"tx_carrier_errors", IXGBE_NETDEV_STAT(stats.tx_carrier_errors)}, | 90 | {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, |
91 | {"tx_fifo_errors", IXGBE_NETDEV_STAT(stats.tx_fifo_errors)}, | 91 | {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, |
92 | {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(stats.tx_heartbeat_errors)}, | 92 | {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, |
93 | {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, | 93 | {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, |
94 | {"tx_restart_queue", IXGBE_STAT(restart_queue)}, | 94 | {"tx_restart_queue", IXGBE_STAT(restart_queue)}, |
95 | {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, | 95 | {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 920375951454..7d6a415bcf88 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -4783,6 +4783,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4783 | #ifdef CONFIG_IXGBE_DCB | 4783 | #ifdef CONFIG_IXGBE_DCB |
4784 | /* Default traffic class to use for FCoE */ | 4784 | /* Default traffic class to use for FCoE */ |
4785 | adapter->fcoe.tc = IXGBE_FCOE_DEFTC; | 4785 | adapter->fcoe.tc = IXGBE_FCOE_DEFTC; |
4786 | adapter->fcoe.up = IXGBE_FCOE_DEFTC; | ||
4786 | #endif | 4787 | #endif |
4787 | #endif /* IXGBE_FCOE */ | 4788 | #endif /* IXGBE_FCOE */ |
4788 | } | 4789 | } |
@@ -6147,21 +6148,26 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
6147 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 6148 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
6148 | int txq = smp_processor_id(); | 6149 | int txq = smp_processor_id(); |
6149 | 6150 | ||
6151 | #ifdef IXGBE_FCOE | ||
6152 | if ((skb->protocol == htons(ETH_P_FCOE)) || | ||
6153 | (skb->protocol == htons(ETH_P_FIP))) { | ||
6154 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | ||
6155 | txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); | ||
6156 | txq += adapter->ring_feature[RING_F_FCOE].mask; | ||
6157 | return txq; | ||
6158 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
6159 | txq = adapter->fcoe.up; | ||
6160 | return txq; | ||
6161 | } | ||
6162 | } | ||
6163 | #endif | ||
6164 | |||
6150 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | 6165 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { |
6151 | while (unlikely(txq >= dev->real_num_tx_queues)) | 6166 | while (unlikely(txq >= dev->real_num_tx_queues)) |
6152 | txq -= dev->real_num_tx_queues; | 6167 | txq -= dev->real_num_tx_queues; |
6153 | return txq; | 6168 | return txq; |
6154 | } | 6169 | } |
6155 | 6170 | ||
6156 | #ifdef IXGBE_FCOE | ||
6157 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | ||
6158 | ((skb->protocol == htons(ETH_P_FCOE)) || | ||
6159 | (skb->protocol == htons(ETH_P_FIP)))) { | ||
6160 | txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); | ||
6161 | txq += adapter->ring_feature[RING_F_FCOE].mask; | ||
6162 | return txq; | ||
6163 | } | ||
6164 | #endif | ||
6165 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 6171 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
6166 | if (skb->priority == TC_PRIO_CONTROL) | 6172 | if (skb->priority == TC_PRIO_CONTROL) |
6167 | txq = adapter->ring_feature[RING_F_DCB].indices-1; | 6173 | txq = adapter->ring_feature[RING_F_DCB].indices-1; |
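Hoisting the FCoE/FIP test to the top of ixgbe_select_queue() is the functional change here: previously the flow-director branch could claim storage frames first, and the DCB-only fallback (txq = adapter->fcoe.up) did not exist at all. The two-line ring mapping assumes the FCoE ring count is a power of two:

	/* indices is a power of two, so (indices - 1) is a modulo mask */
	txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
	/* ...then offset into the FCoE ring range [mask, mask+indices) */
	txq += adapter->ring_feature[RING_F_FCOE].mask;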
@@ -6205,18 +6211,15 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | |||
6205 | tx_ring = adapter->tx_ring[skb->queue_mapping]; | 6211 | tx_ring = adapter->tx_ring[skb->queue_mapping]; |
6206 | 6212 | ||
6207 | #ifdef IXGBE_FCOE | 6213 | #ifdef IXGBE_FCOE |
6208 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | 6214 | /* for FCoE with DCB, we force the priority to what |
6209 | #ifdef CONFIG_IXGBE_DCB | 6215 | * was specified by the switch */ |
6210 | /* for FCoE with DCB, we force the priority to what | 6216 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && |
6211 | * was specified by the switch */ | 6217 | (skb->protocol == htons(ETH_P_FCOE) || |
6212 | if ((skb->protocol == htons(ETH_P_FCOE)) || | 6218 | skb->protocol == htons(ETH_P_FIP))) { |
6213 | (skb->protocol == htons(ETH_P_FIP))) { | 6219 | tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK |
6214 | tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK | 6220 | << IXGBE_TX_FLAGS_VLAN_SHIFT); |
6215 | << IXGBE_TX_FLAGS_VLAN_SHIFT); | 6221 | tx_flags |= ((adapter->fcoe.up << 13) |
6216 | tx_flags |= ((adapter->fcoe.up << 13) | 6222 | << IXGBE_TX_FLAGS_VLAN_SHIFT); |
6217 | << IXGBE_TX_FLAGS_VLAN_SHIFT); | ||
6218 | } | ||
6219 | #endif | ||
6220 | /* flag for FCoE offloads */ | 6223 | /* flag for FCoE offloads */ |
6221 | if (skb->protocol == htons(ETH_P_FCOE)) | 6224 | if (skb->protocol == htons(ETH_P_FCOE)) |
6222 | tx_flags |= IXGBE_TX_FLAGS_FCOE; | 6225 | tx_flags |= IXGBE_TX_FLAGS_FCOE; |
@@ -6536,6 +6539,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6536 | #endif | 6539 | #endif |
6537 | u32 part_num, eec; | 6540 | u32 part_num, eec; |
6538 | 6541 | ||
6542 | /* Catch broken hardware that put the wrong VF device ID in | ||
6543 | * the PCIe SR-IOV capability. | ||
6544 | */ | ||
6545 | if (pdev->is_virtfn) { | ||
6546 | WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", | ||
6547 | pci_name(pdev), pdev->vendor, pdev->device); | ||
6548 | return -EINVAL; | ||
6549 | } | ||
6550 | |||
6539 | err = pci_enable_device_mem(pdev); | 6551 | err = pci_enable_device_mem(pdev); |
6540 | if (err) | 6552 | if (err) |
6541 | return err; | 6553 | return err; |
@@ -6549,8 +6561,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6549 | err = dma_set_coherent_mask(&pdev->dev, | 6561 | err = dma_set_coherent_mask(&pdev->dev, |
6550 | DMA_BIT_MASK(32)); | 6562 | DMA_BIT_MASK(32)); |
6551 | if (err) { | 6563 | if (err) { |
6552 | e_dev_err("No usable DMA configuration, " | 6564 | dev_err(&pdev->dev, |
6553 | "aborting\n"); | 6565 | "No usable DMA configuration, aborting\n"); |
6554 | goto err_dma; | 6566 | goto err_dma; |
6555 | } | 6567 | } |
6556 | } | 6568 | } |
@@ -6560,7 +6572,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6560 | err = pci_request_selected_regions(pdev, pci_select_bars(pdev, | 6572 | err = pci_request_selected_regions(pdev, pci_select_bars(pdev, |
6561 | IORESOURCE_MEM), ixgbe_driver_name); | 6573 | IORESOURCE_MEM), ixgbe_driver_name); |
6562 | if (err) { | 6574 | if (err) { |
6563 | e_dev_err("pci_request_selected_regions failed 0x%x\n", err); | 6575 | dev_err(&pdev->dev, |
6576 | "pci_request_selected_regions failed 0x%x\n", err); | ||
6564 | goto err_pci_reg; | 6577 | goto err_pci_reg; |
6565 | } | 6578 | } |
6566 | 6579 | ||
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index af491352b5e0..4867440ecfa8 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c | |||
@@ -2229,7 +2229,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) | |||
2229 | if (err) { | 2229 | if (err) { |
2230 | dev_info(&pdev->dev, | 2230 | dev_info(&pdev->dev, |
2231 | "PF still in reset state, assigning new address\n"); | 2231 | "PF still in reset state, assigning new address\n"); |
2232 | random_ether_addr(hw->mac.addr); | 2232 | dev_hw_addr_random(adapter->netdev, hw->mac.addr); |
2233 | } else { | 2233 | } else { |
2234 | err = hw->mac.ops.init_hw(hw); | 2234 | err = hw->mac.ops.init_hw(hw); |
2235 | if (err) { | 2235 | if (err) { |
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c index 634dad1c8b48..928b2b83cef5 100644 --- a/drivers/net/ks8842.c +++ b/drivers/net/ks8842.c | |||
@@ -30,11 +30,19 @@ | |||
30 | #include <linux/etherdevice.h> | 30 | #include <linux/etherdevice.h> |
31 | #include <linux/ethtool.h> | 31 | #include <linux/ethtool.h> |
32 | #include <linux/ks8842.h> | 32 | #include <linux/ks8842.h> |
33 | #include <linux/dmaengine.h> | ||
34 | #include <linux/dma-mapping.h> | ||
35 | #include <linux/scatterlist.h> | ||
33 | 36 | ||
34 | #define DRV_NAME "ks8842" | 37 | #define DRV_NAME "ks8842" |
35 | 38 | ||
36 | /* Timberdale specific Registers */ | 39 | /* Timberdale specific Registers */ |
37 | #define REG_TIMB_RST 0x1c | 40 | #define REG_TIMB_RST 0x1c |
41 | #define REG_TIMB_FIFO 0x20 | ||
42 | #define REG_TIMB_ISR 0x24 | ||
43 | #define REG_TIMB_IER 0x28 | ||
44 | #define REG_TIMB_IAR 0x2C | ||
45 | #define REQ_TIMB_DMA_RESUME 0x30 | ||
38 | 46 | ||
39 | /* KS8842 registers */ | 47 | /* KS8842 registers */ |
40 | 48 | ||
@@ -77,6 +85,15 @@ | |||
77 | #define IRQ_RX_ERROR 0x0080 | 85 | #define IRQ_RX_ERROR 0x0080 |
78 | #define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \ | 86 | #define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \ |
79 | IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) | 87 | IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) |
88 | /* When running via timberdale in DMA mode, the RX interrupt should be | ||
89 | enabled in the KS8842, but not in the FPGA IP, since the IP handles | ||
90 | RX DMA internally. | ||
91 | TX interrupts are not needed it is handled by the FPGA the driver is | ||
92 | notified via DMA callbacks. | ||
93 | */ | ||
94 | #define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \ | ||
95 | IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) | ||
96 | #define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX) | ||
80 | #define REG_ISR 0x02 | 97 | #define REG_ISR 0x02 |
81 | #define REG_RXSR 0x04 | 98 | #define REG_RXSR 0x04 |
82 | #define RXSR_VALID 0x8000 | 99 | #define RXSR_VALID 0x8000 |
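The two new masks split interrupt routing between the two chips involved: ENABLED_IRQS_DMA_IP is programmed into the timberdale FPGA (no IRQ_RX, no IRQ_TX -- both directions are serviced by its DMA engine), while ENABLED_IRQS_DMA, written to the KS8842 itself, adds IRQ_RX back, because that line is what tells the FPGA's RX FIFO that data is pending. The ks8842_reset_hw() hunk further down applies them in exactly that pairing:

	iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
	ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);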
@@ -119,6 +136,28 @@ | |||
119 | #define MICREL_KS884X 0x01 /* 0=Timeberdale(FPGA), 1=Micrel */ | 136 | #define MICREL_KS884X 0x01 /* 0=Timeberdale(FPGA), 1=Micrel */ |
120 | #define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */ | 137 | #define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */ |
121 | 138 | ||
139 | #define DMA_BUFFER_SIZE 2048 | ||
140 | |||
141 | struct ks8842_tx_dma_ctl { | ||
142 | struct dma_chan *chan; | ||
143 | struct dma_async_tx_descriptor *adesc; | ||
144 | void *buf; | ||
145 | struct scatterlist sg; | ||
146 | int channel; | ||
147 | }; | ||
148 | |||
149 | struct ks8842_rx_dma_ctl { | ||
150 | struct dma_chan *chan; | ||
151 | struct dma_async_tx_descriptor *adesc; | ||
152 | struct sk_buff *skb; | ||
153 | struct scatterlist sg; | ||
154 | struct tasklet_struct tasklet; | ||
155 | int channel; | ||
156 | }; | ||
157 | |||
158 | #define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \ | ||
159 | ((adapter)->dma_rx.channel != -1)) | ||
160 | |||
122 | struct ks8842_adapter { | 161 | struct ks8842_adapter { |
123 | void __iomem *hw_addr; | 162 | void __iomem *hw_addr; |
124 | int irq; | 163 | int irq; |
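DMA is opt-in per direction: a channel number of -1 disables that side, and KS8842_USE_DMA() only reports true when both directions have a channel. A hypothetical probe-time sketch -- the platform-data field names are assumptions for illustration:

	adapter->dma_tx.channel = pdata ? pdata->tx_dma_channel : -1;
	adapter->dma_rx.channel = pdata ? pdata->rx_dma_channel : -1;

	if (KS8842_USE_DMA(adapter))
		netdev_dbg(netdev, "RX and TX over timberdale DMA\n");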
@@ -127,8 +166,19 @@ struct ks8842_adapter { | |||
127 | spinlock_t lock; /* spinlock to be interrupt safe */ | 166 | spinlock_t lock; /* spinlock to be interrupt safe */ |
128 | struct work_struct timeout_work; | 167 | struct work_struct timeout_work; |
129 | struct net_device *netdev; | 168 | struct net_device *netdev; |
169 | struct device *dev; | ||
170 | struct ks8842_tx_dma_ctl dma_tx; | ||
171 | struct ks8842_rx_dma_ctl dma_rx; | ||
130 | }; | 172 | }; |
131 | 173 | ||
174 | static void ks8842_dma_rx_cb(void *data); | ||
175 | static void ks8842_dma_tx_cb(void *data); | ||
176 | |||
177 | static inline void ks8842_resume_dma(struct ks8842_adapter *adapter) | ||
178 | { | ||
179 | iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME); | ||
180 | } | ||
181 | |||
132 | static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank) | 182 | static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank) |
133 | { | 183 | { |
134 | iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK); | 184 | iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK); |
@@ -282,10 +332,6 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter) | |||
282 | /* restart port auto-negotiation */ | 332 | /* restart port auto-negotiation */ |
283 | ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4); | 333 | ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4); |
284 | 334 | ||
285 | if (!(adapter->conf_flags & MICREL_KS884X)) | ||
286 | /* only advertise 10Mbps */ | ||
287 | ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4); | ||
288 | |||
289 | /* Enable the transmitter */ | 335 | /* Enable the transmitter */ |
290 | ks8842_enable_tx(adapter); | 336 | ks8842_enable_tx(adapter); |
291 | 337 | ||
@@ -296,8 +342,19 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter) | |||
296 | ks8842_write16(adapter, 18, 0xffff, REG_ISR); | 342 | ks8842_write16(adapter, 18, 0xffff, REG_ISR); |
297 | 343 | ||
298 | /* enable interrupts */ | 344 | /* enable interrupts */ |
299 | ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); | 345 | if (KS8842_USE_DMA(adapter)) { |
300 | 346 | /* When running in DMA mode the RX interrupt is not enabled in | |
347 | timberdale, because RX data is received via DMA callbacks; | ||
348 | it must still be enabled in the KS8842 because it indicates | ||
349 | to timberdale when there is RX data for its DMA FIFOs */ | ||
350 | iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER); | ||
351 | ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); | ||
352 | } else { | ||
353 | if (!(adapter->conf_flags & MICREL_KS884X)) | ||
354 | iowrite16(ENABLED_IRQS, | ||
355 | adapter->hw_addr + REG_TIMB_IER); | ||
356 | ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); | ||
357 | } | ||
301 | /* enable the switch */ | 358 | /* enable the switch */ |
302 | ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); | 359 | ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); |
303 | } | 360 | } |
@@ -370,6 +427,53 @@ static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter) | |||
370 | return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; | 427 | return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; |
371 | } | 428 | } |
372 | 429 | ||
430 | static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) | ||
431 | { | ||
432 | struct ks8842_adapter *adapter = netdev_priv(netdev); | ||
433 | struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; | ||
434 | u8 *buf = ctl->buf; | ||
435 | |||
436 | if (ctl->adesc) { | ||
437 | netdev_dbg(netdev, "%s: TX ongoing\n", __func__); | ||
438 | /* transfer ongoing */ | ||
439 | return NETDEV_TX_BUSY; | ||
440 | } | ||
441 | |||
442 | sg_dma_len(&ctl->sg) = skb->len + sizeof(u32); | ||
443 | |||
444 | /* copy data to the TX buffer */ | ||
445 | /* the control word, enable IRQ, port 1 and the length */ | ||
446 | *buf++ = 0x00; | ||
447 | *buf++ = 0x01; /* Port 1 */ | ||
448 | *buf++ = skb->len & 0xff; | ||
449 | *buf++ = (skb->len >> 8) & 0xff; | ||
450 | skb_copy_from_linear_data(skb, buf, skb->len); | ||
451 | |||
452 | dma_sync_single_range_for_device(adapter->dev, | ||
453 | sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg), | ||
454 | DMA_TO_DEVICE); | ||
455 | |||
456 | /* make sure the length is a multiple of 4 */ | ||
457 | if (sg_dma_len(&ctl->sg) % 4) | ||
458 | sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; | ||
459 | |||
460 | ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, | ||
461 | &ctl->sg, 1, DMA_TO_DEVICE, | ||
462 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | ||
463 | if (!ctl->adesc) | ||
464 | return NETDEV_TX_BUSY; | ||
465 | |||
466 | ctl->adesc->callback_param = netdev; | ||
467 | ctl->adesc->callback = ks8842_dma_tx_cb; | ||
468 | ctl->adesc->tx_submit(ctl->adesc); | ||
469 | |||
470 | netdev->stats.tx_bytes += skb->len; | ||
471 | |||
472 | dev_kfree_skb(skb); | ||
473 | |||
474 | return NETDEV_TX_OK; | ||
475 | } | ||
476 | |||
373 | static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) | 477 | static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) |
374 | { | 478 | { |
375 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 479 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
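ks8842_tx_frame_dma() serializes to one frame in flight (a non-NULL adesc returns NETDEV_TX_BUSY) and prepends a 4-byte header: byte 0 control flags, byte 1 the destination port bitmap (0x01 = port 1), bytes 2-3 the frame length, little-endian. The manual round-up of the DMA length to a 4-byte multiple is equivalent to the kernel's ALIGN() macro:

	sg_dma_len(&ctl->sg) = ALIGN(skb->len + sizeof(u32), 4);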
@@ -421,6 +525,121 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) | |||
421 | return NETDEV_TX_OK; | 525 | return NETDEV_TX_OK; |
422 | } | 526 | } |
423 | 527 | ||
528 | static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status) | ||
529 | { | ||
530 | netdev_dbg(netdev, "RX error, status: %x\n", status); | ||
531 | |||
532 | netdev->stats.rx_errors++; | ||
533 | if (status & RXSR_TOO_LONG) | ||
534 | netdev->stats.rx_length_errors++; | ||
535 | if (status & RXSR_CRC_ERROR) | ||
536 | netdev->stats.rx_crc_errors++; | ||
537 | if (status & RXSR_RUNT) | ||
538 | netdev->stats.rx_frame_errors++; | ||
539 | } | ||
540 | |||
541 | static void ks8842_update_rx_counters(struct net_device *netdev, u32 status, | ||
542 | int len) | ||
543 | { | ||
544 | netdev_dbg(netdev, "RX packet, len: %d\n", len); | ||
545 | |||
546 | netdev->stats.rx_packets++; | ||
547 | netdev->stats.rx_bytes += len; | ||
548 | if (status & RXSR_MULTICAST) | ||
549 | netdev->stats.multicast++; | ||
550 | } | ||
551 | |||
552 | static int __ks8842_start_new_rx_dma(struct net_device *netdev) | ||
553 | { | ||
554 | struct ks8842_adapter *adapter = netdev_priv(netdev); | ||
555 | struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; | ||
556 | struct scatterlist *sg = &ctl->sg; | ||
557 | int err; | ||
558 | |||
559 | ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE); | ||
560 | if (ctl->skb) { | ||
561 | sg_init_table(sg, 1); | ||
562 | sg_dma_address(sg) = dma_map_single(adapter->dev, | ||
563 | ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
564 | err = dma_mapping_error(adapter->dev, sg_dma_address(sg)); | ||
565 | if (unlikely(err)) { | ||
566 | sg_dma_address(sg) = 0; | ||
567 | goto out; | ||
568 | } | ||
569 | |||
570 | sg_dma_len(sg) = DMA_BUFFER_SIZE; | ||
571 | |||
572 | ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, | ||
573 | sg, 1, DMA_FROM_DEVICE, | ||
574 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | ||
575 | |||
576 | if (!ctl->adesc) | ||
577 | goto out; | ||
578 | |||
579 | ctl->adesc->callback_param = netdev; | ||
580 | ctl->adesc->callback = ks8842_dma_rx_cb; | ||
581 | ctl->adesc->tx_submit(ctl->adesc); | ||
582 | } else { | ||
583 | err = -ENOMEM; | ||
584 | sg_dma_address(sg) = 0; | ||
585 | goto out; | ||
586 | } | ||
587 | |||
588 | return err; | ||
589 | out: | ||
590 | if (sg_dma_address(sg)) | ||
591 | dma_unmap_single(adapter->dev, sg_dma_address(sg), | ||
592 | DMA_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
593 | sg_dma_address(sg) = 0; | ||
594 | if (ctl->skb) | ||
595 | dev_kfree_skb(ctl->skb); | ||
596 | |||
597 | ctl->skb = NULL; | ||
598 | |||
599 | printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err); | ||
600 | return err; | ||
601 | } | ||
602 | |||
603 | static void ks8842_rx_frame_dma_tasklet(unsigned long arg) | ||
604 | { | ||
605 | struct net_device *netdev = (struct net_device *)arg; | ||
606 | struct ks8842_adapter *adapter = netdev_priv(netdev); | ||
607 | struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; | ||
608 | struct sk_buff *skb = ctl->skb; | ||
609 | dma_addr_t addr = sg_dma_address(&ctl->sg); | ||
610 | u32 status; | ||
611 | |||
612 | ctl->adesc = NULL; | ||
613 | |||
614 | /* kick next transfer going */ | ||
615 | __ks8842_start_new_rx_dma(netdev); | ||
616 | |||
617 | /* now handle the data we got */ | ||
618 | dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
619 | |||
620 | status = *((u32 *)skb->data); | ||
621 | |||
622 | netdev_dbg(netdev, "%s - rx_data: status: %x\n", | ||
623 | __func__, status & 0xffff); | ||
624 | |||
625 | /* check the status */ | ||
626 | if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { | ||
627 | int len = (status >> 16) & 0x7ff; | ||
628 | |||
629 | ks8842_update_rx_counters(netdev, status, len); | ||
630 | |||
631 | /* skip the 4-byte status word at the head of the buffer */ | ||
632 | skb_reserve(skb, 4); | ||
633 | skb_put(skb, len); | ||
634 | |||
635 | skb->protocol = eth_type_trans(skb, netdev); | ||
636 | netif_rx(skb); | ||
637 | } else { | ||
638 | ks8842_update_rx_err_counters(netdev, status); | ||
639 | dev_kfree_skb(skb); | ||
640 | } | ||
641 | } | ||
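
The status word the tasklet decodes is the first dword the hardware deposits in the RX buffer: status flags in the low 16 bits, frame length in bits 16..26 (hence the 0x7ff mask, 2047 bytes max). Restated as a small helper, with the layout taken from the code above:

    /* Decode the KS8842 RX status dword that precedes the frame. */
    static void decode_rx_status(const struct sk_buff *skb,
                                 u16 *flags, int *len)
    {
            u32 status = *(const u32 *)skb->data;

            *flags = status & 0xffff;            /* RXSR_* bits */
            *len   = (status >> 16) & 0x7ff;     /* up to 2047 bytes */
    }

The skb_reserve(skb, 4) above then steps over this word so the Ethernet header begins at skb->data.
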
642 | |||
424 | static void ks8842_rx_frame(struct net_device *netdev, | 643 | static void ks8842_rx_frame(struct net_device *netdev, |
425 | struct ks8842_adapter *adapter) | 644 | struct ks8842_adapter *adapter) |
426 | { | 645 | { |
@@ -444,13 +663,9 @@ static void ks8842_rx_frame(struct net_device *netdev, | |||
444 | if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { | 663 | if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { |
445 | struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); | 664 | struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); |
446 | 665 | ||
447 | netdev_dbg(netdev, "%s, got package, len: %d\n", __func__, len); | ||
448 | if (skb) { | 666 | if (skb) { |
449 | 667 | ||
450 | netdev->stats.rx_packets++; | 668 | ks8842_update_rx_counters(netdev, status, len); |
451 | netdev->stats.rx_bytes += len; | ||
452 | if (status & RXSR_MULTICAST) | ||
453 | netdev->stats.multicast++; | ||
454 | 669 | ||
455 | if (adapter->conf_flags & KS884X_16BIT) { | 670 | if (adapter->conf_flags & KS884X_16BIT) { |
456 | u16 *data16 = (u16 *)skb_put(skb, len); | 671 | u16 *data16 = (u16 *)skb_put(skb, len); |
@@ -476,16 +691,8 @@ static void ks8842_rx_frame(struct net_device *netdev, | |||
476 | netif_rx(skb); | 691 | netif_rx(skb); |
477 | } else | 692 | } else |
478 | netdev->stats.rx_dropped++; | 693 | netdev->stats.rx_dropped++; |
479 | } else { | 694 | } else |
480 | netdev_dbg(netdev, "RX error, status: %x\n", status); | 695 | ks8842_update_rx_err_counters(netdev, status); |
481 | netdev->stats.rx_errors++; | ||
482 | if (status & RXSR_TOO_LONG) | ||
483 | netdev->stats.rx_length_errors++; | ||
484 | if (status & RXSR_CRC_ERROR) | ||
485 | netdev->stats.rx_crc_errors++; | ||
486 | if (status & RXSR_RUNT) | ||
487 | netdev->stats.rx_frame_errors++; | ||
488 | } | ||
489 | 696 | ||
490 | /* set high watermark to 3K */ | 697 | /* set high watermark to 3K */ |
491 | ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); | 698 | ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); |
@@ -540,18 +747,30 @@ void ks8842_tasklet(unsigned long arg) | |||
540 | isr = ks8842_read16(adapter, 18, REG_ISR); | 747 | isr = ks8842_read16(adapter, 18, REG_ISR); |
541 | netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); | 748 | netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); |
542 | 749 | ||
750 | /* when running in DMA mode, do not ack RX interrupts; they are handled | ||
751 | internally by timberdale, otherwise its DMA FIFOs would stall | ||
752 | */ | ||
753 | if (KS8842_USE_DMA(adapter)) | ||
754 | isr &= ~IRQ_RX; | ||
755 | |||
543 | /* Ack */ | 756 | /* Ack */ |
544 | ks8842_write16(adapter, 18, isr, REG_ISR); | 757 | ks8842_write16(adapter, 18, isr, REG_ISR); |
545 | 758 | ||
759 | if (!(adapter->conf_flags & MICREL_KS884X)) | ||
760 | /* Ack in the timberdale IP as well */ | ||
761 | iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR); | ||
762 | |||
546 | if (!netif_running(netdev)) | 763 | if (!netif_running(netdev)) |
547 | return; | 764 | return; |
548 | 765 | ||
549 | if (isr & IRQ_LINK_CHANGE) | 766 | if (isr & IRQ_LINK_CHANGE) |
550 | ks8842_update_link_status(netdev, adapter); | 767 | ks8842_update_link_status(netdev, adapter); |
551 | 768 | ||
552 | if (isr & (IRQ_RX | IRQ_RX_ERROR)) | 769 | /* should not get IRQ_RX when running DMA mode */ |
770 | if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter)) | ||
553 | ks8842_handle_rx(netdev, adapter); | 771 | ks8842_handle_rx(netdev, adapter); |
554 | 772 | ||
773 | /* should only happen when in PIO mode */ | ||
555 | if (isr & IRQ_TX) | 774 | if (isr & IRQ_TX) |
556 | ks8842_handle_tx(netdev, adapter); | 775 | ks8842_handle_tx(netdev, adapter); |
557 | 776 | ||
@@ -570,8 +789,17 @@ void ks8842_tasklet(unsigned long arg) | |||
570 | 789 | ||
571 | /* re-enable interrupts, put back the bank selection register */ | 790 | /* re-enable interrupts, put back the bank selection register */ |
572 | spin_lock_irqsave(&adapter->lock, flags); | 791 | spin_lock_irqsave(&adapter->lock, flags); |
573 | ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); | 792 | if (KS8842_USE_DMA(adapter)) |
793 | ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); | ||
794 | else | ||
795 | ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); | ||
574 | iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); | 796 | iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); |
797 | |||
798 | /* Make sure timberdale continues DMA operations; they are stopped while | ||
799 | we are handling the ks8842 because we might change banks */ | ||
800 | if (KS8842_USE_DMA(adapter)) | ||
801 | ks8842_resume_dma(adapter); | ||
802 | |||
575 | spin_unlock_irqrestore(&adapter->lock, flags); | 803 | spin_unlock_irqrestore(&adapter->lock, flags); |
576 | } | 804 | } |
577 | 805 | ||
@@ -587,8 +815,12 @@ static irqreturn_t ks8842_irq(int irq, void *devid) | |||
587 | netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); | 815 | netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); |
588 | 816 | ||
589 | if (isr) { | 817 | if (isr) { |
590 | /* disable IRQ */ | 818 | if (KS8842_USE_DMA(adapter)) |
591 | ks8842_write16(adapter, 18, 0x00, REG_IER); | 819 | /* disable all but RX IRQ, since the FPGA relies on it */
820 | ks8842_write16(adapter, 18, IRQ_RX, REG_IER); | ||
821 | else | ||
822 | /* disable IRQ */ | ||
823 | ks8842_write16(adapter, 18, 0x00, REG_IER); | ||
592 | 824 | ||
593 | /* schedule tasklet */ | 825 | /* schedule tasklet */ |
594 | tasklet_schedule(&adapter->tasklet); | 826 | tasklet_schedule(&adapter->tasklet); |
@@ -598,9 +830,151 @@ static irqreturn_t ks8842_irq(int irq, void *devid) | |||
598 | 830 | ||
599 | iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); | 831 | iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); |
600 | 832 | ||
833 | /* After an interrupt, tell timberdale to continue DMA operations. | ||
834 | DMA is disabled while we are handling the ks8842 because we might | ||
835 | change bank */ | ||
836 | ks8842_resume_dma(adapter); | ||
837 | |||
601 | return ret; | 838 | return ret; |
602 | } | 839 | } |
603 | 840 | ||
841 | static void ks8842_dma_rx_cb(void *data) | ||
842 | { | ||
843 | struct net_device *netdev = data; | ||
844 | struct ks8842_adapter *adapter = netdev_priv(netdev); | ||
845 | |||
846 | netdev_dbg(netdev, "RX DMA finished\n"); | ||
847 | /* schedule tasklet */ | ||
848 | if (adapter->dma_rx.adesc) | ||
849 | tasklet_schedule(&adapter->dma_rx.tasklet); | ||
850 | } | ||
851 | |||
852 | static void ks8842_dma_tx_cb(void *data) | ||
853 | { | ||
854 | struct net_device *netdev = data; | ||
855 | struct ks8842_adapter *adapter = netdev_priv(netdev); | ||
856 | struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; | ||
857 | |||
858 | netdev_dbg(netdev, "TX DMA finished\n"); | ||
859 | |||
860 | if (!ctl->adesc) | ||
861 | return; | ||
862 | |||
863 | netdev->stats.tx_packets++; | ||
864 | ctl->adesc = NULL; | ||
865 | |||
866 | if (netif_queue_stopped(netdev)) | ||
867 | netif_wake_queue(netdev); | ||
868 | } | ||
869 | |||
870 | static void ks8842_stop_dma(struct ks8842_adapter *adapter) | ||
871 | { | ||
872 | struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; | ||
873 | struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; | ||
874 | |||
875 | tx_ctl->adesc = NULL; | ||
876 | if (tx_ctl->chan) | ||
877 | tx_ctl->chan->device->device_control(tx_ctl->chan, | ||
878 | DMA_TERMINATE_ALL, 0); | ||
879 | |||
880 | rx_ctl->adesc = NULL; | ||
881 | if (rx_ctl->chan) | ||
882 | rx_ctl->chan->device->device_control(rx_ctl->chan, | ||
883 | DMA_TERMINATE_ALL, 0); | ||
884 | |||
885 | if (sg_dma_address(&rx_ctl->sg)) | ||
886 | dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg), | ||
887 | DMA_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
888 | sg_dma_address(&rx_ctl->sg) = 0; | ||
889 | |||
890 | dev_kfree_skb(rx_ctl->skb); | ||
891 | rx_ctl->skb = NULL; | ||
892 | } | ||
893 | |||
894 | static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter) | ||
895 | { | ||
896 | struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; | ||
897 | struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; | ||
898 | |||
899 | ks8842_stop_dma(adapter); | ||
900 | |||
901 | if (tx_ctl->chan) | ||
902 | dma_release_channel(tx_ctl->chan); | ||
903 | tx_ctl->chan = NULL; | ||
904 | |||
905 | if (rx_ctl->chan) | ||
906 | dma_release_channel(rx_ctl->chan); | ||
907 | rx_ctl->chan = NULL; | ||
908 | |||
909 | tasklet_kill(&rx_ctl->tasklet); | ||
910 | |||
911 | if (sg_dma_address(&tx_ctl->sg)) | ||
912 | dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg), | ||
913 | DMA_BUFFER_SIZE, DMA_TO_DEVICE); | ||
914 | sg_dma_address(&tx_ctl->sg) = 0; | ||
915 | |||
916 | kfree(tx_ctl->buf); | ||
917 | tx_ctl->buf = NULL; | ||
918 | } | ||
919 | |||
920 | static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param) | ||
921 | { | ||
922 | return chan->chan_id == (long)filter_param; | ||
923 | } | ||
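
ks8842_dma_filter_fn is a standard dma_request_channel() filter: the core walks every capable channel and offers each to the filter until it returns true. A minimal sketch of the pairing (needs <linux/dmaengine.h>; the channel id travels through the opaque parameter, cast to long to survive the void * round trip):

    static bool pick_chan(struct dma_chan *chan, void *param)
    {
            return chan->chan_id == (long)param;   /* match requested id */
    }

    static struct dma_chan *get_channel(int id)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);
            dma_cap_set(DMA_PRIVATE, mask);  /* claim the channel exclusively */
            return dma_request_channel(mask, pick_chan, (void *)(long)id);
    }

DMA_PRIVATE keeps the channel out of the general-purpose pool for as long as the driver holds it.
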
924 | |||
925 | static int ks8842_alloc_dma_bufs(struct net_device *netdev) | ||
926 | { | ||
927 | struct ks8842_adapter *adapter = netdev_priv(netdev); | ||
928 | struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; | ||
929 | struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; | ||
930 | int err; | ||
931 | |||
932 | dma_cap_mask_t mask; | ||
933 | |||
934 | dma_cap_zero(mask); | ||
935 | dma_cap_set(DMA_SLAVE, mask); | ||
936 | dma_cap_set(DMA_PRIVATE, mask); | ||
937 | |||
938 | sg_init_table(&tx_ctl->sg, 1); | ||
939 | |||
940 | tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, | ||
941 | (void *)(long)tx_ctl->channel); | ||
942 | if (!tx_ctl->chan) { | ||
943 | err = -ENODEV; | ||
944 | goto err; | ||
945 | } | ||
946 | |||
947 | /* allocate DMA buffer */ | ||
948 | tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL); | ||
949 | if (!tx_ctl->buf) { | ||
950 | err = -ENOMEM; | ||
951 | goto err; | ||
952 | } | ||
953 | |||
954 | sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, | ||
955 | tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); | ||
956 | err = dma_mapping_error(adapter->dev, | ||
957 | sg_dma_address(&tx_ctl->sg)); | ||
958 | if (err) { | ||
959 | sg_dma_address(&tx_ctl->sg) = 0; | ||
960 | goto err; | ||
961 | } | ||
962 | |||
963 | rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, | ||
964 | (void *)(long)rx_ctl->channel); | ||
965 | if (!rx_ctl->chan) { | ||
966 | err = -ENODEV; | ||
967 | goto err; | ||
968 | } | ||
969 | |||
970 | tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet, | ||
971 | (unsigned long)netdev); | ||
972 | |||
973 | return 0; | ||
974 | err: | ||
975 | ks8842_dealloc_dma_bufs(adapter); | ||
976 | return err; | ||
977 | } | ||
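
ks8842_alloc_dma_bufs() uses the kernel's single-exit cleanup idiom: every failure jumps to one label that releases whatever was acquired, which only works because ks8842_dealloc_dma_bufs() tolerates half-initialized state (NULL channels, zero DMA addresses). A generic sketch of the shape, with hypothetical names:

    static int alloc_all(struct thing *t)
    {
            int err;

            err = get_a(t);             /* hypothetical acquisitions */
            if (err)
                    goto unwind;
            err = get_b(t);
            if (err)
                    goto unwind;
            return 0;
    unwind:
            put_everything(t);          /* must cope with partial init */
            return err;
    }
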
604 | 978 | ||
605 | /* Netdevice operations */ | 979 | /* Netdevice operations */ |
606 | 980 | ||
@@ -611,6 +985,25 @@ static int ks8842_open(struct net_device *netdev) | |||
611 | 985 | ||
612 | netdev_dbg(netdev, "%s - entry\n", __func__); | 986 | netdev_dbg(netdev, "%s - entry\n", __func__); |
613 | 987 | ||
988 | if (KS8842_USE_DMA(adapter)) { | ||
989 | err = ks8842_alloc_dma_bufs(netdev); | ||
990 | |||
991 | if (!err) { | ||
992 | /* start RX dma */ | ||
993 | err = __ks8842_start_new_rx_dma(netdev); | ||
994 | if (err) | ||
995 | ks8842_dealloc_dma_bufs(adapter); | ||
996 | } | ||
997 | |||
998 | if (err) { | ||
999 | printk(KERN_WARNING DRV_NAME | ||
1000 | ": Failed to initiate DMA, running PIO\n"); | ||
1001 | ks8842_dealloc_dma_bufs(adapter); | ||
1002 | adapter->dma_rx.channel = -1; | ||
1003 | adapter->dma_tx.channel = -1; | ||
1004 | } | ||
1005 | } | ||
1006 | |||
614 | /* reset the HW */ | 1007 | /* reset the HW */ |
615 | ks8842_reset_hw(adapter); | 1008 | ks8842_reset_hw(adapter); |
616 | 1009 | ||
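
The open path tries DMA first and degrades to PIO on any failure. Note that when __ks8842_start_new_rx_dma() fails, ks8842_dealloc_dma_bufs() runs twice, which is harmless precisely because dealloc is written to tolerate already-cleared state; setting both channel numbers to -1 is what flips KS8842_USE_DMA() off for the rest of the session. A condensed sketch of the policy (ks8842_setup_dma is a hypothetical wrapper, not a function in the patch):

    if (KS8842_USE_DMA(adapter) && ks8842_setup_dma(netdev) < 0) {
            /* hypothetical wrapper around ks8842_alloc_dma_bufs()
             * plus __ks8842_start_new_rx_dma() */
            adapter->dma_rx.channel = -1;
            adapter->dma_tx.channel = -1;   /* PIO from here on */
    }
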
@@ -636,6 +1029,9 @@ static int ks8842_close(struct net_device *netdev) | |||
636 | 1029 | ||
637 | cancel_work_sync(&adapter->timeout_work); | 1030 | cancel_work_sync(&adapter->timeout_work); |
638 | 1031 | ||
1032 | if (KS8842_USE_DMA(adapter)) | ||
1033 | ks8842_dealloc_dma_bufs(adapter); | ||
1034 | |||
639 | /* free the irq */ | 1035 | /* free the irq */ |
640 | free_irq(adapter->irq, netdev); | 1036 | free_irq(adapter->irq, netdev); |
641 | 1037 | ||
@@ -653,6 +1049,17 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb, | |||
653 | 1049 | ||
654 | netdev_dbg(netdev, "%s: entry\n", __func__); | 1050 | netdev_dbg(netdev, "%s: entry\n", __func__); |
655 | 1051 | ||
1052 | if (KS8842_USE_DMA(adapter)) { | ||
1053 | unsigned long flags; | ||
1054 | ret = ks8842_tx_frame_dma(skb, netdev); | ||
1055 | /* for now, only allow one transfer at a time */ | ||
1056 | spin_lock_irqsave(&adapter->lock, flags); | ||
1057 | if (adapter->dma_tx.adesc) | ||
1058 | netif_stop_queue(netdev); | ||
1059 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1060 | return ret; | ||
1061 | } | ||
1062 | |||
656 | ret = ks8842_tx_frame(skb, netdev); | 1063 | ret = ks8842_tx_frame(skb, netdev); |
657 | 1064 | ||
658 | if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8) | 1065 | if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8) |
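
In DMA mode the driver allows a single in-flight TX descriptor: after submitting, it stops the queue whenever dma_tx.adesc is still set, and ks8842_dma_tx_cb() clears adesc and wakes the queue on completion. The lock matters because the completion callback can run between the submit and the check; condensed from the two hunks:

    /* xmit side */
    ret = ks8842_tx_frame_dma(skb, netdev);
    spin_lock_irqsave(&adapter->lock, flags);
    if (adapter->dma_tx.adesc)          /* still in flight? */
            netif_stop_queue(netdev);
    spin_unlock_irqrestore(&adapter->lock, flags);

    /* completion side (ks8842_dma_tx_cb) */
    ctl->adesc = NULL;
    if (netif_queue_stopped(netdev))
            netif_wake_queue(netdev);
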
@@ -688,6 +1095,10 @@ static void ks8842_tx_timeout_work(struct work_struct *work) | |||
688 | netdev_dbg(netdev, "%s: entry\n", __func__); | 1095 | netdev_dbg(netdev, "%s: entry\n", __func__); |
689 | 1096 | ||
690 | spin_lock_irqsave(&adapter->lock, flags); | 1097 | spin_lock_irqsave(&adapter->lock, flags); |
1098 | |||
1099 | if (KS8842_USE_DMA(adapter)) | ||
1100 | ks8842_stop_dma(adapter); | ||
1101 | |||
691 | /* disable interrupts */ | 1102 | /* disable interrupts */ |
692 | ks8842_write16(adapter, 18, 0, REG_IER); | 1103 | ks8842_write16(adapter, 18, 0, REG_IER); |
693 | ks8842_write16(adapter, 18, 0xFFFF, REG_ISR); | 1104 | ks8842_write16(adapter, 18, 0xFFFF, REG_ISR); |
@@ -701,6 +1112,9 @@ static void ks8842_tx_timeout_work(struct work_struct *work) | |||
701 | ks8842_write_mac_addr(adapter, netdev->dev_addr); | 1112 | ks8842_write_mac_addr(adapter, netdev->dev_addr); |
702 | 1113 | ||
703 | ks8842_update_link_status(netdev, adapter); | 1114 | ks8842_update_link_status(netdev, adapter); |
1115 | |||
1116 | if (KS8842_USE_DMA(adapter)) | ||
1117 | __ks8842_start_new_rx_dma(netdev); | ||
704 | } | 1118 | } |
705 | 1119 | ||
706 | static void ks8842_tx_timeout(struct net_device *netdev) | 1120 | static void ks8842_tx_timeout(struct net_device *netdev) |
@@ -760,6 +1174,19 @@ static int __devinit ks8842_probe(struct platform_device *pdev) | |||
760 | goto err_get_irq; | 1174 | goto err_get_irq; |
761 | } | 1175 | } |
762 | 1176 | ||
1177 | adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev; | ||
1178 | |||
1179 | /* DMA is only supported when accessed via timberdale */ | ||
1180 | if (!(adapter->conf_flags & MICREL_KS884X) && pdata && | ||
1181 | (pdata->tx_dma_channel != -1) && | ||
1182 | (pdata->rx_dma_channel != -1)) { | ||
1183 | adapter->dma_rx.channel = pdata->rx_dma_channel; | ||
1184 | adapter->dma_tx.channel = pdata->tx_dma_channel; | ||
1185 | } else { | ||
1186 | adapter->dma_rx.channel = -1; | ||
1187 | adapter->dma_tx.channel = -1; | ||
1188 | } | ||
1189 | |||
763 | tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev); | 1190 | tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev); |
764 | spin_lock_init(&adapter->lock); | 1191 | spin_lock_init(&adapter->lock); |
765 | 1192 | ||
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c index b3c010b85658..8b32cc107f0f 100644 --- a/drivers/net/ksz884x.c +++ b/drivers/net/ksz884x.c | |||
@@ -6894,13 +6894,12 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port) | |||
6894 | i = j = num = got_num = 0; | 6894 | i = j = num = got_num = 0; |
6895 | while (j < MAC_ADDR_LEN) { | 6895 | while (j < MAC_ADDR_LEN) { |
6896 | if (macaddr[i]) { | 6896 | if (macaddr[i]) { |
6897 | int digit; | ||
6898 | |||
6897 | got_num = 1; | 6899 | got_num = 1; |
6898 | if ('0' <= macaddr[i] && macaddr[i] <= '9') | 6900 | digit = hex_to_bin(macaddr[i]); |
6899 | num = num * 16 + macaddr[i] - '0'; | 6901 | if (digit >= 0) |
6900 | else if ('A' <= macaddr[i] && macaddr[i] <= 'F') | 6902 | num = num * 16 + digit; |
6901 | num = num * 16 + 10 + macaddr[i] - 'A'; | ||
6902 | else if ('a' <= macaddr[i] && macaddr[i] <= 'f') | ||
6903 | num = num * 16 + 10 + macaddr[i] - 'a'; | ||
6904 | else if (':' == macaddr[i]) | 6903 | else if (':' == macaddr[i]) |
6905 | got_num = 2; | 6904 | got_num = 2; |
6906 | else | 6905 | else |
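
hex_to_bin() (in the kernel since 2.6.35) returns 0..15 for a valid hex digit and a negative value otherwise, which is what lets the three character-range branches collapse into one call plus a sign check. A small illustrative usage, not from the patch:

    #include <linux/kernel.h>       /* hex_to_bin() */

    /* Accumulate the leading hex digits of s into a value,
     * stopping at the first non-hex character. */
    static u32 parse_hex_prefix(const char *s)
    {
            u32 num = 0;
            int digit;

            while ((digit = hex_to_bin(*s++)) >= 0)
                    num = num * 16 + digit;     /* shift in 4 bits */
            return num;
    }
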
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 1b28aaec0a5a..0ef0eb0db945 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -158,7 +158,8 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) | |||
158 | const struct macvlan_dev *vlan; | 158 | const struct macvlan_dev *vlan; |
159 | const struct macvlan_dev *src; | 159 | const struct macvlan_dev *src; |
160 | struct net_device *dev; | 160 | struct net_device *dev; |
161 | unsigned int len; | 161 | unsigned int len = 0; |
162 | int ret = NET_RX_DROP; | ||
162 | 163 | ||
163 | port = macvlan_port_get_rcu(skb->dev); | 164 | port = macvlan_port_get_rcu(skb->dev); |
164 | if (is_multicast_ether_addr(eth->h_dest)) { | 165 | if (is_multicast_ether_addr(eth->h_dest)) { |
@@ -195,14 +196,16 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) | |||
195 | } | 196 | } |
196 | len = skb->len + ETH_HLEN; | 197 | len = skb->len + ETH_HLEN; |
197 | skb = skb_share_check(skb, GFP_ATOMIC); | 198 | skb = skb_share_check(skb, GFP_ATOMIC); |
198 | macvlan_count_rx(vlan, len, skb != NULL, 0); | ||
199 | if (!skb) | 199 | if (!skb) |
200 | return NULL; | 200 | goto out; |
201 | 201 | ||
202 | skb->dev = dev; | 202 | skb->dev = dev; |
203 | skb->pkt_type = PACKET_HOST; | 203 | skb->pkt_type = PACKET_HOST; |
204 | 204 | ||
205 | vlan->receive(skb); | 205 | ret = vlan->receive(skb); |
206 | |||
207 | out: | ||
208 | macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0); | ||
206 | return NULL; | 209 | return NULL; |
207 | } | 210 | } |
208 | 211 | ||
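
The fix here is to the RX accounting: previously macvlan_count_rx() ran right after skb_share_check(), so a frame that the inner receive path later dropped (as macvtap now can, see below) was still counted as received. With ret initialized to NET_RX_DROP, the failed skb_share_check() path is also counted as a drop rather than a success. The corrected flow, condensed from the hunk:

    unsigned int len = 0;
    int ret = NET_RX_DROP;
    ...
    len = skb->len + ETH_HLEN;
    skb = skb_share_check(skb, GFP_ATOMIC);
    if (!skb)
            goto out;
    ret = vlan->receive(skb);       /* may report NET_RX_DROP */
    out:
    macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
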
@@ -515,7 +518,7 @@ static const struct net_device_ops macvlan_netdev_ops = { | |||
515 | .ndo_validate_addr = eth_validate_addr, | 518 | .ndo_validate_addr = eth_validate_addr, |
516 | }; | 519 | }; |
517 | 520 | ||
518 | static void macvlan_setup(struct net_device *dev) | 521 | void macvlan_common_setup(struct net_device *dev) |
519 | { | 522 | { |
520 | ether_setup(dev); | 523 | ether_setup(dev); |
521 | 524 | ||
@@ -524,6 +527,12 @@ static void macvlan_setup(struct net_device *dev) | |||
524 | dev->destructor = free_netdev; | 527 | dev->destructor = free_netdev; |
525 | dev->header_ops = &macvlan_hard_header_ops, | 528 | dev->header_ops = &macvlan_hard_header_ops, |
526 | dev->ethtool_ops = &macvlan_ethtool_ops; | 529 | dev->ethtool_ops = &macvlan_ethtool_ops; |
530 | } | ||
531 | EXPORT_SYMBOL_GPL(macvlan_common_setup); | ||
532 | |||
533 | static void macvlan_setup(struct net_device *dev) | ||
534 | { | ||
535 | macvlan_common_setup(dev); | ||
527 | dev->tx_queue_len = 0; | 536 | dev->tx_queue_len = 0; |
528 | } | 537 | } |
529 | 538 | ||
@@ -735,7 +744,6 @@ int macvlan_link_register(struct rtnl_link_ops *ops) | |||
735 | /* common fields */ | 744 | /* common fields */ |
736 | ops->priv_size = sizeof(struct macvlan_dev); | 745 | ops->priv_size = sizeof(struct macvlan_dev); |
737 | ops->get_tx_queues = macvlan_get_tx_queues; | 746 | ops->get_tx_queues = macvlan_get_tx_queues; |
738 | ops->setup = macvlan_setup; | ||
739 | ops->validate = macvlan_validate; | 747 | ops->validate = macvlan_validate; |
740 | ops->maxtype = IFLA_MACVLAN_MAX; | 748 | ops->maxtype = IFLA_MACVLAN_MAX; |
741 | ops->policy = macvlan_policy; | 749 | ops->policy = macvlan_policy; |
@@ -749,6 +757,7 @@ EXPORT_SYMBOL_GPL(macvlan_link_register); | |||
749 | 757 | ||
750 | static struct rtnl_link_ops macvlan_link_ops = { | 758 | static struct rtnl_link_ops macvlan_link_ops = { |
751 | .kind = "macvlan", | 759 | .kind = "macvlan", |
760 | .setup = macvlan_setup, | ||
752 | .newlink = macvlan_newlink, | 761 | .newlink = macvlan_newlink, |
753 | .dellink = macvlan_dellink, | 762 | .dellink = macvlan_dellink, |
754 | }; | 763 | }; |
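
Moving .setup out of macvlan_link_register() and into each rtnl_link_ops is what lets macvtap override it: the shared initialization is exported as macvlan_common_setup(), and each link type layers its own defaults on top. Side by side, from this file and the macvtap hunk below:

    /* macvlan: no qdisc queue needed, frames go straight through */
    static void macvlan_setup(struct net_device *dev)
    {
            macvlan_common_setup(dev);
            dev->tx_queue_len = 0;
    }

    /* macvtap: keep a bounded queue toward the userspace reader */
    static void macvtap_setup(struct net_device *dev)
    {
            macvlan_common_setup(dev);
            dev->tx_queue_len = TUN_READQ_SIZE;
    }
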
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 2b4d59b58b2c..3b1c54a9c6ef 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -180,11 +180,18 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb) | |||
180 | { | 180 | { |
181 | struct macvtap_queue *q = macvtap_get_queue(dev, skb); | 181 | struct macvtap_queue *q = macvtap_get_queue(dev, skb); |
182 | if (!q) | 182 | if (!q) |
183 | return -ENOLINK; | 183 | goto drop; |
184 | |||
185 | if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len) | ||
186 | goto drop; | ||
184 | 187 | ||
185 | skb_queue_tail(&q->sk.sk_receive_queue, skb); | 188 | skb_queue_tail(&q->sk.sk_receive_queue, skb); |
186 | wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND); | 189 | wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND); |
187 | return 0; | 190 | return NET_RX_SUCCESS; |
191 | |||
192 | drop: | ||
193 | kfree_skb(skb); | ||
194 | return NET_RX_DROP; | ||
188 | } | 195 | } |
189 | 196 | ||
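
macvtap_forward() previously queued to the socket without bound, so a slow or absent reader let the receive queue grow indefinitely; capping it at dev->tx_queue_len (now TUN_READQ_SIZE, via macvtap_setup above) turns overflow into an ordinary counted drop. The essential shape of the new forward path:

    if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len) {
            kfree_skb(skb);
            return NET_RX_DROP;     /* surfaced in macvlan RX stats */
    }
    skb_queue_tail(&q->sk.sk_receive_queue, skb);
    return NET_RX_SUCCESS;
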
190 | /* | 197 | /* |
@@ -235,8 +242,15 @@ static void macvtap_dellink(struct net_device *dev, | |||
235 | macvlan_dellink(dev, head); | 242 | macvlan_dellink(dev, head); |
236 | } | 243 | } |
237 | 244 | ||
245 | static void macvtap_setup(struct net_device *dev) | ||
246 | { | ||
247 | macvlan_common_setup(dev); | ||
248 | dev->tx_queue_len = TUN_READQ_SIZE; | ||
249 | } | ||
250 | |||
238 | static struct rtnl_link_ops macvtap_link_ops __read_mostly = { | 251 | static struct rtnl_link_ops macvtap_link_ops __read_mostly = { |
239 | .kind = "macvtap", | 252 | .kind = "macvtap", |
253 | .setup = macvtap_setup, | ||
240 | .newlink = macvtap_newlink, | 254 | .newlink = macvtap_newlink, |
241 | .dellink = macvtap_dellink, | 255 | .dellink = macvtap_dellink, |
242 | }; | 256 | }; |
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 2fcdb1e1b99d..2d488abcf62d 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -2675,7 +2675,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) | |||
2675 | * Detect hardware parameters. | 2675 | * Detect hardware parameters. |
2676 | */ | 2676 | */ |
2677 | msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; | 2677 | msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; |
2678 | msp->tx_csum_limit = pd->tx_csum_limit ? pd->tx_csum_limit : 9 * 1024; | 2678 | msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? |
2679 | pd->tx_csum_limit : 9 * 1024; | ||
2679 | infer_hw_params(msp); | 2680 | infer_hw_params(msp); |
2680 | 2681 | ||
2681 | platform_set_drvdata(pdev, msp); | 2682 | platform_set_drvdata(pdev, msp); |
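
This one-line change fixes a NULL dereference: pd may legitimately be NULL (no platform data), which the t_clk assignment one line earlier already guards against, but the original tx_csum_limit line dereferenced pd unconditionally. The fix applies the same guarded-default idiom:

    /* fall back to the default when platform data or the field is absent */
    msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
                         pd->tx_csum_limit : 9 * 1024;
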
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 54ebb65ada18..6168a130f33f 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -5,6 +5,8 @@ | |||
5 | * See LICENSE.qla3xxx for copyright and licensing details. | 5 | * See LICENSE.qla3xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
9 | |||
8 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
9 | #include <linux/init.h> | 11 | #include <linux/init.h> |
10 | #include <linux/types.h> | 12 | #include <linux/types.h> |
@@ -36,14 +38,16 @@ | |||
36 | 38 | ||
37 | #include "qla3xxx.h" | 39 | #include "qla3xxx.h" |
38 | 40 | ||
39 | #define DRV_NAME "qla3xxx" | 41 | #define DRV_NAME "qla3xxx" |
40 | #define DRV_STRING "QLogic ISP3XXX Network Driver" | 42 | #define DRV_STRING "QLogic ISP3XXX Network Driver" |
41 | #define DRV_VERSION "v2.03.00-k5" | 43 | #define DRV_VERSION "v2.03.00-k5" |
42 | #define PFX DRV_NAME " " | ||
43 | 44 | ||
44 | static const char ql3xxx_driver_name[] = DRV_NAME; | 45 | static const char ql3xxx_driver_name[] = DRV_NAME; |
45 | static const char ql3xxx_driver_version[] = DRV_VERSION; | 46 | static const char ql3xxx_driver_version[] = DRV_VERSION; |
46 | 47 | ||
48 | #define TIMED_OUT_MSG \ | ||
49 | "Timed out waiting for management port to get free before issuing command\n" | ||
50 | |||
47 | MODULE_AUTHOR("QLogic Corporation"); | 51 | MODULE_AUTHOR("QLogic Corporation"); |
48 | MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); | 52 | MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); |
49 | MODULE_LICENSE("GPL"); | 53 | MODULE_LICENSE("GPL"); |
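
Defining pr_fmt before the includes is what makes the later pr_err()/pr_warn() calls self-prefixing, replacing the old hand-rolled PFX macro; it must precede the includes so the default definition in printk.h does not win. Sketch of the effect:

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt   /* before any include */
    #include <linux/kernel.h>

    static void example(u16 checksum)
    {
            /* prints: "qla3xxx: checksum should be zero, is 1234!!" */
            pr_err("checksum should be zero, is %x!!\n", checksum);
    }

The netdev_err()/netif_warn() conversions go one step further and prefix the interface name as well, which is why the explicit qdev->ndev->name arguments disappear throughout this file.
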
@@ -73,24 +77,24 @@ MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); | |||
73 | /* | 77 | /* |
74 | * These are the known PHY's which are used | 78 | * These are the known PHY's which are used |
75 | */ | 79 | */ |
76 | typedef enum { | 80 | enum PHY_DEVICE_TYPE { |
77 | PHY_TYPE_UNKNOWN = 0, | 81 | PHY_TYPE_UNKNOWN = 0, |
78 | PHY_VITESSE_VSC8211, | 82 | PHY_VITESSE_VSC8211, |
79 | PHY_AGERE_ET1011C, | 83 | PHY_AGERE_ET1011C, |
80 | MAX_PHY_DEV_TYPES | 84 | MAX_PHY_DEV_TYPES |
81 | } PHY_DEVICE_et; | 85 | }; |
82 | 86 | ||
83 | typedef struct { | 87 | struct PHY_DEVICE_INFO { |
84 | PHY_DEVICE_et phyDevice; | 88 | const enum PHY_DEVICE_TYPE phyDevice; |
85 | u32 phyIdOUI; | 89 | const u32 phyIdOUI; |
86 | u16 phyIdModel; | 90 | const u16 phyIdModel; |
87 | char *name; | 91 | const char *name; |
88 | } PHY_DEVICE_INFO_t; | 92 | }; |
89 | 93 | ||
90 | static const PHY_DEVICE_INFO_t PHY_DEVICES[] = | 94 | static const struct PHY_DEVICE_INFO PHY_DEVICES[] = { |
91 | {{PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, | 95 | {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, |
92 | {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, | 96 | {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, |
93 | {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, | 97 | {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, |
94 | }; | 98 | }; |
95 | 99 | ||
96 | 100 | ||
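
The typedef-to-tag conversion follows kernel style (Documentation/CodingStyle discourages typedefs for structs and enums); the lookup table also gains const on every field. Before and after, schematically (only one form would exist at a time):

    /* before */
    typedef enum { PHY_TYPE_UNKNOWN = 0, /* ... */ } PHY_DEVICE_et;

    /* after */
    enum PHY_DEVICE_TYPE { PHY_TYPE_UNKNOWN = 0, /* ... */ };
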
@@ -100,7 +104,8 @@ static const PHY_DEVICE_INFO_t PHY_DEVICES[] = | |||
100 | static int ql_sem_spinlock(struct ql3_adapter *qdev, | 104 | static int ql_sem_spinlock(struct ql3_adapter *qdev, |
101 | u32 sem_mask, u32 sem_bits) | 105 | u32 sem_mask, u32 sem_bits) |
102 | { | 106 | { |
103 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 107 | struct ql3xxx_port_registers __iomem *port_regs = |
108 | qdev->mem_map_registers; | ||
104 | u32 value; | 109 | u32 value; |
105 | unsigned int seconds = 3; | 110 | unsigned int seconds = 3; |
106 | 111 | ||
@@ -111,20 +116,22 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev, | |||
111 | if ((value & (sem_mask >> 16)) == sem_bits) | 116 | if ((value & (sem_mask >> 16)) == sem_bits) |
112 | return 0; | 117 | return 0; |
113 | ssleep(1); | 118 | ssleep(1); |
114 | } while(--seconds); | 119 | } while (--seconds); |
115 | return -1; | 120 | return -1; |
116 | } | 121 | } |
117 | 122 | ||
118 | static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) | 123 | static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) |
119 | { | 124 | { |
120 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 125 | struct ql3xxx_port_registers __iomem *port_regs = |
126 | qdev->mem_map_registers; | ||
121 | writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); | 127 | writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); |
122 | readl(&port_regs->CommonRegs.semaphoreReg); | 128 | readl(&port_regs->CommonRegs.semaphoreReg); |
123 | } | 129 | } |
124 | 130 | ||
125 | static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) | 131 | static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) |
126 | { | 132 | { |
127 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 133 | struct ql3xxx_port_registers __iomem *port_regs = |
134 | qdev->mem_map_registers; | ||
128 | u32 value; | 135 | u32 value; |
129 | 136 | ||
130 | writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); | 137 | writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); |
@@ -139,32 +146,28 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) | |||
139 | { | 146 | { |
140 | int i = 0; | 147 | int i = 0; |
141 | 148 | ||
142 | while (1) { | 149 | while (i < 10) { |
143 | if (!ql_sem_lock(qdev, | 150 | if (i++)
144 | QL_DRVR_SEM_MASK, | 151 | ssleep(1); |
145 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) | 152 | |
146 | * 2) << 1)) { | 153 | if (ql_sem_lock(qdev, |
147 | if (i < 10) { | 154 | QL_DRVR_SEM_MASK, |
148 | ssleep(1); | 155 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) |
149 | i++; | 156 | * 2) << 1)) { |
150 | } else { | 157 | netdev_printk(KERN_DEBUG, qdev->ndev, |
151 | printk(KERN_ERR PFX "%s: Timed out waiting for " | 158 | "driver lock acquired\n"); |
152 | "driver lock...\n", | ||
153 | qdev->ndev->name); | ||
154 | return 0; | ||
155 | } | ||
156 | } else { | ||
157 | printk(KERN_DEBUG PFX | ||
158 | "%s: driver lock acquired.\n", | ||
159 | qdev->ndev->name); | ||
160 | return 1; | 159 | return 1; |
161 | } | 160 | } |
162 | } | 161 | } |
162 | |||
163 | netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); | ||
164 | return 0; | ||
163 | } | 165 | } |
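
The rewrite of ql_wait_for_drvr_lock() replaces an unbounded while (1) with nested retry bookkeeping by a bounded loop that sleeps before every attempt except the first; note the counter must advance somewhere (here via i++ in the sleep test) or the loop never terminates. The shape, with a hypothetical try_lock() standing in for ql_sem_lock():

    static int wait_for_lock(void)
    {
            int i;

            for (i = 0; i < 10; i++) {
                    if (i)
                            ssleep(1);      /* back off before retrying */
                    if (try_lock())
                            return 1;       /* acquired */
            }
            return 0;                       /* timed out */
    }

Same behavior, roughly ten attempts over nine seconds, but the control flow now reads top to bottom.
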
164 | 166 | ||
165 | static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) | 167 | static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) |
166 | { | 168 | { |
167 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 169 | struct ql3xxx_port_registers __iomem *port_regs = |
170 | qdev->mem_map_registers; | ||
168 | 171 | ||
169 | writel(((ISP_CONTROL_NP_MASK << 16) | page), | 172 | writel(((ISP_CONTROL_NP_MASK << 16) | page), |
170 | &port_regs->CommonRegs.ispControlStatus); | 173 | &port_regs->CommonRegs.ispControlStatus); |
@@ -172,8 +175,7 @@ static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) | |||
172 | qdev->current_page = page; | 175 | qdev->current_page = page; |
173 | } | 176 | } |
174 | 177 | ||
175 | static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, | 178 | static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) |
176 | u32 __iomem * reg) | ||
177 | { | 179 | { |
178 | u32 value; | 180 | u32 value; |
179 | unsigned long hw_flags; | 181 | unsigned long hw_flags; |
@@ -185,8 +187,7 @@ static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, | |||
185 | return value; | 187 | return value; |
186 | } | 188 | } |
187 | 189 | ||
188 | static u32 ql_read_common_reg(struct ql3_adapter *qdev, | 190 | static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) |
189 | u32 __iomem * reg) | ||
190 | { | 191 | { |
191 | return readl(reg); | 192 | return readl(reg); |
192 | } | 193 | } |
@@ -199,7 +200,7 @@ static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) | |||
199 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 200 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
200 | 201 | ||
201 | if (qdev->current_page != 0) | 202 | if (qdev->current_page != 0) |
202 | ql_set_register_page(qdev,0); | 203 | ql_set_register_page(qdev, 0); |
203 | value = readl(reg); | 204 | value = readl(reg); |
204 | 205 | ||
205 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 206 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
@@ -209,7 +210,7 @@ static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) | |||
209 | static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) | 210 | static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) |
210 | { | 211 | { |
211 | if (qdev->current_page != 0) | 212 | if (qdev->current_page != 0) |
212 | ql_set_register_page(qdev,0); | 213 | ql_set_register_page(qdev, 0); |
213 | return readl(reg); | 214 | return readl(reg); |
214 | } | 215 | } |
215 | 216 | ||
@@ -243,7 +244,7 @@ static void ql_write_page0_reg(struct ql3_adapter *qdev, | |||
243 | u32 __iomem *reg, u32 value) | 244 | u32 __iomem *reg, u32 value) |
244 | { | 245 | { |
245 | if (qdev->current_page != 0) | 246 | if (qdev->current_page != 0) |
246 | ql_set_register_page(qdev,0); | 247 | ql_set_register_page(qdev, 0); |
247 | writel(value, reg); | 248 | writel(value, reg); |
248 | readl(reg); | 249 | readl(reg); |
249 | } | 250 | } |
@@ -255,7 +256,7 @@ static void ql_write_page1_reg(struct ql3_adapter *qdev, | |||
255 | u32 __iomem *reg, u32 value) | 256 | u32 __iomem *reg, u32 value) |
256 | { | 257 | { |
257 | if (qdev->current_page != 1) | 258 | if (qdev->current_page != 1) |
258 | ql_set_register_page(qdev,1); | 259 | ql_set_register_page(qdev, 1); |
259 | writel(value, reg); | 260 | writel(value, reg); |
260 | readl(reg); | 261 | readl(reg); |
261 | } | 262 | } |
@@ -267,14 +268,15 @@ static void ql_write_page2_reg(struct ql3_adapter *qdev, | |||
267 | u32 __iomem *reg, u32 value) | 268 | u32 __iomem *reg, u32 value) |
268 | { | 269 | { |
269 | if (qdev->current_page != 2) | 270 | if (qdev->current_page != 2) |
270 | ql_set_register_page(qdev,2); | 271 | ql_set_register_page(qdev, 2); |
271 | writel(value, reg); | 272 | writel(value, reg); |
272 | readl(reg); | 273 | readl(reg); |
273 | } | 274 | } |
274 | 275 | ||
275 | static void ql_disable_interrupts(struct ql3_adapter *qdev) | 276 | static void ql_disable_interrupts(struct ql3_adapter *qdev) |
276 | { | 277 | { |
277 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 278 | struct ql3xxx_port_registers __iomem *port_regs = |
279 | qdev->mem_map_registers; | ||
278 | 280 | ||
279 | ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, | 281 | ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, |
280 | (ISP_IMR_ENABLE_INT << 16)); | 282 | (ISP_IMR_ENABLE_INT << 16)); |
@@ -283,7 +285,8 @@ static void ql_disable_interrupts(struct ql3_adapter *qdev) | |||
283 | 285 | ||
284 | static void ql_enable_interrupts(struct ql3_adapter *qdev) | 286 | static void ql_enable_interrupts(struct ql3_adapter *qdev) |
285 | { | 287 | { |
286 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 288 | struct ql3xxx_port_registers __iomem *port_regs = |
289 | qdev->mem_map_registers; | ||
287 | 290 | ||
288 | ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, | 291 | ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, |
289 | ((0xff << 16) | ISP_IMR_ENABLE_INT)); | 292 | ((0xff << 16) | ISP_IMR_ENABLE_INT)); |
@@ -308,8 +311,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, | |||
308 | lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, | 311 | lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, |
309 | qdev->lrg_buffer_len); | 312 | qdev->lrg_buffer_len); |
310 | if (unlikely(!lrg_buf_cb->skb)) { | 313 | if (unlikely(!lrg_buf_cb->skb)) { |
311 | printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n", | 314 | netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n"); |
312 | qdev->ndev->name); | ||
313 | qdev->lrg_buf_skb_check++; | 315 | qdev->lrg_buf_skb_check++; |
314 | } else { | 316 | } else { |
315 | /* | 317 | /* |
@@ -323,9 +325,10 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, | |||
323 | QL_HEADER_SPACE, | 325 | QL_HEADER_SPACE, |
324 | PCI_DMA_FROMDEVICE); | 326 | PCI_DMA_FROMDEVICE); |
325 | err = pci_dma_mapping_error(qdev->pdev, map); | 327 | err = pci_dma_mapping_error(qdev->pdev, map); |
326 | if(err) { | 328 | if (err) { |
327 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 329 | netdev_err(qdev->ndev, |
328 | qdev->ndev->name, err); | 330 | "PCI mapping failed with error: %d\n", |
331 | err); | ||
329 | dev_kfree_skb(lrg_buf_cb->skb); | 332 | dev_kfree_skb(lrg_buf_cb->skb); |
330 | lrg_buf_cb->skb = NULL; | 333 | lrg_buf_cb->skb = NULL; |
331 | 334 | ||
@@ -350,10 +353,11 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, | |||
350 | static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter | 353 | static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter |
351 | *qdev) | 354 | *qdev) |
352 | { | 355 | { |
353 | struct ql_rcv_buf_cb *lrg_buf_cb; | 356 | struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; |
354 | 357 | ||
355 | if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) { | 358 | if (lrg_buf_cb != NULL) { |
356 | if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL) | 359 | qdev->lrg_buf_free_head = lrg_buf_cb->next; |
360 | if (qdev->lrg_buf_free_head == NULL) | ||
357 | qdev->lrg_buf_free_tail = NULL; | 361 | qdev->lrg_buf_free_tail = NULL; |
358 | qdev->lrg_buf_free_count--; | 362 | qdev->lrg_buf_free_count--; |
359 | } | 363 | } |
@@ -374,13 +378,13 @@ static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, | |||
374 | static void fm93c56a_select(struct ql3_adapter *qdev) | 378 | static void fm93c56a_select(struct ql3_adapter *qdev) |
375 | { | 379 | { |
376 | struct ql3xxx_port_registers __iomem *port_regs = | 380 | struct ql3xxx_port_registers __iomem *port_regs = |
377 | qdev->mem_map_registers; | 381 | qdev->mem_map_registers; |
382 | u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
378 | 383 | ||
379 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; | 384 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; |
380 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 385 | ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); |
381 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data); | 386 | ql_write_nvram_reg(qdev, spir, |
382 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 387 | ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); |
383 | ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); | ||
384 | } | 388 | } |
385 | 389 | ||
386 | /* | 390 | /* |
@@ -393,51 +397,40 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) | |||
393 | u32 dataBit; | 397 | u32 dataBit; |
394 | u32 previousBit; | 398 | u32 previousBit; |
395 | struct ql3xxx_port_registers __iomem *port_regs = | 399 | struct ql3xxx_port_registers __iomem *port_regs = |
396 | qdev->mem_map_registers; | 400 | qdev->mem_map_registers; |
401 | u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
397 | 402 | ||
398 | /* Clock in a zero, then do the start bit */ | 403 | /* Clock in a zero, then do the start bit */ |
399 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 404 | ql_write_nvram_reg(qdev, spir, |
400 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | 405 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
401 | AUBURN_EEPROM_DO_1); | 406 | AUBURN_EEPROM_DO_1)); |
402 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 407 | ql_write_nvram_reg(qdev, spir, |
403 | ISP_NVRAM_MASK | qdev-> | 408 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
404 | eeprom_cmd_data | AUBURN_EEPROM_DO_1 | | 409 | AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); |
405 | AUBURN_EEPROM_CLK_RISE); | 410 | ql_write_nvram_reg(qdev, spir, |
406 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 411 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
407 | ISP_NVRAM_MASK | qdev-> | 412 | AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); |
408 | eeprom_cmd_data | AUBURN_EEPROM_DO_1 | | ||
409 | AUBURN_EEPROM_CLK_FALL); | ||
410 | 413 | ||
411 | mask = 1 << (FM93C56A_CMD_BITS - 1); | 414 | mask = 1 << (FM93C56A_CMD_BITS - 1); |
412 | /* Force the previous data bit to be different */ | 415 | /* Force the previous data bit to be different */ |
413 | previousBit = 0xffff; | 416 | previousBit = 0xffff; |
414 | for (i = 0; i < FM93C56A_CMD_BITS; i++) { | 417 | for (i = 0; i < FM93C56A_CMD_BITS; i++) { |
415 | dataBit = | 418 | dataBit = (cmd & mask) |
416 | (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; | 419 | ? AUBURN_EEPROM_DO_1 |
420 | : AUBURN_EEPROM_DO_0; | ||
417 | if (previousBit != dataBit) { | 421 | if (previousBit != dataBit) { |
418 | /* | 422 | /* If the bit changed, change the DO state to match */ |
419 | * If the bit changed, then change the DO state to | 423 | ql_write_nvram_reg(qdev, spir, |
420 | * match | 424 | (ISP_NVRAM_MASK | |
421 | */ | 425 | qdev->eeprom_cmd_data | dataBit)); |
422 | ql_write_nvram_reg(qdev, | ||
423 | &port_regs->CommonRegs. | ||
424 | serialPortInterfaceReg, | ||
425 | ISP_NVRAM_MASK | qdev-> | ||
426 | eeprom_cmd_data | dataBit); | ||
427 | previousBit = dataBit; | 426 | previousBit = dataBit; |
428 | } | 427 | } |
429 | ql_write_nvram_reg(qdev, | 428 | ql_write_nvram_reg(qdev, spir, |
430 | &port_regs->CommonRegs. | 429 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
431 | serialPortInterfaceReg, | 430 | dataBit | AUBURN_EEPROM_CLK_RISE)); |
432 | ISP_NVRAM_MASK | qdev-> | 431 | ql_write_nvram_reg(qdev, spir, |
433 | eeprom_cmd_data | dataBit | | 432 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
434 | AUBURN_EEPROM_CLK_RISE); | 433 | dataBit | AUBURN_EEPROM_CLK_FALL)); |
435 | ql_write_nvram_reg(qdev, | ||
436 | &port_regs->CommonRegs. | ||
437 | serialPortInterfaceReg, | ||
438 | ISP_NVRAM_MASK | qdev-> | ||
439 | eeprom_cmd_data | dataBit | | ||
440 | AUBURN_EEPROM_CLK_FALL); | ||
441 | cmd = cmd << 1; | 434 | cmd = cmd << 1; |
442 | } | 435 | } |
443 | 436 | ||
@@ -445,33 +438,24 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) | |||
445 | /* Force the previous data bit to be different */ | 438 | /* Force the previous data bit to be different */ |
446 | previousBit = 0xffff; | 439 | previousBit = 0xffff; |
447 | for (i = 0; i < addrBits; i++) { | 440 | for (i = 0; i < addrBits; i++) { |
448 | dataBit = | 441 | dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 |
449 | (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 : | 442 | : AUBURN_EEPROM_DO_0; |
450 | AUBURN_EEPROM_DO_0; | ||
451 | if (previousBit != dataBit) { | 443 | if (previousBit != dataBit) { |
452 | /* | 444 | /* |
453 | * If the bit changed, then change the DO state to | 445 | * If the bit changed, then change the DO state to |
454 | * match | 446 | * match |
455 | */ | 447 | */ |
456 | ql_write_nvram_reg(qdev, | 448 | ql_write_nvram_reg(qdev, spir, |
457 | &port_regs->CommonRegs. | 449 | (ISP_NVRAM_MASK | |
458 | serialPortInterfaceReg, | 450 | qdev->eeprom_cmd_data | dataBit)); |
459 | ISP_NVRAM_MASK | qdev-> | ||
460 | eeprom_cmd_data | dataBit); | ||
461 | previousBit = dataBit; | 451 | previousBit = dataBit; |
462 | } | 452 | } |
463 | ql_write_nvram_reg(qdev, | 453 | ql_write_nvram_reg(qdev, spir, |
464 | &port_regs->CommonRegs. | 454 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
465 | serialPortInterfaceReg, | 455 | dataBit | AUBURN_EEPROM_CLK_RISE)); |
466 | ISP_NVRAM_MASK | qdev-> | 456 | ql_write_nvram_reg(qdev, spir, |
467 | eeprom_cmd_data | dataBit | | 457 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
468 | AUBURN_EEPROM_CLK_RISE); | 458 | dataBit | AUBURN_EEPROM_CLK_FALL)); |
469 | ql_write_nvram_reg(qdev, | ||
470 | &port_regs->CommonRegs. | ||
471 | serialPortInterfaceReg, | ||
472 | ISP_NVRAM_MASK | qdev-> | ||
473 | eeprom_cmd_data | dataBit | | ||
474 | AUBURN_EEPROM_CLK_FALL); | ||
475 | eepromAddr = eepromAddr << 1; | 459 | eepromAddr = eepromAddr << 1; |
476 | } | 460 | } |
477 | } | 461 | } |
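
All three fm93c56a_* rewrites compact the same bit-bang step: put the data bit on DO (only when it changed), then strobe the clock high and low. With spir and the masks as in the driver, each transmitted bit reduces to this fragment:

    u32 base = ISP_NVRAM_MASK | qdev->eeprom_cmd_data;

    if (previousBit != dataBit) {
            ql_write_nvram_reg(qdev, spir, base | dataBit);   /* set DO */
            previousBit = dataBit;
    }
    ql_write_nvram_reg(qdev, spir, base | dataBit | AUBURN_EEPROM_CLK_RISE);
    ql_write_nvram_reg(qdev, spir, base | dataBit | AUBURN_EEPROM_CLK_FALL);

The FM93C56A samples its data-in line on the rising clock edge, which is why the rise write carries the data bit as well.
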
@@ -482,10 +466,11 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) | |||
482 | static void fm93c56a_deselect(struct ql3_adapter *qdev) | 466 | static void fm93c56a_deselect(struct ql3_adapter *qdev) |
483 | { | 467 | { |
484 | struct ql3xxx_port_registers __iomem *port_regs = | 468 | struct ql3xxx_port_registers __iomem *port_regs = |
485 | qdev->mem_map_registers; | 469 | qdev->mem_map_registers; |
470 | u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
471 | |||
486 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; | 472 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; |
487 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 473 | ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); |
488 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data); | ||
489 | } | 474 | } |
490 | 475 | ||
491 | /* | 476 | /* |
@@ -497,29 +482,23 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) | |||
497 | u32 data = 0; | 482 | u32 data = 0; |
498 | u32 dataBit; | 483 | u32 dataBit; |
499 | struct ql3xxx_port_registers __iomem *port_regs = | 484 | struct ql3xxx_port_registers __iomem *port_regs = |
500 | qdev->mem_map_registers; | 485 | qdev->mem_map_registers; |
486 | u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
501 | 487 | ||
502 | /* Read the data bits */ | 488 | /* Read the data bits */ |
503 | /* The first bit is a dummy. Clock right over it. */ | 489 | /* The first bit is a dummy. Clock right over it. */ |
504 | for (i = 0; i < dataBits; i++) { | 490 | for (i = 0; i < dataBits; i++) { |
505 | ql_write_nvram_reg(qdev, | 491 | ql_write_nvram_reg(qdev, spir, |
506 | &port_regs->CommonRegs. | 492 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
507 | serialPortInterfaceReg, | 493 | AUBURN_EEPROM_CLK_RISE); |
508 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | 494 | ql_write_nvram_reg(qdev, spir, |
509 | AUBURN_EEPROM_CLK_RISE); | 495 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
510 | ql_write_nvram_reg(qdev, | 496 | AUBURN_EEPROM_CLK_FALL); |
511 | &port_regs->CommonRegs. | 497 | dataBit = (ql_read_common_reg(qdev, spir) & |
512 | serialPortInterfaceReg, | 498 | AUBURN_EEPROM_DI_1) ? 1 : 0; |
513 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | ||
514 | AUBURN_EEPROM_CLK_FALL); | ||
515 | dataBit = | ||
516 | (ql_read_common_reg | ||
517 | (qdev, | ||
518 | &port_regs->CommonRegs. | ||
519 | serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0; | ||
520 | data = (data << 1) | dataBit; | 499 | data = (data << 1) | dataBit; |
521 | } | 500 | } |
522 | *value = (u16) data; | 501 | *value = (u16)data; |
523 | } | 502 | } |
524 | 503 | ||
525 | /* | 504 | /* |
@@ -551,13 +530,12 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev) | |||
551 | 530 | ||
552 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 531 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
553 | 532 | ||
554 | pEEPROMData = (u16 *) & qdev->nvram_data; | 533 | pEEPROMData = (u16 *)&qdev->nvram_data; |
555 | qdev->eeprom_cmd_data = 0; | 534 | qdev->eeprom_cmd_data = 0; |
556 | if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, | 535 | if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, |
557 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 536 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * |
558 | 2) << 10)) { | 537 | 2) << 10)) { |
559 | printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n", | 538 | pr_err("%s: Failed ql_sem_spinlock()\n", __func__); |
560 | __func__); | ||
561 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 539 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
562 | return -1; | 540 | return -1; |
563 | } | 541 | } |
@@ -570,8 +548,8 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev) | |||
570 | ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); | 548 | ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); |
571 | 549 | ||
572 | if (checksum != 0) { | 550 | if (checksum != 0) { |
573 | printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n", | 551 | netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", |
574 | qdev->ndev->name, checksum); | 552 | checksum); |
575 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 553 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
576 | return -1; | 554 | return -1; |
577 | } | 555 | } |
@@ -587,7 +565,7 @@ static const u32 PHYAddr[2] = { | |||
587 | static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) | 565 | static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) |
588 | { | 566 | { |
589 | struct ql3xxx_port_registers __iomem *port_regs = | 567 | struct ql3xxx_port_registers __iomem *port_regs = |
590 | qdev->mem_map_registers; | 568 | qdev->mem_map_registers; |
591 | u32 temp; | 569 | u32 temp; |
592 | int count = 1000; | 570 | int count = 1000; |
593 | 571 | ||
@@ -604,7 +582,7 @@ static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) | |||
604 | static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) | 582 | static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) |
605 | { | 583 | { |
606 | struct ql3xxx_port_registers __iomem *port_regs = | 584 | struct ql3xxx_port_registers __iomem *port_regs = |
607 | qdev->mem_map_registers; | 585 | qdev->mem_map_registers; |
608 | u32 scanControl; | 586 | u32 scanControl; |
609 | 587 | ||
610 | if (qdev->numPorts > 1) { | 588 | if (qdev->numPorts > 1) { |
@@ -632,7 +610,7 @@ static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) | |||
632 | { | 610 | { |
633 | u8 ret; | 611 | u8 ret; |
634 | struct ql3xxx_port_registers __iomem *port_regs = | 612 | struct ql3xxx_port_registers __iomem *port_regs = |
635 | qdev->mem_map_registers; | 613 | qdev->mem_map_registers; |
636 | 614 | ||
637 | /* See if scan mode is enabled before we turn it off */ | 615 | /* See if scan mode is enabled before we turn it off */ |
638 | if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & | 616 | if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & |
@@ -662,17 +640,13 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, | |||
662 | u16 regAddr, u16 value, u32 phyAddr) | 640 | u16 regAddr, u16 value, u32 phyAddr) |
663 | { | 641 | { |
664 | struct ql3xxx_port_registers __iomem *port_regs = | 642 | struct ql3xxx_port_registers __iomem *port_regs = |
665 | qdev->mem_map_registers; | 643 | qdev->mem_map_registers; |
666 | u8 scanWasEnabled; | 644 | u8 scanWasEnabled; |
667 | 645 | ||
668 | scanWasEnabled = ql_mii_disable_scan_mode(qdev); | 646 | scanWasEnabled = ql_mii_disable_scan_mode(qdev); |
669 | 647 | ||
670 | if (ql_wait_for_mii_ready(qdev)) { | 648 | if (ql_wait_for_mii_ready(qdev)) { |
671 | if (netif_msg_link(qdev)) | 649 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); |
672 | printk(KERN_WARNING PFX | ||
673 | "%s Timed out waiting for management port to " | ||
674 | "get free before issuing command.\n", | ||
675 | qdev->ndev->name); | ||
676 | return -1; | 650 | return -1; |
677 | } | 651 | } |
678 | 652 | ||
@@ -683,11 +657,7 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, | |||
683 | 657 | ||
684 | /* Wait for write to complete 9/10/04 SJP */ | 658 | /* Wait for write to complete 9/10/04 SJP */ |
685 | if (ql_wait_for_mii_ready(qdev)) { | 659 | if (ql_wait_for_mii_ready(qdev)) { |
686 | if (netif_msg_link(qdev)) | 660 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); |
687 | printk(KERN_WARNING PFX | ||
688 | "%s: Timed out waiting for management port to " | ||
689 | "get free before issuing command.\n", | ||
690 | qdev->ndev->name); | ||
691 | return -1; | 661 | return -1; |
692 | } | 662 | } |
693 | 663 | ||
@@ -698,21 +668,17 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, | |||
698 | } | 668 | } |
699 | 669 | ||
700 | static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, | 670 | static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, |
701 | u16 * value, u32 phyAddr) | 671 | u16 *value, u32 phyAddr) |
702 | { | 672 | { |
703 | struct ql3xxx_port_registers __iomem *port_regs = | 673 | struct ql3xxx_port_registers __iomem *port_regs = |
704 | qdev->mem_map_registers; | 674 | qdev->mem_map_registers; |
705 | u8 scanWasEnabled; | 675 | u8 scanWasEnabled; |
706 | u32 temp; | 676 | u32 temp; |
707 | 677 | ||
708 | scanWasEnabled = ql_mii_disable_scan_mode(qdev); | 678 | scanWasEnabled = ql_mii_disable_scan_mode(qdev); |
709 | 679 | ||
710 | if (ql_wait_for_mii_ready(qdev)) { | 680 | if (ql_wait_for_mii_ready(qdev)) { |
711 | if (netif_msg_link(qdev)) | 681 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); |
712 | printk(KERN_WARNING PFX | ||
713 | "%s: Timed out waiting for management port to " | ||
714 | "get free before issuing command.\n", | ||
715 | qdev->ndev->name); | ||
716 | return -1; | 682 | return -1; |
717 | } | 683 | } |
718 | 684 | ||
@@ -727,11 +693,7 @@ static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, | |||
727 | 693 | ||
728 | /* Wait for the read to complete */ | 694 | /* Wait for the read to complete */ |
729 | if (ql_wait_for_mii_ready(qdev)) { | 695 | if (ql_wait_for_mii_ready(qdev)) { |
730 | if (netif_msg_link(qdev)) | 696 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); |
731 | printk(KERN_WARNING PFX | ||
732 | "%s: Timed out waiting for management port to " | ||
733 | "get free after issuing command.\n", | ||
734 | qdev->ndev->name); | ||
735 | return -1; | 697 | return -1; |
736 | } | 698 | } |
737 | 699 | ||
@@ -747,16 +709,12 @@ static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, | |||
747 | static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) | 709 | static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) |
748 | { | 710 | { |
749 | struct ql3xxx_port_registers __iomem *port_regs = | 711 | struct ql3xxx_port_registers __iomem *port_regs = |
750 | qdev->mem_map_registers; | 712 | qdev->mem_map_registers; |
751 | 713 | ||
752 | ql_mii_disable_scan_mode(qdev); | 714 | ql_mii_disable_scan_mode(qdev); |
753 | 715 | ||
754 | if (ql_wait_for_mii_ready(qdev)) { | 716 | if (ql_wait_for_mii_ready(qdev)) { |
755 | if (netif_msg_link(qdev)) | 717 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); |
756 | printk(KERN_WARNING PFX | ||
757 | "%s: Timed out waiting for management port to " | ||
758 | "get free before issuing command.\n", | ||
759 | qdev->ndev->name); | ||
760 | return -1; | 718 | return -1; |
761 | } | 719 | } |
762 | 720 | ||
@@ -767,11 +725,7 @@ static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) | |||
767 | 725 | ||
768 | /* Wait for write to complete. */ | 726 | /* Wait for write to complete. */ |
769 | if (ql_wait_for_mii_ready(qdev)) { | 727 | if (ql_wait_for_mii_ready(qdev)) { |
770 | if (netif_msg_link(qdev)) | 728 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); |
771 | printk(KERN_WARNING PFX | ||
772 | "%s: Timed out waiting for management port to " | ||
773 | "get free before issuing command.\n", | ||
774 | qdev->ndev->name); | ||
775 | return -1; | 729 | return -1; |
776 | } | 730 | } |
777 | 731 | ||
@@ -784,16 +738,12 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) | |||
784 | { | 738 | { |
785 | u32 temp; | 739 | u32 temp; |
786 | struct ql3xxx_port_registers __iomem *port_regs = | 740 | struct ql3xxx_port_registers __iomem *port_regs = |
787 | qdev->mem_map_registers; | 741 | qdev->mem_map_registers; |
788 | 742 | ||
789 | ql_mii_disable_scan_mode(qdev); | 743 | ql_mii_disable_scan_mode(qdev); |
790 | 744 | ||
791 | if (ql_wait_for_mii_ready(qdev)) { | 745 | if (ql_wait_for_mii_ready(qdev)) { |
792 | if (netif_msg_link(qdev)) | 746 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); |
793 | printk(KERN_WARNING PFX | ||
794 | "%s: Timed out waiting for management port to " | ||
795 | "get free before issuing command.\n", | ||
796 | qdev->ndev->name); | ||
797 | return -1; | 747 | return -1; |
798 | } | 748 | } |
799 | 749 | ||
@@ -808,11 +758,7 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) | |||
808 | 758 | ||
809 | /* Wait for the read to complete */ | 759 | /* Wait for the read to complete */ |
810 | if (ql_wait_for_mii_ready(qdev)) { | 760 | if (ql_wait_for_mii_ready(qdev)) { |
811 | if (netif_msg_link(qdev)) | 761 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); |
812 | printk(KERN_WARNING PFX | ||
813 | "%s: Timed out waiting for management port to " | ||
814 | "get free before issuing command.\n", | ||
815 | qdev->ndev->name); | ||
816 | return -1; | 762 | return -1; |
817 | } | 763 | } |
818 | 764 | ||
@@ -898,7 +844,7 @@ static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) | |||
898 | 844 | ||
899 | static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) | 845 | static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) |
900 | { | 846 | { |
901 | printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name); | 847 | netdev_info(qdev->ndev, "enabling Agere specific PHY\n"); |
902 | /* power down device bit 11 = 1 */ | 848 | /* power down device bit 11 = 1 */ |
903 | ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); | 849 | ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); |
904 | /* enable diagnostic mode bit 2 = 1 */ | 850 | /* enable diagnostic mode bit 2 = 1 */ |
@@ -918,7 +864,8 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) | |||
918 | /* point to hidden reg 0x2806 */ | 864 | /* point to hidden reg 0x2806 */ |
919 | ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); | 865 | ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); |
920 | /* Write new PHYAD w/bit 5 set */ | 866 | /* Write new PHYAD w/bit 5 set */ |
921 | ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); | 867 | ql_mii_write_reg_ex(qdev, 0x11, |
868 | 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); | ||
922 | /* | 869 | /* |
923 | * Disable diagnostic mode bit 2 = 0 | 870 | * Disable diagnostic mode bit 2 = 0 |
924 | * Power up device bit 11 = 0 | 871 | * Power up device bit 11 = 0 |
@@ -929,21 +876,19 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) | |||
929 | ql_mii_write_reg(qdev, 0x1c, 0xfaf0); | 876 | ql_mii_write_reg(qdev, 0x1c, 0xfaf0); |
930 | } | 877 | } |
931 | 878 | ||
932 | static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, | 879 | static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, |
933 | u16 phyIdReg0, u16 phyIdReg1) | 880 | u16 phyIdReg0, u16 phyIdReg1) |
934 | { | 881 | { |
935 | PHY_DEVICE_et result = PHY_TYPE_UNKNOWN; | 882 | enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN; |
936 | u32 oui; | 883 | u32 oui; |
937 | u16 model; | 884 | u16 model; |
938 | int i; | 885 | int i; |
939 | 886 | ||
940 | if (phyIdReg0 == 0xffff) { | 887 | if (phyIdReg0 == 0xffff) |
941 | return result; | 888 | return result; |
942 | } | ||
943 | 889 | ||
944 | if (phyIdReg1 == 0xffff) { | 890 | if (phyIdReg1 == 0xffff) |
945 | return result; | 891 | return result; |
946 | } | ||
947 | 892 | ||
948 | /* oui is split between two registers */ | 893 | /* oui is split between two registers */ |
949 | oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); | 894 | oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); |
@@ -951,15 +896,13 @@ static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, | |||
951 | model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; | 896 | model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; |
952 | 897 | ||
953 | /* Scan table for this PHY */ | 898 | /* Scan table for this PHY */ |
954 | for(i = 0; i < MAX_PHY_DEV_TYPES; i++) { | 899 | for (i = 0; i < MAX_PHY_DEV_TYPES; i++) { |
955 | if ((oui == PHY_DEVICES[i].phyIdOUI) && (model == PHY_DEVICES[i].phyIdModel)) | 900 | if ((oui == PHY_DEVICES[i].phyIdOUI) && |
956 | { | 901 | (model == PHY_DEVICES[i].phyIdModel)) { |
902 | netdev_info(qdev->ndev, "Phy: %s\n", | ||
903 | PHY_DEVICES[i].name); | ||
957 | result = PHY_DEVICES[i].phyDevice; | 904 | result = PHY_DEVICES[i].phyDevice; |
958 | 905 | break; | |
959 | printk(KERN_INFO "%s: Phy: %s\n", | ||
960 | qdev->ndev->name, PHY_DEVICES[i].name); | ||
961 | |||
962 | break; | ||
963 | } | 906 | } |
964 | } | 907 | } |
965 | 908 | ||
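getPhyType() reassembles a 22-bit OUI that the MII spec splits across two 16-bit ID registers. A standalone sketch of the same bit surgery; the literal masks are assumptions mirroring PHY_OUI_1_MASK (0xfc00) and PHY_MODEL_MASK (0x03f0) from the driver's qla3xxx.h:

    #include <stdint.h>

    /* Decode the two IEEE-style PHY identifier registers the way
     * getPhyType() does: register 0 carries the upper OUI bits,
     * register 1 carries the low 6 OUI bits (15:10), the model
     * number (9:4) and the revision (3:0). */
    static void decode_phy_id(uint16_t reg0, uint16_t reg1,
                              uint32_t *oui, uint16_t *model)
    {
            *oui   = ((uint32_t)reg0 << 6) | ((reg1 & 0xfc00) >> 10);
            *model = (reg1 & 0x03f0) >> 4;
    }

With either register reading 0xffff (no PHY, or a floating bus), the decode would produce garbage, which is why the two early 0xffff checks return PHY_TYPE_UNKNOWN before any bit manipulation happens.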
@@ -970,9 +913,8 @@ static int ql_phy_get_speed(struct ql3_adapter *qdev) | |||
970 | { | 913 | { |
971 | u16 reg; | 914 | u16 reg; |
972 | 915 | ||
973 | switch(qdev->phyType) { | 916 | switch (qdev->phyType) { |
974 | case PHY_AGERE_ET1011C: | 917 | case PHY_AGERE_ET1011C: { |
975 | { | ||
976 | if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0) | 918 | if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0) |
976 | if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0) | 918 | if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0) |
977 | return 0; | 919 | return 0; |
978 | 920 | ||
@@ -980,20 +922,20 @@ static int ql_phy_get_speed(struct ql3_adapter *qdev) | |||
980 | break; | 922 | break; |
981 | } | 923 | } |
982 | default: | 924 | default: |
983 | if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) | 925 | if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) |
984 | return 0; | 926 | return 0; |
985 | 927 | ||
986 | reg = (((reg & 0x18) >> 3) & 3); | 928 | reg = (((reg & 0x18) >> 3) & 3); |
987 | } | 929 | } |
988 | 930 | ||
989 | switch(reg) { | 931 | switch (reg) { |
990 | case 2: | 932 | case 2: |
991 | return SPEED_1000; | 933 | return SPEED_1000; |
992 | case 1: | 934 | case 1: |
993 | return SPEED_100; | 935 | return SPEED_100; |
994 | case 0: | 936 | case 0: |
995 | return SPEED_10; | 937 | return SPEED_10; |
996 | default: | 938 | default: |
997 | return -1; | 939 | return -1; |
998 | } | 940 | } |
999 | } | 941 | } |
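In the default (non-Agere) arm, ql_phy_get_speed() narrows the auxiliary status register to a two-bit code before the second switch. A worked reading with a hypothetical register value:

    reg = 0x0010;                   /* bit 4 set, bit 3 clear */
    reg = ((reg & 0x18) >> 3) & 3;  /* -> 2 */
    /* 2 -> SPEED_1000, 1 -> SPEED_100, 0 -> SPEED_10, else -1 */

Only the bit positions (4:3, the 0x18 mask) come from the code above; the 0x0010 is invented for illustration.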
@@ -1002,17 +944,15 @@ static int ql_is_full_dup(struct ql3_adapter *qdev) | |||
1002 | { | 944 | { |
1003 | u16 reg; | 945 | u16 reg; |
1004 | 946 | ||
1005 | switch(qdev->phyType) { | 947 | switch (qdev->phyType) { |
1006 | case PHY_AGERE_ET1011C: | 948 | case PHY_AGERE_ET1011C: { |
1007 | { | ||
1008 | if (ql_mii_read_reg(qdev, 0x1A, &reg)) | 949 | if (ql_mii_read_reg(qdev, 0x1A, &reg)) |
1009 | return 0; | 950 | return 0; |
1010 | 951 | ||
1011 | return ((reg & 0x0080) && (reg & 0x1000)) != 0; | 952 | return ((reg & 0x0080) && (reg & 0x1000)) != 0; |
1012 | } | 953 | } |
1013 | case PHY_VITESSE_VSC8211: | 954 | case PHY_VITESSE_VSC8211: |
1014 | default: | 955 | default: { |
1015 | { | ||
1016 | if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) | 956 | if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) |
1017 | return 0; | 957 | return 0; |
1018 | return (reg & PHY_AUX_DUPLEX_STAT) != 0; | 958 | return (reg & PHY_AUX_DUPLEX_STAT) != 0; |
@@ -1040,17 +980,15 @@ static int PHY_Setup(struct ql3_adapter *qdev) | |||
1040 | 980 | ||
1041 | /* Determine the PHY we are using by reading the ID's */ | 981 | /* Determine the PHY we are using by reading the ID's */ |
1042 | err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1); | 982 | err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1); |
1043 | if(err != 0) { | 983 | if (err != 0) { |
1044 | printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n", | 984 | netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); |
1045 | qdev->ndev->name); | 985 | return err; |
1046 | return err; | ||
1047 | } | 986 | } |
1048 | 987 | ||
1049 | err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2); | 988 | err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2); |
1050 | if(err != 0) { | 989 | if (err != 0) { |
1051 | printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n", | 990 | netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); |
1052 | qdev->ndev->name); | 991 | return err; |
1053 | return err; | ||
1054 | } | 992 | } |
1055 | 993 | ||
1056 | /* Check if we have a Agere PHY */ | 994 | /* Check if we have a Agere PHY */ |
@@ -1058,24 +996,22 @@ static int PHY_Setup(struct ql3_adapter *qdev) | |||
1058 | 996 | ||
1059 | /* Determine which MII address we should be using | 997 | /* Determine which MII address we should be using |
1060 | determined by the index of the card */ | 998 | determined by the index of the card */ |
1061 | if (qdev->mac_index == 0) { | 999 | if (qdev->mac_index == 0) |
1062 | miiAddr = MII_AGERE_ADDR_1; | 1000 | miiAddr = MII_AGERE_ADDR_1; |
1063 | } else { | 1001 | else |
1064 | miiAddr = MII_AGERE_ADDR_2; | 1002 | miiAddr = MII_AGERE_ADDR_2; |
1065 | } | ||
1066 | 1003 | ||
1067 | err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr); | 1004 | err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr); |
1068 | if(err != 0) { | 1005 | if (err != 0) { |
1069 | printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", | 1006 | netdev_err(qdev->ndev, |
1070 | qdev->ndev->name); | 1007 | "Could not read from reg PHY_ID_0_REG after Agere detected\n"); |
1071 | return err; | 1008 | return err; |
1072 | } | 1009 | } |
1073 | 1010 | ||
1074 | err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr); | 1011 | err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr); |
1075 | if(err != 0) { | 1012 | if (err != 0) { |
1076 | printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", | 1013 | netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); |
1077 | qdev->ndev->name); | 1014 | return err; |
1078 | return err; | ||
1079 | } | 1015 | } |
1080 | 1016 | ||
1081 | /* We need to remember to initialize the Agere PHY */ | 1017 | /* We need to remember to initialize the Agere PHY */ |
@@ -1090,7 +1026,7 @@ static int PHY_Setup(struct ql3_adapter *qdev) | |||
1090 | /* need this here so address gets changed */ | 1026 | /* need this here so address gets changed */ |
1091 | phyAgereSpecificInit(qdev, miiAddr); | 1027 | phyAgereSpecificInit(qdev, miiAddr); |
1092 | } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { | 1028 | } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { |
1093 | printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name); | 1029 | netdev_err(qdev->ndev, "PHY is unknown\n"); |
1094 | return -EIO; | 1030 | return -EIO; |
1095 | } | 1031 | } |
1096 | 1032 | ||
@@ -1103,7 +1039,7 @@ static int PHY_Setup(struct ql3_adapter *qdev) | |||
1103 | static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) | 1039 | static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) |
1104 | { | 1040 | { |
1105 | struct ql3xxx_port_registers __iomem *port_regs = | 1041 | struct ql3xxx_port_registers __iomem *port_regs = |
1106 | qdev->mem_map_registers; | 1042 | qdev->mem_map_registers; |
1107 | u32 value; | 1043 | u32 value; |
1108 | 1044 | ||
1109 | if (enable) | 1045 | if (enable) |
@@ -1123,7 +1059,7 @@ static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) | |||
1123 | static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) | 1059 | static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) |
1124 | { | 1060 | { |
1125 | struct ql3xxx_port_registers __iomem *port_regs = | 1061 | struct ql3xxx_port_registers __iomem *port_regs = |
1126 | qdev->mem_map_registers; | 1062 | qdev->mem_map_registers; |
1127 | u32 value; | 1063 | u32 value; |
1128 | 1064 | ||
1129 | if (enable) | 1065 | if (enable) |
@@ -1143,7 +1079,7 @@ static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) | |||
1143 | static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) | 1079 | static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) |
1144 | { | 1080 | { |
1145 | struct ql3xxx_port_registers __iomem *port_regs = | 1081 | struct ql3xxx_port_registers __iomem *port_regs = |
1146 | qdev->mem_map_registers; | 1082 | qdev->mem_map_registers; |
1147 | u32 value; | 1083 | u32 value; |
1148 | 1084 | ||
1149 | if (enable) | 1085 | if (enable) |
@@ -1163,7 +1099,7 @@ static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) | |||
1163 | static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) | 1099 | static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) |
1164 | { | 1100 | { |
1165 | struct ql3xxx_port_registers __iomem *port_regs = | 1101 | struct ql3xxx_port_registers __iomem *port_regs = |
1166 | qdev->mem_map_registers; | 1102 | qdev->mem_map_registers; |
1167 | u32 value; | 1103 | u32 value; |
1168 | 1104 | ||
1169 | if (enable) | 1105 | if (enable) |
@@ -1183,7 +1119,7 @@ static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) | |||
1183 | static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) | 1119 | static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) |
1184 | { | 1120 | { |
1185 | struct ql3xxx_port_registers __iomem *port_regs = | 1121 | struct ql3xxx_port_registers __iomem *port_regs = |
1186 | qdev->mem_map_registers; | 1122 | qdev->mem_map_registers; |
1187 | u32 value; | 1123 | u32 value; |
1188 | 1124 | ||
1189 | if (enable) | 1125 | if (enable) |
@@ -1205,7 +1141,7 @@ static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) | |||
1205 | static int ql_is_fiber(struct ql3_adapter *qdev) | 1141 | static int ql_is_fiber(struct ql3_adapter *qdev) |
1206 | { | 1142 | { |
1207 | struct ql3xxx_port_registers __iomem *port_regs = | 1143 | struct ql3xxx_port_registers __iomem *port_regs = |
1208 | qdev->mem_map_registers; | 1144 | qdev->mem_map_registers; |
1209 | u32 bitToCheck = 0; | 1145 | u32 bitToCheck = 0; |
1210 | u32 temp; | 1146 | u32 temp; |
1211 | 1147 | ||
@@ -1235,7 +1171,7 @@ static int ql_is_auto_cfg(struct ql3_adapter *qdev) | |||
1235 | static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) | 1171 | static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) |
1236 | { | 1172 | { |
1237 | struct ql3xxx_port_registers __iomem *port_regs = | 1173 | struct ql3xxx_port_registers __iomem *port_regs = |
1238 | qdev->mem_map_registers; | 1174 | qdev->mem_map_registers; |
1239 | u32 bitToCheck = 0; | 1175 | u32 bitToCheck = 0; |
1240 | u32 temp; | 1176 | u32 temp; |
1241 | 1177 | ||
@@ -1250,18 +1186,11 @@ static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) | |||
1250 | 1186 | ||
1251 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); | 1187 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); |
1252 | if (temp & bitToCheck) { | 1188 | if (temp & bitToCheck) { |
1253 | if (netif_msg_link(qdev)) | 1189 | netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n"); |
1254 | printk(KERN_INFO PFX | ||
1255 | "%s: Auto-Negotiate complete.\n", | ||
1256 | qdev->ndev->name); | ||
1257 | return 1; | 1190 | return 1; |
1258 | } else { | ||
1259 | if (netif_msg_link(qdev)) | ||
1260 | printk(KERN_WARNING PFX | ||
1261 | "%s: Auto-Negotiate incomplete.\n", | ||
1262 | qdev->ndev->name); | ||
1263 | return 0; | ||
1264 | } | 1191 | } |
1192 | netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n"); | ||
1193 | return 0; | ||
1265 | } | 1194 | } |
1266 | 1195 | ||
1267 | /* | 1196 | /* |
@@ -1278,7 +1207,7 @@ static int ql_is_neg_pause(struct ql3_adapter *qdev) | |||
1278 | static int ql_auto_neg_error(struct ql3_adapter *qdev) | 1207 | static int ql_auto_neg_error(struct ql3_adapter *qdev) |
1279 | { | 1208 | { |
1280 | struct ql3xxx_port_registers __iomem *port_regs = | 1209 | struct ql3xxx_port_registers __iomem *port_regs = |
1281 | qdev->mem_map_registers; | 1210 | qdev->mem_map_registers; |
1282 | u32 bitToCheck = 0; | 1211 | u32 bitToCheck = 0; |
1283 | u32 temp; | 1212 | u32 temp; |
1284 | 1213 | ||
@@ -1316,7 +1245,7 @@ static int ql_is_link_full_dup(struct ql3_adapter *qdev) | |||
1316 | static int ql_link_down_detect(struct ql3_adapter *qdev) | 1245 | static int ql_link_down_detect(struct ql3_adapter *qdev) |
1317 | { | 1246 | { |
1318 | struct ql3xxx_port_registers __iomem *port_regs = | 1247 | struct ql3xxx_port_registers __iomem *port_regs = |
1319 | qdev->mem_map_registers; | 1248 | qdev->mem_map_registers; |
1320 | u32 bitToCheck = 0; | 1249 | u32 bitToCheck = 0; |
1321 | u32 temp; | 1250 | u32 temp; |
1322 | 1251 | ||
@@ -1340,7 +1269,7 @@ static int ql_link_down_detect(struct ql3_adapter *qdev) | |||
1340 | static int ql_link_down_detect_clear(struct ql3_adapter *qdev) | 1269 | static int ql_link_down_detect_clear(struct ql3_adapter *qdev) |
1341 | { | 1270 | { |
1342 | struct ql3xxx_port_registers __iomem *port_regs = | 1271 | struct ql3xxx_port_registers __iomem *port_regs = |
1343 | qdev->mem_map_registers; | 1272 | qdev->mem_map_registers; |
1344 | 1273 | ||
1345 | switch (qdev->mac_index) { | 1274 | switch (qdev->mac_index) { |
1346 | case 0: | 1275 | case 0: |
@@ -1370,7 +1299,7 @@ static int ql_link_down_detect_clear(struct ql3_adapter *qdev) | |||
1370 | static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) | 1299 | static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) |
1371 | { | 1300 | { |
1372 | struct ql3xxx_port_registers __iomem *port_regs = | 1301 | struct ql3xxx_port_registers __iomem *port_regs = |
1373 | qdev->mem_map_registers; | 1302 | qdev->mem_map_registers; |
1374 | u32 bitToCheck = 0; | 1303 | u32 bitToCheck = 0; |
1375 | u32 temp; | 1304 | u32 temp; |
1376 | 1305 | ||
@@ -1387,16 +1316,13 @@ static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) | |||
1387 | 1316 | ||
1388 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); | 1317 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); |
1389 | if (temp & bitToCheck) { | 1318 | if (temp & bitToCheck) { |
1390 | if (netif_msg_link(qdev)) | 1319 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, |
1391 | printk(KERN_DEBUG PFX | 1320 | "not link master\n"); |
1392 | "%s: is not link master.\n", qdev->ndev->name); | ||
1393 | return 0; | 1321 | return 0; |
1394 | } else { | ||
1395 | if (netif_msg_link(qdev)) | ||
1396 | printk(KERN_DEBUG PFX | ||
1397 | "%s: is link master.\n", qdev->ndev->name); | ||
1398 | return 1; | ||
1399 | } | 1322 | } |
1323 | |||
1324 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n"); | ||
1325 | return 1; | ||
1400 | } | 1326 | } |
1401 | 1327 | ||
1402 | static void ql_phy_reset_ex(struct ql3_adapter *qdev) | 1328 | static void ql_phy_reset_ex(struct ql3_adapter *qdev) |
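This hunk keeps KERN_DEBUG explicit through netif_printk() rather than switching to netif_dbg(). A plausible reason, though the patch does not say so: netif_dbg() is routed through dynamic debug when CONFIG_DYNAMIC_DEBUG is enabled, so the message would vanish unless individually switched on, while netif_printk() preserves the old "print whenever the link bit of msg_enable is set" behavior. Its shape, again simplified from netdevice.h:

    #define netif_printk(priv, type, level, dev, fmt, args...)	\
    do {							\
            if (netif_msg_##type(priv))				\
                    netdev_printk(level, dev, fmt, ##args);	\
    } while (0)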
@@ -1410,19 +1336,20 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) | |||
1410 | u16 reg; | 1336 | u16 reg; |
1411 | u16 portConfiguration; | 1337 | u16 portConfiguration; |
1412 | 1338 | ||
1413 | if(qdev->phyType == PHY_AGERE_ET1011C) { | 1339 | if (qdev->phyType == PHY_AGERE_ET1011C) |
1414 | /* turn off external loopback */ | ||
1415 | ql_mii_write_reg(qdev, 0x13, 0x0000); | 1340 | ql_mii_write_reg(qdev, 0x13, 0x0000); |
1416 | } | 1341 | /* turn off external loopback */ |
1417 | 1342 | ||
1418 | if(qdev->mac_index == 0) | 1343 | if (qdev->mac_index == 0) |
1419 | portConfiguration = qdev->nvram_data.macCfg_port0.portConfiguration; | 1344 | portConfiguration = |
1345 | qdev->nvram_data.macCfg_port0.portConfiguration; | ||
1420 | else | 1346 | else |
1421 | portConfiguration = qdev->nvram_data.macCfg_port1.portConfiguration; | 1347 | portConfiguration = |
1348 | qdev->nvram_data.macCfg_port1.portConfiguration; | ||
1422 | 1349 | ||
1423 | /* Some HBA's in the field are set to 0 and they need to | 1350 | /* Some HBA's in the field are set to 0 and they need to |
1424 | be reinterpreted with a default value */ | 1351 | be reinterpreted with a default value */ |
1425 | if(portConfiguration == 0) | 1352 | if (portConfiguration == 0) |
1426 | portConfiguration = PORT_CONFIG_DEFAULT; | 1353 | portConfiguration = PORT_CONFIG_DEFAULT; |
1427 | 1354 | ||
1428 | /* Set the 1000 advertisements */ | 1355 | /* Set the 1000 advertisements */ |
@@ -1430,8 +1357,8 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) | |||
1430 | PHYAddr[qdev->mac_index]); | 1357 | PHYAddr[qdev->mac_index]); |
1431 | reg &= ~PHY_GIG_ALL_PARAMS; | 1358 | reg &= ~PHY_GIG_ALL_PARAMS; |
1432 | 1359 | ||
1433 | if(portConfiguration & PORT_CONFIG_1000MB_SPEED) { | 1360 | if (portConfiguration & PORT_CONFIG_1000MB_SPEED) { |
1434 | if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) | 1361 | if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) |
1435 | reg |= PHY_GIG_ADV_1000F; | 1362 | reg |= PHY_GIG_ADV_1000F; |
1436 | else | 1363 | else |
1437 | reg |= PHY_GIG_ADV_1000H; | 1364 | reg |= PHY_GIG_ADV_1000H; |
@@ -1445,29 +1372,27 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) | |||
1445 | PHYAddr[qdev->mac_index]); | 1372 | PHYAddr[qdev->mac_index]); |
1446 | reg &= ~PHY_NEG_ALL_PARAMS; | 1373 | reg &= ~PHY_NEG_ALL_PARAMS; |
1447 | 1374 | ||
1448 | if(portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) | 1375 | if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) |
1449 | reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE; | 1376 | reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE; |
1450 | 1377 | ||
1451 | if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { | 1378 | if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { |
1452 | if(portConfiguration & PORT_CONFIG_100MB_SPEED) | 1379 | if (portConfiguration & PORT_CONFIG_100MB_SPEED) |
1453 | reg |= PHY_NEG_ADV_100F; | 1380 | reg |= PHY_NEG_ADV_100F; |
1454 | 1381 | ||
1455 | if(portConfiguration & PORT_CONFIG_10MB_SPEED) | 1382 | if (portConfiguration & PORT_CONFIG_10MB_SPEED) |
1456 | reg |= PHY_NEG_ADV_10F; | 1383 | reg |= PHY_NEG_ADV_10F; |
1457 | } | 1384 | } |
1458 | 1385 | ||
1459 | if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { | 1386 | if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { |
1460 | if(portConfiguration & PORT_CONFIG_100MB_SPEED) | 1387 | if (portConfiguration & PORT_CONFIG_100MB_SPEED) |
1461 | reg |= PHY_NEG_ADV_100H; | 1388 | reg |= PHY_NEG_ADV_100H; |
1462 | 1389 | ||
1463 | if(portConfiguration & PORT_CONFIG_10MB_SPEED) | 1390 | if (portConfiguration & PORT_CONFIG_10MB_SPEED) |
1464 | reg |= PHY_NEG_ADV_10H; | 1391 | reg |= PHY_NEG_ADV_10H; |
1465 | } | 1392 | } |
1466 | 1393 | ||
1467 | if(portConfiguration & | 1394 | if (portConfiguration & PORT_CONFIG_1000MB_SPEED) |
1468 | PORT_CONFIG_1000MB_SPEED) { | ||
1469 | reg |= 1; | 1395 | reg |= 1; |
1470 | } | ||
1471 | 1396 | ||
1472 | ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, | 1397 | ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, |
1473 | PHYAddr[qdev->mac_index]); | 1398 | PHYAddr[qdev->mac_index]); |
@@ -1492,7 +1417,7 @@ static void ql_phy_init_ex(struct ql3_adapter *qdev) | |||
1492 | static u32 ql_get_link_state(struct ql3_adapter *qdev) | 1417 | static u32 ql_get_link_state(struct ql3_adapter *qdev) |
1493 | { | 1418 | { |
1494 | struct ql3xxx_port_registers __iomem *port_regs = | 1419 | struct ql3xxx_port_registers __iomem *port_regs = |
1495 | qdev->mem_map_registers; | 1420 | qdev->mem_map_registers; |
1496 | u32 bitToCheck = 0; | 1421 | u32 bitToCheck = 0; |
1497 | u32 temp, linkState; | 1422 | u32 temp, linkState; |
1498 | 1423 | ||
@@ -1504,22 +1429,22 @@ static u32 ql_get_link_state(struct ql3_adapter *qdev) | |||
1504 | bitToCheck = PORT_STATUS_UP1; | 1429 | bitToCheck = PORT_STATUS_UP1; |
1505 | break; | 1430 | break; |
1506 | } | 1431 | } |
1432 | |||
1507 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); | 1433 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); |
1508 | if (temp & bitToCheck) { | 1434 | if (temp & bitToCheck) |
1509 | linkState = LS_UP; | 1435 | linkState = LS_UP; |
1510 | } else { | 1436 | else |
1511 | linkState = LS_DOWN; | 1437 | linkState = LS_DOWN; |
1512 | } | 1438 | |
1513 | return linkState; | 1439 | return linkState; |
1514 | } | 1440 | } |
1515 | 1441 | ||
1516 | static int ql_port_start(struct ql3_adapter *qdev) | 1442 | static int ql_port_start(struct ql3_adapter *qdev) |
1517 | { | 1443 | { |
1518 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 1444 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
1519 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 1445 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * |
1520 | 2) << 7)) { | 1446 | 2) << 7)) { |
1521 | printk(KERN_ERR "%s: Could not get hw lock for GIO\n", | 1447 | netdev_err(qdev->ndev, "Could not get hw lock for GIO\n"); |
1522 | qdev->ndev->name); | ||
1523 | return -1; | 1448 | return -1; |
1524 | } | 1449 | } |
1525 | 1450 | ||
@@ -1537,19 +1462,16 @@ static int ql_port_start(struct ql3_adapter *qdev) | |||
1537 | static int ql_finish_auto_neg(struct ql3_adapter *qdev) | 1462 | static int ql_finish_auto_neg(struct ql3_adapter *qdev) |
1538 | { | 1463 | { |
1539 | 1464 | ||
1540 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 1465 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
1541 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 1466 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * |
1542 | 2) << 7)) | 1467 | 2) << 7)) |
1543 | return -1; | 1468 | return -1; |
1544 | 1469 | ||
1545 | if (!ql_auto_neg_error(qdev)) { | 1470 | if (!ql_auto_neg_error(qdev)) { |
1546 | if (test_bit(QL_LINK_MASTER,&qdev->flags)) { | 1471 | if (test_bit(QL_LINK_MASTER, &qdev->flags)) { |
1547 | /* configure the MAC */ | 1472 | /* configure the MAC */ |
1548 | if (netif_msg_link(qdev)) | 1473 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, |
1549 | printk(KERN_DEBUG PFX | 1474 | "Configuring link\n"); |
1550 | "%s: Configuring link.\n", | ||
1551 | qdev->ndev-> | ||
1552 | name); | ||
1553 | ql_mac_cfg_soft_reset(qdev, 1); | 1475 | ql_mac_cfg_soft_reset(qdev, 1); |
1554 | ql_mac_cfg_gig(qdev, | 1476 | ql_mac_cfg_gig(qdev, |
1555 | (ql_get_link_speed | 1477 | (ql_get_link_speed |
@@ -1564,43 +1486,32 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev) | |||
1564 | ql_mac_cfg_soft_reset(qdev, 0); | 1486 | ql_mac_cfg_soft_reset(qdev, 0); |
1565 | 1487 | ||
1566 | /* enable the MAC */ | 1488 | /* enable the MAC */ |
1567 | if (netif_msg_link(qdev)) | 1489 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, |
1568 | printk(KERN_DEBUG PFX | 1490 | "Enabling mac\n"); |
1569 | "%s: Enabling mac.\n", | ||
1570 | qdev->ndev-> | ||
1571 | name); | ||
1572 | ql_mac_enable(qdev, 1); | 1491 | ql_mac_enable(qdev, 1); |
1573 | } | 1492 | } |
1574 | 1493 | ||
1575 | qdev->port_link_state = LS_UP; | 1494 | qdev->port_link_state = LS_UP; |
1576 | netif_start_queue(qdev->ndev); | 1495 | netif_start_queue(qdev->ndev); |
1577 | netif_carrier_on(qdev->ndev); | 1496 | netif_carrier_on(qdev->ndev); |
1578 | if (netif_msg_link(qdev)) | 1497 | netif_info(qdev, link, qdev->ndev, |
1579 | printk(KERN_INFO PFX | 1498 | "Link is up at %d Mbps, %s duplex\n", |
1580 | "%s: Link is up at %d Mbps, %s duplex.\n", | 1499 | ql_get_link_speed(qdev), |
1581 | qdev->ndev->name, | 1500 | ql_is_link_full_dup(qdev) ? "full" : "half"); |
1582 | ql_get_link_speed(qdev), | ||
1583 | ql_is_link_full_dup(qdev) | ||
1584 | ? "full" : "half"); | ||
1585 | 1501 | ||
1586 | } else { /* Remote error detected */ | 1502 | } else { /* Remote error detected */ |
1587 | 1503 | ||
1588 | if (test_bit(QL_LINK_MASTER,&qdev->flags)) { | 1504 | if (test_bit(QL_LINK_MASTER, &qdev->flags)) { |
1589 | if (netif_msg_link(qdev)) | 1505 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, |
1590 | printk(KERN_DEBUG PFX | 1506 | "Remote error detected. Calling ql_port_start()\n"); |
1591 | "%s: Remote error detected. " | ||
1592 | "Calling ql_port_start().\n", | ||
1593 | qdev->ndev-> | ||
1594 | name); | ||
1595 | /* | 1507 | /* |
1596 | * ql_port_start() is shared code and needs | 1508 | * ql_port_start() is shared code and needs |
1597 | * to lock the PHY on it's own. | 1509 | * to lock the PHY on it's own. |
1598 | */ | 1510 | */ |
1599 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | 1511 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); |
1600 | if(ql_port_start(qdev)) {/* Restart port */ | 1512 | if (ql_port_start(qdev)) /* Restart port */ |
1601 | return -1; | 1513 | return -1; |
1602 | } else | 1514 | return 0; |
1603 | return 0; | ||
1604 | } | 1515 | } |
1605 | } | 1516 | } |
1606 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | 1517 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); |
@@ -1619,33 +1530,28 @@ static void ql_link_state_machine_work(struct work_struct *work) | |||
1619 | 1530 | ||
1620 | curr_link_state = ql_get_link_state(qdev); | 1531 | curr_link_state = ql_get_link_state(qdev); |
1621 | 1532 | ||
1622 | if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) { | 1533 | if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { |
1623 | if (netif_msg_link(qdev)) | 1534 | netif_info(qdev, link, qdev->ndev, |
1624 | printk(KERN_INFO PFX | 1535 | "Reset in progress, skip processing link state\n"); |
1625 | "%s: Reset in progress, skip processing link " | ||
1626 | "state.\n", qdev->ndev->name); | ||
1627 | 1536 | ||
1628 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 1537 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
1629 | 1538 | ||
1630 | /* Restart timer on 2 second interval. */ | 1539 | /* Restart timer on 2 second interval. */ |
1631 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);\ | 1540 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); |
1632 | 1541 | ||
1633 | return; | 1542 | return; |
1634 | } | 1543 | } |
1635 | 1544 | ||
1636 | switch (qdev->port_link_state) { | 1545 | switch (qdev->port_link_state) { |
1637 | default: | 1546 | default: |
1638 | if (test_bit(QL_LINK_MASTER,&qdev->flags)) { | 1547 | if (test_bit(QL_LINK_MASTER, &qdev->flags)) |
1639 | ql_port_start(qdev); | 1548 | ql_port_start(qdev); |
1640 | } | ||
1641 | qdev->port_link_state = LS_DOWN; | 1549 | qdev->port_link_state = LS_DOWN; |
1642 | /* Fall Through */ | 1550 | /* Fall Through */ |
1643 | 1551 | ||
1644 | case LS_DOWN: | 1552 | case LS_DOWN: |
1645 | if (curr_link_state == LS_UP) { | 1553 | if (curr_link_state == LS_UP) { |
1646 | if (netif_msg_link(qdev)) | 1554 | netif_info(qdev, link, qdev->ndev, "Link is up\n"); |
1647 | printk(KERN_INFO PFX "%s: Link is up.\n", | ||
1648 | qdev->ndev->name); | ||
1649 | if (ql_is_auto_neg_complete(qdev)) | 1555 | if (ql_is_auto_neg_complete(qdev)) |
1650 | ql_finish_auto_neg(qdev); | 1556 | ql_finish_auto_neg(qdev); |
1651 | 1557 | ||
@@ -1662,9 +1568,7 @@ static void ql_link_state_machine_work(struct work_struct *work) | |||
1662 | * back up | 1568 | * back up |
1663 | */ | 1569 | */ |
1664 | if (curr_link_state == LS_DOWN) { | 1570 | if (curr_link_state == LS_DOWN) { |
1665 | if (netif_msg_link(qdev)) | 1571 | netif_info(qdev, link, qdev->ndev, "Link is down\n"); |
1666 | printk(KERN_INFO PFX "%s: Link is down.\n", | ||
1667 | qdev->ndev->name); | ||
1668 | qdev->port_link_state = LS_DOWN; | 1572 | qdev->port_link_state = LS_DOWN; |
1669 | } | 1573 | } |
1670 | if (ql_link_down_detect(qdev)) | 1574 | if (ql_link_down_detect(qdev)) |
@@ -1683,9 +1587,9 @@ static void ql_link_state_machine_work(struct work_struct *work) | |||
1683 | static void ql_get_phy_owner(struct ql3_adapter *qdev) | 1587 | static void ql_get_phy_owner(struct ql3_adapter *qdev) |
1684 | { | 1588 | { |
1685 | if (ql_this_adapter_controls_port(qdev)) | 1589 | if (ql_this_adapter_controls_port(qdev)) |
1686 | set_bit(QL_LINK_MASTER,&qdev->flags); | 1590 | set_bit(QL_LINK_MASTER, &qdev->flags); |
1687 | else | 1591 | else |
1688 | clear_bit(QL_LINK_MASTER,&qdev->flags); | 1592 | clear_bit(QL_LINK_MASTER, &qdev->flags); |
1689 | } | 1593 | } |
1690 | 1594 | ||
1691 | /* | 1595 | /* |
@@ -1695,7 +1599,7 @@ static void ql_init_scan_mode(struct ql3_adapter *qdev) | |||
1695 | { | 1599 | { |
1696 | ql_mii_enable_scan_mode(qdev); | 1600 | ql_mii_enable_scan_mode(qdev); |
1697 | 1601 | ||
1698 | if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { | 1602 | if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { |
1699 | if (ql_this_adapter_controls_port(qdev)) | 1603 | if (ql_this_adapter_controls_port(qdev)) |
1700 | ql_petbi_init_ex(qdev); | 1604 | ql_petbi_init_ex(qdev); |
1701 | } else { | 1605 | } else { |
@@ -1705,18 +1609,18 @@ static void ql_init_scan_mode(struct ql3_adapter *qdev) | |||
1705 | } | 1609 | } |
1706 | 1610 | ||
1707 | /* | 1611 | /* |
1708 | * MII_Setup needs to be called before taking the PHY out of reset so that the | 1612 | * MII_Setup needs to be called before taking the PHY out of reset |
1709 | * management interface clock speed can be set properly. It would be better if | 1613 | * so that the management interface clock speed can be set properly. |
1710 | * we had a way to disable MDC until after the PHY is out of reset, but we | 1614 | * It would be better if we had a way to disable MDC until after the |
1711 | * don't have that capability. | 1615 | * PHY is out of reset, but we don't have that capability. |
1712 | */ | 1616 | */ |
1713 | static int ql_mii_setup(struct ql3_adapter *qdev) | 1617 | static int ql_mii_setup(struct ql3_adapter *qdev) |
1714 | { | 1618 | { |
1715 | u32 reg; | 1619 | u32 reg; |
1716 | struct ql3xxx_port_registers __iomem *port_regs = | 1620 | struct ql3xxx_port_registers __iomem *port_regs = |
1717 | qdev->mem_map_registers; | 1621 | qdev->mem_map_registers; |
1718 | 1622 | ||
1719 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 1623 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
1720 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 1624 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * |
1721 | 2) << 7)) | 1625 | 2) << 7)) |
1722 | return -1; | 1626 | return -1; |
@@ -1735,24 +1639,24 @@ static int ql_mii_setup(struct ql3_adapter *qdev) | |||
1735 | return 0; | 1639 | return 0; |
1736 | } | 1640 | } |
1737 | 1641 | ||
1642 | #define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ | ||
1643 | SUPPORTED_FIBRE | \ | ||
1644 | SUPPORTED_Autoneg) | ||
1645 | #define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ | ||
1646 | SUPPORTED_10baseT_Full | \ | ||
1647 | SUPPORTED_100baseT_Half | \ | ||
1648 | SUPPORTED_100baseT_Full | \ | ||
1649 | SUPPORTED_1000baseT_Half | \ | ||
1650 | SUPPORTED_1000baseT_Full | \ | ||
1651 | SUPPORTED_Autoneg | \ | ||
1652 | SUPPORTED_TP); \ | ||
1653 | |||
1738 | static u32 ql_supported_modes(struct ql3_adapter *qdev) | 1654 | static u32 ql_supported_modes(struct ql3_adapter *qdev) |
1739 | { | 1655 | { |
1740 | u32 supported; | 1656 | if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) |
1657 | return SUPPORTED_OPTICAL_MODES; | ||
1741 | 1658 | ||
1742 | if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { | 1659 | return SUPPORTED_TP_MODES; |
1743 | supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE | ||
1744 | | SUPPORTED_Autoneg; | ||
1745 | } else { | ||
1746 | supported = SUPPORTED_10baseT_Half | ||
1747 | | SUPPORTED_10baseT_Full | ||
1748 | | SUPPORTED_100baseT_Half | ||
1749 | | SUPPORTED_100baseT_Full | ||
1750 | | SUPPORTED_1000baseT_Half | ||
1751 | | SUPPORTED_1000baseT_Full | ||
1752 | | SUPPORTED_Autoneg | SUPPORTED_TP; | ||
1753 | } | ||
1754 | |||
1755 | return supported; | ||
1756 | } | 1660 | } |
1757 | 1661 | ||
1758 | static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) | 1662 | static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) |
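The two new #define blocks reduce ql_supported_modes() to a branch between constants. Their consumer is the ethtool get_settings path, which appears a few hunks below; condensed from that hunk:

    ecmd->supported = ql_supported_modes(qdev);
    if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
            ecmd->port = PORT_FIBRE;
    else
            ecmd->port = PORT_TP;

A fiber board therefore reports only 1000baseT-Full/FIBRE/Autoneg, while copper boards advertise the full 10/100/1000 half- and full-duplex set.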
@@ -1760,9 +1664,9 @@ static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) | |||
1760 | int status; | 1664 | int status; |
1761 | unsigned long hw_flags; | 1665 | unsigned long hw_flags; |
1762 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 1666 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
1763 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 1667 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
1764 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 1668 | (QL_RESOURCE_BITS_BASE_CODE | |
1765 | 2) << 7)) { | 1669 | (qdev->mac_index) * 2) << 7)) { |
1766 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 1670 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
1767 | return 0; | 1671 | return 0; |
1768 | } | 1672 | } |
@@ -1777,9 +1681,9 @@ static u32 ql_get_speed(struct ql3_adapter *qdev) | |||
1777 | u32 status; | 1681 | u32 status; |
1778 | unsigned long hw_flags; | 1682 | unsigned long hw_flags; |
1779 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 1683 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
1780 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 1684 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
1781 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 1685 | (QL_RESOURCE_BITS_BASE_CODE | |
1782 | 2) << 7)) { | 1686 | (qdev->mac_index) * 2) << 7)) { |
1783 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 1687 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
1784 | return 0; | 1688 | return 0; |
1785 | } | 1689 | } |
@@ -1794,9 +1698,9 @@ static int ql_get_full_dup(struct ql3_adapter *qdev) | |||
1794 | int status; | 1698 | int status; |
1795 | unsigned long hw_flags; | 1699 | unsigned long hw_flags; |
1796 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 1700 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
1797 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 1701 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
1798 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 1702 | (QL_RESOURCE_BITS_BASE_CODE | |
1799 | 2) << 7)) { | 1703 | (qdev->mac_index) * 2) << 7)) { |
1800 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 1704 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
1801 | return 0; | 1705 | return 0; |
1802 | } | 1706 | } |
@@ -1806,7 +1710,6 @@ static int ql_get_full_dup(struct ql3_adapter *qdev) | |||
1806 | return status; | 1710 | return status; |
1807 | } | 1711 | } |
1808 | 1712 | ||
1809 | |||
1810 | static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) | 1713 | static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) |
1811 | { | 1714 | { |
1812 | struct ql3_adapter *qdev = netdev_priv(ndev); | 1715 | struct ql3_adapter *qdev = netdev_priv(ndev); |
@@ -1814,7 +1717,7 @@ static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) | |||
1814 | ecmd->transceiver = XCVR_INTERNAL; | 1717 | ecmd->transceiver = XCVR_INTERNAL; |
1815 | ecmd->supported = ql_supported_modes(qdev); | 1718 | ecmd->supported = ql_supported_modes(qdev); |
1816 | 1719 | ||
1817 | if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { | 1720 | if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { |
1818 | ecmd->port = PORT_FIBRE; | 1721 | ecmd->port = PORT_FIBRE; |
1819 | } else { | 1722 | } else { |
1820 | ecmd->port = PORT_TP; | 1723 | ecmd->port = PORT_TP; |
@@ -1855,10 +1758,11 @@ static void ql_get_pauseparam(struct net_device *ndev, | |||
1855 | struct ethtool_pauseparam *pause) | 1758 | struct ethtool_pauseparam *pause) |
1856 | { | 1759 | { |
1857 | struct ql3_adapter *qdev = netdev_priv(ndev); | 1760 | struct ql3_adapter *qdev = netdev_priv(ndev); |
1858 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 1761 | struct ql3xxx_port_registers __iomem *port_regs = |
1762 | qdev->mem_map_registers; | ||
1859 | 1763 | ||
1860 | u32 reg; | 1764 | u32 reg; |
1861 | if(qdev->mac_index == 0) | 1765 | if (qdev->mac_index == 0) |
1862 | reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); | 1766 | reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); |
1863 | else | 1767 | else |
1864 | reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); | 1768 | reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); |
@@ -1885,12 +1789,12 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) | |||
1885 | 1789 | ||
1886 | while (lrg_buf_cb) { | 1790 | while (lrg_buf_cb) { |
1887 | if (!lrg_buf_cb->skb) { | 1791 | if (!lrg_buf_cb->skb) { |
1888 | lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, | 1792 | lrg_buf_cb->skb = |
1889 | qdev->lrg_buffer_len); | 1793 | netdev_alloc_skb(qdev->ndev, |
1794 | qdev->lrg_buffer_len); | ||
1890 | if (unlikely(!lrg_buf_cb->skb)) { | 1795 | if (unlikely(!lrg_buf_cb->skb)) { |
1891 | printk(KERN_DEBUG PFX | 1796 | netdev_printk(KERN_DEBUG, qdev->ndev, |
1892 | "%s: Failed netdev_alloc_skb().\n", | 1797 | "Failed netdev_alloc_skb()\n"); |
1893 | qdev->ndev->name); | ||
1894 | break; | 1798 | break; |
1895 | } else { | 1799 | } else { |
1896 | /* | 1800 | /* |
@@ -1905,9 +1809,10 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) | |||
1905 | PCI_DMA_FROMDEVICE); | 1809 | PCI_DMA_FROMDEVICE); |
1906 | 1810 | ||
1907 | err = pci_dma_mapping_error(qdev->pdev, map); | 1811 | err = pci_dma_mapping_error(qdev->pdev, map); |
1908 | if(err) { | 1812 | if (err) { |
1909 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 1813 | netdev_err(qdev->ndev, |
1910 | qdev->ndev->name, err); | 1814 | "PCI mapping failed with error: %d\n", |
1815 | err); | ||
1911 | dev_kfree_skb(lrg_buf_cb->skb); | 1816 | dev_kfree_skb(lrg_buf_cb->skb); |
1912 | lrg_buf_cb->skb = NULL; | 1817 | lrg_buf_cb->skb = NULL; |
1913 | break; | 1818 | break; |
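This is one instance of a pattern repeated throughout the driver: map, check, and unwind before the address is ever handed to hardware. The canonical shape under the PCI DMA API of this period (a sketch, not a verbatim quote of the driver):

    map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
    if (pci_dma_mapping_error(qdev->pdev, map)) {
            netdev_err(qdev->ndev, "PCI mapping failed\n");
            dev_kfree_skb(skb);     /* give the buffer back */
            break;                  /* leave the free queue short */
    }
    /* only now is 'map' a bus address the chip may use */

The pci_dma_mapping_error() call is mandatory: on IOMMU systems a failed mapping is not guaranteed to look like a NULL address, so the API requires this check rather than a comparison against zero.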
@@ -1915,9 +1820,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) | |||
1915 | 1820 | ||
1916 | 1821 | ||
1917 | lrg_buf_cb->buf_phy_addr_low = | 1822 | lrg_buf_cb->buf_phy_addr_low = |
1918 | cpu_to_le32(LS_64BITS(map)); | 1823 | cpu_to_le32(LS_64BITS(map)); |
1919 | lrg_buf_cb->buf_phy_addr_high = | 1824 | lrg_buf_cb->buf_phy_addr_high = |
1920 | cpu_to_le32(MS_64BITS(map)); | 1825 | cpu_to_le32(MS_64BITS(map)); |
1921 | dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); | 1826 | dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); |
1922 | dma_unmap_len_set(lrg_buf_cb, maplen, | 1827 | dma_unmap_len_set(lrg_buf_cb, maplen, |
1923 | qdev->lrg_buffer_len - | 1828 | qdev->lrg_buffer_len - |
@@ -1937,7 +1842,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) | |||
1937 | */ | 1842 | */ |
1938 | static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) | 1843 | static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) |
1939 | { | 1844 | { |
1940 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 1845 | struct ql3xxx_port_registers __iomem *port_regs = |
1846 | qdev->mem_map_registers; | ||
1847 | |||
1941 | if (qdev->small_buf_release_cnt >= 16) { | 1848 | if (qdev->small_buf_release_cnt >= 16) { |
1942 | while (qdev->small_buf_release_cnt >= 16) { | 1849 | while (qdev->small_buf_release_cnt >= 16) { |
1943 | qdev->small_buf_q_producer_index++; | 1850 | qdev->small_buf_q_producer_index++; |
@@ -1961,7 +1868,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) | |||
1961 | struct bufq_addr_element *lrg_buf_q_ele; | 1868 | struct bufq_addr_element *lrg_buf_q_ele; |
1962 | int i; | 1869 | int i; |
1963 | struct ql_rcv_buf_cb *lrg_buf_cb; | 1870 | struct ql_rcv_buf_cb *lrg_buf_cb; |
1964 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 1871 | struct ql3xxx_port_registers __iomem *port_regs = |
1872 | qdev->mem_map_registers; | ||
1965 | 1873 | ||
1966 | if ((qdev->lrg_buf_free_count >= 8) && | 1874 | if ((qdev->lrg_buf_free_count >= 8) && |
1967 | (qdev->lrg_buf_release_cnt >= 16)) { | 1875 | (qdev->lrg_buf_release_cnt >= 16)) { |
@@ -1989,7 +1897,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) | |||
1989 | 1897 | ||
1990 | qdev->lrg_buf_q_producer_index++; | 1898 | qdev->lrg_buf_q_producer_index++; |
1991 | 1899 | ||
1992 | if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries) | 1900 | if (qdev->lrg_buf_q_producer_index == |
1901 | qdev->num_lbufq_entries) | ||
1993 | qdev->lrg_buf_q_producer_index = 0; | 1902 | qdev->lrg_buf_q_producer_index = 0; |
1994 | 1903 | ||
1995 | if (qdev->lrg_buf_q_producer_index == | 1904 | if (qdev->lrg_buf_q_producer_index == |
@@ -2011,23 +1920,26 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, | |||
2011 | int i; | 1920 | int i; |
2012 | int retval = 0; | 1921 | int retval = 0; |
2013 | 1922 | ||
2014 | if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { | 1923 | if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { |
2015 | printk(KERN_WARNING "Frame short but, frame was padded and sent.\n"); | 1924 | netdev_warn(qdev->ndev, |
1925 | "Frame too short but it was padded and sent\n"); | ||
2016 | } | 1926 | } |
2017 | 1927 | ||
2018 | tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; | 1928 | tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; |
2019 | 1929 | ||
2020 | /* Check the transmit response flags for any errors */ | 1930 | /* Check the transmit response flags for any errors */ |
2021 | if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { | 1931 | if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { |
2022 | printk(KERN_ERR "Frame too short to be legal, frame not sent.\n"); | 1932 | netdev_err(qdev->ndev, |
1933 | "Frame too short to be legal, frame not sent\n"); | ||
2023 | 1934 | ||
2024 | qdev->ndev->stats.tx_errors++; | 1935 | qdev->ndev->stats.tx_errors++; |
2025 | retval = -EIO; | 1936 | retval = -EIO; |
2026 | goto frame_not_sent; | 1937 | goto frame_not_sent; |
2027 | } | 1938 | } |
2028 | 1939 | ||
2029 | if(tx_cb->seg_count == 0) { | 1940 | if (tx_cb->seg_count == 0) { |
2030 | printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id); | 1941 | netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", |
1942 | mac_rsp->transaction_id); | ||
2031 | 1943 | ||
2032 | qdev->ndev->stats.tx_errors++; | 1944 | qdev->ndev->stats.tx_errors++; |
2033 | retval = -EIO; | 1945 | retval = -EIO; |
@@ -2073,7 +1985,7 @@ static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) | |||
2073 | qdev->lrg_buf_release_cnt++; | 1985 | qdev->lrg_buf_release_cnt++; |
2074 | if (++qdev->lrg_buf_index == qdev->num_large_buffers) | 1986 | if (++qdev->lrg_buf_index == qdev->num_large_buffers) |
2075 | qdev->lrg_buf_index = 0; | 1987 | qdev->lrg_buf_index = 0; |
2076 | return(lrg_buf_cb); | 1988 | return lrg_buf_cb; |
2077 | } | 1989 | } |
2078 | 1990 | ||
2079 | /* | 1991 | /* |
@@ -2177,12 +2089,11 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, | |||
2177 | if (checksum & | 2089 | if (checksum & |
2178 | (IB_IP_IOCB_RSP_3032_ICE | | 2090 | (IB_IP_IOCB_RSP_3032_ICE | |
2179 | IB_IP_IOCB_RSP_3032_CE)) { | 2091 | IB_IP_IOCB_RSP_3032_CE)) { |
2180 | printk(KERN_ERR | 2092 | netdev_err(ndev, |
2181 | "%s: Bad checksum for this %s packet, checksum = %x.\n", | 2093 | "%s: Bad checksum for this %s packet, checksum = %x\n", |
2182 | __func__, | 2094 | __func__, |
2183 | ((checksum & | 2095 | ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? |
2184 | IB_IP_IOCB_RSP_3032_TCP) ? "TCP" : | 2096 | "TCP" : "UDP"), checksum); |
2185 | "UDP"),checksum); | ||
2186 | } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || | 2097 | } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || |
2187 | (checksum & IB_IP_IOCB_RSP_3032_UDP && | 2098 | (checksum & IB_IP_IOCB_RSP_3032_UDP && |
2188 | !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { | 2099 | !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { |
@@ -2215,8 +2126,8 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, | |||
2215 | net_rsp = qdev->rsp_current; | 2126 | net_rsp = qdev->rsp_current; |
2216 | rmb(); | 2127 | rmb(); |
2217 | /* | 2128 | /* |
2218 | * Fix 4032 chipe undocumented "feature" where bit-8 is set if the | 2129 | * Fix 4032 chip's undocumented "feature" where bit-8 is set |
2219 | * inbound completion is for a VLAN. | 2130 | * if the inbound completion is for a VLAN. |
2220 | */ | 2131 | */ |
2221 | if (qdev->device_id == QL3032_DEVICE_ID) | 2132 | if (qdev->device_id == QL3032_DEVICE_ID) |
2222 | net_rsp->opcode &= 0x7f; | 2133 | net_rsp->opcode &= 0x7f; |
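"Bit-8" in the reworded comment is the most significant bit of the 8-bit opcode field, i.e. 0x80, which is why masking with 0x7f strips it. With an invented example value:

    u8 opcode = 0x8c;   /* hypothetical VLAN-tagged completion */
    opcode &= 0x7f;     /* -> 0x0c, the documented opcode */

Only the 0x7f mask comes from the code; the 0x8c is illustrative.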
@@ -2242,22 +2153,18 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, | |||
2242 | net_rsp); | 2153 | net_rsp); |
2243 | (*rx_cleaned)++; | 2154 | (*rx_cleaned)++; |
2244 | break; | 2155 | break; |
2245 | default: | 2156 | default: { |
2246 | { | 2157 | u32 *tmp = (u32 *)net_rsp; |
2247 | u32 *tmp = (u32 *) net_rsp; | 2158 | netdev_err(ndev, |
2248 | printk(KERN_ERR PFX | 2159 | "Hit default case, not handled!\n" |
2249 | "%s: Hit default case, not " | 2160 | " dropping the packet, opcode = %x\n" |
2250 | "handled!\n" | 2161 | "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", |
2251 | " dropping the packet, opcode = " | 2162 | net_rsp->opcode, |
2252 | "%x.\n", | 2163 | (unsigned long int)tmp[0], |
2253 | ndev->name, net_rsp->opcode); | 2164 | (unsigned long int)tmp[1], |
2254 | printk(KERN_ERR PFX | 2165 | (unsigned long int)tmp[2], |
2255 | "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", | 2166 | (unsigned long int)tmp[3]); |
2256 | (unsigned long int)tmp[0], | 2167 | } |
2257 | (unsigned long int)tmp[1], | ||
2258 | (unsigned long int)tmp[2], | ||
2259 | (unsigned long int)tmp[3]); | ||
2260 | } | ||
2261 | } | 2168 | } |
2262 | 2169 | ||
2263 | qdev->rsp_consumer_index++; | 2170 | qdev->rsp_consumer_index++; |
@@ -2280,7 +2187,8 @@ static int ql_poll(struct napi_struct *napi, int budget) | |||
2280 | struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); | 2187 | struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); |
2281 | int rx_cleaned = 0, tx_cleaned = 0; | 2188 | int rx_cleaned = 0, tx_cleaned = 0; |
2282 | unsigned long hw_flags; | 2189 | unsigned long hw_flags; |
2283 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 2190 | struct ql3xxx_port_registers __iomem *port_regs = |
2191 | qdev->mem_map_registers; | ||
2284 | 2192 | ||
2285 | ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); | 2193 | ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); |
2286 | 2194 | ||
@@ -2303,15 +2211,14 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | |||
2303 | 2211 | ||
2304 | struct net_device *ndev = dev_id; | 2212 | struct net_device *ndev = dev_id; |
2305 | struct ql3_adapter *qdev = netdev_priv(ndev); | 2213 | struct ql3_adapter *qdev = netdev_priv(ndev); |
2306 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 2214 | struct ql3xxx_port_registers __iomem *port_regs = |
2215 | qdev->mem_map_registers; | ||
2307 | u32 value; | 2216 | u32 value; |
2308 | int handled = 1; | 2217 | int handled = 1; |
2309 | u32 var; | 2218 | u32 var; |
2310 | 2219 | ||
2311 | port_regs = qdev->mem_map_registers; | 2220 | value = ql_read_common_reg_l(qdev, |
2312 | 2221 | &port_regs->CommonRegs.ispControlStatus); | |
2313 | value = | ||
2314 | ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); | ||
2315 | 2222 | ||
2316 | if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { | 2223 | if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { |
2317 | spin_lock(&qdev->adapter_lock); | 2224 | spin_lock(&qdev->adapter_lock); |
@@ -2319,7 +2226,7 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | |||
2319 | netif_carrier_off(qdev->ndev); | 2226 | netif_carrier_off(qdev->ndev); |
2320 | ql_disable_interrupts(qdev); | 2227 | ql_disable_interrupts(qdev); |
2321 | qdev->port_link_state = LS_DOWN; | 2228 | qdev->port_link_state = LS_DOWN; |
2322 | set_bit(QL_RESET_ACTIVE,&qdev->flags) ; | 2229 | set_bit(QL_RESET_ACTIVE, &qdev->flags) ; |
2323 | 2230 | ||
2324 | if (value & ISP_CONTROL_FE) { | 2231 | if (value & ISP_CONTROL_FE) { |
2325 | /* | 2232 | /* |
@@ -2328,69 +2235,53 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | |||
2328 | var = | 2235 | var = |
2329 | ql_read_page0_reg_l(qdev, | 2236 | ql_read_page0_reg_l(qdev, |
2330 | &port_regs->PortFatalErrStatus); | 2237 | &port_regs->PortFatalErrStatus); |
2331 | printk(KERN_WARNING PFX | 2238 | netdev_warn(ndev, |
2332 | "%s: Resetting chip. PortFatalErrStatus " | 2239 | "Resetting chip. PortFatalErrStatus register = 0x%x\n", |
2333 | "register = 0x%x\n", ndev->name, var); | 2240 | var); |
2334 | set_bit(QL_RESET_START,&qdev->flags) ; | 2241 | set_bit(QL_RESET_START, &qdev->flags) ; |
2335 | } else { | 2242 | } else { |
2336 | /* | 2243 | /* |
2337 | * Soft Reset Requested. | 2244 | * Soft Reset Requested. |
2338 | */ | 2245 | */ |
2339 | set_bit(QL_RESET_PER_SCSI,&qdev->flags) ; | 2246 | set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; |
2340 | printk(KERN_ERR PFX | 2247 | netdev_err(ndev, |
2341 | "%s: Another function issued a reset to the " | 2248 | "Another function issued a reset to the chip. ISR value = %x\n", |
2342 | "chip. ISR value = %x.\n", ndev->name, value); | 2249 | value); |
2343 | } | 2250 | } |
2344 | queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); | 2251 | queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); |
2345 | spin_unlock(&qdev->adapter_lock); | 2252 | spin_unlock(&qdev->adapter_lock); |
2346 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { | 2253 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { |
2347 | ql_disable_interrupts(qdev); | 2254 | ql_disable_interrupts(qdev); |
2348 | if (likely(napi_schedule_prep(&qdev->napi))) { | 2255 | if (likely(napi_schedule_prep(&qdev->napi))) |
2349 | __napi_schedule(&qdev->napi); | 2256 | __napi_schedule(&qdev->napi); |
2350 | } | 2257 | } else |
2351 | } else { | ||
2352 | return IRQ_NONE; | 2258 | return IRQ_NONE; |
2353 | } | ||
2354 | 2259 | ||
2355 | return IRQ_RETVAL(handled); | 2260 | return IRQ_RETVAL(handled); |
2356 | } | 2261 | } |
2357 | 2262 | ||
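The ISR's tail hands all completion work to NAPI: device interrupts are masked, then napi_schedule_prep()/__napi_schedule() queue the poll. The other half of the handshake lives in ql_poll(), whose opening appears in an earlier hunk; a generic sketch of that side, with the helper names invented for illustration:

    /* Hypothetical poll-side shape; the real ql_poll() uses
     * ql_tx_rx_clean() and driver-specific re-enable code. */
    static int example_poll(struct napi_struct *napi, int budget)
    {
            int done = clean_completions(napi, budget);  /* assumed helper */

            if (done < budget) {
                    napi_complete(napi);
                    reenable_hw_interrupts(napi);        /* assumed helper */
            }
            return done;
    }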
2358 | /* | 2263 | /* |
2359 | * Get the total number of segments needed for the | 2264 | * Get the total number of segments needed for the given number of fragments. |
2360 | * given number of fragments. This is necessary because | 2265 | * This is necessary because outbound address lists (OAL) will be used when |
2361 | * outbound address lists (OAL) will be used when more than | 2266 | * more than two frags are given. Each address list has 5 addr/len pairs. |
2362 | * two frags are given. Each address list has 5 addr/len | 2267 | * The 5th pair in each OAL is used to point to the next OAL if more frags |
2363 | * pairs. The 5th pair in each AOL is used to point to | 2268 | * are coming. That is why the frags:segment count ratio is not linear. |
2364 | * the next AOL if more frags are coming. | ||
2365 | * That is why the frags:segment count ratio is not linear. | ||
2366 | */ | 2269 | */ |
2367 | static int ql_get_seg_count(struct ql3_adapter *qdev, | 2270 | static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) |
2368 | unsigned short frags) | ||
2369 | { | 2271 | { |
2370 | if (qdev->device_id == QL3022_DEVICE_ID) | 2272 | if (qdev->device_id == QL3022_DEVICE_ID) |
2371 | return 1; | 2273 | return 1; |
2372 | 2274 | ||
2373 | switch(frags) { | 2275 | if (frags <= 2) |
2374 | case 0: return 1; /* just the skb->data seg */ | 2276 | return frags + 1; |
2375 | case 1: return 2; /* skb->data + 1 frag */ | 2277 | else if (frags <= 6) |
2376 | case 2: return 3; /* skb->data + 2 frags */ | 2278 | return frags + 2; |
2377 | case 3: return 5; /* skb->data + 1 frag + 1 AOL containting 2 frags */ | 2279 | else if (frags <= 10) |
2378 | case 4: return 6; | 2280 | return frags + 3; |
2379 | case 5: return 7; | 2281 | else if (frags <= 14) |
2380 | case 6: return 8; | 2282 | return frags + 4; |
2381 | case 7: return 10; | 2283 | else if (frags <= 18) |
2382 | case 8: return 11; | 2284 | return frags + 5; |
2383 | case 9: return 12; | ||
2384 | case 10: return 13; | ||
2385 | case 11: return 15; | ||
2386 | case 12: return 16; | ||
2387 | case 13: return 17; | ||
2388 | case 14: return 18; | ||
2389 | case 15: return 20; | ||
2390 | case 16: return 21; | ||
2391 | case 17: return 22; | ||
2392 | case 18: return 23; | ||
2393 | } | ||
2394 | return -1; | 2285 | return -1; |
2395 | } | 2286 | } |
2396 | 2287 | ||
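The 19-entry switch collapses into five range checks. The old table jumps by two at frags = 3, 7, 11 and 15 — exactly where another OAL must be opened and one of its five slots spent on a continuation pointer, so each new OAL costs one extra segment. A standalone self-check that the closed form reproduces the deleted table:

    #include <assert.h>

    static int seg_count(int frags)     /* the new arithmetic */
    {
            if (frags <= 2)  return frags + 1;
            if (frags <= 6)  return frags + 2;
            if (frags <= 10) return frags + 3;
            if (frags <= 14) return frags + 4;
            if (frags <= 18) return frags + 5;
            return -1;
    }

    int main(void)
    {
            /* values copied from the deleted switch, indexed by frags */
            static const int old_table[] = {
                    1, 2, 3, 5, 6, 7, 8, 10, 11, 12,
                    13, 15, 16, 17, 18, 20, 21, 22, 23
            };
            int frags;

            for (frags = 0; frags <= 18; frags++)
                    assert(seg_count(frags) == old_table[frags]);
            return 0;
    }

(The QL3022 early return is omitted here; per the code above, that chip always uses a single segment.)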
@@ -2413,8 +2304,8 @@ static void ql_hw_csum_setup(const struct sk_buff *skb, | |||
2413 | } | 2304 | } |
2414 | 2305 | ||
2415 | /* | 2306 | /* |
2416 | * Map the buffers for this transmit. This will return | 2307 | * Map the buffers for this transmit. |
2417 | * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. | 2308 | * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. |
2418 | */ | 2309 | */ |
2419 | static int ql_send_map(struct ql3_adapter *qdev, | 2310 | static int ql_send_map(struct ql3_adapter *qdev, |
2420 | struct ob_mac_iocb_req *mac_iocb_ptr, | 2311 | struct ob_mac_iocb_req *mac_iocb_ptr, |
@@ -2437,9 +2328,9 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2437 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); | 2328 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); |
2438 | 2329 | ||
2439 | err = pci_dma_mapping_error(qdev->pdev, map); | 2330 | err = pci_dma_mapping_error(qdev->pdev, map); |
2440 | if(err) { | 2331 | if (err) { |
2441 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 2332 | netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", |
2442 | qdev->ndev->name, err); | 2333 | err); |
2443 | 2334 | ||
2444 | return NETDEV_TX_BUSY; | 2335 | return NETDEV_TX_BUSY; |
2445 | } | 2336 | } |
@@ -2455,65 +2346,67 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2455 | if (seg_cnt == 1) { | 2346 | if (seg_cnt == 1) { |
2456 | /* Terminate the last segment. */ | 2347 | /* Terminate the last segment. */ |
2457 | oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); | 2348 | oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); |
2458 | } else { | 2349 | return NETDEV_TX_OK; |
2459 | oal = tx_cb->oal; | 2350 | } |
2460 | for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) { | 2351 | oal = tx_cb->oal; |
2461 | skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; | 2352 | for (completed_segs = 0; |
2462 | oal_entry++; | 2353 | completed_segs < frag_cnt; |
2463 | if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */ | 2354 | completed_segs++, seg++) { |
2464 | (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ | 2355 | skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; |
2465 | (seg == 12 && seg_cnt > 13) || /* but necessary. */ | 2356 | oal_entry++; |
2466 | (seg == 17 && seg_cnt > 18)) { | 2357 | /* |
2467 | /* Continuation entry points to outbound address list. */ | 2358 | * Check for continuation requirements. |
2468 | map = pci_map_single(qdev->pdev, oal, | 2359 | * It's strange but necessary. |
2469 | sizeof(struct oal), | 2360 | * Continuation entry points to outbound address list. |
2470 | PCI_DMA_TODEVICE); | 2361 | */ |
2471 | 2362 | if ((seg == 2 && seg_cnt > 3) || | |
2472 | err = pci_dma_mapping_error(qdev->pdev, map); | 2363 | (seg == 7 && seg_cnt > 8) || |
2473 | if(err) { | 2364 | (seg == 12 && seg_cnt > 13) || |
2474 | 2365 | (seg == 17 && seg_cnt > 18)) { | |
2475 | printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", | 2366 | map = pci_map_single(qdev->pdev, oal, |
2476 | qdev->ndev->name, err); | 2367 | sizeof(struct oal), |
2477 | goto map_error; | 2368 | PCI_DMA_TODEVICE); |
2478 | } | ||
2479 | |||
2480 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); | ||
2481 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); | ||
2482 | oal_entry->len = | ||
2483 | cpu_to_le32(sizeof(struct oal) | | ||
2484 | OAL_CONT_ENTRY); | ||
2485 | dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, | ||
2486 | map); | ||
2487 | dma_unmap_len_set(&tx_cb->map[seg], maplen, | ||
2488 | sizeof(struct oal)); | ||
2489 | oal_entry = (struct oal_entry *)oal; | ||
2490 | oal++; | ||
2491 | seg++; | ||
2492 | } | ||
2493 | |||
2494 | map = | ||
2495 | pci_map_page(qdev->pdev, frag->page, | ||
2496 | frag->page_offset, frag->size, | ||
2497 | PCI_DMA_TODEVICE); | ||
2498 | 2369 | ||
2499 | err = pci_dma_mapping_error(qdev->pdev, map); | 2370 | err = pci_dma_mapping_error(qdev->pdev, map); |
2500 | if(err) { | 2371 | if (err) { |
2501 | printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", | 2372 | netdev_err(qdev->ndev, |
2502 | qdev->ndev->name, err); | 2373 | "PCI mapping outbound address list with error: %d\n", |
2374 | err); | ||
2503 | goto map_error; | 2375 | goto map_error; |
2504 | } | 2376 | } |
2505 | 2377 | ||
2506 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); | 2378 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); |
2507 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); | 2379 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); |
2508 | oal_entry->len = cpu_to_le32(frag->size); | 2380 | oal_entry->len = cpu_to_le32(sizeof(struct oal) | |
2381 | OAL_CONT_ENTRY); | ||
2509 | dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); | 2382 | dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); |
2510 | dma_unmap_len_set(&tx_cb->map[seg], maplen, | 2383 | dma_unmap_len_set(&tx_cb->map[seg], maplen, |
2511 | frag->size); | 2384 | sizeof(struct oal)); |
2385 | oal_entry = (struct oal_entry *)oal; | ||
2386 | oal++; | ||
2387 | seg++; | ||
2388 | } | ||
2389 | |||
2390 | map = pci_map_page(qdev->pdev, frag->page, | ||
2391 | frag->page_offset, frag->size, | ||
2392 | PCI_DMA_TODEVICE); | ||
2393 | |||
2394 | err = pci_dma_mapping_error(qdev->pdev, map); | ||
2395 | if (err) { | ||
2396 | netdev_err(qdev->ndev, | ||
2397 | "PCI mapping frags failed with error: %d\n", | ||
2398 | err); | ||
2399 | goto map_error; | ||
2512 | } | 2400 | } |
2513 | /* Terminate the last segment. */ | ||
2514 | oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); | ||
2515 | } | ||
2516 | 2401 | ||
2402 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); | ||
2403 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); | ||
2404 | oal_entry->len = cpu_to_le32(frag->size); | ||
2405 | dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); | ||
2406 | dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size); | ||
2407 | } | ||
2408 | /* Terminate the last segment. */ | ||
2409 | oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); | ||
2517 | return NETDEV_TX_OK; | 2410 | return NETDEV_TX_OK; |
2518 | 2411 | ||
2519 | map_error: | 2412 | map_error: |
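The restructure above is behavior-preserving: the single-segment case now returns early instead of pushing the whole fragment walk into an else branch, which pulls the loop back one indentation level. The new control flow, reduced to the lines of this hunk:

	if (seg_cnt == 1) {
		/* Terminate the last segment. */
		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
		return NETDEV_TX_OK;
	}
	oal = tx_cb->oal;
	/* the multi-fragment loop follows, one level shallower */

The "strange but necessary" tests at seg == 2, 7, 12 and 17 reflect the descriptor layout: the IOCB carries only a few inline address elements, and the stride of five suggests each out-of-line struct oal holds five more, so a continuation entry pointing at the next OAL must be emitted before the current block runs out.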
@@ -2525,13 +2418,18 @@ map_error: | |||
2525 | seg = 1; | 2418 | seg = 1; |
2526 | oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; | 2419 | oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; |
2527 | oal = tx_cb->oal; | 2420 | oal = tx_cb->oal; |
2528 | for (i=0; i<completed_segs; i++,seg++) { | 2421 | for (i = 0; i < completed_segs; i++, seg++) { |
2529 | oal_entry++; | 2422 | oal_entry++; |
2530 | 2423 | ||
2531 | if((seg == 2 && seg_cnt > 3) || /* Check for continuation */ | 2424 | /* |
2532 | (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ | 2425 | * Check for continuation requirements. |
2533 | (seg == 12 && seg_cnt > 13) || /* but necessary. */ | 2426 | * It's strange but necessary. |
2534 | (seg == 17 && seg_cnt > 18)) { | 2427 | */ |
2428 | |||
2429 | if ((seg == 2 && seg_cnt > 3) || | ||
2430 | (seg == 7 && seg_cnt > 8) || | ||
2431 | (seg == 12 && seg_cnt > 13) || | ||
2432 | (seg == 17 && seg_cnt > 18)) { | ||
2535 | pci_unmap_single(qdev->pdev, | 2433 | pci_unmap_single(qdev->pdev, |
2536 | dma_unmap_addr(&tx_cb->map[seg], mapaddr), | 2434 | dma_unmap_addr(&tx_cb->map[seg], mapaddr), |
2537 | dma_unmap_len(&tx_cb->map[seg], maplen), | 2435 | dma_unmap_len(&tx_cb->map[seg], maplen), |
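The map_error unwind mirrors the mapping loop: completed_segs counts the fragments that were mapped before the failure, and the loop replays the same seg arithmetic so continuation entries are released with pci_unmap_single() and fragments with pci_unmap_page(). A condensed sketch of the full loop; the fragment unmap is cut off by the hunk above, so those lines are reconstructed here from the mapping side, not quoted:

	for (i = 0; i < completed_segs; i++, seg++) {
		oal_entry++;
		/* continuation entries were pci_map_single()'d */
		if ((seg == 2 && seg_cnt > 3) ||
		    (seg == 7 && seg_cnt > 8) ||
		    (seg == 12 && seg_cnt > 13) ||
		    (seg == 17 && seg_cnt > 18)) {
			pci_unmap_single(qdev->pdev,
				dma_unmap_addr(&tx_cb->map[seg], mapaddr),
				dma_unmap_len(&tx_cb->map[seg], maplen),
				PCI_DMA_TODEVICE);
			oal++;
			seg++;
		}
		pci_unmap_page(qdev->pdev,
			       dma_unmap_addr(&tx_cb->map[seg], mapaddr),
			       dma_unmap_len(&tx_cb->map[seg], maplen),
			       PCI_DMA_TODEVICE);
	}

After the loop the head-of-skb mapping (tx_cb->map[0]) is unmapped as well and NETDEV_TX_BUSY is returned, so exactly the mappings that were created get torn down.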
@@ -2570,19 +2468,20 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb, | |||
2570 | struct net_device *ndev) | 2468 | struct net_device *ndev) |
2571 | { | 2469 | { |
2572 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); | 2470 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); |
2573 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 2471 | struct ql3xxx_port_registers __iomem *port_regs = |
2472 | qdev->mem_map_registers; | ||
2574 | struct ql_tx_buf_cb *tx_cb; | 2473 | struct ql_tx_buf_cb *tx_cb; |
2575 | u32 tot_len = skb->len; | 2474 | u32 tot_len = skb->len; |
2576 | struct ob_mac_iocb_req *mac_iocb_ptr; | 2475 | struct ob_mac_iocb_req *mac_iocb_ptr; |
2577 | 2476 | ||
2578 | if (unlikely(atomic_read(&qdev->tx_count) < 2)) { | 2477 | if (unlikely(atomic_read(&qdev->tx_count) < 2)) |
2579 | return NETDEV_TX_BUSY; | 2478 | return NETDEV_TX_BUSY; |
2580 | } | ||
2581 | 2479 | ||
2582 | tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; | 2480 | tx_cb = &qdev->tx_buf[qdev->req_producer_index]; |
2583 | if((tx_cb->seg_count = ql_get_seg_count(qdev, | 2481 | tx_cb->seg_count = ql_get_seg_count(qdev, |
2584 | (skb_shinfo(skb)->nr_frags))) == -1) { | 2482 | skb_shinfo(skb)->nr_frags); |
2585 | printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); | 2483 | if (tx_cb->seg_count == -1) { |
2484 | netdev_err(ndev, "%s: invalid segment count!\n", __func__); | ||
2586 | return NETDEV_TX_OK; | 2485 | return NETDEV_TX_OK; |
2587 | } | 2486 | } |
2588 | 2487 | ||
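Alongside the message conversion, this hunk lifts an assignment out of the if condition, a construction checkpatch.pl warns about; one statement makes the call, the next tests the result:

	tx_cb->seg_count = ql_get_seg_count(qdev,
					    skb_shinfo(skb)->nr_frags);
	if (tx_cb->seg_count == -1) {
		netdev_err(ndev, "%s: invalid segment count!\n", __func__);
		return NETDEV_TX_OK;
	}

The same split is applied to request_irq() and ql_wait_for_drvr_lock() in the ql_adapter_up() hunk further down.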
@@ -2598,8 +2497,8 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb, | |||
2598 | skb->ip_summed == CHECKSUM_PARTIAL) | 2497 | skb->ip_summed == CHECKSUM_PARTIAL) |
2599 | ql_hw_csum_setup(skb, mac_iocb_ptr); | 2498 | ql_hw_csum_setup(skb, mac_iocb_ptr); |
2600 | 2499 | ||
2601 | if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) { | 2500 | if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { |
2602 | printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__); | 2501 | netdev_err(ndev, "%s: Could not map the segments!\n", __func__); |
2603 | return NETDEV_TX_BUSY; | 2502 | return NETDEV_TX_BUSY; |
2604 | } | 2503 | } |
2605 | 2504 | ||
@@ -2612,9 +2511,9 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb, | |||
2612 | &port_regs->CommonRegs.reqQProducerIndex, | 2511 | &port_regs->CommonRegs.reqQProducerIndex, |
2613 | qdev->req_producer_index); | 2512 | qdev->req_producer_index); |
2614 | 2513 | ||
2615 | if (netif_msg_tx_queued(qdev)) | 2514 | netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, |
2616 | printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n", | 2515 | "tx queued, slot %d, len %d\n", |
2617 | ndev->name, qdev->req_producer_index, skb->len); | 2516 | qdev->req_producer_index, skb->len); |
2618 | 2517 | ||
2619 | atomic_dec(&qdev->tx_count); | 2518 | atomic_dec(&qdev->tx_count); |
2620 | return NETDEV_TX_OK; | 2519 | return NETDEV_TX_OK; |
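netif_printk() folds the message-level check and the device-prefixed print into one call. The macro takes the private struct whose msg_enable field the netif_msg_* helpers test, the message class, the printk level, and the net_device:

	/* before: explicit gate plus hand-built prefix */
	if (netif_msg_tx_queued(qdev))
		printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
		       ndev->name, qdev->req_producer_index, skb->len);

	/* after: gate, prefix and print in one expression */
	netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
		     "tx queued, slot %d, len %d\n",
		     qdev->req_producer_index, skb->len);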
@@ -2632,8 +2531,7 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) | |||
2632 | 2531 | ||
2633 | if ((qdev->req_q_virt_addr == NULL) || | 2532 | if ((qdev->req_q_virt_addr == NULL) || |
2634 | LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { | 2533 | LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { |
2635 | printk(KERN_ERR PFX "%s: reqQ failed.\n", | 2534 | netdev_err(qdev->ndev, "reqQ failed\n"); |
2636 | qdev->ndev->name); | ||
2637 | return -ENOMEM; | 2535 | return -ENOMEM; |
2638 | } | 2536 | } |
2639 | 2537 | ||
@@ -2646,25 +2544,22 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) | |||
2646 | 2544 | ||
2647 | if ((qdev->rsp_q_virt_addr == NULL) || | 2545 | if ((qdev->rsp_q_virt_addr == NULL) || |
2648 | LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { | 2546 | LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { |
2649 | printk(KERN_ERR PFX | 2547 | netdev_err(qdev->ndev, "rspQ allocation failed\n"); |
2650 | "%s: rspQ allocation failed\n", | ||
2651 | qdev->ndev->name); | ||
2652 | pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, | 2548 | pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, |
2653 | qdev->req_q_virt_addr, | 2549 | qdev->req_q_virt_addr, |
2654 | qdev->req_q_phy_addr); | 2550 | qdev->req_q_phy_addr); |
2655 | return -ENOMEM; | 2551 | return -ENOMEM; |
2656 | } | 2552 | } |
2657 | 2553 | ||
2658 | set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); | 2554 | set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); |
2659 | 2555 | ||
2660 | return 0; | 2556 | return 0; |
2661 | } | 2557 | } |
2662 | 2558 | ||
2663 | static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) | 2559 | static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) |
2664 | { | 2560 | { |
2665 | if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) { | 2561 | if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { |
2666 | printk(KERN_INFO PFX | 2562 | netdev_info(qdev->ndev, "Already done\n"); |
2667 | "%s: Already done.\n", qdev->ndev->name); | ||
2668 | return; | 2563 | return; |
2669 | } | 2564 | } |
2670 | 2565 | ||
@@ -2680,34 +2575,34 @@ static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) | |||
2680 | 2575 | ||
2681 | qdev->rsp_q_virt_addr = NULL; | 2576 | qdev->rsp_q_virt_addr = NULL; |
2682 | 2577 | ||
2683 | clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); | 2578 | clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); |
2684 | } | 2579 | } |
2685 | 2580 | ||
2686 | static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) | 2581 | static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) |
2687 | { | 2582 | { |
2688 | /* Create Large Buffer Queue */ | 2583 | /* Create Large Buffer Queue */ |
2689 | qdev->lrg_buf_q_size = | 2584 | qdev->lrg_buf_q_size = |
2690 | qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); | 2585 | qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); |
2691 | if (qdev->lrg_buf_q_size < PAGE_SIZE) | 2586 | if (qdev->lrg_buf_q_size < PAGE_SIZE) |
2692 | qdev->lrg_buf_q_alloc_size = PAGE_SIZE; | 2587 | qdev->lrg_buf_q_alloc_size = PAGE_SIZE; |
2693 | else | 2588 | else |
2694 | qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; | 2589 | qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; |
2695 | 2590 | ||
2696 | qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL); | 2591 | qdev->lrg_buf = |
2592 | kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), | ||
2593 | GFP_KERNEL); | ||
2697 | if (qdev->lrg_buf == NULL) { | 2594 | if (qdev->lrg_buf == NULL) { |
2698 | printk(KERN_ERR PFX | 2595 | netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n"); |
2699 | "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name); | ||
2700 | return -ENOMEM; | 2596 | return -ENOMEM; |
2701 | } | 2597 | } |
2702 | 2598 | ||
2703 | qdev->lrg_buf_q_alloc_virt_addr = | 2599 | qdev->lrg_buf_q_alloc_virt_addr = |
2704 | pci_alloc_consistent(qdev->pdev, | 2600 | pci_alloc_consistent(qdev->pdev, |
2705 | qdev->lrg_buf_q_alloc_size, | 2601 | qdev->lrg_buf_q_alloc_size, |
2706 | &qdev->lrg_buf_q_alloc_phy_addr); | 2602 | &qdev->lrg_buf_q_alloc_phy_addr); |
2707 | 2603 | ||
2708 | if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { | 2604 | if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { |
2709 | printk(KERN_ERR PFX | 2605 | netdev_err(qdev->ndev, "lBufQ failed\n"); |
2710 | "%s: lBufQ failed\n", qdev->ndev->name); | ||
2711 | return -ENOMEM; | 2606 | return -ENOMEM; |
2712 | } | 2607 | } |
2713 | qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; | 2608 | qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; |
@@ -2715,21 +2610,19 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) | |||
2715 | 2610 | ||
2716 | /* Create Small Buffer Queue */ | 2611 | /* Create Small Buffer Queue */ |
2717 | qdev->small_buf_q_size = | 2612 | qdev->small_buf_q_size = |
2718 | NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); | 2613 | NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); |
2719 | if (qdev->small_buf_q_size < PAGE_SIZE) | 2614 | if (qdev->small_buf_q_size < PAGE_SIZE) |
2720 | qdev->small_buf_q_alloc_size = PAGE_SIZE; | 2615 | qdev->small_buf_q_alloc_size = PAGE_SIZE; |
2721 | else | 2616 | else |
2722 | qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; | 2617 | qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; |
2723 | 2618 | ||
2724 | qdev->small_buf_q_alloc_virt_addr = | 2619 | qdev->small_buf_q_alloc_virt_addr = |
2725 | pci_alloc_consistent(qdev->pdev, | 2620 | pci_alloc_consistent(qdev->pdev, |
2726 | qdev->small_buf_q_alloc_size, | 2621 | qdev->small_buf_q_alloc_size, |
2727 | &qdev->small_buf_q_alloc_phy_addr); | 2622 | &qdev->small_buf_q_alloc_phy_addr); |
2728 | 2623 | ||
2729 | if (qdev->small_buf_q_alloc_virt_addr == NULL) { | 2624 | if (qdev->small_buf_q_alloc_virt_addr == NULL) { |
2730 | printk(KERN_ERR PFX | 2625 | netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); |
2731 | "%s: Small Buffer Queue allocation failed.\n", | ||
2732 | qdev->ndev->name); | ||
2733 | pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, | 2626 | pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, |
2734 | qdev->lrg_buf_q_alloc_virt_addr, | 2627 | qdev->lrg_buf_q_alloc_virt_addr, |
2735 | qdev->lrg_buf_q_alloc_phy_addr); | 2628 | qdev->lrg_buf_q_alloc_phy_addr); |
@@ -2738,18 +2631,17 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) | |||
2738 | 2631 | ||
2739 | qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; | 2632 | qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; |
2740 | qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; | 2633 | qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; |
2741 | set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); | 2634 | set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); |
2742 | return 0; | 2635 | return 0; |
2743 | } | 2636 | } |
2744 | 2637 | ||
2745 | static void ql_free_buffer_queues(struct ql3_adapter *qdev) | 2638 | static void ql_free_buffer_queues(struct ql3_adapter *qdev) |
2746 | { | 2639 | { |
2747 | if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) { | 2640 | if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { |
2748 | printk(KERN_INFO PFX | 2641 | netdev_info(qdev->ndev, "Already done\n"); |
2749 | "%s: Already done.\n", qdev->ndev->name); | ||
2750 | return; | 2642 | return; |
2751 | } | 2643 | } |
2752 | if(qdev->lrg_buf) kfree(qdev->lrg_buf); | 2644 | kfree(qdev->lrg_buf); |
2753 | pci_free_consistent(qdev->pdev, | 2645 | pci_free_consistent(qdev->pdev, |
2754 | qdev->lrg_buf_q_alloc_size, | 2646 | qdev->lrg_buf_q_alloc_size, |
2755 | qdev->lrg_buf_q_alloc_virt_addr, | 2647 | qdev->lrg_buf_q_alloc_virt_addr, |
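The one-liner `if(qdev->lrg_buf) kfree(qdev->lrg_buf);` collapses to a bare call because kfree(NULL) is defined to be a no-op, so the guard is pure noise (and another checkpatch warning). The ql_free_send_free_list() hunk below applies the same cleanup to tx_cb->oal:

	/* before */
	if (qdev->lrg_buf)
		kfree(qdev->lrg_buf);

	/* after: kfree(NULL) is a safe no-op */
	kfree(qdev->lrg_buf);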
@@ -2764,7 +2656,7 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev) | |||
2764 | 2656 | ||
2765 | qdev->small_buf_q_virt_addr = NULL; | 2657 | qdev->small_buf_q_virt_addr = NULL; |
2766 | 2658 | ||
2767 | clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); | 2659 | clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); |
2768 | } | 2660 | } |
2769 | 2661 | ||
2770 | static int ql_alloc_small_buffers(struct ql3_adapter *qdev) | 2662 | static int ql_alloc_small_buffers(struct ql3_adapter *qdev) |
@@ -2774,18 +2666,16 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev) | |||
2774 | 2666 | ||
2775 | /* Currently we allocate one chunk of memory and use it for small buffers */ | 2667 | /* Currently we allocate one chunk of memory and use it for small buffers */ |
2776 | qdev->small_buf_total_size = | 2668 | qdev->small_buf_total_size = |
2777 | (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * | 2669 | (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * |
2778 | QL_SMALL_BUFFER_SIZE); | 2670 | QL_SMALL_BUFFER_SIZE); |
2779 | 2671 | ||
2780 | qdev->small_buf_virt_addr = | 2672 | qdev->small_buf_virt_addr = |
2781 | pci_alloc_consistent(qdev->pdev, | 2673 | pci_alloc_consistent(qdev->pdev, |
2782 | qdev->small_buf_total_size, | 2674 | qdev->small_buf_total_size, |
2783 | &qdev->small_buf_phy_addr); | 2675 | &qdev->small_buf_phy_addr); |
2784 | 2676 | ||
2785 | if (qdev->small_buf_virt_addr == NULL) { | 2677 | if (qdev->small_buf_virt_addr == NULL) { |
2786 | printk(KERN_ERR PFX | 2678 | netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); |
2787 | "%s: Failed to get small buffer memory.\n", | ||
2788 | qdev->ndev->name); | ||
2789 | return -ENOMEM; | 2679 | return -ENOMEM; |
2790 | } | 2680 | } |
2791 | 2681 | ||
@@ -2804,15 +2694,14 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev) | |||
2804 | small_buf_q_entry++; | 2694 | small_buf_q_entry++; |
2805 | } | 2695 | } |
2806 | qdev->small_buf_index = 0; | 2696 | qdev->small_buf_index = 0; |
2807 | set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags); | 2697 | set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); |
2808 | return 0; | 2698 | return 0; |
2809 | } | 2699 | } |
2810 | 2700 | ||
2811 | static void ql_free_small_buffers(struct ql3_adapter *qdev) | 2701 | static void ql_free_small_buffers(struct ql3_adapter *qdev) |
2812 | { | 2702 | { |
2813 | if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) { | 2703 | if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { |
2814 | printk(KERN_INFO PFX | 2704 | netdev_info(qdev->ndev, "Already done\n"); |
2815 | "%s: Already done.\n", qdev->ndev->name); | ||
2816 | return; | 2705 | return; |
2817 | } | 2706 | } |
2818 | if (qdev->small_buf_virt_addr != NULL) { | 2707 | if (qdev->small_buf_virt_addr != NULL) { |
@@ -2874,11 +2763,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) | |||
2874 | qdev->lrg_buffer_len); | 2763 | qdev->lrg_buffer_len); |
2875 | if (unlikely(!skb)) { | 2764 | if (unlikely(!skb)) { |
2876 | /* Better luck next round */ | 2765 | /* Better luck next round */ |
2877 | printk(KERN_ERR PFX | 2766 | netdev_err(qdev->ndev, |
2878 | "%s: large buff alloc failed, " | 2767 | "large buff alloc failed for %d bytes at index %d\n", |
2879 | "for %d bytes at index %d.\n", | 2768 | qdev->lrg_buffer_len * 2, i); |
2880 | qdev->ndev->name, | ||
2881 | qdev->lrg_buffer_len * 2, i); | ||
2882 | ql_free_large_buffers(qdev); | 2769 | ql_free_large_buffers(qdev); |
2883 | return -ENOMEM; | 2770 | return -ENOMEM; |
2884 | } else { | 2771 | } else { |
@@ -2899,9 +2786,10 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) | |||
2899 | PCI_DMA_FROMDEVICE); | 2786 | PCI_DMA_FROMDEVICE); |
2900 | 2787 | ||
2901 | err = pci_dma_mapping_error(qdev->pdev, map); | 2788 | err = pci_dma_mapping_error(qdev->pdev, map); |
2902 | if(err) { | 2789 | if (err) { |
2903 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 2790 | netdev_err(qdev->ndev, |
2904 | qdev->ndev->name, err); | 2791 | "PCI mapping failed with error: %d\n", |
2792 | err); | ||
2905 | ql_free_large_buffers(qdev); | 2793 | ql_free_large_buffers(qdev); |
2906 | return -ENOMEM; | 2794 | return -ENOMEM; |
2907 | } | 2795 | } |
@@ -2926,10 +2814,8 @@ static void ql_free_send_free_list(struct ql3_adapter *qdev) | |||
2926 | 2814 | ||
2927 | tx_cb = &qdev->tx_buf[0]; | 2815 | tx_cb = &qdev->tx_buf[0]; |
2928 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { | 2816 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { |
2929 | if (tx_cb->oal) { | 2817 | kfree(tx_cb->oal); |
2930 | kfree(tx_cb->oal); | 2818 | tx_cb->oal = NULL; |
2931 | tx_cb->oal = NULL; | ||
2932 | } | ||
2933 | tx_cb++; | 2819 | tx_cb++; |
2934 | } | 2820 | } |
2935 | } | 2821 | } |
@@ -2938,8 +2824,7 @@ static int ql_create_send_free_list(struct ql3_adapter *qdev) | |||
2938 | { | 2824 | { |
2939 | struct ql_tx_buf_cb *tx_cb; | 2825 | struct ql_tx_buf_cb *tx_cb; |
2940 | int i; | 2826 | int i; |
2941 | struct ob_mac_iocb_req *req_q_curr = | 2827 | struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; |
2942 | qdev->req_q_virt_addr; | ||
2943 | 2828 | ||
2944 | /* Create free list of transmit buffers */ | 2829 | /* Create free list of transmit buffers */ |
2945 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { | 2830 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { |
@@ -2960,23 +2845,22 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev) | |||
2960 | if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { | 2845 | if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { |
2961 | qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; | 2846 | qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; |
2962 | qdev->lrg_buffer_len = NORMAL_MTU_SIZE; | 2847 | qdev->lrg_buffer_len = NORMAL_MTU_SIZE; |
2963 | } | 2848 | } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { |
2964 | else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { | ||
2965 | /* | 2849 | /* |
2966 | * Bigger buffers, so fewer of them. | 2850 | * Bigger buffers, so fewer of them. |
2967 | */ | 2851 | */ |
2968 | qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; | 2852 | qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; |
2969 | qdev->lrg_buffer_len = JUMBO_MTU_SIZE; | 2853 | qdev->lrg_buffer_len = JUMBO_MTU_SIZE; |
2970 | } else { | 2854 | } else { |
2971 | printk(KERN_ERR PFX | 2855 | netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", |
2972 | "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n", | 2856 | qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); |
2973 | qdev->ndev->name); | ||
2974 | return -ENOMEM; | 2857 | return -ENOMEM; |
2975 | } | 2858 | } |
2976 | qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; | 2859 | qdev->num_large_buffers = |
2860 | qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; | ||
2977 | qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; | 2861 | qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; |
2978 | qdev->max_frame_size = | 2862 | qdev->max_frame_size = |
2979 | (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; | 2863 | (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; |
2980 | 2864 | ||
2981 | /* | 2865 | /* |
2982 | * First allocate a page of shared memory and use it for shadow | 2866 | * First allocate a page of shared memory and use it for shadow |
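Beyond the printk conversion, the MTU rejection message in this hunk is upgraded: instead of the hard-coded "Only 1500 and 9000 are accepted" it now reports the offending value and derives the accepted sizes from the same constants the code compares against, so the text cannot drift out of sync with the logic:

	netdev_err(qdev->ndev,
		   "Invalid mtu size: %d. Only %d and %d are accepted.\n",
		   qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);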
@@ -2984,51 +2868,44 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev) | |||
2984 | * Network Completion Queue Producer Index Register | 2868 | * Network Completion Queue Producer Index Register |
2985 | */ | 2869 | */ |
2986 | qdev->shadow_reg_virt_addr = | 2870 | qdev->shadow_reg_virt_addr = |
2987 | pci_alloc_consistent(qdev->pdev, | 2871 | pci_alloc_consistent(qdev->pdev, |
2988 | PAGE_SIZE, &qdev->shadow_reg_phy_addr); | 2872 | PAGE_SIZE, &qdev->shadow_reg_phy_addr); |
2989 | 2873 | ||
2990 | if (qdev->shadow_reg_virt_addr != NULL) { | 2874 | if (qdev->shadow_reg_virt_addr != NULL) { |
2991 | qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr; | 2875 | qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr; |
2992 | qdev->req_consumer_index_phy_addr_high = | 2876 | qdev->req_consumer_index_phy_addr_high = |
2993 | MS_64BITS(qdev->shadow_reg_phy_addr); | 2877 | MS_64BITS(qdev->shadow_reg_phy_addr); |
2994 | qdev->req_consumer_index_phy_addr_low = | 2878 | qdev->req_consumer_index_phy_addr_low = |
2995 | LS_64BITS(qdev->shadow_reg_phy_addr); | 2879 | LS_64BITS(qdev->shadow_reg_phy_addr); |
2996 | 2880 | ||
2997 | qdev->prsp_producer_index = | 2881 | qdev->prsp_producer_index = |
2998 | (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); | 2882 | (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); |
2999 | qdev->rsp_producer_index_phy_addr_high = | 2883 | qdev->rsp_producer_index_phy_addr_high = |
3000 | qdev->req_consumer_index_phy_addr_high; | 2884 | qdev->req_consumer_index_phy_addr_high; |
3001 | qdev->rsp_producer_index_phy_addr_low = | 2885 | qdev->rsp_producer_index_phy_addr_low = |
3002 | qdev->req_consumer_index_phy_addr_low + 8; | 2886 | qdev->req_consumer_index_phy_addr_low + 8; |
3003 | } else { | 2887 | } else { |
3004 | printk(KERN_ERR PFX | 2888 | netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); |
3005 | "%s: shadowReg Alloc failed.\n", qdev->ndev->name); | ||
3006 | return -ENOMEM; | 2889 | return -ENOMEM; |
3007 | } | 2890 | } |
3008 | 2891 | ||
3009 | if (ql_alloc_net_req_rsp_queues(qdev) != 0) { | 2892 | if (ql_alloc_net_req_rsp_queues(qdev) != 0) { |
3010 | printk(KERN_ERR PFX | 2893 | netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); |
3011 | "%s: ql_alloc_net_req_rsp_queues failed.\n", | ||
3012 | qdev->ndev->name); | ||
3013 | goto err_req_rsp; | 2894 | goto err_req_rsp; |
3014 | } | 2895 | } |
3015 | 2896 | ||
3016 | if (ql_alloc_buffer_queues(qdev) != 0) { | 2897 | if (ql_alloc_buffer_queues(qdev) != 0) { |
3017 | printk(KERN_ERR PFX | 2898 | netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); |
3018 | "%s: ql_alloc_buffer_queues failed.\n", | ||
3019 | qdev->ndev->name); | ||
3020 | goto err_buffer_queues; | 2899 | goto err_buffer_queues; |
3021 | } | 2900 | } |
3022 | 2901 | ||
3023 | if (ql_alloc_small_buffers(qdev) != 0) { | 2902 | if (ql_alloc_small_buffers(qdev) != 0) { |
3024 | printk(KERN_ERR PFX | 2903 | netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); |
3025 | "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name); | ||
3026 | goto err_small_buffers; | 2904 | goto err_small_buffers; |
3027 | } | 2905 | } |
3028 | 2906 | ||
3029 | if (ql_alloc_large_buffers(qdev) != 0) { | 2907 | if (ql_alloc_large_buffers(qdev) != 0) { |
3030 | printk(KERN_ERR PFX | 2908 | netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); |
3031 | "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name); | ||
3032 | goto err_small_buffers; | 2909 | goto err_small_buffers; |
3033 | } | 2910 | } |
3034 | 2911 | ||
@@ -3076,7 +2953,7 @@ static int ql_init_misc_registers(struct ql3_adapter *qdev) | |||
3076 | struct ql3xxx_local_ram_registers __iomem *local_ram = | 2953 | struct ql3xxx_local_ram_registers __iomem *local_ram = |
3077 | (void __iomem *)qdev->mem_map_registers; | 2954 | (void __iomem *)qdev->mem_map_registers; |
3078 | 2955 | ||
3079 | if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, | 2956 | if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, |
3080 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 2957 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * |
3081 | 2) << 4)) | 2958 | 2) << 4)) |
3082 | return -1; | 2959 | return -1; |
@@ -3132,18 +3009,20 @@ static int ql_init_misc_registers(struct ql3_adapter *qdev) | |||
3132 | static int ql_adapter_initialize(struct ql3_adapter *qdev) | 3009 | static int ql_adapter_initialize(struct ql3_adapter *qdev) |
3133 | { | 3010 | { |
3134 | u32 value; | 3011 | u32 value; |
3135 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 3012 | struct ql3xxx_port_registers __iomem *port_regs = |
3013 | qdev->mem_map_registers; | ||
3014 | u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
3136 | struct ql3xxx_host_memory_registers __iomem *hmem_regs = | 3015 | struct ql3xxx_host_memory_registers __iomem *hmem_regs = |
3137 | (void __iomem *)port_regs; | 3016 | (void __iomem *)port_regs; |
3138 | u32 delay = 10; | 3017 | u32 delay = 10; |
3139 | int status = 0; | 3018 | int status = 0; |
3140 | unsigned long hw_flags = 0; | 3019 | unsigned long hw_flags = 0; |
3141 | 3020 | ||
3142 | if(ql_mii_setup(qdev)) | 3021 | if (ql_mii_setup(qdev)) |
3143 | return -1; | 3022 | return -1; |
3144 | 3023 | ||
3145 | /* Bring the PHY out of reset */ | 3024 | /* Bring the PHY out of reset */ |
3146 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 3025 | ql_write_common_reg(qdev, spir, |
3147 | (ISP_SERIAL_PORT_IF_WE | | 3026 | (ISP_SERIAL_PORT_IF_WE | |
3148 | (ISP_SERIAL_PORT_IF_WE << 16))); | 3027 | (ISP_SERIAL_PORT_IF_WE << 16))); |
3149 | /* Give the PHY time to come out of reset. */ | 3028 | /* Give the PHY time to come out of reset. */ |
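The new spir local in ql_adapter_initialize() exists purely to keep lines under 80 columns: &port_regs->CommonRegs.serialPortInterfaceReg is long enough that every write through it used to overflow. Both serial-port writes in this function now go through the alias:

	u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Bring the PHY out of reset */
	ql_write_common_reg(qdev, spir,
			    (ISP_SERIAL_PORT_IF_WE |
			     (ISP_SERIAL_PORT_IF_WE << 16)));

(The register sits behind an __iomem mapping; the plain u32 * shown here mirrors the patch, which relies on ql_write_common_reg() for the annotated access.)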
@@ -3152,13 +3031,13 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3152 | netif_carrier_off(qdev->ndev); | 3031 | netif_carrier_off(qdev->ndev); |
3153 | 3032 | ||
3154 | /* V2 chip fix for ARS-39168. */ | 3033 | /* V2 chip fix for ARS-39168. */ |
3155 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 3034 | ql_write_common_reg(qdev, spir, |
3156 | (ISP_SERIAL_PORT_IF_SDE | | 3035 | (ISP_SERIAL_PORT_IF_SDE | |
3157 | (ISP_SERIAL_PORT_IF_SDE << 16))); | 3036 | (ISP_SERIAL_PORT_IF_SDE << 16))); |
3158 | 3037 | ||
3159 | /* Request Queue Registers */ | 3038 | /* Request Queue Registers */ |
3160 | *((u32 *) (qdev->preq_consumer_index)) = 0; | 3039 | *((u32 *)(qdev->preq_consumer_index)) = 0; |
3161 | atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES); | 3040 | atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); |
3162 | qdev->req_producer_index = 0; | 3041 | qdev->req_producer_index = 0; |
3163 | 3042 | ||
3164 | ql_write_page1_reg(qdev, | 3043 | ql_write_page1_reg(qdev, |
@@ -3208,7 +3087,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3208 | &hmem_regs->rxLargeQBaseAddrLow, | 3087 | &hmem_regs->rxLargeQBaseAddrLow, |
3209 | LS_64BITS(qdev->lrg_buf_q_phy_addr)); | 3088 | LS_64BITS(qdev->lrg_buf_q_phy_addr)); |
3210 | 3089 | ||
3211 | ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries); | 3090 | ql_write_page1_reg(qdev, |
3091 | &hmem_regs->rxLargeQLength, | ||
3092 | qdev->num_lbufq_entries); | ||
3212 | 3093 | ||
3213 | ql_write_page1_reg(qdev, | 3094 | ql_write_page1_reg(qdev, |
3214 | &hmem_regs->rxLargeBufferLength, | 3095 | &hmem_regs->rxLargeBufferLength, |
@@ -3258,7 +3139,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3258 | if ((value & PORT_STATUS_IC) == 0) { | 3139 | if ((value & PORT_STATUS_IC) == 0) { |
3259 | 3140 | ||
3260 | /* Chip has not been configured yet, so let it rip. */ | 3141 | /* Chip has not been configured yet, so let it rip. */ |
3261 | if(ql_init_misc_registers(qdev)) { | 3142 | if (ql_init_misc_registers(qdev)) { |
3262 | status = -1; | 3143 | status = -1; |
3263 | goto out; | 3144 | goto out; |
3264 | } | 3145 | } |
@@ -3268,7 +3149,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3268 | 3149 | ||
3269 | value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; | 3150 | value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; |
3270 | 3151 | ||
3271 | if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, | 3152 | if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, |
3272 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) | 3153 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) |
3273 | * 2) << 13)) { | 3154 | * 2) << 13)) { |
3274 | status = -1; | 3155 | status = -1; |
@@ -3291,7 +3172,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3291 | &port_regs->mac0MaxFrameLengthReg, | 3172 | &port_regs->mac0MaxFrameLengthReg, |
3292 | qdev->max_frame_size); | 3173 | qdev->max_frame_size); |
3293 | 3174 | ||
3294 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 3175 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
3295 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 3176 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * |
3296 | 2) << 7)) { | 3177 | 2) << 7)) { |
3297 | status = -1; | 3178 | status = -1; |
@@ -3353,8 +3234,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3353 | } while (--delay); | 3234 | } while (--delay); |
3354 | 3235 | ||
3355 | if (delay == 0) { | 3236 | if (delay == 0) { |
3356 | printk(KERN_ERR PFX | 3237 | netdev_err(qdev->ndev, "Hw Initialization timeout\n"); |
3357 | "%s: Hw Initialization timeout.\n", qdev->ndev->name); | ||
3358 | status = -1; | 3238 | status = -1; |
3359 | goto out; | 3239 | goto out; |
3360 | } | 3240 | } |
@@ -3385,7 +3265,8 @@ out: | |||
3385 | */ | 3265 | */ |
3386 | static int ql_adapter_reset(struct ql3_adapter *qdev) | 3266 | static int ql_adapter_reset(struct ql3_adapter *qdev) |
3387 | { | 3267 | { |
3388 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 3268 | struct ql3xxx_port_registers __iomem *port_regs = |
3269 | qdev->mem_map_registers; | ||
3389 | int status = 0; | 3270 | int status = 0; |
3390 | u16 value; | 3271 | u16 value; |
3391 | int max_wait_time; | 3272 | int max_wait_time; |
@@ -3396,17 +3277,14 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) | |||
3396 | /* | 3277 | /* |
3397 | * Issue soft reset to chip. | 3278 | * Issue soft reset to chip. |
3398 | */ | 3279 | */ |
3399 | printk(KERN_DEBUG PFX | 3280 | netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); |
3400 | "%s: Issue soft reset to chip.\n", | ||
3401 | qdev->ndev->name); | ||
3402 | ql_write_common_reg(qdev, | 3281 | ql_write_common_reg(qdev, |
3403 | &port_regs->CommonRegs.ispControlStatus, | 3282 | &port_regs->CommonRegs.ispControlStatus, |
3404 | ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); | 3283 | ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); |
3405 | 3284 | ||
3406 | /* Wait 3 seconds for reset to complete. */ | 3285 | /* Wait 3 seconds for reset to complete. */ |
3407 | printk(KERN_DEBUG PFX | 3286 | netdev_printk(KERN_DEBUG, qdev->ndev, |
3408 | "%s: Wait 10 milliseconds for reset to complete.\n", | 3287 | "Wait 10 milliseconds for reset to complete\n"); |
3409 | qdev->ndev->name); | ||
3410 | 3288 | ||
3411 | /* Wait until the firmware tells us the Soft Reset is done */ | 3289 | /* Wait until the firmware tells us the Soft Reset is done */ |
3412 | max_wait_time = 5; | 3290 | max_wait_time = 5; |
@@ -3427,8 +3305,8 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) | |||
3427 | value = | 3305 | value = |
3428 | ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); | 3306 | ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); |
3429 | if (value & ISP_CONTROL_RI) { | 3307 | if (value & ISP_CONTROL_RI) { |
3430 | printk(KERN_DEBUG PFX | 3308 | netdev_printk(KERN_DEBUG, qdev->ndev, |
3431 | "ql_adapter_reset: clearing RI after reset.\n"); | 3309 | "clearing RI after reset\n"); |
3432 | ql_write_common_reg(qdev, | 3310 | ql_write_common_reg(qdev, |
3433 | &port_regs->CommonRegs. | 3311 | &port_regs->CommonRegs. |
3434 | ispControlStatus, | 3312 | ispControlStatus, |
@@ -3448,13 +3326,11 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) | |||
3448 | */ | 3326 | */ |
3449 | max_wait_time = 5; | 3327 | max_wait_time = 5; |
3450 | do { | 3328 | do { |
3451 | value = | 3329 | value = ql_read_common_reg(qdev, |
3452 | ql_read_common_reg(qdev, | 3330 | &port_regs->CommonRegs. |
3453 | &port_regs->CommonRegs. | 3331 | ispControlStatus); |
3454 | ispControlStatus); | 3332 | if ((value & ISP_CONTROL_FSR) == 0) |
3455 | if ((value & ISP_CONTROL_FSR) == 0) { | ||
3456 | break; | 3333 | break; |
3457 | } | ||
3458 | ssleep(1); | 3334 | ssleep(1); |
3459 | } while ((--max_wait_time)); | 3335 | } while ((--max_wait_time)); |
3460 | } | 3336 | } |
@@ -3468,7 +3344,8 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) | |||
3468 | 3344 | ||
3469 | static void ql_set_mac_info(struct ql3_adapter *qdev) | 3345 | static void ql_set_mac_info(struct ql3_adapter *qdev) |
3470 | { | 3346 | { |
3471 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 3347 | struct ql3xxx_port_registers __iomem *port_regs = |
3348 | qdev->mem_map_registers; | ||
3472 | u32 value, port_status; | 3349 | u32 value, port_status; |
3473 | u8 func_number; | 3350 | u8 func_number; |
3474 | 3351 | ||
@@ -3484,9 +3361,9 @@ static void ql_set_mac_info(struct ql3_adapter *qdev) | |||
3484 | qdev->mb_bit_mask = FN0_MA_BITS_MASK; | 3361 | qdev->mb_bit_mask = FN0_MA_BITS_MASK; |
3485 | qdev->PHYAddr = PORT0_PHY_ADDRESS; | 3362 | qdev->PHYAddr = PORT0_PHY_ADDRESS; |
3486 | if (port_status & PORT_STATUS_SM0) | 3363 | if (port_status & PORT_STATUS_SM0) |
3487 | set_bit(QL_LINK_OPTICAL,&qdev->flags); | 3364 | set_bit(QL_LINK_OPTICAL, &qdev->flags); |
3488 | else | 3365 | else |
3489 | clear_bit(QL_LINK_OPTICAL,&qdev->flags); | 3366 | clear_bit(QL_LINK_OPTICAL, &qdev->flags); |
3490 | break; | 3367 | break; |
3491 | 3368 | ||
3492 | case ISP_CONTROL_FN1_NET: | 3369 | case ISP_CONTROL_FN1_NET: |
@@ -3495,17 +3372,17 @@ static void ql_set_mac_info(struct ql3_adapter *qdev) | |||
3495 | qdev->mb_bit_mask = FN1_MA_BITS_MASK; | 3372 | qdev->mb_bit_mask = FN1_MA_BITS_MASK; |
3496 | qdev->PHYAddr = PORT1_PHY_ADDRESS; | 3373 | qdev->PHYAddr = PORT1_PHY_ADDRESS; |
3497 | if (port_status & PORT_STATUS_SM1) | 3374 | if (port_status & PORT_STATUS_SM1) |
3498 | set_bit(QL_LINK_OPTICAL,&qdev->flags); | 3375 | set_bit(QL_LINK_OPTICAL, &qdev->flags); |
3499 | else | 3376 | else |
3500 | clear_bit(QL_LINK_OPTICAL,&qdev->flags); | 3377 | clear_bit(QL_LINK_OPTICAL, &qdev->flags); |
3501 | break; | 3378 | break; |
3502 | 3379 | ||
3503 | case ISP_CONTROL_FN0_SCSI: | 3380 | case ISP_CONTROL_FN0_SCSI: |
3504 | case ISP_CONTROL_FN1_SCSI: | 3381 | case ISP_CONTROL_FN1_SCSI: |
3505 | default: | 3382 | default: |
3506 | printk(KERN_DEBUG PFX | 3383 | netdev_printk(KERN_DEBUG, qdev->ndev, |
3507 | "%s: Invalid function number, ispControlStatus = 0x%x\n", | 3384 | "Invalid function number, ispControlStatus = 0x%x\n", |
3508 | qdev->ndev->name,value); | 3385 | value); |
3509 | break; | 3386 | break; |
3510 | } | 3387 | } |
3511 | qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; | 3388 | qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; |
@@ -3516,32 +3393,26 @@ static void ql_display_dev_info(struct net_device *ndev) | |||
3516 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); | 3393 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); |
3517 | struct pci_dev *pdev = qdev->pdev; | 3394 | struct pci_dev *pdev = qdev->pdev; |
3518 | 3395 | ||
3519 | printk(KERN_INFO PFX | 3396 | netdev_info(ndev, |
3520 | "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n", | 3397 | "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", |
3521 | DRV_NAME, qdev->index, qdev->chip_rev_id, | 3398 | DRV_NAME, qdev->index, qdev->chip_rev_id, |
3522 | (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022", | 3399 | qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", |
3523 | qdev->pci_slot); | 3400 | qdev->pci_slot); |
3524 | printk(KERN_INFO PFX | 3401 | netdev_info(ndev, "%s Interface\n", |
3525 | "%s Interface.\n", | 3402 | test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); |
3526 | test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER"); | ||
3527 | 3403 | ||
3528 | /* | 3404 | /* |
3529 | * Print PCI bus width/type. | 3405 | * Print PCI bus width/type. |
3530 | */ | 3406 | */ |
3531 | printk(KERN_INFO PFX | 3407 | netdev_info(ndev, "Bus interface is %s %s\n", |
3532 | "Bus interface is %s %s.\n", | 3408 | ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), |
3533 | ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), | 3409 | ((qdev->pci_x) ? "PCI-X" : "PCI")); |
3534 | ((qdev->pci_x) ? "PCI-X" : "PCI")); | ||
3535 | 3410 | ||
3536 | printk(KERN_INFO PFX | 3411 | netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", |
3537 | "mem IO base address adjusted = 0x%p\n", | 3412 | qdev->mem_map_registers); |
3538 | qdev->mem_map_registers); | 3413 | netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); |
3539 | printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq); | ||
3540 | 3414 | ||
3541 | if (netif_msg_probe(qdev)) | 3415 | netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); |
3542 | printk(KERN_INFO PFX | ||
3543 | "%s: MAC address %pM\n", | ||
3544 | ndev->name, ndev->dev_addr); | ||
3545 | } | 3416 | } |
3546 | 3417 | ||
3547 | static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) | 3418 | static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) |
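ql_display_dev_info() now splits its output between netdev_info() for the unconditional lines and netif_info() for the MAC address, which preserves the old netif_msg_probe() gating in a single token. The %pM printk extension formats the six-byte hardware address:

	/* prints only when the NETIF_MSG_PROBE bit is set in msg_enable */
	netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);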
@@ -3552,17 +3423,16 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) | |||
3552 | netif_stop_queue(ndev); | 3423 | netif_stop_queue(ndev); |
3553 | netif_carrier_off(ndev); | 3424 | netif_carrier_off(ndev); |
3554 | 3425 | ||
3555 | clear_bit(QL_ADAPTER_UP,&qdev->flags); | 3426 | clear_bit(QL_ADAPTER_UP, &qdev->flags); |
3556 | clear_bit(QL_LINK_MASTER,&qdev->flags); | 3427 | clear_bit(QL_LINK_MASTER, &qdev->flags); |
3557 | 3428 | ||
3558 | ql_disable_interrupts(qdev); | 3429 | ql_disable_interrupts(qdev); |
3559 | 3430 | ||
3560 | free_irq(qdev->pdev->irq, ndev); | 3431 | free_irq(qdev->pdev->irq, ndev); |
3561 | 3432 | ||
3562 | if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { | 3433 | if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { |
3563 | printk(KERN_INFO PFX | 3434 | netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); |
3564 | "%s: calling pci_disable_msi().\n", qdev->ndev->name); | 3435 | clear_bit(QL_MSI_ENABLED, &qdev->flags); |
3565 | clear_bit(QL_MSI_ENABLED,&qdev->flags); | ||
3566 | pci_disable_msi(qdev->pdev); | 3436 | pci_disable_msi(qdev->pdev); |
3567 | } | 3437 | } |
3568 | 3438 | ||
@@ -3576,17 +3446,16 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) | |||
3576 | 3446 | ||
3577 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 3447 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
3578 | if (ql_wait_for_drvr_lock(qdev)) { | 3448 | if (ql_wait_for_drvr_lock(qdev)) { |
3579 | if ((soft_reset = ql_adapter_reset(qdev))) { | 3449 | soft_reset = ql_adapter_reset(qdev); |
3580 | printk(KERN_ERR PFX | 3450 | if (soft_reset) { |
3581 | "%s: ql_adapter_reset(%d) FAILED!\n", | 3451 | netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", |
3582 | ndev->name, qdev->index); | 3452 | qdev->index); |
3583 | } | 3453 | } |
3584 | printk(KERN_ERR PFX | 3454 | netdev_err(ndev, |
3585 | "%s: Releaseing driver lock via chip reset.\n",ndev->name); | 3455 | "Releasing driver lock via chip reset\n"); |
3586 | } else { | 3456 | } else { |
3587 | printk(KERN_ERR PFX | 3457 | netdev_err(ndev, |
3588 | "%s: Could not acquire driver lock to do " | 3458 | "Could not acquire driver lock to do reset!\n"); |
3589 | "reset!\n", ndev->name); | ||
3590 | retval = -1; | 3459 | retval = -1; |
3591 | } | 3460 | } |
3592 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 3461 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
@@ -3603,56 +3472,50 @@ static int ql_adapter_up(struct ql3_adapter *qdev) | |||
3603 | unsigned long hw_flags; | 3472 | unsigned long hw_flags; |
3604 | 3473 | ||
3605 | if (ql_alloc_mem_resources(qdev)) { | 3474 | if (ql_alloc_mem_resources(qdev)) { |
3606 | printk(KERN_ERR PFX | 3475 | netdev_err(ndev, "Unable to allocate buffers\n"); |
3607 | "%s Unable to allocate buffers.\n", ndev->name); | ||
3608 | return -ENOMEM; | 3476 | return -ENOMEM; |
3609 | } | 3477 | } |
3610 | 3478 | ||
3611 | if (qdev->msi) { | 3479 | if (qdev->msi) { |
3612 | if (pci_enable_msi(qdev->pdev)) { | 3480 | if (pci_enable_msi(qdev->pdev)) { |
3613 | printk(KERN_ERR PFX | 3481 | netdev_err(ndev, |
3614 | "%s: User requested MSI, but MSI failed to " | 3482 | "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n"); |
3615 | "initialize. Continuing without MSI.\n", | ||
3616 | qdev->ndev->name); | ||
3617 | qdev->msi = 0; | 3483 | qdev->msi = 0; |
3618 | } else { | 3484 | } else { |
3619 | printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name); | 3485 | netdev_info(ndev, "MSI Enabled...\n"); |
3620 | set_bit(QL_MSI_ENABLED,&qdev->flags); | 3486 | set_bit(QL_MSI_ENABLED, &qdev->flags); |
3621 | irq_flags &= ~IRQF_SHARED; | 3487 | irq_flags &= ~IRQF_SHARED; |
3622 | } | 3488 | } |
3623 | } | 3489 | } |
3624 | 3490 | ||
3625 | if ((err = request_irq(qdev->pdev->irq, | 3491 | err = request_irq(qdev->pdev->irq, ql3xxx_isr, |
3626 | ql3xxx_isr, | 3492 | irq_flags, ndev->name, ndev); |
3627 | irq_flags, ndev->name, ndev))) { | 3493 | if (err) { |
3628 | printk(KERN_ERR PFX | 3494 | netdev_err(ndev, |
3629 | "%s: Failed to reserve interrupt %d already in use.\n", | 3495 | "Failed to reserve interrupt %d - already in use\n", |
3630 | ndev->name, qdev->pdev->irq); | 3496 | qdev->pdev->irq); |
3631 | goto err_irq; | 3497 | goto err_irq; |
3632 | } | 3498 | } |
3633 | 3499 | ||
3634 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 3500 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
3635 | 3501 | ||
3636 | if ((err = ql_wait_for_drvr_lock(qdev))) { | 3502 | err = ql_wait_for_drvr_lock(qdev); |
3637 | if ((err = ql_adapter_initialize(qdev))) { | 3503 | if (err) { |
3638 | printk(KERN_ERR PFX | 3504 | err = ql_adapter_initialize(qdev); |
3639 | "%s: Unable to initialize adapter.\n", | 3505 | if (err) { |
3640 | ndev->name); | 3506 | netdev_err(ndev, "Unable to initialize adapter\n"); |
3641 | goto err_init; | 3507 | goto err_init; |
3642 | } | 3508 | } |
3643 | printk(KERN_ERR PFX | 3509 | netdev_err(ndev, "Releasing driver lock\n"); |
3644 | "%s: Releaseing driver lock.\n",ndev->name); | ||
3645 | ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); | 3510 | ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); |
3646 | } else { | 3511 | } else { |
3647 | printk(KERN_ERR PFX | 3512 | netdev_err(ndev, "Could not acquire driver lock\n"); |
3648 | "%s: Could not acquire driver lock.\n", | ||
3649 | ndev->name); | ||
3650 | goto err_lock; | 3513 | goto err_lock; |
3651 | } | 3514 | } |
3652 | 3515 | ||
3653 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 3516 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
3654 | 3517 | ||
3655 | set_bit(QL_ADAPTER_UP,&qdev->flags); | 3518 | set_bit(QL_ADAPTER_UP, &qdev->flags); |
3656 | 3519 | ||
3657 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); | 3520 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); |
3658 | 3521 | ||
@@ -3666,11 +3529,9 @@ err_lock: | |||
3666 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 3529 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
3667 | free_irq(qdev->pdev->irq, ndev); | 3530 | free_irq(qdev->pdev->irq, ndev); |
3668 | err_irq: | 3531 | err_irq: |
3669 | if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { | 3532 | if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { |
3670 | printk(KERN_INFO PFX | 3533 | netdev_info(ndev, "calling pci_disable_msi()\n"); |
3671 | "%s: calling pci_disable_msi().\n", | 3534 | clear_bit(QL_MSI_ENABLED, &qdev->flags); |
3672 | qdev->ndev->name); | ||
3673 | clear_bit(QL_MSI_ENABLED,&qdev->flags); | ||
3674 | pci_disable_msi(qdev->pdev); | 3535 | pci_disable_msi(qdev->pdev); |
3675 | } | 3536 | } |
3676 | return err; | 3537 | return err; |
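The ql_adapter_up() hunk keeps the original MSI policy while reflowing it: a pci_enable_msi() failure logs and falls back to the legacy interrupt, and on success IRQF_SHARED is dropped, since an MSI vector is exclusive to its device. Reduced to the decision (the irq_flags initializer is inferred from the `&=` line; it is not shown in the hunk):

	unsigned long irq_flags = IRQF_SHARED;

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			qdev->msi = 0;	/* fall back to shared INTx */
		} else {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			irq_flags &= ~IRQF_SHARED;
		}
	}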
@@ -3678,10 +3539,9 @@ err_irq: | |||
3678 | 3539 | ||
3679 | static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) | 3540 | static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) |
3680 | { | 3541 | { |
3681 | if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) { | 3542 | if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { |
3682 | printk(KERN_ERR PFX | 3543 | netdev_err(qdev->ndev, |
3683 | "%s: Driver up/down cycle failed, " | 3544 | "Driver up/down cycle failed, closing device\n"); |
3684 | "closing device\n",qdev->ndev->name); | ||
3685 | rtnl_lock(); | 3545 | rtnl_lock(); |
3686 | dev_close(qdev->ndev); | 3546 | dev_close(qdev->ndev); |
3687 | rtnl_unlock(); | 3547 | rtnl_unlock(); |
@@ -3698,24 +3558,24 @@ static int ql3xxx_close(struct net_device *ndev) | |||
3698 | * Wait for device to recover from a reset. | 3558 | * Wait for device to recover from a reset. |
3699 | * (Rarely happens, but possible.) | 3559 | * (Rarely happens, but possible.) |
3700 | */ | 3560 | */ |
3701 | while (!test_bit(QL_ADAPTER_UP,&qdev->flags)) | 3561 | while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) |
3702 | msleep(50); | 3562 | msleep(50); |
3703 | 3563 | ||
3704 | ql_adapter_down(qdev,QL_DO_RESET); | 3564 | ql_adapter_down(qdev, QL_DO_RESET); |
3705 | return 0; | 3565 | return 0; |
3706 | } | 3566 | } |
3707 | 3567 | ||
3708 | static int ql3xxx_open(struct net_device *ndev) | 3568 | static int ql3xxx_open(struct net_device *ndev) |
3709 | { | 3569 | { |
3710 | struct ql3_adapter *qdev = netdev_priv(ndev); | 3570 | struct ql3_adapter *qdev = netdev_priv(ndev); |
3711 | return (ql_adapter_up(qdev)); | 3571 | return ql_adapter_up(qdev); |
3712 | } | 3572 | } |
3713 | 3573 | ||
3714 | static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) | 3574 | static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) |
3715 | { | 3575 | { |
3716 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); | 3576 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); |
3717 | struct ql3xxx_port_registers __iomem *port_regs = | 3577 | struct ql3xxx_port_registers __iomem *port_regs = |
3718 | qdev->mem_map_registers; | 3578 | qdev->mem_map_registers; |
3719 | struct sockaddr *addr = p; | 3579 | struct sockaddr *addr = p; |
3720 | unsigned long hw_flags; | 3580 | unsigned long hw_flags; |
3721 | 3581 | ||
@@ -3750,7 +3610,7 @@ static void ql3xxx_tx_timeout(struct net_device *ndev) | |||
3750 | { | 3610 | { |
3751 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); | 3611 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); |
3752 | 3612 | ||
3753 | printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name); | 3613 | netdev_err(ndev, "Resetting...\n"); |
3754 | /* | 3614 | /* |
3755 | * Stop the queues, we've got a problem. | 3615 | * Stop the queues, we've got a problem. |
3756 | */ | 3616 | */ |
@@ -3770,11 +3630,12 @@ static void ql_reset_work(struct work_struct *work) | |||
3770 | u32 value; | 3630 | u32 value; |
3771 | struct ql_tx_buf_cb *tx_cb; | 3631 | struct ql_tx_buf_cb *tx_cb; |
3772 | int max_wait_time, i; | 3632 | int max_wait_time, i; |
3773 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 3633 | struct ql3xxx_port_registers __iomem *port_regs = |
3634 | qdev->mem_map_registers; | ||
3774 | unsigned long hw_flags; | 3635 | unsigned long hw_flags; |
3775 | 3636 | ||
3776 | if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START),&qdev->flags)) { | 3637 | if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { |
3777 | clear_bit(QL_LINK_MASTER,&qdev->flags); | 3638 | clear_bit(QL_LINK_MASTER, &qdev->flags); |
3778 | 3639 | ||
3779 | /* | 3640 | /* |
3780 | * Loop through the active list and return the skb. | 3641 | * Loop through the active list and return the skb. |
@@ -3783,17 +3644,19 @@ static void ql_reset_work(struct work_struct *work) | |||
3783 | int j; | 3644 | int j; |
3784 | tx_cb = &qdev->tx_buf[i]; | 3645 | tx_cb = &qdev->tx_buf[i]; |
3785 | if (tx_cb->skb) { | 3646 | if (tx_cb->skb) { |
3786 | printk(KERN_DEBUG PFX | 3647 | netdev_printk(KERN_DEBUG, ndev, |
3787 | "%s: Freeing lost SKB.\n", | 3648 | "Freeing lost SKB\n"); |
3788 | qdev->ndev->name); | ||
3789 | pci_unmap_single(qdev->pdev, | 3649 | pci_unmap_single(qdev->pdev, |
3790 | dma_unmap_addr(&tx_cb->map[0], mapaddr), | 3650 | dma_unmap_addr(&tx_cb->map[0], |
3651 | mapaddr), | ||
3791 | dma_unmap_len(&tx_cb->map[0], maplen), | 3652 | dma_unmap_len(&tx_cb->map[0], maplen), |
3792 | PCI_DMA_TODEVICE); | 3653 | PCI_DMA_TODEVICE); |
3793 | for(j=1;j<tx_cb->seg_count;j++) { | 3654 | for (j = 1; j < tx_cb->seg_count; j++) { |
3794 | pci_unmap_page(qdev->pdev, | 3655 | pci_unmap_page(qdev->pdev, |
3795 | dma_unmap_addr(&tx_cb->map[j],mapaddr), | 3656 | dma_unmap_addr(&tx_cb->map[j], |
3796 | dma_unmap_len(&tx_cb->map[j],maplen), | 3657 | mapaddr), |
3658 | dma_unmap_len(&tx_cb->map[j], | ||
3659 | maplen), | ||
3797 | PCI_DMA_TODEVICE); | 3660 | PCI_DMA_TODEVICE); |
3798 | } | 3661 | } |
3799 | dev_kfree_skb(tx_cb->skb); | 3662 | dev_kfree_skb(tx_cb->skb); |
@@ -3801,8 +3664,7 @@ static void ql_reset_work(struct work_struct *work) | |||
3801 | } | 3664 | } |
3802 | } | 3665 | } |
3803 | 3666 | ||
3804 | printk(KERN_ERR PFX | 3667 | netdev_err(ndev, "Clearing NRI after reset\n"); |
3805 | "%s: Clearing NRI after reset.\n", qdev->ndev->name); | ||
3806 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 3668 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
3807 | ql_write_common_reg(qdev, | 3669 | ql_write_common_reg(qdev, |
3808 | &port_regs->CommonRegs. | 3670 | &port_regs->CommonRegs. |
@@ -3818,16 +3680,14 @@ static void ql_reset_work(struct work_struct *work) | |||
3818 | 3680 | ||
3819 | ispControlStatus); | 3681 | ispControlStatus); |
3820 | if ((value & ISP_CONTROL_SR) == 0) { | 3682 | if ((value & ISP_CONTROL_SR) == 0) { |
3821 | printk(KERN_DEBUG PFX | 3683 | netdev_printk(KERN_DEBUG, ndev, |
3822 | "%s: reset completed.\n", | 3684 | "reset completed\n"); |
3823 | qdev->ndev->name); | ||
3824 | break; | 3685 | break; |
3825 | } | 3686 | } |
3826 | 3687 | ||
3827 | if (value & ISP_CONTROL_RI) { | 3688 | if (value & ISP_CONTROL_RI) { |
3828 | printk(KERN_DEBUG PFX | 3689 | netdev_printk(KERN_DEBUG, ndev, |
3829 | "%s: clearing NRI after reset.\n", | 3690 | "clearing NRI after reset\n"); |
3830 | qdev->ndev->name); | ||
3831 | ql_write_common_reg(qdev, | 3691 | ql_write_common_reg(qdev, |
3832 | &port_regs-> | 3692 | &port_regs-> |
3833 | CommonRegs. | 3693 | CommonRegs. |
@@ -3848,21 +3708,19 @@ static void ql_reset_work(struct work_struct *work) | |||
3848 | * Set the reset flags and clear the board again. | 3708 | * Set the reset flags and clear the board again. |
3849 | * Nothing else to do... | 3709 | * Nothing else to do... |
3850 | */ | 3710 | */ |
3851 | printk(KERN_ERR PFX | 3711 | netdev_err(ndev, |
3852 | "%s: Timed out waiting for reset to " | 3712 | "Timed out waiting for reset to complete\n"); |
3853 | "complete.\n", ndev->name); | 3713 | netdev_err(ndev, "Do a reset\n"); |
3854 | printk(KERN_ERR PFX | 3714 | clear_bit(QL_RESET_PER_SCSI, &qdev->flags); |
3855 | "%s: Do a reset.\n", ndev->name); | 3715 | clear_bit(QL_RESET_START, &qdev->flags); |
3856 | clear_bit(QL_RESET_PER_SCSI,&qdev->flags); | 3716 | ql_cycle_adapter(qdev, QL_DO_RESET); |
3857 | clear_bit(QL_RESET_START,&qdev->flags); | ||
3858 | ql_cycle_adapter(qdev,QL_DO_RESET); | ||
3859 | return; | 3717 | return; |
3860 | } | 3718 | } |
3861 | 3719 | ||
3862 | clear_bit(QL_RESET_ACTIVE,&qdev->flags); | 3720 | clear_bit(QL_RESET_ACTIVE, &qdev->flags); |
3863 | clear_bit(QL_RESET_PER_SCSI,&qdev->flags); | 3721 | clear_bit(QL_RESET_PER_SCSI, &qdev->flags); |
3864 | clear_bit(QL_RESET_START,&qdev->flags); | 3722 | clear_bit(QL_RESET_START, &qdev->flags); |
3865 | ql_cycle_adapter(qdev,QL_NO_RESET); | 3723 | ql_cycle_adapter(qdev, QL_NO_RESET); |
3866 | } | 3724 | } |
3867 | } | 3725 | } |
3868 | 3726 | ||
@@ -3876,7 +3734,8 @@ static void ql_tx_timeout_work(struct work_struct *work) | |||
3876 | 3734 | ||
3877 | static void ql_get_board_info(struct ql3_adapter *qdev) | 3735 | static void ql_get_board_info(struct ql3_adapter *qdev) |
3878 | { | 3736 | { |
3879 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 3737 | struct ql3xxx_port_registers __iomem *port_regs = |
3738 | qdev->mem_map_registers; | ||
3880 | u32 value; | 3739 | u32 value; |
3881 | 3740 | ||
3882 | value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); | 3741 | value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); |
@@ -3915,20 +3774,18 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
3915 | { | 3774 | { |
3916 | struct net_device *ndev = NULL; | 3775 | struct net_device *ndev = NULL; |
3917 | struct ql3_adapter *qdev = NULL; | 3776 | struct ql3_adapter *qdev = NULL; |
3918 | static int cards_found = 0; | 3777 | static int cards_found; |
3919 | int uninitialized_var(pci_using_dac), err; | 3778 | int uninitialized_var(pci_using_dac), err; |
3920 | 3779 | ||
3921 | err = pci_enable_device(pdev); | 3780 | err = pci_enable_device(pdev); |
3922 | if (err) { | 3781 | if (err) { |
3923 | printk(KERN_ERR PFX "%s cannot enable PCI device\n", | 3782 | pr_err("%s cannot enable PCI device\n", pci_name(pdev)); |
3924 | pci_name(pdev)); | ||
3925 | goto err_out; | 3783 | goto err_out; |
3926 | } | 3784 | } |
3927 | 3785 | ||
3928 | err = pci_request_regions(pdev, DRV_NAME); | 3786 | err = pci_request_regions(pdev, DRV_NAME); |
3929 | if (err) { | 3787 | if (err) { |
3930 | printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", | 3788 | pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); |
3931 | pci_name(pdev)); | ||
3932 | goto err_out_disable_pdev; | 3789 | goto err_out_disable_pdev; |
3933 | } | 3790 | } |
3934 | 3791 | ||
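Two routine checkpatch items close out this probe hunk. `static int cards_found = 0;` loses its initializer, since static storage is zero-initialized by the language and the explicit `= 0` adds nothing (on older toolchains it could also push the variable from .bss into .data); and the early PCI errors move to pr_err(), the device-less sibling of netdev_err(), because no net_device exists yet at these call sites:

	static int cards_found;		/* implicitly zero */

	err = pci_enable_device(pdev);
	if (err) {
		pr_err("%s cannot enable PCI device\n", pci_name(pdev));
		goto err_out;
	}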
@@ -3943,15 +3800,13 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
3943 | } | 3800 | } |
3944 | 3801 | ||
3945 | if (err) { | 3802 | if (err) { |
3946 | printk(KERN_ERR PFX "%s no usable DMA configuration\n", | 3803 | pr_err("%s no usable DMA configuration\n", pci_name(pdev)); |
3947 | pci_name(pdev)); | ||
3948 | goto err_out_free_regions; | 3804 | goto err_out_free_regions; |
3949 | } | 3805 | } |
3950 | 3806 | ||
3951 | ndev = alloc_etherdev(sizeof(struct ql3_adapter)); | 3807 | ndev = alloc_etherdev(sizeof(struct ql3_adapter)); |
3952 | if (!ndev) { | 3808 | if (!ndev) { |
3953 | printk(KERN_ERR PFX "%s could not alloc etherdev\n", | 3809 | pr_err("%s could not alloc etherdev\n", pci_name(pdev)); |
3954 | pci_name(pdev)); | ||
3955 | err = -ENOMEM; | 3810 | err = -ENOMEM; |
3956 | goto err_out_free_regions; | 3811 | goto err_out_free_regions; |
3957 | } | 3812 | } |
@@ -3978,8 +3833,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
3978 | 3833 | ||
3979 | qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); | 3834 | qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); |
3980 | if (!qdev->mem_map_registers) { | 3835 | if (!qdev->mem_map_registers) { |
3981 | printk(KERN_ERR PFX "%s: cannot map device registers\n", | 3836 | pr_err("%s: cannot map device registers\n", pci_name(pdev)); |
3982 | pci_name(pdev)); | ||
3983 | err = -EIO; | 3837 | err = -EIO; |
3984 | goto err_out_free_ndev; | 3838 | goto err_out_free_ndev; |
3985 | } | 3839 | } |
@@ -3998,9 +3852,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
3998 | 3852 | ||
3999 | /* make sure the EEPROM is good */ | 3853 | /* make sure the EEPROM is good */ |
4000 | if (ql_get_nvram_params(qdev)) { | 3854 | if (ql_get_nvram_params(qdev)) { |
4001 | printk(KERN_ALERT PFX | 3855 | pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", |
4002 | "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n", | 3856 | __func__, qdev->index); |
4003 | qdev->index); | ||
4004 | err = -EIO; | 3857 | err = -EIO; |
4005 | goto err_out_iounmap; | 3858 | goto err_out_iounmap; |
4006 | } | 3859 | } |
@@ -4026,14 +3879,12 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
4026 | * Set the Maximum Memory Read Byte Count value. We do this to handle | 3879 | * Set the Maximum Memory Read Byte Count value. We do this to handle |
4027 | * jumbo frames. | 3880 | * jumbo frames. |
4028 | */ | 3881 | */ |
4029 | if (qdev->pci_x) { | 3882 | if (qdev->pci_x) |
4030 | pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); | 3883 | pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); |
4031 | } | ||
4032 | 3884 | ||
4033 | err = register_netdev(ndev); | 3885 | err = register_netdev(ndev); |
4034 | if (err) { | 3886 | if (err) { |
4035 | printk(KERN_ERR PFX "%s: cannot register net device\n", | 3887 | pr_err("%s: cannot register net device\n", pci_name(pdev)); |
4036 | pci_name(pdev)); | ||
4037 | goto err_out_iounmap; | 3888 | goto err_out_iounmap; |
4038 | } | 3889 | } |
4039 | 3890 | ||
@@ -4052,10 +3903,10 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
4052 | qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ | 3903 | qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ |
4053 | qdev->adapter_timer.data = (unsigned long)qdev; | 3904 | qdev->adapter_timer.data = (unsigned long)qdev; |
4054 | 3905 | ||
4055 | if(!cards_found) { | 3906 | if (!cards_found) { |
4056 | printk(KERN_ALERT PFX "%s\n", DRV_STRING); | 3907 | pr_alert("%s\n", DRV_STRING); |
4057 | printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n", | 3908 | pr_alert("Driver name: %s, Version: %s\n", |
4058 | DRV_NAME, DRV_VERSION); | 3909 | DRV_NAME, DRV_VERSION); |
4059 | } | 3910 | } |
4060 | ql_display_dev_info(ndev); | 3911 | ql_display_dev_info(ndev); |
4061 | 3912 | ||
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h index e1894775e5aa..970389331bbc 100644 --- a/drivers/net/qlcnic/qlcnic.h +++ b/drivers/net/qlcnic/qlcnic.h | |||
@@ -1074,8 +1074,8 @@ struct qlcnic_eswitch { | |||
1074 | /* Return codes for Error handling */ | 1074 | /* Return codes for Error handling */ |
1075 | #define QL_STATUS_INVALID_PARAM -1 | 1075 | #define QL_STATUS_INVALID_PARAM -1 |
1076 | 1076 | ||
1077 | #define MAX_BW 10000 | 1077 | #define MAX_BW 100 |
1078 | #define MIN_BW 100 | 1078 | #define MIN_BW 1 |
1079 | #define MAX_VLAN_ID 4095 | 1079 | #define MAX_VLAN_ID 4095 |
1080 | #define MIN_VLAN_ID 2 | 1080 | #define MIN_VLAN_ID 2 |
1081 | #define MAX_TX_QUEUES 1 | 1081 | #define MAX_TX_QUEUES 1 |
@@ -1083,8 +1083,7 @@ struct qlcnic_eswitch { | |||
1083 | #define DEFAULT_MAC_LEARN 1 | 1083 | #define DEFAULT_MAC_LEARN 1 |
1084 | 1084 | ||
1085 | #define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID) | 1085 | #define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID) |
1086 | #define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW \ | 1086 | #define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW) |
1087 | && (bw % 100) == 0) | ||
1088 | #define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES) | 1087 | #define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES) |
1089 | #define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES) | 1088 | #define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES) |
1090 | #define IS_VALID_MODE(mode) (mode == 0 || mode == 1) | 1089 | #define IS_VALID_MODE(mode) (mode == 0 || mode == 1) |
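[Editor's note] MAX_BW/MIN_BW drop from 10000/100 to 100/1 and the multiple-of-100 test disappears, which suggests the bandwidth value is now taken directly as a percentage rather than in hundredths of a percent. A small userspace check of the new macro (macro bodies copied from the hunk; the harness is illustrative):

	#include <stdio.h>

	#define MAX_BW 100
	#define MIN_BW 1
	#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)

	int main(void)
	{
		/* 50 (percent) is now valid; under the old scheme only
		 * multiples of 100 in [100, 10000] would have passed */
		printf("bw=50  -> %s\n", IS_VALID_BW(50) ? "valid" : "invalid");
		printf("bw=150 -> %s\n", IS_VALID_BW(150) ? "valid" : "invalid");
		return 0;
	}
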
@@ -1302,8 +1301,6 @@ struct qlcnic_nic_template { | |||
1302 | int (*get_mac_addr) (struct qlcnic_adapter *, u8*); | 1301 | int (*get_mac_addr) (struct qlcnic_adapter *, u8*); |
1303 | int (*config_bridged_mode) (struct qlcnic_adapter *, u32); | 1302 | int (*config_bridged_mode) (struct qlcnic_adapter *, u32); |
1304 | int (*config_led) (struct qlcnic_adapter *, u32, u32); | 1303 | int (*config_led) (struct qlcnic_adapter *, u32, u32); |
1305 | int (*set_ilb_mode) (struct qlcnic_adapter *); | ||
1306 | void (*clear_ilb_mode) (struct qlcnic_adapter *); | ||
1307 | int (*start_firmware) (struct qlcnic_adapter *); | 1304 | int (*start_firmware) (struct qlcnic_adapter *); |
1308 | }; | 1305 | }; |
1309 | 1306 | ||
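[Editor's note] struct qlcnic_nic_template is a plain function-pointer table: PF and VF builds share call sites and differ only in which table the probe path wires into the adapter. With loopback rejected up front for non-privileged functions (see the ethtool hunk below), the per-template ilb hooks can go and callers use qlcnic_set_ilb_mode()/qlcnic_clear_ilb_mode() directly. A minimal sketch of the dispatch pattern (names and signatures abbreviated, not the driver's real ones):

	#include <stdio.h>

	struct adapter;

	struct nic_template {	/* trimmed stand-in for qlcnic_nic_template */
		int (*config_led)(struct adapter *, unsigned int, unsigned int);
		int (*start_firmware)(struct adapter *);
	};

	struct adapter { const struct nic_template *nic_ops; };

	static int pf_config_led(struct adapter *a, unsigned int s, unsigned int r)
	{ printf("PF: led state=%u rate=%u\n", s, r); return 0; }

	static int vf_config_led(struct adapter *a, unsigned int s, unsigned int r)
	{ return -95; /* -EOPNOTSUPP: a VF cannot touch the LED */ }

	static int start_fw(struct adapter *a) { return 0; }

	static const struct nic_template pf_ops = { pf_config_led, start_fw };
	static const struct nic_template vf_ops = { vf_config_led, start_fw };

	int main(void)
	{
		struct adapter a = { .nic_ops = &pf_ops };

		a.nic_ops->config_led(&a, 1, 0);	/* shared call site */
		a.nic_ops = &vf_ops;
		printf("vf led -> %d\n", a.nic_ops->config_led(&a, 1, 0));
		return 0;
	}
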
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c index 7d6558e33dca..9328d59e21e0 100644 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/qlcnic/qlcnic_ethtool.c | |||
@@ -678,6 +678,12 @@ static int qlcnic_loopback_test(struct net_device *netdev) | |||
678 | int max_sds_rings = adapter->max_sds_rings; | 678 | int max_sds_rings = adapter->max_sds_rings; |
679 | int ret; | 679 | int ret; |
680 | 680 | ||
681 | if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { | ||
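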
682 | dev_warn(&adapter->pdev->dev, "Loopback test not supported " | ||
683 | "for non-privileged function\n"); | ||
684 | return 0; | ||
685 | } | ||
686 | |||
681 | if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) | 687 | if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) |
682 | return -EIO; | 688 | return -EIO; |
683 | 689 | ||
@@ -685,13 +691,13 @@ static int qlcnic_loopback_test(struct net_device *netdev) | |||
685 | if (ret) | 691 | if (ret) |
686 | goto clear_it; | 692 | goto clear_it; |
687 | 693 | ||
688 | ret = adapter->nic_ops->set_ilb_mode(adapter); | 694 | ret = qlcnic_set_ilb_mode(adapter); |
689 | if (ret) | 695 | if (ret) |
690 | goto done; | 696 | goto done; |
691 | 697 | ||
692 | ret = qlcnic_do_ilb_test(adapter); | 698 | ret = qlcnic_do_ilb_test(adapter); |
693 | 699 | ||
694 | adapter->nic_ops->clear_ilb_mode(adapter); | 700 | qlcnic_clear_ilb_mode(adapter); |
695 | 701 | ||
696 | done: | 702 | done: |
697 | qlcnic_diag_free_res(netdev, max_sds_rings); | 703 | qlcnic_diag_free_res(netdev, max_sds_rings); |
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index f1f7acfbf412..b9615bd745ea 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c | |||
@@ -107,8 +107,6 @@ static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long); | |||
107 | static int qlcnic_start_firmware(struct qlcnic_adapter *); | 107 | static int qlcnic_start_firmware(struct qlcnic_adapter *); |
108 | 108 | ||
109 | static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); | 109 | static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); |
110 | static void qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *); | ||
111 | static int qlcnicvf_set_ilb_mode(struct qlcnic_adapter *); | ||
112 | static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); | 110 | static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); |
113 | static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); | 111 | static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); |
114 | static int qlcnicvf_start_firmware(struct qlcnic_adapter *); | 112 | static int qlcnicvf_start_firmware(struct qlcnic_adapter *); |
@@ -381,8 +379,6 @@ static struct qlcnic_nic_template qlcnic_ops = { | |||
381 | .get_mac_addr = qlcnic_get_mac_address, | 379 | .get_mac_addr = qlcnic_get_mac_address, |
382 | .config_bridged_mode = qlcnic_config_bridged_mode, | 380 | .config_bridged_mode = qlcnic_config_bridged_mode, |
383 | .config_led = qlcnic_config_led, | 381 | .config_led = qlcnic_config_led, |
384 | .set_ilb_mode = qlcnic_set_ilb_mode, | ||
385 | .clear_ilb_mode = qlcnic_clear_ilb_mode, | ||
386 | .start_firmware = qlcnic_start_firmware | 382 | .start_firmware = qlcnic_start_firmware |
387 | }; | 383 | }; |
388 | 384 | ||
@@ -390,8 +386,6 @@ static struct qlcnic_nic_template qlcnic_vf_ops = { | |||
390 | .get_mac_addr = qlcnic_get_mac_address, | 386 | .get_mac_addr = qlcnic_get_mac_address, |
391 | .config_bridged_mode = qlcnicvf_config_bridged_mode, | 387 | .config_bridged_mode = qlcnicvf_config_bridged_mode, |
392 | .config_led = qlcnicvf_config_led, | 388 | .config_led = qlcnicvf_config_led, |
393 | .set_ilb_mode = qlcnicvf_set_ilb_mode, | ||
394 | .clear_ilb_mode = qlcnicvf_clear_ilb_mode, | ||
395 | .start_firmware = qlcnicvf_start_firmware | 389 | .start_firmware = qlcnicvf_start_firmware |
396 | }; | 390 | }; |
397 | 391 | ||
@@ -1182,6 +1176,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) | |||
1182 | ret = qlcnic_fw_create_ctx(adapter); | 1176 | ret = qlcnic_fw_create_ctx(adapter); |
1183 | if (ret) { | 1177 | if (ret) { |
1184 | qlcnic_detach(adapter); | 1178 | qlcnic_detach(adapter); |
1179 | netif_device_attach(netdev); | ||
1185 | return ret; | 1180 | return ret; |
1186 | } | 1181 | } |
1187 | 1182 | ||
@@ -2841,18 +2836,6 @@ qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) | |||
2841 | return -EOPNOTSUPP; | 2836 | return -EOPNOTSUPP; |
2842 | } | 2837 | } |
2843 | 2838 | ||
2844 | static int | ||
2845 | qlcnicvf_set_ilb_mode(struct qlcnic_adapter *adapter) | ||
2846 | { | ||
2847 | return -EOPNOTSUPP; | ||
2848 | } | ||
2849 | |||
2850 | static void | ||
2851 | qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *adapter) | ||
2852 | { | ||
2853 | return; | ||
2854 | } | ||
2855 | |||
2856 | static ssize_t | 2839 | static ssize_t |
2857 | qlcnic_store_bridged_mode(struct device *dev, | 2840 | qlcnic_store_bridged_mode(struct device *dev, |
2858 | struct device_attribute *attr, const char *buf, size_t len) | 2841 | struct device_attribute *attr, const char *buf, size_t len) |
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index 06b2188f6368..a478786840a6 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h | |||
@@ -18,8 +18,6 @@ | |||
18 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " | 18 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " |
19 | #define DRV_VERSION "v1.00.00.25.00.00-01" | 19 | #define DRV_VERSION "v1.00.00.25.00.00-01" |
20 | 20 | ||
21 | #define PFX "qlge: " | ||
22 | |||
23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ | 21 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ |
24 | 22 | ||
25 | #define QLGE_VENDOR_ID 0x1077 | 23 | #define QLGE_VENDOR_ID 0x1077 |
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c index 548e9010b20b..4747492935ef 100644 --- a/drivers/net/qlge/qlge_dbg.c +++ b/drivers/net/qlge/qlge_dbg.c | |||
@@ -1,3 +1,5 @@ | |||
1 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
2 | |||
1 | #include <linux/slab.h> | 3 | #include <linux/slab.h> |
2 | 4 | ||
3 | #include "qlge.h" | 5 | #include "qlge.h" |
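[Editor's note] Defining pr_fmt() at the top of qlge_dbg.c is what lets the driver delete its PFX macro: every pr_err()/pr_warn() expands its format through pr_fmt(), so KBUILD_MODNAME ("qlge" here) is prepended at compile time. A userspace sketch of the mechanism, with the printk plumbing reduced to printf:

	#include <stdio.h>

	/* stand-ins for the kernel's KBUILD_MODNAME and pr_err() */
	#define KBUILD_MODNAME "qlge"
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
	#define pr_err(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

	int main(void)
	{
		pr_err("Bad type!!! 0x%08x\n", 0xdeadbeefu);
		/* prints: qlge: Bad type!!! 0xdeadbeef */
		return 0;
	}
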
@@ -446,7 +448,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf) | |||
446 | MAC_ADDR_TYPE_CAM_MAC, i, value); | 448 | MAC_ADDR_TYPE_CAM_MAC, i, value); |
447 | if (status) { | 449 | if (status) { |
448 | netif_err(qdev, drv, qdev->ndev, | 450 | netif_err(qdev, drv, qdev->ndev, |
449 | "Failed read of mac index register.\n"); | 451 | "Failed read of mac index register\n"); |
450 | goto err; | 452 | goto err; |
451 | } | 453 | } |
452 | *buf++ = value[0]; /* lower MAC address */ | 454 | *buf++ = value[0]; /* lower MAC address */ |
@@ -458,7 +460,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf) | |||
458 | MAC_ADDR_TYPE_MULTI_MAC, i, value); | 460 | MAC_ADDR_TYPE_MULTI_MAC, i, value); |
459 | if (status) { | 461 | if (status) { |
460 | netif_err(qdev, drv, qdev->ndev, | 462 | netif_err(qdev, drv, qdev->ndev, |
461 | "Failed read of mac index register.\n"); | 463 | "Failed read of mac index register\n"); |
462 | goto err; | 464 | goto err; |
463 | } | 465 | } |
464 | *buf++ = value[0]; /* lower Mcast address */ | 466 | *buf++ = value[0]; /* lower Mcast address */ |
@@ -482,7 +484,7 @@ static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf) | |||
482 | status = ql_get_routing_reg(qdev, i, &value); | 484 | status = ql_get_routing_reg(qdev, i, &value); |
483 | if (status) { | 485 | if (status) { |
484 | netif_err(qdev, drv, qdev->ndev, | 486 | netif_err(qdev, drv, qdev->ndev, |
485 | "Failed read of routing index register.\n"); | 487 | "Failed read of routing index register\n"); |
486 | goto err; | 488 | goto err; |
487 | } else { | 489 | } else { |
488 | *buf++ = value; | 490 | *buf++ = value; |
@@ -668,7 +670,7 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) | |||
668 | max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT; | 670 | max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT; |
669 | break; | 671 | break; |
670 | default: | 672 | default: |
671 | printk(KERN_ERR"Bad type!!! 0x%08x\n", type); | 673 | pr_err("Bad type!!! 0x%08x\n", type); |
672 | max_index = 0; | 674 | max_index = 0; |
673 | max_offset = 0; | 675 | max_offset = 0; |
674 | break; | 676 | break; |
@@ -738,7 +740,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) | |||
738 | int i; | 740 | int i; |
739 | 741 | ||
740 | if (!mpi_coredump) { | 742 | if (!mpi_coredump) { |
741 | netif_err(qdev, drv, qdev->ndev, "No memory available.\n"); | 743 | netif_err(qdev, drv, qdev->ndev, "No memory available\n"); |
742 | return -ENOMEM; | 744 | return -ENOMEM; |
743 | } | 745 | } |
744 | 746 | ||
@@ -1234,7 +1236,7 @@ static void ql_get_core_dump(struct ql_adapter *qdev) | |||
1234 | 1236 | ||
1235 | if (!netif_running(qdev->ndev)) { | 1237 | if (!netif_running(qdev->ndev)) { |
1236 | netif_err(qdev, ifup, qdev->ndev, | 1238 | netif_err(qdev, ifup, qdev->ndev, |
1237 | "Force Coredump can only be done from interface that is up.\n"); | 1239 | "Force Coredump can only be done from interface that is up\n"); |
1238 | return; | 1240 | return; |
1239 | } | 1241 | } |
1240 | ql_queue_fw_error(qdev); | 1242 | ql_queue_fw_error(qdev); |
@@ -1334,7 +1336,7 @@ void ql_mpi_core_to_log(struct work_struct *work) | |||
1334 | "Core is dumping to log file!\n"); | 1336 | "Core is dumping to log file!\n"); |
1335 | 1337 | ||
1336 | for (i = 0; i < count; i += 8) { | 1338 | for (i = 0; i < count; i += 8) { |
1337 | printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x " | 1339 | pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x " |
1338 | "%.08x %.08x %.08x\n", i, | 1340 | "%.08x %.08x %.08x\n", i, |
1339 | tmp[i + 0], | 1341 | tmp[i + 0], |
1340 | tmp[i + 1], | 1342 | tmp[i + 1], |
@@ -1356,71 +1358,43 @@ static void ql_dump_intr_states(struct ql_adapter *qdev) | |||
1356 | for (i = 0; i < qdev->intr_count; i++) { | 1358 | for (i = 0; i < qdev->intr_count; i++) { |
1357 | ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); | 1359 | ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); |
1358 | value = ql_read32(qdev, INTR_EN); | 1360 | value = ql_read32(qdev, INTR_EN); |
1359 | printk(KERN_ERR PFX | 1361 | pr_err("%s: Interrupt %d is %s\n", |
1360 | "%s: Interrupt %d is %s.\n", | ||
1361 | qdev->ndev->name, i, | 1362 | qdev->ndev->name, i, |
1362 | (value & INTR_EN_EN ? "enabled" : "disabled")); | 1363 | (value & INTR_EN_EN ? "enabled" : "disabled")); |
1363 | } | 1364 | } |
1364 | } | 1365 | } |
1365 | 1366 | ||
1367 | #define DUMP_XGMAC(qdev, reg) \ | ||
1368 | do { \ | ||
1369 | u32 data; \ | ||
1370 | ql_read_xgmac_reg(qdev, reg, &data); \ | ||
1371 | pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \ | ||
1372 | } while (0) | ||
1373 | |||
1366 | void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) | 1374 | void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) |
1367 | { | 1375 | { |
1368 | u32 data; | ||
1369 | if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { | 1376 | if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { |
1370 | printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__); | 1377 | pr_err("%s: Couldn't get xgmac sem\n", __func__); |
1371 | return; | 1378 | return; |
1372 | } | 1379 | } |
1373 | ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data); | 1380 | DUMP_XGMAC(qdev, PAUSE_SRC_LO); |
1374 | printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name, | 1381 | DUMP_XGMAC(qdev, PAUSE_SRC_HI); |
1375 | data); | 1382 | DUMP_XGMAC(qdev, GLOBAL_CFG); |
1376 | ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data); | 1383 | DUMP_XGMAC(qdev, TX_CFG); |
1377 | printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name, | 1384 | DUMP_XGMAC(qdev, RX_CFG); |
1378 | data); | 1385 | DUMP_XGMAC(qdev, FLOW_CTL); |
1379 | ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); | 1386 | DUMP_XGMAC(qdev, PAUSE_OPCODE); |
1380 | printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name, | 1387 | DUMP_XGMAC(qdev, PAUSE_TIMER); |
1381 | data); | 1388 | DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO); |
1382 | ql_read_xgmac_reg(qdev, TX_CFG, &data); | 1389 | DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI); |
1383 | printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data); | 1390 | DUMP_XGMAC(qdev, MAC_TX_PARAMS); |
1384 | ql_read_xgmac_reg(qdev, RX_CFG, &data); | 1391 | DUMP_XGMAC(qdev, MAC_RX_PARAMS); |
1385 | printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data); | 1392 | DUMP_XGMAC(qdev, MAC_SYS_INT); |
1386 | ql_read_xgmac_reg(qdev, FLOW_CTL, &data); | 1393 | DUMP_XGMAC(qdev, MAC_SYS_INT_MASK); |
1387 | printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name, | 1394 | DUMP_XGMAC(qdev, MAC_MGMT_INT); |
1388 | data); | 1395 | DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK); |
1389 | ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data); | 1396 | DUMP_XGMAC(qdev, EXT_ARB_MODE); |
1390 | printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name, | ||
1391 | data); | ||
1392 | ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data); | ||
1393 | printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name, | ||
1394 | data); | ||
1395 | ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data); | ||
1396 | printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n", | ||
1397 | qdev->ndev->name, data); | ||
1398 | ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data); | ||
1399 | printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n", | ||
1400 | qdev->ndev->name, data); | ||
1401 | ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data); | ||
1402 | printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name, | ||
1403 | data); | ||
1404 | ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data); | ||
1405 | printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name, | ||
1406 | data); | ||
1407 | ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data); | ||
1408 | printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name, | ||
1409 | data); | ||
1410 | ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data); | ||
1411 | printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n", | ||
1412 | qdev->ndev->name, data); | ||
1413 | ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data); | ||
1414 | printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name, | ||
1415 | data); | ||
1416 | ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data); | ||
1417 | printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n", | ||
1418 | qdev->ndev->name, data); | ||
1419 | ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data); | ||
1420 | printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name, | ||
1421 | data); | ||
1422 | ql_sem_unlock(qdev, qdev->xg_sem_mask); | 1397 | ql_sem_unlock(qdev, qdev->xg_sem_mask); |
1423 | |||
1424 | } | 1398 | } |
1425 | 1399 | ||
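[Editor's note] DUMP_XGMAC combines two standard macro idioms: #reg stringifies the register name so one token serves as both argument and label, and the do { ... } while (0) wrapper lets the multi-statement body sit safely after an unbraced if. A userspace sketch with the register read stubbed out (the offsets are made up for illustration):

	#include <stdio.h>

	enum { PAUSE_SRC_LO = 0x318, GLOBAL_CFG = 0x308 };  /* illustrative only */

	static int read_xgmac_reg(int reg, unsigned int *data)
	{
		*data = 0xa5a50000u | reg;	/* stub: pretend to read hardware */
		return 0;
	}

	#define DUMP_XGMAC(reg)						\
		do {							\
			unsigned int data;				\
			read_xgmac_reg(reg, &data);			\
			printf("%s = 0x%.08x\n", #reg, data);		\
		} while (0)

	int main(void)
	{
		if (1)
			DUMP_XGMAC(PAUSE_SRC_LO);	/* safe without braces */
		DUMP_XGMAC(GLOBAL_CFG);
		return 0;
	}
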
1426 | static void ql_dump_ets_regs(struct ql_adapter *qdev) | 1400 | static void ql_dump_ets_regs(struct ql_adapter *qdev) |
@@ -1437,14 +1411,12 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev) | |||
1437 | return; | 1411 | return; |
1438 | for (i = 0; i < 4; i++) { | 1412 | for (i = 0; i < 4; i++) { |
1439 | if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { | 1413 | if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { |
1440 | printk(KERN_ERR PFX | 1414 | pr_err("%s: Failed read of mac index register\n", |
1441 | "%s: Failed read of mac index register.\n", | ||
1442 | __func__); | 1415 | __func__); |
1443 | return; | 1416 | return; |
1444 | } else { | 1417 | } else { |
1445 | if (value[0]) | 1418 | if (value[0]) |
1446 | printk(KERN_ERR PFX | 1419 | pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n", |
1447 | "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n", | ||
1448 | qdev->ndev->name, i, value[1], value[0], | 1420 | qdev->ndev->name, i, value[1], value[0], |
1449 | value[2]); | 1421 | value[2]); |
1450 | } | 1422 | } |
@@ -1452,14 +1424,12 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev) | |||
1452 | for (i = 0; i < 32; i++) { | 1424 | for (i = 0; i < 32; i++) { |
1453 | if (ql_get_mac_addr_reg | 1425 | if (ql_get_mac_addr_reg |
1454 | (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { | 1426 | (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { |
1455 | printk(KERN_ERR PFX | 1427 | pr_err("%s: Failed read of mac index register\n", |
1456 | "%s: Failed read of mac index register.\n", | ||
1457 | __func__); | 1428 | __func__); |
1458 | return; | 1429 | return; |
1459 | } else { | 1430 | } else { |
1460 | if (value[0]) | 1431 | if (value[0]) |
1461 | printk(KERN_ERR PFX | 1432 | pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n", |
1462 | "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n", | ||
1463 | qdev->ndev->name, i, value[1], value[0]); | 1433 | qdev->ndev->name, i, value[1], value[0]); |
1464 | } | 1434 | } |
1465 | } | 1435 | } |
@@ -1476,129 +1446,77 @@ void ql_dump_routing_entries(struct ql_adapter *qdev) | |||
1476 | for (i = 0; i < 16; i++) { | 1446 | for (i = 0; i < 16; i++) { |
1477 | value = 0; | 1447 | value = 0; |
1478 | if (ql_get_routing_reg(qdev, i, &value)) { | 1448 | if (ql_get_routing_reg(qdev, i, &value)) { |
1479 | printk(KERN_ERR PFX | 1449 | pr_err("%s: Failed read of routing index register\n", |
1480 | "%s: Failed read of routing index register.\n", | ||
1481 | __func__); | 1450 | __func__); |
1482 | return; | 1451 | return; |
1483 | } else { | 1452 | } else { |
1484 | if (value) | 1453 | if (value) |
1485 | printk(KERN_ERR PFX | 1454 | pr_err("%s: Routing Mask %d = 0x%.08x\n", |
1486 | "%s: Routing Mask %d = 0x%.08x.\n", | ||
1487 | qdev->ndev->name, i, value); | 1455 | qdev->ndev->name, i, value); |
1488 | } | 1456 | } |
1489 | } | 1457 | } |
1490 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); | 1458 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); |
1491 | } | 1459 | } |
1492 | 1460 | ||
1461 | #define DUMP_REG(qdev, reg) \ | ||
1462 | pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg)) | ||
1463 | |||
1493 | void ql_dump_regs(struct ql_adapter *qdev) | 1464 | void ql_dump_regs(struct ql_adapter *qdev) |
1494 | { | 1465 | { |
1495 | printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func); | 1466 | pr_err("reg dump for function #%d\n", qdev->func); |
1496 | printk(KERN_ERR PFX "SYS = 0x%x.\n", | 1467 | DUMP_REG(qdev, SYS); |
1497 | ql_read32(qdev, SYS)); | 1468 | DUMP_REG(qdev, RST_FO); |
1498 | printk(KERN_ERR PFX "RST_FO = 0x%x.\n", | 1469 | DUMP_REG(qdev, FSC); |
1499 | ql_read32(qdev, RST_FO)); | 1470 | DUMP_REG(qdev, CSR); |
1500 | printk(KERN_ERR PFX "FSC = 0x%x.\n", | 1471 | DUMP_REG(qdev, ICB_RID); |
1501 | ql_read32(qdev, FSC)); | 1472 | DUMP_REG(qdev, ICB_L); |
1502 | printk(KERN_ERR PFX "CSR = 0x%x.\n", | 1473 | DUMP_REG(qdev, ICB_H); |
1503 | ql_read32(qdev, CSR)); | 1474 | DUMP_REG(qdev, CFG); |
1504 | printk(KERN_ERR PFX "ICB_RID = 0x%x.\n", | 1475 | DUMP_REG(qdev, BIOS_ADDR); |
1505 | ql_read32(qdev, ICB_RID)); | 1476 | DUMP_REG(qdev, STS); |
1506 | printk(KERN_ERR PFX "ICB_L = 0x%x.\n", | 1477 | DUMP_REG(qdev, INTR_EN); |
1507 | ql_read32(qdev, ICB_L)); | 1478 | DUMP_REG(qdev, INTR_MASK); |
1508 | printk(KERN_ERR PFX "ICB_H = 0x%x.\n", | 1479 | DUMP_REG(qdev, ISR1); |
1509 | ql_read32(qdev, ICB_H)); | 1480 | DUMP_REG(qdev, ISR2); |
1510 | printk(KERN_ERR PFX "CFG = 0x%x.\n", | 1481 | DUMP_REG(qdev, ISR3); |
1511 | ql_read32(qdev, CFG)); | 1482 | DUMP_REG(qdev, ISR4); |
1512 | printk(KERN_ERR PFX "BIOS_ADDR = 0x%x.\n", | 1483 | DUMP_REG(qdev, REV_ID); |
1513 | ql_read32(qdev, BIOS_ADDR)); | 1484 | DUMP_REG(qdev, FRC_ECC_ERR); |
1514 | printk(KERN_ERR PFX "STS = 0x%x.\n", | 1485 | DUMP_REG(qdev, ERR_STS); |
1515 | ql_read32(qdev, STS)); | 1486 | DUMP_REG(qdev, RAM_DBG_ADDR); |
1516 | printk(KERN_ERR PFX "INTR_EN = 0x%x.\n", | 1487 | DUMP_REG(qdev, RAM_DBG_DATA); |
1517 | ql_read32(qdev, INTR_EN)); | 1488 | DUMP_REG(qdev, ECC_ERR_CNT); |
1518 | printk(KERN_ERR PFX "INTR_MASK = 0x%x.\n", | 1489 | DUMP_REG(qdev, SEM); |
1519 | ql_read32(qdev, INTR_MASK)); | 1490 | DUMP_REG(qdev, GPIO_1); |
1520 | printk(KERN_ERR PFX "ISR1 = 0x%x.\n", | 1491 | DUMP_REG(qdev, GPIO_2); |
1521 | ql_read32(qdev, ISR1)); | 1492 | DUMP_REG(qdev, GPIO_3); |
1522 | printk(KERN_ERR PFX "ISR2 = 0x%x.\n", | 1493 | DUMP_REG(qdev, XGMAC_ADDR); |
1523 | ql_read32(qdev, ISR2)); | 1494 | DUMP_REG(qdev, XGMAC_DATA); |
1524 | printk(KERN_ERR PFX "ISR3 = 0x%x.\n", | 1495 | DUMP_REG(qdev, NIC_ETS); |
1525 | ql_read32(qdev, ISR3)); | 1496 | DUMP_REG(qdev, CNA_ETS); |
1526 | printk(KERN_ERR PFX "ISR4 = 0x%x.\n", | 1497 | DUMP_REG(qdev, FLASH_ADDR); |
1527 | ql_read32(qdev, ISR4)); | 1498 | DUMP_REG(qdev, FLASH_DATA); |
1528 | printk(KERN_ERR PFX "REV_ID = 0x%x.\n", | 1499 | DUMP_REG(qdev, CQ_STOP); |
1529 | ql_read32(qdev, REV_ID)); | 1500 | DUMP_REG(qdev, PAGE_TBL_RID); |
1530 | printk(KERN_ERR PFX "FRC_ECC_ERR = 0x%x.\n", | 1501 | DUMP_REG(qdev, WQ_PAGE_TBL_LO); |
1531 | ql_read32(qdev, FRC_ECC_ERR)); | 1502 | DUMP_REG(qdev, WQ_PAGE_TBL_HI); |
1532 | printk(KERN_ERR PFX "ERR_STS = 0x%x.\n", | 1503 | DUMP_REG(qdev, CQ_PAGE_TBL_LO); |
1533 | ql_read32(qdev, ERR_STS)); | 1504 | DUMP_REG(qdev, CQ_PAGE_TBL_HI); |
1534 | printk(KERN_ERR PFX "RAM_DBG_ADDR = 0x%x.\n", | 1505 | DUMP_REG(qdev, COS_DFLT_CQ1); |
1535 | ql_read32(qdev, RAM_DBG_ADDR)); | 1506 | DUMP_REG(qdev, COS_DFLT_CQ2); |
1536 | printk(KERN_ERR PFX "RAM_DBG_DATA = 0x%x.\n", | 1507 | DUMP_REG(qdev, SPLT_HDR); |
1537 | ql_read32(qdev, RAM_DBG_DATA)); | 1508 | DUMP_REG(qdev, FC_PAUSE_THRES); |
1538 | printk(KERN_ERR PFX "ECC_ERR_CNT = 0x%x.\n", | 1509 | DUMP_REG(qdev, NIC_PAUSE_THRES); |
1539 | ql_read32(qdev, ECC_ERR_CNT)); | 1510 | DUMP_REG(qdev, FC_ETHERTYPE); |
1540 | printk(KERN_ERR PFX "SEM = 0x%x.\n", | 1511 | DUMP_REG(qdev, FC_RCV_CFG); |
1541 | ql_read32(qdev, SEM)); | 1512 | DUMP_REG(qdev, NIC_RCV_CFG); |
1542 | printk(KERN_ERR PFX "GPIO_1 = 0x%x.\n", | 1513 | DUMP_REG(qdev, FC_COS_TAGS); |
1543 | ql_read32(qdev, GPIO_1)); | 1514 | DUMP_REG(qdev, NIC_COS_TAGS); |
1544 | printk(KERN_ERR PFX "GPIO_2 = 0x%x.\n", | 1515 | DUMP_REG(qdev, MGMT_RCV_CFG); |
1545 | ql_read32(qdev, GPIO_2)); | 1516 | DUMP_REG(qdev, XG_SERDES_ADDR); |
1546 | printk(KERN_ERR PFX "GPIO_3 = 0x%x.\n", | 1517 | DUMP_REG(qdev, XG_SERDES_DATA); |
1547 | ql_read32(qdev, GPIO_3)); | 1518 | DUMP_REG(qdev, PRB_MX_ADDR); |
1548 | printk(KERN_ERR PFX "XGMAC_ADDR = 0x%x.\n", | 1519 | DUMP_REG(qdev, PRB_MX_DATA); |
1549 | ql_read32(qdev, XGMAC_ADDR)); | ||
1550 | printk(KERN_ERR PFX "XGMAC_DATA = 0x%x.\n", | ||
1551 | ql_read32(qdev, XGMAC_DATA)); | ||
1552 | printk(KERN_ERR PFX "NIC_ETS = 0x%x.\n", | ||
1553 | ql_read32(qdev, NIC_ETS)); | ||
1554 | printk(KERN_ERR PFX "CNA_ETS = 0x%x.\n", | ||
1555 | ql_read32(qdev, CNA_ETS)); | ||
1556 | printk(KERN_ERR PFX "FLASH_ADDR = 0x%x.\n", | ||
1557 | ql_read32(qdev, FLASH_ADDR)); | ||
1558 | printk(KERN_ERR PFX "FLASH_DATA = 0x%x.\n", | ||
1559 | ql_read32(qdev, FLASH_DATA)); | ||
1560 | printk(KERN_ERR PFX "CQ_STOP = 0x%x.\n", | ||
1561 | ql_read32(qdev, CQ_STOP)); | ||
1562 | printk(KERN_ERR PFX "PAGE_TBL_RID = 0x%x.\n", | ||
1563 | ql_read32(qdev, PAGE_TBL_RID)); | ||
1564 | printk(KERN_ERR PFX "WQ_PAGE_TBL_LO = 0x%x.\n", | ||
1565 | ql_read32(qdev, WQ_PAGE_TBL_LO)); | ||
1566 | printk(KERN_ERR PFX "WQ_PAGE_TBL_HI = 0x%x.\n", | ||
1567 | ql_read32(qdev, WQ_PAGE_TBL_HI)); | ||
1568 | printk(KERN_ERR PFX "CQ_PAGE_TBL_LO = 0x%x.\n", | ||
1569 | ql_read32(qdev, CQ_PAGE_TBL_LO)); | ||
1570 | printk(KERN_ERR PFX "CQ_PAGE_TBL_HI = 0x%x.\n", | ||
1571 | ql_read32(qdev, CQ_PAGE_TBL_HI)); | ||
1572 | printk(KERN_ERR PFX "COS_DFLT_CQ1 = 0x%x.\n", | ||
1573 | ql_read32(qdev, COS_DFLT_CQ1)); | ||
1574 | printk(KERN_ERR PFX "COS_DFLT_CQ2 = 0x%x.\n", | ||
1575 | ql_read32(qdev, COS_DFLT_CQ2)); | ||
1576 | printk(KERN_ERR PFX "SPLT_HDR = 0x%x.\n", | ||
1577 | ql_read32(qdev, SPLT_HDR)); | ||
1578 | printk(KERN_ERR PFX "FC_PAUSE_THRES = 0x%x.\n", | ||
1579 | ql_read32(qdev, FC_PAUSE_THRES)); | ||
1580 | printk(KERN_ERR PFX "NIC_PAUSE_THRES = 0x%x.\n", | ||
1581 | ql_read32(qdev, NIC_PAUSE_THRES)); | ||
1582 | printk(KERN_ERR PFX "FC_ETHERTYPE = 0x%x.\n", | ||
1583 | ql_read32(qdev, FC_ETHERTYPE)); | ||
1584 | printk(KERN_ERR PFX "FC_RCV_CFG = 0x%x.\n", | ||
1585 | ql_read32(qdev, FC_RCV_CFG)); | ||
1586 | printk(KERN_ERR PFX "NIC_RCV_CFG = 0x%x.\n", | ||
1587 | ql_read32(qdev, NIC_RCV_CFG)); | ||
1588 | printk(KERN_ERR PFX "FC_COS_TAGS = 0x%x.\n", | ||
1589 | ql_read32(qdev, FC_COS_TAGS)); | ||
1590 | printk(KERN_ERR PFX "NIC_COS_TAGS = 0x%x.\n", | ||
1591 | ql_read32(qdev, NIC_COS_TAGS)); | ||
1592 | printk(KERN_ERR PFX "MGMT_RCV_CFG = 0x%x.\n", | ||
1593 | ql_read32(qdev, MGMT_RCV_CFG)); | ||
1594 | printk(KERN_ERR PFX "XG_SERDES_ADDR = 0x%x.\n", | ||
1595 | ql_read32(qdev, XG_SERDES_ADDR)); | ||
1596 | printk(KERN_ERR PFX "XG_SERDES_DATA = 0x%x.\n", | ||
1597 | ql_read32(qdev, XG_SERDES_DATA)); | ||
1598 | printk(KERN_ERR PFX "PRB_MX_ADDR = 0x%x.\n", | ||
1599 | ql_read32(qdev, PRB_MX_ADDR)); | ||
1600 | printk(KERN_ERR PFX "PRB_MX_DATA = 0x%x.\n", | ||
1601 | ql_read32(qdev, PRB_MX_DATA)); | ||
1602 | ql_dump_intr_states(qdev); | 1520 | ql_dump_intr_states(qdev); |
1603 | ql_dump_xgmac_control_regs(qdev); | 1521 | ql_dump_xgmac_control_regs(qdev); |
1604 | ql_dump_ets_regs(qdev); | 1522 | ql_dump_ets_regs(qdev); |
@@ -1608,191 +1526,124 @@ void ql_dump_regs(struct ql_adapter *qdev) | |||
1608 | #endif | 1526 | #endif |
1609 | 1527 | ||
1610 | #ifdef QL_STAT_DUMP | 1528 | #ifdef QL_STAT_DUMP |
1529 | |||
1530 | #define DUMP_STAT(qdev, stat) \ | ||
1531 | pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat) | ||
1532 | |||
1611 | void ql_dump_stat(struct ql_adapter *qdev) | 1533 | void ql_dump_stat(struct ql_adapter *qdev) |
1612 | { | 1534 | { |
1613 | printk(KERN_ERR "%s: Enter.\n", __func__); | 1535 | pr_err("%s: Enter\n", __func__); |
1614 | printk(KERN_ERR "tx_pkts = %ld\n", | 1536 | DUMP_STAT(qdev, tx_pkts); |
1615 | (unsigned long)qdev->nic_stats.tx_pkts); | 1537 | DUMP_STAT(qdev, tx_bytes); |
1616 | printk(KERN_ERR "tx_bytes = %ld\n", | 1538 | DUMP_STAT(qdev, tx_mcast_pkts); |
1617 | (unsigned long)qdev->nic_stats.tx_bytes); | 1539 | DUMP_STAT(qdev, tx_bcast_pkts); |
1618 | printk(KERN_ERR "tx_mcast_pkts = %ld.\n", | 1540 | DUMP_STAT(qdev, tx_ucast_pkts); |
1619 | (unsigned long)qdev->nic_stats.tx_mcast_pkts); | 1541 | DUMP_STAT(qdev, tx_ctl_pkts); |
1620 | printk(KERN_ERR "tx_bcast_pkts = %ld.\n", | 1542 | DUMP_STAT(qdev, tx_pause_pkts); |
1621 | (unsigned long)qdev->nic_stats.tx_bcast_pkts); | 1543 | DUMP_STAT(qdev, tx_64_pkt); |
1622 | printk(KERN_ERR "tx_ucast_pkts = %ld.\n", | 1544 | DUMP_STAT(qdev, tx_65_to_127_pkt); |
1623 | (unsigned long)qdev->nic_stats.tx_ucast_pkts); | 1545 | DUMP_STAT(qdev, tx_128_to_255_pkt); |
1624 | printk(KERN_ERR "tx_ctl_pkts = %ld.\n", | 1546 | DUMP_STAT(qdev, tx_256_511_pkt); |
1625 | (unsigned long)qdev->nic_stats.tx_ctl_pkts); | 1547 | DUMP_STAT(qdev, tx_512_to_1023_pkt); |
1626 | printk(KERN_ERR "tx_pause_pkts = %ld.\n", | 1548 | DUMP_STAT(qdev, tx_1024_to_1518_pkt); |
1627 | (unsigned long)qdev->nic_stats.tx_pause_pkts); | 1549 | DUMP_STAT(qdev, tx_1519_to_max_pkt); |
1628 | printk(KERN_ERR "tx_64_pkt = %ld.\n", | 1550 | DUMP_STAT(qdev, tx_undersize_pkt); |
1629 | (unsigned long)qdev->nic_stats.tx_64_pkt); | 1551 | DUMP_STAT(qdev, tx_oversize_pkt); |
1630 | printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n", | 1552 | DUMP_STAT(qdev, rx_bytes); |
1631 | (unsigned long)qdev->nic_stats.tx_65_to_127_pkt); | 1553 | DUMP_STAT(qdev, rx_bytes_ok); |
1632 | printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n", | 1554 | DUMP_STAT(qdev, rx_pkts); |
1633 | (unsigned long)qdev->nic_stats.tx_128_to_255_pkt); | 1555 | DUMP_STAT(qdev, rx_pkts_ok); |
1634 | printk(KERN_ERR "tx_256_511_pkt = %ld.\n", | 1556 | DUMP_STAT(qdev, rx_bcast_pkts); |
1635 | (unsigned long)qdev->nic_stats.tx_256_511_pkt); | 1557 | DUMP_STAT(qdev, rx_mcast_pkts); |
1636 | printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n", | 1558 | DUMP_STAT(qdev, rx_ucast_pkts); |
1637 | (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt); | 1559 | DUMP_STAT(qdev, rx_undersize_pkts); |
1638 | printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n", | 1560 | DUMP_STAT(qdev, rx_oversize_pkts); |
1639 | (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt); | 1561 | DUMP_STAT(qdev, rx_jabber_pkts); |
1640 | printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n", | 1562 | DUMP_STAT(qdev, rx_undersize_fcerr_pkts); |
1641 | (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt); | 1563 | DUMP_STAT(qdev, rx_drop_events); |
1642 | printk(KERN_ERR "tx_undersize_pkt = %ld.\n", | 1564 | DUMP_STAT(qdev, rx_fcerr_pkts); |
1643 | (unsigned long)qdev->nic_stats.tx_undersize_pkt); | 1565 | DUMP_STAT(qdev, rx_align_err); |
1644 | printk(KERN_ERR "tx_oversize_pkt = %ld.\n", | 1566 | DUMP_STAT(qdev, rx_symbol_err); |
1645 | (unsigned long)qdev->nic_stats.tx_oversize_pkt); | 1567 | DUMP_STAT(qdev, rx_mac_err); |
1646 | printk(KERN_ERR "rx_bytes = %ld.\n", | 1568 | DUMP_STAT(qdev, rx_ctl_pkts); |
1647 | (unsigned long)qdev->nic_stats.rx_bytes); | 1569 | DUMP_STAT(qdev, rx_pause_pkts); |
1648 | printk(KERN_ERR "rx_bytes_ok = %ld.\n", | 1570 | DUMP_STAT(qdev, rx_64_pkts); |
1649 | (unsigned long)qdev->nic_stats.rx_bytes_ok); | 1571 | DUMP_STAT(qdev, rx_65_to_127_pkts); |
1650 | printk(KERN_ERR "rx_pkts = %ld.\n", | 1572 | DUMP_STAT(qdev, rx_128_255_pkts); |
1651 | (unsigned long)qdev->nic_stats.rx_pkts); | 1573 | DUMP_STAT(qdev, rx_256_511_pkts); |
1652 | printk(KERN_ERR "rx_pkts_ok = %ld.\n", | 1574 | DUMP_STAT(qdev, rx_512_to_1023_pkts); |
1653 | (unsigned long)qdev->nic_stats.rx_pkts_ok); | 1575 | DUMP_STAT(qdev, rx_1024_to_1518_pkts); |
1654 | printk(KERN_ERR "rx_bcast_pkts = %ld.\n", | 1576 | DUMP_STAT(qdev, rx_1519_to_max_pkts); |
1655 | (unsigned long)qdev->nic_stats.rx_bcast_pkts); | 1577 | DUMP_STAT(qdev, rx_len_err_pkts); |
1656 | printk(KERN_ERR "rx_mcast_pkts = %ld.\n", | ||
1657 | (unsigned long)qdev->nic_stats.rx_mcast_pkts); | ||
1658 | printk(KERN_ERR "rx_ucast_pkts = %ld.\n", | ||
1659 | (unsigned long)qdev->nic_stats.rx_ucast_pkts); | ||
1660 | printk(KERN_ERR "rx_undersize_pkts = %ld.\n", | ||
1661 | (unsigned long)qdev->nic_stats.rx_undersize_pkts); | ||
1662 | printk(KERN_ERR "rx_oversize_pkts = %ld.\n", | ||
1663 | (unsigned long)qdev->nic_stats.rx_oversize_pkts); | ||
1664 | printk(KERN_ERR "rx_jabber_pkts = %ld.\n", | ||
1665 | (unsigned long)qdev->nic_stats.rx_jabber_pkts); | ||
1666 | printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n", | ||
1667 | (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts); | ||
1668 | printk(KERN_ERR "rx_drop_events = %ld.\n", | ||
1669 | (unsigned long)qdev->nic_stats.rx_drop_events); | ||
1670 | printk(KERN_ERR "rx_fcerr_pkts = %ld.\n", | ||
1671 | (unsigned long)qdev->nic_stats.rx_fcerr_pkts); | ||
1672 | printk(KERN_ERR "rx_align_err = %ld.\n", | ||
1673 | (unsigned long)qdev->nic_stats.rx_align_err); | ||
1674 | printk(KERN_ERR "rx_symbol_err = %ld.\n", | ||
1675 | (unsigned long)qdev->nic_stats.rx_symbol_err); | ||
1676 | printk(KERN_ERR "rx_mac_err = %ld.\n", | ||
1677 | (unsigned long)qdev->nic_stats.rx_mac_err); | ||
1678 | printk(KERN_ERR "rx_ctl_pkts = %ld.\n", | ||
1679 | (unsigned long)qdev->nic_stats.rx_ctl_pkts); | ||
1680 | printk(KERN_ERR "rx_pause_pkts = %ld.\n", | ||
1681 | (unsigned long)qdev->nic_stats.rx_pause_pkts); | ||
1682 | printk(KERN_ERR "rx_64_pkts = %ld.\n", | ||
1683 | (unsigned long)qdev->nic_stats.rx_64_pkts); | ||
1684 | printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n", | ||
1685 | (unsigned long)qdev->nic_stats.rx_65_to_127_pkts); | ||
1686 | printk(KERN_ERR "rx_128_255_pkts = %ld.\n", | ||
1687 | (unsigned long)qdev->nic_stats.rx_128_255_pkts); | ||
1688 | printk(KERN_ERR "rx_256_511_pkts = %ld.\n", | ||
1689 | (unsigned long)qdev->nic_stats.rx_256_511_pkts); | ||
1690 | printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n", | ||
1691 | (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts); | ||
1692 | printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n", | ||
1693 | (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts); | ||
1694 | printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n", | ||
1695 | (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts); | ||
1696 | printk(KERN_ERR "rx_len_err_pkts = %ld.\n", | ||
1697 | (unsigned long)qdev->nic_stats.rx_len_err_pkts); | ||
1698 | }; | 1578 | }; |
1699 | #endif | 1579 | #endif |
1700 | 1580 | ||
1701 | #ifdef QL_DEV_DUMP | 1581 | #ifdef QL_DEV_DUMP |
1582 | |||
1583 | #define DUMP_QDEV_FIELD(qdev, type, field) \ | ||
1584 | pr_err("qdev->%-24s = " type "\n", #field, qdev->field) | ||
1585 | #define DUMP_QDEV_DMA_FIELD(qdev, field) \ | ||
1586 | pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field) | ||
1587 | #define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \ | ||
1588 | pr_err("%s[%d].%s = " type "\n", \ | ||
1589 | #array, index, #field, qdev->array[index].field); | ||
1702 | void ql_dump_qdev(struct ql_adapter *qdev) | 1590 | void ql_dump_qdev(struct ql_adapter *qdev) |
1703 | { | 1591 | { |
1704 | int i; | 1592 | int i; |
1705 | printk(KERN_ERR PFX "qdev->flags = %lx.\n", | 1593 | DUMP_QDEV_FIELD(qdev, "%lx", flags); |
1706 | qdev->flags); | 1594 | DUMP_QDEV_FIELD(qdev, "%p", vlgrp); |
1707 | printk(KERN_ERR PFX "qdev->vlgrp = %p.\n", | 1595 | DUMP_QDEV_FIELD(qdev, "%p", pdev); |
1708 | qdev->vlgrp); | 1596 | DUMP_QDEV_FIELD(qdev, "%p", ndev); |
1709 | printk(KERN_ERR PFX "qdev->pdev = %p.\n", | 1597 | DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id); |
1710 | qdev->pdev); | 1598 | DUMP_QDEV_FIELD(qdev, "%p", reg_base); |
1711 | printk(KERN_ERR PFX "qdev->ndev = %p.\n", | 1599 | DUMP_QDEV_FIELD(qdev, "%p", doorbell_area); |
1712 | qdev->ndev); | 1600 | DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size); |
1713 | printk(KERN_ERR PFX "qdev->chip_rev_id = %d.\n", | 1601 | DUMP_QDEV_FIELD(qdev, "%x", msg_enable); |
1714 | qdev->chip_rev_id); | 1602 | DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area); |
1715 | printk(KERN_ERR PFX "qdev->reg_base = %p.\n", | 1603 | DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma); |
1716 | qdev->reg_base); | 1604 | DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area); |
1717 | printk(KERN_ERR PFX "qdev->doorbell_area = %p.\n", | 1605 | DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma); |
1718 | qdev->doorbell_area); | 1606 | DUMP_QDEV_FIELD(qdev, "%d", intr_count); |
1719 | printk(KERN_ERR PFX "qdev->doorbell_area_size = %d.\n", | ||
1720 | qdev->doorbell_area_size); | ||
1721 | printk(KERN_ERR PFX "msg_enable = %x.\n", | ||
1722 | qdev->msg_enable); | ||
1723 | printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area = %p.\n", | ||
1724 | qdev->rx_ring_shadow_reg_area); | ||
1725 | printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma = %llx.\n", | ||
1726 | (unsigned long long) qdev->rx_ring_shadow_reg_dma); | ||
1727 | printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area = %p.\n", | ||
1728 | qdev->tx_ring_shadow_reg_area); | ||
1729 | printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma = %llx.\n", | ||
1730 | (unsigned long long) qdev->tx_ring_shadow_reg_dma); | ||
1731 | printk(KERN_ERR PFX "qdev->intr_count = %d.\n", | ||
1732 | qdev->intr_count); | ||
1733 | if (qdev->msi_x_entry) | 1607 | if (qdev->msi_x_entry) |
1734 | for (i = 0; i < qdev->intr_count; i++) { | 1608 | for (i = 0; i < qdev->intr_count; i++) { |
1735 | printk(KERN_ERR PFX | 1609 | DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector); |
1736 | "msi_x_entry.[%d]vector = %d.\n", i, | 1610 | DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry); |
1737 | qdev->msi_x_entry[i].vector); | ||
1738 | printk(KERN_ERR PFX | ||
1739 | "msi_x_entry.[%d]entry = %d.\n", i, | ||
1740 | qdev->msi_x_entry[i].entry); | ||
1741 | } | 1611 | } |
1742 | for (i = 0; i < qdev->intr_count; i++) { | 1612 | for (i = 0; i < qdev->intr_count; i++) { |
1743 | printk(KERN_ERR PFX | 1613 | DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev); |
1744 | "intr_context[%d].qdev = %p.\n", i, | 1614 | DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr); |
1745 | qdev->intr_context[i].qdev); | 1615 | DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked); |
1746 | printk(KERN_ERR PFX | 1616 | DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask); |
1747 | "intr_context[%d].intr = %d.\n", i, | 1617 | DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask); |
1748 | qdev->intr_context[i].intr); | 1618 | DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask); |
1749 | printk(KERN_ERR PFX | ||
1750 | "intr_context[%d].hooked = %d.\n", i, | ||
1751 | qdev->intr_context[i].hooked); | ||
1752 | printk(KERN_ERR PFX | ||
1753 | "intr_context[%d].intr_en_mask = 0x%08x.\n", i, | ||
1754 | qdev->intr_context[i].intr_en_mask); | ||
1755 | printk(KERN_ERR PFX | ||
1756 | "intr_context[%d].intr_dis_mask = 0x%08x.\n", i, | ||
1757 | qdev->intr_context[i].intr_dis_mask); | ||
1758 | printk(KERN_ERR PFX | ||
1759 | "intr_context[%d].intr_read_mask = 0x%08x.\n", i, | ||
1760 | qdev->intr_context[i].intr_read_mask); | ||
1761 | } | 1619 | } |
1762 | printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count); | 1620 | DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count); |
1763 | printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count); | 1621 | DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count); |
1764 | printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size); | 1622 | DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size); |
1765 | printk(KERN_ERR PFX "qdev->ring_mem = %p.\n", qdev->ring_mem); | 1623 | DUMP_QDEV_FIELD(qdev, "%p", ring_mem); |
1766 | printk(KERN_ERR PFX "qdev->intr_count = %d.\n", qdev->intr_count); | 1624 | DUMP_QDEV_FIELD(qdev, "%d", intr_count); |
1767 | printk(KERN_ERR PFX "qdev->tx_ring = %p.\n", | 1625 | DUMP_QDEV_FIELD(qdev, "%p", tx_ring); |
1768 | qdev->tx_ring); | 1626 | DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count); |
1769 | printk(KERN_ERR PFX "qdev->rss_ring_count = %d.\n", | 1627 | DUMP_QDEV_FIELD(qdev, "%p", rx_ring); |
1770 | qdev->rss_ring_count); | 1628 | DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue); |
1771 | printk(KERN_ERR PFX "qdev->rx_ring = %p.\n", qdev->rx_ring); | 1629 | DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask); |
1772 | printk(KERN_ERR PFX "qdev->default_rx_queue = %d.\n", | 1630 | DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up); |
1773 | qdev->default_rx_queue); | 1631 | DUMP_QDEV_FIELD(qdev, "0x%08x", port_init); |
1774 | printk(KERN_ERR PFX "qdev->xg_sem_mask = 0x%08x.\n", | ||
1775 | qdev->xg_sem_mask); | ||
1776 | printk(KERN_ERR PFX "qdev->port_link_up = 0x%08x.\n", | ||
1777 | qdev->port_link_up); | ||
1778 | printk(KERN_ERR PFX "qdev->port_init = 0x%08x.\n", | ||
1779 | qdev->port_init); | ||
1780 | |||
1781 | } | 1632 | } |
1782 | #endif | 1633 | #endif |
1783 | 1634 | ||
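[Editor's note] DUMP_QDEV_FIELD assembles its format string at compile time: the type argument must be a string literal ("%d", "%p", ...), and C's adjacent-literal concatenation splices it between the fixed "qdev->%-24s = " prefix and the newline. The separate DMA variant exists because dma_addr_t has no fixed width, so the value is cast to unsigned long long and printed with %llx. A userspace sketch against a stand-in struct:

	#include <stdio.h>

	struct qdev_stub {		/* stand-in for struct ql_adapter */
		unsigned long flags;
		int intr_count;
		unsigned long long rx_dma;	/* stand-in for a dma_addr_t */
	};

	#define DUMP_QDEV_FIELD(qdev, type, field) \
		printf("qdev->%-24s = " type "\n", #field, (qdev)->field)
	#define DUMP_QDEV_DMA_FIELD(qdev, field) \
		printf("qdev->%-24s = %llx\n", #field, \
		       (unsigned long long)(qdev)->field)

	int main(void)
	{
		struct qdev_stub q = {
			.flags = 0x3, .intr_count = 8, .rx_dma = 0xfee0000ull
		};

		DUMP_QDEV_FIELD(&q, "%lx", flags);	/* qdev->flags = 3 */
		DUMP_QDEV_FIELD(&q, "%d", intr_count);
		DUMP_QDEV_DMA_FIELD(&q, rx_dma);
		return 0;
	}
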
1784 | #ifdef QL_CB_DUMP | 1635 | #ifdef QL_CB_DUMP |
1785 | void ql_dump_wqicb(struct wqicb *wqicb) | 1636 | void ql_dump_wqicb(struct wqicb *wqicb) |
1786 | { | 1637 | { |
1787 | printk(KERN_ERR PFX "Dumping wqicb stuff...\n"); | 1638 | pr_err("Dumping wqicb stuff...\n"); |
1788 | printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len)); | 1639 | pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len)); |
1789 | printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags)); | 1640 | pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags)); |
1790 | printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n", | 1641 | pr_err("wqicb->cq_id_rss = %d\n", |
1791 | le16_to_cpu(wqicb->cq_id_rss)); | 1642 | le16_to_cpu(wqicb->cq_id_rss)); |
1792 | printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid)); | 1643 | pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid)); |
1793 | printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n", | 1644 | pr_err("wqicb->wq_addr = 0x%llx\n", |
1794 | (unsigned long long) le64_to_cpu(wqicb->addr)); | 1645 | (unsigned long long) le64_to_cpu(wqicb->addr)); |
1795 | printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n", | 1646 | pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n", |
1796 | (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr)); | 1647 | (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr)); |
1797 | } | 1648 | } |
1798 | 1649 | ||
@@ -1800,40 +1651,34 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring) | |||
1800 | { | 1651 | { |
1801 | if (tx_ring == NULL) | 1652 | if (tx_ring == NULL) |
1802 | return; | 1653 | return; |
1803 | printk(KERN_ERR PFX | 1654 | pr_err("===================== Dumping tx_ring %d ===============\n", |
1804 | "===================== Dumping tx_ring %d ===============.\n", | ||
1805 | tx_ring->wq_id); | 1655 | tx_ring->wq_id); |
1806 | printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base); | 1656 | pr_err("tx_ring->base = %p\n", tx_ring->wq_base); |
1807 | printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n", | 1657 | pr_err("tx_ring->base_dma = 0x%llx\n", |
1808 | (unsigned long long) tx_ring->wq_base_dma); | 1658 | (unsigned long long) tx_ring->wq_base_dma); |
1809 | printk(KERN_ERR PFX | 1659 | pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n", |
1810 | "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n", | ||
1811 | tx_ring->cnsmr_idx_sh_reg, | 1660 | tx_ring->cnsmr_idx_sh_reg, |
1812 | tx_ring->cnsmr_idx_sh_reg | 1661 | tx_ring->cnsmr_idx_sh_reg |
1813 | ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0); | 1662 | ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0); |
1814 | printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size); | 1663 | pr_err("tx_ring->size = %d\n", tx_ring->wq_size); |
1815 | printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len); | 1664 | pr_err("tx_ring->len = %d\n", tx_ring->wq_len); |
1816 | printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n", | 1665 | pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg); |
1817 | tx_ring->prod_idx_db_reg); | 1666 | pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg); |
1818 | printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n", | 1667 | pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx); |
1819 | tx_ring->valid_db_reg); | 1668 | pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id); |
1820 | printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx); | 1669 | pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id); |
1821 | printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id); | 1670 | pr_err("tx_ring->q = %p\n", tx_ring->q); |
1822 | printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id); | 1671 | pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count)); |
1823 | printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q); | ||
1824 | printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n", | ||
1825 | atomic_read(&tx_ring->tx_count)); | ||
1826 | } | 1672 | } |
1827 | 1673 | ||
1828 | void ql_dump_ricb(struct ricb *ricb) | 1674 | void ql_dump_ricb(struct ricb *ricb) |
1829 | { | 1675 | { |
1830 | int i; | 1676 | int i; |
1831 | printk(KERN_ERR PFX | 1677 | pr_err("===================== Dumping ricb ===============\n"); |
1832 | "===================== Dumping ricb ===============.\n"); | 1678 | pr_err("Dumping ricb stuff...\n"); |
1833 | printk(KERN_ERR PFX "Dumping ricb stuff...\n"); | ||
1834 | 1679 | ||
1835 | printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f); | 1680 | pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f); |
1836 | printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n", | 1681 | pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n", |
1837 | ricb->base_cq & RSS_L4K ? "RSS_L4K " : "", | 1682 | ricb->base_cq & RSS_L4K ? "RSS_L4K " : "", |
1838 | ricb->flags & RSS_L6K ? "RSS_L6K " : "", | 1683 | ricb->flags & RSS_L6K ? "RSS_L6K " : "", |
1839 | ricb->flags & RSS_LI ? "RSS_LI " : "", | 1684 | ricb->flags & RSS_LI ? "RSS_LI " : "", |
@@ -1843,44 +1688,44 @@ void ql_dump_ricb(struct ricb *ricb) | |||
1843 | ricb->flags & RSS_RT4 ? "RSS_RT4 " : "", | 1688 | ricb->flags & RSS_RT4 ? "RSS_RT4 " : "", |
1844 | ricb->flags & RSS_RI6 ? "RSS_RI6 " : "", | 1689 | ricb->flags & RSS_RI6 ? "RSS_RI6 " : "", |
1845 | ricb->flags & RSS_RT6 ? "RSS_RT6 " : ""); | 1690 | ricb->flags & RSS_RT6 ? "RSS_RT6 " : ""); |
1846 | printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask)); | 1691 | pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask)); |
1847 | for (i = 0; i < 16; i++) | 1692 | for (i = 0; i < 16; i++) |
1848 | printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i, | 1693 | pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i, |
1849 | le32_to_cpu(ricb->hash_cq_id[i])); | 1694 | le32_to_cpu(ricb->hash_cq_id[i])); |
1850 | for (i = 0; i < 10; i++) | 1695 | for (i = 0; i < 10; i++) |
1851 | printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i, | 1696 | pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i, |
1852 | le32_to_cpu(ricb->ipv6_hash_key[i])); | 1697 | le32_to_cpu(ricb->ipv6_hash_key[i])); |
1853 | for (i = 0; i < 4; i++) | 1698 | for (i = 0; i < 4; i++) |
1854 | printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i, | 1699 | pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i, |
1855 | le32_to_cpu(ricb->ipv4_hash_key[i])); | 1700 | le32_to_cpu(ricb->ipv4_hash_key[i])); |
1856 | } | 1701 | } |
1857 | 1702 | ||
1858 | void ql_dump_cqicb(struct cqicb *cqicb) | 1703 | void ql_dump_cqicb(struct cqicb *cqicb) |
1859 | { | 1704 | { |
1860 | printk(KERN_ERR PFX "Dumping cqicb stuff...\n"); | 1705 | pr_err("Dumping cqicb stuff...\n"); |
1861 | 1706 | ||
1862 | printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect); | 1707 | pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect); |
1863 | printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags); | 1708 | pr_err("cqicb->flags = %x\n", cqicb->flags); |
1864 | printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len)); | 1709 | pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len)); |
1865 | printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n", | 1710 | pr_err("cqicb->addr = 0x%llx\n", |
1866 | (unsigned long long) le64_to_cpu(cqicb->addr)); | 1711 | (unsigned long long) le64_to_cpu(cqicb->addr)); |
1867 | printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n", | 1712 | pr_err("cqicb->prod_idx_addr = 0x%llx\n", |
1868 | (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr)); | 1713 | (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr)); |
1869 | printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n", | 1714 | pr_err("cqicb->pkt_delay = 0x%.04x\n", |
1870 | le16_to_cpu(cqicb->pkt_delay)); | 1715 | le16_to_cpu(cqicb->pkt_delay)); |
1871 | printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n", | 1716 | pr_err("cqicb->irq_delay = 0x%.04x\n", |
1872 | le16_to_cpu(cqicb->irq_delay)); | 1717 | le16_to_cpu(cqicb->irq_delay)); |
1873 | printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n", | 1718 | pr_err("cqicb->lbq_addr = 0x%llx\n", |
1874 | (unsigned long long) le64_to_cpu(cqicb->lbq_addr)); | 1719 | (unsigned long long) le64_to_cpu(cqicb->lbq_addr)); |
1875 | printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n", | 1720 | pr_err("cqicb->lbq_buf_size = 0x%.04x\n", |
1876 | le16_to_cpu(cqicb->lbq_buf_size)); | 1721 | le16_to_cpu(cqicb->lbq_buf_size)); |
1877 | printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n", | 1722 | pr_err("cqicb->lbq_len = 0x%.04x\n", |
1878 | le16_to_cpu(cqicb->lbq_len)); | 1723 | le16_to_cpu(cqicb->lbq_len)); |
1879 | printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n", | 1724 | pr_err("cqicb->sbq_addr = 0x%llx\n", |
1880 | (unsigned long long) le64_to_cpu(cqicb->sbq_addr)); | 1725 | (unsigned long long) le64_to_cpu(cqicb->sbq_addr)); |
1881 | printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n", | 1726 | pr_err("cqicb->sbq_buf_size = 0x%.04x\n", |
1882 | le16_to_cpu(cqicb->sbq_buf_size)); | 1727 | le16_to_cpu(cqicb->sbq_buf_size)); |
1883 | printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n", | 1728 | pr_err("cqicb->sbq_len = 0x%.04x\n", |
1884 | le16_to_cpu(cqicb->sbq_len)); | 1729 | le16_to_cpu(cqicb->sbq_len)); |
1885 | } | 1730 | } |
1886 | 1731 | ||
@@ -1888,100 +1733,85 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring) | |||
1888 | { | 1733 | { |
1889 | if (rx_ring == NULL) | 1734 | if (rx_ring == NULL) |
1890 | return; | 1735 | return; |
1891 | printk(KERN_ERR PFX | 1736 | pr_err("===================== Dumping rx_ring %d ===============\n", |
1892 | "===================== Dumping rx_ring %d ===============.\n", | ||
1893 | rx_ring->cq_id); | 1737 | rx_ring->cq_id); |
1894 | printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n", | 1738 | pr_err("Dumping rx_ring %d, type = %s%s%s\n", |
1895 | rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", | 1739 | rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", |
1896 | rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", | 1740 | rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", |
1897 | rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : ""); | 1741 | rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : ""); |
1898 | printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb); | 1742 | pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb); |
1899 | printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base); | 1743 | pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base); |
1900 | printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n", | 1744 | pr_err("rx_ring->cq_base_dma = %llx\n", |
1901 | (unsigned long long) rx_ring->cq_base_dma); | 1745 | (unsigned long long) rx_ring->cq_base_dma); |
1902 | printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size); | 1746 | pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size); |
1903 | printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len); | 1747 | pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len); |
1904 | printk(KERN_ERR PFX | 1748 | pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n", |
1905 | "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n", | ||
1906 | rx_ring->prod_idx_sh_reg, | 1749 | rx_ring->prod_idx_sh_reg, |
1907 | rx_ring->prod_idx_sh_reg | 1750 | rx_ring->prod_idx_sh_reg |
1908 | ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); | 1751 | ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); |
1909 | printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n", | 1752 | pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n", |
1910 | (unsigned long long) rx_ring->prod_idx_sh_reg_dma); | 1753 | (unsigned long long) rx_ring->prod_idx_sh_reg_dma); |
1911 | printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n", | 1754 | pr_err("rx_ring->cnsmr_idx_db_reg = %p\n", |
1912 | rx_ring->cnsmr_idx_db_reg); | 1755 | rx_ring->cnsmr_idx_db_reg); |
1913 | printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx); | 1756 | pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx); |
1914 | printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry); | 1757 | pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry); |
1915 | printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n", | 1758 | pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg); |
1916 | rx_ring->valid_db_reg); | ||
1917 | 1759 | ||
1918 | printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base); | 1760 | pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base); |
1919 | printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n", | 1761 | pr_err("rx_ring->lbq_base_dma = %llx\n", |
1920 | (unsigned long long) rx_ring->lbq_base_dma); | 1762 | (unsigned long long) rx_ring->lbq_base_dma); |
1921 | printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n", | 1763 | pr_err("rx_ring->lbq_base_indirect = %p\n", |
1922 | rx_ring->lbq_base_indirect); | 1764 | rx_ring->lbq_base_indirect); |
1923 | printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n", | 1765 | pr_err("rx_ring->lbq_base_indirect_dma = %llx\n", |
1924 | (unsigned long long) rx_ring->lbq_base_indirect_dma); | 1766 | (unsigned long long) rx_ring->lbq_base_indirect_dma); |
1925 | printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq); | 1767 | pr_err("rx_ring->lbq = %p\n", rx_ring->lbq); |
1926 | printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len); | 1768 | pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len); |
1927 | printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size); | 1769 | pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size); |
1928 | printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n", | 1770 | pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n", |
1929 | rx_ring->lbq_prod_idx_db_reg); | 1771 | rx_ring->lbq_prod_idx_db_reg); |
1930 | printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n", | 1772 | pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx); |
1931 | rx_ring->lbq_prod_idx); | 1773 | pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx); |
1932 | printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n", | 1774 | pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); |
1933 | rx_ring->lbq_curr_idx); | 1775 | pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); |
1934 | printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n", | 1776 | pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size); |
1935 | rx_ring->lbq_clean_idx); | 1777 | |
1936 | printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n", | 1778 | pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base); |
1937 | rx_ring->lbq_free_cnt); | 1779 | pr_err("rx_ring->sbq_base_dma = %llx\n", |
1938 | printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n", | ||
1939 | rx_ring->lbq_buf_size); | ||
1940 | |||
1941 | printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base); | ||
1942 | printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n", | ||
1943 | (unsigned long long) rx_ring->sbq_base_dma); | 1780 | (unsigned long long) rx_ring->sbq_base_dma); |
1944 | printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n", | 1781 | pr_err("rx_ring->sbq_base_indirect = %p\n", |
1945 | rx_ring->sbq_base_indirect); | 1782 | rx_ring->sbq_base_indirect); |
1946 | printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n", | 1783 | pr_err("rx_ring->sbq_base_indirect_dma = %llx\n", |
1947 | (unsigned long long) rx_ring->sbq_base_indirect_dma); | 1784 | (unsigned long long) rx_ring->sbq_base_indirect_dma); |
1948 | printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq); | 1785 | pr_err("rx_ring->sbq = %p\n", rx_ring->sbq); |
1949 | printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len); | 1786 | pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len); |
1950 | printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size); | 1787 | pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size); |
1951 | printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n", | 1788 | pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n", |
1952 | rx_ring->sbq_prod_idx_db_reg); | 1789 | rx_ring->sbq_prod_idx_db_reg); |
1953 | printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n", | 1790 | pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx); |
1954 | rx_ring->sbq_prod_idx); | 1791 | pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx); |
1955 | printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n", | 1792 | pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx); |
1956 | rx_ring->sbq_curr_idx); | 1793 | pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt); |
1957 | printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n", | 1794 | pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size); |
1958 | rx_ring->sbq_clean_idx); | 1795 | pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); |
1959 | printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n", | 1796 | pr_err("rx_ring->irq = %d\n", rx_ring->irq); |
1960 | rx_ring->sbq_free_cnt); | 1797 | pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); |
1961 | printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n", | 1798 | pr_err("rx_ring->qdev = %p\n", rx_ring->qdev); |
1962 | rx_ring->sbq_buf_size); | ||
1963 | printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id); | ||
1964 | printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq); | ||
1965 | printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu); | ||
1966 | printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev); | ||
1967 | } | 1799 | } |
1968 | 1800 | ||
1969 | void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) | 1801 | void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) |
1970 | { | 1802 | { |
1971 | void *ptr; | 1803 | void *ptr; |
1972 | 1804 | ||
1973 | printk(KERN_ERR PFX "%s: Enter.\n", __func__); | 1805 | pr_err("%s: Enter\n", __func__); |
1974 | 1806 | ||
1975 | ptr = kmalloc(size, GFP_ATOMIC); | 1807 | ptr = kmalloc(size, GFP_ATOMIC); |
1976 | if (ptr == NULL) { | 1808 | if (ptr == NULL) { |
1977 | printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n", | 1809 | pr_err("%s: Couldn't allocate a buffer\n", __func__); |
1978 | __func__); | ||
1979 | return; | 1810 | return; |
1980 | } | 1811 | } |
1981 | 1812 | ||
1982 | if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { | 1813 | if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { |
1983 | printk(KERN_ERR "%s: Failed to upload control block!\n", | 1814 | pr_err("%s: Failed to upload control block!\n", __func__); |
1984 | __func__); | ||
1985 | goto fail_it; | 1815 | goto fail_it; |
1986 | } | 1816 | } |
1987 | switch (bit) { | 1817 | switch (bit) { |
@@ -1995,8 +1825,7 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) | |||
1995 | ql_dump_ricb((struct ricb *)ptr); | 1825 | ql_dump_ricb((struct ricb *)ptr); |
1996 | break; | 1826 | break; |
1997 | default: | 1827 | default: |
1998 | printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n", | 1828 | pr_err("%s: Invalid bit value = %x\n", __func__, bit); |
1999 | __func__, bit); | ||
2000 | break; | 1829 | break; |
2001 | } | 1830 | } |
2002 | fail_it: | 1831 | fail_it: |
@@ -2007,27 +1836,27 @@ fail_it: | |||
2007 | #ifdef QL_OB_DUMP | 1836 | #ifdef QL_OB_DUMP |
2008 | void ql_dump_tx_desc(struct tx_buf_desc *tbd) | 1837 | void ql_dump_tx_desc(struct tx_buf_desc *tbd) |
2009 | { | 1838 | { |
2010 | printk(KERN_ERR PFX "tbd->addr = 0x%llx\n", | 1839 | pr_err("tbd->addr = 0x%llx\n", |
2011 | le64_to_cpu((u64) tbd->addr)); | 1840 | le64_to_cpu((u64) tbd->addr)); |
2012 | printk(KERN_ERR PFX "tbd->len = %d\n", | 1841 | pr_err("tbd->len = %d\n", |
2013 | le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); | 1842 | le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); |
2014 | printk(KERN_ERR PFX "tbd->flags = %s %s\n", | 1843 | pr_err("tbd->flags = %s %s\n", |
2015 | tbd->len & TX_DESC_C ? "C" : ".", | 1844 | tbd->len & TX_DESC_C ? "C" : ".", |
2016 | tbd->len & TX_DESC_E ? "E" : "."); | 1845 | tbd->len & TX_DESC_E ? "E" : "."); |
2017 | tbd++; | 1846 | tbd++; |
2018 | printk(KERN_ERR PFX "tbd->addr = 0x%llx\n", | 1847 | pr_err("tbd->addr = 0x%llx\n", |
2019 | le64_to_cpu((u64) tbd->addr)); | 1848 | le64_to_cpu((u64) tbd->addr)); |
2020 | printk(KERN_ERR PFX "tbd->len = %d\n", | 1849 | pr_err("tbd->len = %d\n", |
2021 | le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); | 1850 | le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); |
2022 | printk(KERN_ERR PFX "tbd->flags = %s %s\n", | 1851 | pr_err("tbd->flags = %s %s\n", |
2023 | tbd->len & TX_DESC_C ? "C" : ".", | 1852 | tbd->len & TX_DESC_C ? "C" : ".", |
2024 | tbd->len & TX_DESC_E ? "E" : "."); | 1853 | tbd->len & TX_DESC_E ? "E" : "."); |
2025 | tbd++; | 1854 | tbd++; |
2026 | printk(KERN_ERR PFX "tbd->addr = 0x%llx\n", | 1855 | pr_err("tbd->addr = 0x%llx\n", |
2027 | le64_to_cpu((u64) tbd->addr)); | 1856 | le64_to_cpu((u64) tbd->addr)); |
2028 | printk(KERN_ERR PFX "tbd->len = %d\n", | 1857 | pr_err("tbd->len = %d\n", |
2029 | le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); | 1858 | le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); |
2030 | printk(KERN_ERR PFX "tbd->flags = %s %s\n", | 1859 | pr_err("tbd->flags = %s %s\n", |
2031 | tbd->len & TX_DESC_C ? "C" : ".", | 1860 | tbd->len & TX_DESC_C ? "C" : ".", |
2032 | tbd->len & TX_DESC_E ? "E" : "."); | 1861 | tbd->len & TX_DESC_E ? "E" : "."); |
2033 | 1862 | ||
@@ -2040,38 +1869,38 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) | |||
2040 | struct tx_buf_desc *tbd; | 1869 | struct tx_buf_desc *tbd; |
2041 | u16 frame_len; | 1870 | u16 frame_len; |
2042 | 1871 | ||
2043 | printk(KERN_ERR PFX "%s\n", __func__); | 1872 | pr_err("%s\n", __func__); |
2044 | printk(KERN_ERR PFX "opcode = %s\n", | 1873 | pr_err("opcode = %s\n", |
2045 | (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO"); | 1874 | (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO"); |
2046 | printk(KERN_ERR PFX "flags1 = %s %s %s %s %s\n", | 1875 | pr_err("flags1 = %s %s %s %s %s\n", |
2047 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "", | 1876 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "", |
2048 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "", | 1877 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "", |
2049 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "", | 1878 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "", |
2050 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "", | 1879 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "", |
2051 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : ""); | 1880 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : ""); |
2052 | printk(KERN_ERR PFX "flags2 = %s %s %s\n", | 1881 | pr_err("flags2 = %s %s %s\n", |
2053 | ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", | 1882 | ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", |
2054 | ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", | 1883 | ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", |
2055 | ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); | 1884 | ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); |
2056 | printk(KERN_ERR PFX "flags3 = %s %s %s\n", | 1885 | pr_err("flags3 = %s %s %s\n", |
2057 | ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", | 1886 | ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", |
2058 | ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", | 1887 | ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", |
2059 | ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); | 1888 | ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); |
2060 | printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid); | 1889 | pr_err("tid = %x\n", ob_mac_iocb->tid); |
2061 | printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx); | 1890 | pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx); |
2062 | printk(KERN_ERR PFX "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci); | 1891 | pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci); |
2063 | if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) { | 1892 | if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) { |
2064 | printk(KERN_ERR PFX "frame_len = %d\n", | 1893 | pr_err("frame_len = %d\n", |
2065 | le32_to_cpu(ob_mac_tso_iocb->frame_len)); | 1894 | le32_to_cpu(ob_mac_tso_iocb->frame_len)); |
2066 | printk(KERN_ERR PFX "mss = %d\n", | 1895 | pr_err("mss = %d\n", |
2067 | le16_to_cpu(ob_mac_tso_iocb->mss)); | 1896 | le16_to_cpu(ob_mac_tso_iocb->mss)); |
2068 | printk(KERN_ERR PFX "prot_hdr_len = %d\n", | 1897 | pr_err("prot_hdr_len = %d\n", |
2069 | le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len)); | 1898 | le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len)); |
2070 | printk(KERN_ERR PFX "hdr_offset = 0x%.04x\n", | 1899 | pr_err("hdr_offset = 0x%.04x\n", |
2071 | le16_to_cpu(ob_mac_tso_iocb->net_trans_offset)); | 1900 | le16_to_cpu(ob_mac_tso_iocb->net_trans_offset)); |
2072 | frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len); | 1901 | frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len); |
2073 | } else { | 1902 | } else { |
2074 | printk(KERN_ERR PFX "frame_len = %d\n", | 1903 | pr_err("frame_len = %d\n", |
2075 | le16_to_cpu(ob_mac_iocb->frame_len)); | 1904 | le16_to_cpu(ob_mac_iocb->frame_len)); |
2076 | frame_len = le16_to_cpu(ob_mac_iocb->frame_len); | 1905 | frame_len = le16_to_cpu(ob_mac_iocb->frame_len); |
2077 | } | 1906 | } |
@@ -2081,9 +1910,9 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) | |||
2081 | 1910 | ||
2082 | void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) | 1911 | void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) |
2083 | { | 1912 | { |
2084 | printk(KERN_ERR PFX "%s\n", __func__); | 1913 | pr_err("%s\n", __func__); |
2085 | printk(KERN_ERR PFX "opcode = %d\n", ob_mac_rsp->opcode); | 1914 | pr_err("opcode = %d\n", ob_mac_rsp->opcode); |
2086 | printk(KERN_ERR PFX "flags = %s %s %s %s %s %s %s\n", | 1915 | pr_err("flags = %s %s %s %s %s %s %s\n", |
2087 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".", | 1916 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".", |
2088 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", | 1917 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", |
2089 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".", | 1918 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".", |
@@ -2091,16 +1920,16 @@ void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) | |||
2091 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".", | 1920 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".", |
2092 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".", | 1921 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".", |
2093 | ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : "."); | 1922 | ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : "."); |
2094 | printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid); | 1923 | pr_err("tid = %x\n", ob_mac_rsp->tid); |
2095 | } | 1924 | } |
2096 | #endif | 1925 | #endif |
2097 | 1926 | ||
2098 | #ifdef QL_IB_DUMP | 1927 | #ifdef QL_IB_DUMP |
2099 | void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) | 1928 | void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) |
2100 | { | 1929 | { |
2101 | printk(KERN_ERR PFX "%s\n", __func__); | 1930 | pr_err("%s\n", __func__); |
2102 | printk(KERN_ERR PFX "opcode = 0x%x\n", ib_mac_rsp->opcode); | 1931 | pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode); |
2103 | printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n", | 1932 | pr_err("flags1 = %s%s%s%s%s%s\n", |
2104 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "", | 1933 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "", |
2105 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "", | 1934 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "", |
2106 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "", | 1935 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "", |
@@ -2109,7 +1938,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) | |||
2109 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : ""); | 1938 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : ""); |
2110 | 1939 | ||
2111 | if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) | 1940 | if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) |
2112 | printk(KERN_ERR PFX "%s%s%s Multicast.\n", | 1941 | pr_err("%s%s%s Multicast\n", |
2113 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | 1942 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == |
2114 | IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", | 1943 | IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", |
2115 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | 1944 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == |
@@ -2117,7 +1946,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) | |||
2117 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | 1946 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == |
2118 | IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); | 1947 | IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); |
2119 | 1948 | ||
2120 | printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n", | 1949 | pr_err("flags2 = %s%s%s%s%s\n", |
2121 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "", | 1950 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "", |
2122 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "", | 1951 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "", |
2123 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "", | 1952 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "", |
@@ -2125,7 +1954,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) | |||
2125 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : ""); | 1954 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : ""); |
2126 | 1955 | ||
2127 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) | 1956 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) |
2128 | printk(KERN_ERR PFX "%s%s%s%s%s error.\n", | 1957 | pr_err("%s%s%s%s%s error\n", |
2129 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == | 1958 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == |
2130 | IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "", | 1959 | IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "", |
2131 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == | 1960 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == |
@@ -2137,12 +1966,12 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) | |||
2137 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == | 1966 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == |
2138 | IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : ""); | 1967 | IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : ""); |
2139 | 1968 | ||
2140 | printk(KERN_ERR PFX "flags3 = %s%s.\n", | 1969 | pr_err("flags3 = %s%s\n", |
2141 | ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "", | 1970 | ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "", |
2142 | ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : ""); | 1971 | ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : ""); |
2143 | 1972 | ||
2144 | if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) | 1973 | if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) |
2145 | printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n", | 1974 | pr_err("RSS flags = %s%s%s%s\n", |
2146 | ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == | 1975 | ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == |
2147 | IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "", | 1976 | IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "", |
2148 | ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == | 1977 | ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == |
@@ -2152,26 +1981,26 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) | |||
2152 | ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == | 1981 | ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == |
2153 | IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : ""); | 1982 | IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : ""); |
2154 | 1983 | ||
2155 | printk(KERN_ERR PFX "data_len = %d\n", | 1984 | pr_err("data_len = %d\n", |
2156 | le32_to_cpu(ib_mac_rsp->data_len)); | 1985 | le32_to_cpu(ib_mac_rsp->data_len)); |
2157 | printk(KERN_ERR PFX "data_addr = 0x%llx\n", | 1986 | pr_err("data_addr = 0x%llx\n", |
2158 | (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr)); | 1987 | (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr)); |
2159 | if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) | 1988 | if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) |
2160 | printk(KERN_ERR PFX "rss = %x\n", | 1989 | pr_err("rss = %x\n", |
2161 | le32_to_cpu(ib_mac_rsp->rss)); | 1990 | le32_to_cpu(ib_mac_rsp->rss)); |
2162 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) | 1991 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) |
2163 | printk(KERN_ERR PFX "vlan_id = %x\n", | 1992 | pr_err("vlan_id = %x\n", |
2164 | le16_to_cpu(ib_mac_rsp->vlan_id)); | 1993 | le16_to_cpu(ib_mac_rsp->vlan_id)); |
2165 | 1994 | ||
2166 | printk(KERN_ERR PFX "flags4 = %s%s%s.\n", | 1995 | pr_err("flags4 = %s%s%s\n", |
2167 | ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "", | 1996 | ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "", |
2168 | ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "", | 1997 | ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "", |
2169 | ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : ""); | 1998 | ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : ""); |
2170 | 1999 | ||
2171 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { | 2000 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { |
2172 | printk(KERN_ERR PFX "hdr length = %d.\n", | 2001 | pr_err("hdr length = %d\n", |
2173 | le32_to_cpu(ib_mac_rsp->hdr_len)); | 2002 | le32_to_cpu(ib_mac_rsp->hdr_len)); |
2174 | printk(KERN_ERR PFX "hdr addr = 0x%llx.\n", | 2003 | pr_err("hdr addr = 0x%llx\n", |
2175 | (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr)); | 2004 | (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr)); |
2176 | } | 2005 | } |
2177 | } | 2006 | } |
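The qlge hunks above replace printk(KERN_ERR PFX ...) with pr_err() and drop the trailing periods from the messages. The conversion works because pr_err() folds the severity and a per-file pr_fmt() prefix into a single macro; below is a minimal userspace sketch of that composition. printf and the "qlge: " prefix are stand-ins here; in-kernel, pr_err() expands to printk(KERN_ERR pr_fmt(fmt), ...).

#include <stdio.h>

#define KERN_ERR "<3>"                      /* kernel severity marker */
#define pr_fmt(fmt) "qlge: " fmt            /* per-file prefix, defined once */
#define pr_err(fmt, ...) printf(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	int cq_id = 3;

	/* prints: <3>qlge: rx_ring->cq_id = 3 */
	pr_err("rx_ring->cq_id = %d\n", cq_id);
	return 0;
}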
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 7d482a2316ac..142c381e1d73 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -510,7 +510,7 @@ static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
510 | if (!lp->phydev) | 510 | if (!lp->phydev) |
511 | return -EINVAL; | 511 | return -EINVAL; |
512 | 512 | ||
513 | return phy_mii_ioctl(lp->phydev, if_mii(rq), cmd); | 513 | return phy_mii_ioctl(lp->phydev, rq, cmd); |
514 | } | 514 | } |
515 | 515 | ||
516 | static int r6040_rx(struct net_device *dev, int limit) | 516 | static int r6040_rx(struct net_device *dev, int limit) |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index b8b85843c614..18bc5b718bbb 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -5796,7 +5796,7 @@ static void s2io_vpd_read(struct s2io_nic *nic) | |||
5796 | { | 5796 | { |
5797 | u8 *vpd_data; | 5797 | u8 *vpd_data; |
5798 | u8 data; | 5798 | u8 data; |
5799 | int i = 0, cnt, fail = 0; | 5799 | int i = 0, cnt, len, fail = 0; |
5800 | int vpd_addr = 0x80; | 5800 | int vpd_addr = 0x80; |
5801 | struct swStat *swstats = &nic->mac_control.stats_info->sw_stat; | 5801 | struct swStat *swstats = &nic->mac_control.stats_info->sw_stat; |
5802 | 5802 | ||
@@ -5837,20 +5837,28 @@ static void s2io_vpd_read(struct s2io_nic *nic) | |||
5837 | 5837 | ||
5838 | if (!fail) { | 5838 | if (!fail) { |
5839 | /* read serial number of adapter */ | 5839 | /* read serial number of adapter */ |
5840 | for (cnt = 0; cnt < 256; cnt++) { | 5840 | for (cnt = 0; cnt < 252; cnt++) { |
5841 | if ((vpd_data[cnt] == 'S') && | 5841 | if ((vpd_data[cnt] == 'S') && |
5842 | (vpd_data[cnt+1] == 'N') && | 5842 | (vpd_data[cnt+1] == 'N')) { |
5843 | (vpd_data[cnt+2] < VPD_STRING_LEN)) { | 5843 | len = vpd_data[cnt+2]; |
5844 | memset(nic->serial_num, 0, VPD_STRING_LEN); | 5844 | if (len < min(VPD_STRING_LEN, 256-cnt-2)) { |
5845 | memcpy(nic->serial_num, &vpd_data[cnt + 3], | 5845 | memcpy(nic->serial_num, |
5846 | vpd_data[cnt+2]); | 5846 | &vpd_data[cnt + 3], |
5847 | break; | 5847 | len); |
5848 | memset(nic->serial_num+len, | ||
5849 | 0, | ||
5850 | VPD_STRING_LEN-len); | ||
5851 | break; | ||
5852 | } | ||
5848 | } | 5853 | } |
5849 | } | 5854 | } |
5850 | } | 5855 | } |
5851 | 5856 | ||
5852 | if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) | 5857 | if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) { |
5853 | memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); | 5858 | len = vpd_data[1]; |
5859 | memcpy(nic->product_name, &vpd_data[3], len); | ||
5860 | nic->product_name[len] = 0; | ||
5861 | } | ||
5854 | kfree(vpd_data); | 5862 | kfree(vpd_data); |
5855 | swstats->mem_freed += 256; | 5863 | swstats->mem_freed += 256; |
5856 | } | 5864 | } |
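The s2io change above stops trusting the VPD length byte blindly: the serial-number copy now stays inside both the 256-byte VPD image and the destination string, and both strings are NUL-terminated afterwards. A standalone sketch of that bounds check follows; VPD_STRING_LEN and the sample data are illustrative assumptions.

#include <stdio.h>
#include <string.h>

#define VPD_STRING_LEN 32
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void read_serial(const unsigned char *vpd, char *serial)
{
	int cnt, len;

	/* stop at 252 so the "SN" tag and its length byte stay in bounds */
	for (cnt = 0; cnt < 252; cnt++) {
		if (vpd[cnt] == 'S' && vpd[cnt + 1] == 'N') {
			len = vpd[cnt + 2];
			if (len < MIN(VPD_STRING_LEN, 256 - cnt - 2)) {
				memcpy(serial, &vpd[cnt + 3], len);
				memset(serial + len, 0, VPD_STRING_LEN - len);
				break;
			}
		}
	}
}

int main(void)
{
	unsigned char vpd[256] = { 0 };
	char serial[VPD_STRING_LEN] = "";

	memcpy(&vpd[10], "SN" "\x0a" "XS-1234567", 13);  /* tag, len, payload */
	read_serial(vpd, serial);
	printf("serial = %s\n", serial);                 /* XS-1234567 */
	return 0;
}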
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 3645fb3673db..0af033533905 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -65,7 +65,7 @@ static int debug_level = ERR_DBG; | |||
65 | 65 | ||
66 | /* DEBUG message print. */ | 66 | /* DEBUG message print. */ |
67 | #define DBG_PRINT(dbg_level, fmt, args...) do { \ | 67 | #define DBG_PRINT(dbg_level, fmt, args...) do { \ |
68 | if (dbg_level >= debug_level) \ | 68 | if (dbg_level <= debug_level) \ |
69 | pr_info(fmt, ##args); \ | 69 | pr_info(fmt, ##args); \ |
70 | } while (0) | 70 | } while (0) |
71 | 71 | ||
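The one-character s2io.h fix above inverts the comparison so that raising debug_level shows more output instead of less: a message now prints only when its own level is numerically at or below the configured threshold. A userspace sketch of the corrected gating; the level values follow the driver's lower-number-is-more-severe convention and are assumptions here.

#include <stdio.h>

#define ERR_DBG  0
#define INFO_DBG 2

static int debug_level = ERR_DBG;

#define DBG_PRINT(dbg_level, fmt, ...) do {	\
	if ((dbg_level) <= debug_level)		\
		printf(fmt, ##__VA_ARGS__);	\
} while (0)

int main(void)
{
	DBG_PRINT(ERR_DBG, "shown: 0 <= 0 at the default threshold\n");
	DBG_PRINT(INFO_DBG, "suppressed: 2 <= 0 is false\n");
	return 0;
}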
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h index 144f76fd3e39..66b9da0260fe 100644 --- a/drivers/net/stmmac/common.h +++ b/drivers/net/stmmac/common.h | |||
@@ -108,6 +108,7 @@ enum rx_frame_status { /* IPC status */ | |||
108 | good_frame = 0, | 108 | good_frame = 0, |
109 | discard_frame = 1, | 109 | discard_frame = 1, |
110 | csum_none = 2, | 110 | csum_none = 2, |
111 | llc_snap = 4, | ||
111 | }; | 112 | }; |
112 | 113 | ||
113 | enum tx_dma_irq_status { | 114 | enum tx_dma_irq_status { |
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h index d8d0f3553770..8b20b19971cb 100644 --- a/drivers/net/stmmac/dwmac1000.h +++ b/drivers/net/stmmac/dwmac1000.h | |||
@@ -93,7 +93,7 @@ enum inter_frame_gap { | |||
93 | #define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ | 93 | #define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ |
94 | #define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ | 94 | #define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ |
95 | #define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ | 95 | #define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ |
96 | #define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad Stripping */ | 96 | #define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Stripping */ |
97 | #define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ | 97 | #define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ |
98 | #define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ | 98 | #define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ |
99 | #define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ | 99 | #define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ |
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c index 917b4e16923b..2b2f5c8caf1c 100644 --- a/drivers/net/stmmac/dwmac1000_core.c +++ b/drivers/net/stmmac/dwmac1000_core.c | |||
@@ -220,6 +220,8 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr) | |||
220 | ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff)); | 220 | ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff)); |
221 | 221 | ||
222 | mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); | 222 | mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); |
223 | if (!mac) | ||
224 | return NULL; | ||
223 | 225 | ||
224 | mac->mac = &dwmac1000_ops; | 226 | mac->mac = &dwmac1000_ops; |
225 | mac->dma = &dwmac1000_dma_ops; | 227 | mac->dma = &dwmac1000_dma_ops; |
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c index 6f270a0e151a..2fb165fa2ba0 100644 --- a/drivers/net/stmmac/dwmac100_core.c +++ b/drivers/net/stmmac/dwmac100_core.c | |||
@@ -179,6 +179,8 @@ struct mac_device_info *dwmac100_setup(unsigned long ioaddr) | |||
179 | struct mac_device_info *mac; | 179 | struct mac_device_info *mac; |
180 | 180 | ||
181 | mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); | 181 | mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); |
182 | if (!mac) | ||
183 | return NULL; | ||
182 | 184 | ||
183 | pr_info("\tDWMAC100\n"); | 185 | pr_info("\tDWMAC100\n"); |
184 | 186 | ||
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c index 3c18ebece043..f612f986a7e1 100644 --- a/drivers/net/stmmac/enh_desc.c +++ b/drivers/net/stmmac/enh_desc.c | |||
@@ -123,7 +123,7 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) | |||
123 | */ | 123 | */ |
124 | if (status == 0x0) { | 124 | if (status == 0x0) { |
125 | CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n"); | 125 | CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n"); |
126 | ret = good_frame; | 126 | ret = llc_snap; |
127 | } else if (status == 0x4) { | 127 | } else if (status == 0x4) { |
128 | CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errors.\n"); | 128 | CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errors.\n"); |
129 | ret = good_frame; | 129 | ret = good_frame; |
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index acf061686940..bbb7951b9c4c 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c | |||
@@ -829,7 +829,6 @@ static int stmmac_open(struct net_device *dev) | |||
829 | * In case of failure continue without timer. */ | 829 | * In case of failure continue without timer. */ |
830 | if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) { | 830 | if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) { |
831 | pr_warning("stmmaceth: cannot attach the external timer.\n"); | 831 | pr_warning("stmmaceth: cannot attach the external timer.\n"); |
832 | tmrate = 0; | ||
833 | priv->tm->freq = 0; | 832 | priv->tm->freq = 0; |
834 | priv->tm->timer_start = stmmac_no_timer_started; | 833 | priv->tm->timer_start = stmmac_no_timer_started; |
835 | priv->tm->timer_stop = stmmac_no_timer_stopped; | 834 | priv->tm->timer_stop = stmmac_no_timer_stopped; |
@@ -1217,9 +1216,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) | |||
1217 | priv->dev->stats.rx_errors++; | 1216 | priv->dev->stats.rx_errors++; |
1218 | else { | 1217 | else { |
1219 | struct sk_buff *skb; | 1218 | struct sk_buff *skb; |
1220 | /* Length should omit the CRC */ | 1219 | int frame_len; |
1221 | int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4; | ||
1222 | 1220 | ||
1221 | frame_len = priv->hw->desc->get_rx_frame_len(p); | ||
1222 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 | ||
1223 | * Type frames (LLC/LLC-SNAP) */ | ||
1224 | if (unlikely(status != llc_snap)) | ||
1225 | frame_len -= ETH_FCS_LEN; | ||
1223 | #ifdef STMMAC_RX_DEBUG | 1226 | #ifdef STMMAC_RX_DEBUG |
1224 | if (frame_len > ETH_FRAME_LEN) | 1227 | if (frame_len > ETH_FRAME_LEN) |
1225 | pr_debug("\tRX frame size %d, COE status: %d\n", | 1228 | pr_debug("\tRX frame size %d, COE status: %d\n", |
@@ -1558,15 +1561,15 @@ static int stmmac_mac_device_setup(struct net_device *dev) | |||
1558 | else | 1561 | else |
1559 | device = dwmac100_setup(ioaddr); | 1562 | device = dwmac100_setup(ioaddr); |
1560 | 1563 | ||
1564 | if (!device) | ||
1565 | return -ENOMEM; | ||
1566 | |||
1561 | if (priv->enh_desc) { | 1567 | if (priv->enh_desc) { |
1562 | device->desc = &enh_desc_ops; | 1568 | device->desc = &enh_desc_ops; |
1563 | pr_info("\tEnhanced descriptor structure\n"); | 1569 | pr_info("\tEnhanced descriptor structure\n"); |
1564 | } else | 1570 | } else |
1565 | device->desc = &ndesc_ops; | 1571 | device->desc = &ndesc_ops; |
1566 | 1572 | ||
1567 | if (!device) | ||
1568 | return -ENOMEM; | ||
1569 | |||
1570 | priv->hw = device; | 1573 | priv->hw = device; |
1571 | 1574 | ||
1572 | priv->wolenabled = priv->hw->pmt; /* PMT supported */ | 1575 | priv->wolenabled = priv->hw->pmt; /* PMT supported */ |
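The stmmac hunks above fix a subtle interaction with GMAC_CONTROL_ACS: with automatic pad/FCS stripping enabled, the core removes the 4-byte FCS itself only for IEEE 802.3 (LLC/LLC-SNAP) frames, which the descriptor code now reports through the new llc_snap status instead of good_frame, so the RX path subtracts ETH_FCS_LEN only for the other frame types. The same series also checks the dwmac kzalloc() results and moves the !device test ahead of the first dereference. A sketch of the length adjustment; desc_len stands in for priv->hw->desc->get_rx_frame_len(p).

#include <stdio.h>

#define ETH_FCS_LEN 4

enum rx_frame_status {	/* mirrors the enum extended in common.h */
	good_frame = 0,
	discard_frame = 1,
	csum_none = 2,
	llc_snap = 4,
};

static int rx_frame_len(int desc_len, enum rx_frame_status status)
{
	int frame_len = desc_len;

	if (status != llc_snap)		/* FCS still present: drop it */
		frame_len -= ETH_FCS_LEN;
	return frame_len;
}

int main(void)
{
	printf("Ethertype frame: %d\n", rx_frame_len(64, good_frame)); /* 60 */
	printf("802.3 LLC frame: %d\n", rx_frame_len(64, llc_snap));   /* 64 */
	return 0;
}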
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 6ad6fe706312..63042596f0cf 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -736,8 +736,18 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun, | |||
736 | gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; | 736 | gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; |
737 | else if (sinfo->gso_type & SKB_GSO_UDP) | 737 | else if (sinfo->gso_type & SKB_GSO_UDP) |
738 | gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; | 738 | gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; |
739 | else | 739 | else { |
740 | BUG(); | 740 | printk(KERN_ERR "tun: unexpected GSO type: " |
741 | "0x%x, gso_size %d, hdr_len %d\n", | ||
742 | sinfo->gso_type, gso.gso_size, | ||
743 | gso.hdr_len); | ||
744 | print_hex_dump(KERN_ERR, "tun: ", | ||
745 | DUMP_PREFIX_NONE, | ||
746 | 16, 1, skb->head, | ||
747 | min((int)gso.hdr_len, 64), true); | ||
748 | WARN_ON_ONCE(1); | ||
749 | return -EINVAL; | ||
750 | } | ||
741 | if (sinfo->gso_type & SKB_GSO_TCP_ECN) | 751 | if (sinfo->gso_type & SKB_GSO_TCP_ECN) |
742 | gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN; | 752 | gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN; |
743 | } else | 753 | } else |
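The tun.c change above downgrades a kernel-killing BUG() on an unexpected GSO type into diagnostics plus WARN_ON_ONCE() and an -EINVAL return, so one malformed skb no longer panics the machine. A userspace sketch of the log-once-and-fail pattern; the WARN_ON_ONCE stand-in and the GSO type values are simplifications.

#include <stdio.h>

#define EINVAL 22

#define WARN_ON_ONCE(cond) do {						\
	static int warned_once;						\
	if ((cond) && !warned_once) {					\
		warned_once = 1;					\
		fprintf(stderr, "WARN: %s:%d\n", __FILE__, __LINE__);	\
	}								\
} while (0)

static int put_gso_type(unsigned int gso_type)
{
	switch (gso_type) {
	case 1: case 3: case 4:		/* stand-ins for the known GSO types */
		return 0;
	default:
		fprintf(stderr, "tun: unexpected GSO type: 0x%x\n", gso_type);
		WARN_ON_ONCE(1);
		return -EINVAL;		/* fail the packet, not the kernel */
	}
}

int main(void)
{
	printf("known   -> %d\n", put_gso_type(1));
	printf("unknown -> %d\n", put_gso_type(0x80));
	printf("again   -> %d (warning fired only once)\n", put_gso_type(0x80));
	return 0;
}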
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 7eab4071ea26..3b03794ac3f5 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/usb.h> | 44 | #include <linux/usb.h> |
45 | #include <linux/usb/usbnet.h> | 45 | #include <linux/usb/usbnet.h> |
46 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
47 | #include <linux/kernel.h> | ||
47 | 48 | ||
48 | #define DRIVER_VERSION "22-Aug-2005" | 49 | #define DRIVER_VERSION "22-Aug-2005" |
49 | 50 | ||
@@ -158,16 +159,6 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf) | |||
158 | } | 159 | } |
159 | EXPORT_SYMBOL_GPL(usbnet_get_endpoints); | 160 | EXPORT_SYMBOL_GPL(usbnet_get_endpoints); |
160 | 161 | ||
161 | static u8 nibble(unsigned char c) | ||
162 | { | ||
163 | if (likely(isdigit(c))) | ||
164 | return c - '0'; | ||
165 | c = toupper(c); | ||
166 | if (likely(isxdigit(c))) | ||
167 | return 10 + c - 'A'; | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) | 162 | int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) |
172 | { | 163 | { |
173 | int tmp, i; | 164 | int tmp, i; |
@@ -183,7 +174,7 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) | |||
183 | } | 174 | } |
184 | for (i = tmp = 0; i < 6; i++, tmp += 2) | 175 | for (i = tmp = 0; i < 6; i++, tmp += 2) |
185 | dev->net->dev_addr [i] = | 176 | dev->net->dev_addr [i] = |
186 | (nibble(buf [tmp]) << 4) + nibble(buf [tmp + 1]); | 177 | (hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]); |
187 | return 0; | 178 | return 0; |
188 | } | 179 | } |
189 | EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); | 180 | EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); |
@@ -624,7 +615,7 @@ static void usbnet_terminate_urbs(struct usbnet *dev) | |||
624 | while (!skb_queue_empty(&dev->rxq) | 615 | while (!skb_queue_empty(&dev->rxq) |
625 | && !skb_queue_empty(&dev->txq) | 616 | && !skb_queue_empty(&dev->txq) |
626 | && !skb_queue_empty(&dev->done)) { | 617 | && !skb_queue_empty(&dev->done)) { |
627 | schedule_timeout(UNLINK_TIMEOUT_MS); | 618 | schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); |
628 | set_current_state(TASK_UNINTERRUPTIBLE); | 619 | set_current_state(TASK_UNINTERRUPTIBLE); |
629 | netif_dbg(dev, ifdown, dev->net, | 620 | netif_dbg(dev, ifdown, dev->net, |
630 | "waited for %d urb completions\n", temp); | 621 | "waited for %d urb completions\n", temp); |
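Two independent fixes in the usbnet hunks above: usbnet_terminate_urbs() now converts UNLINK_TIMEOUT_MS through msecs_to_jiffies() before handing it to schedule_timeout(), which takes jiffies rather than milliseconds, and the open-coded nibble() helper gives way to the shared hex_to_bin() (the ray_cs hunk further below retires its asc_to_int() the same way). A standalone sketch of the MAC-address parse, using a hex_to_bin() with the lib/hexdump.c semantics (returns -1 on a non-hex character); the sample buffer is an assumption.

#include <ctype.h>
#include <stdio.h>

static int hex_to_bin(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch = tolower(ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

int main(void)
{
	const char *buf = "00A0C9224E01";	/* 12 hex digits, MSB first */
	unsigned char mac[6];
	int i, tmp;

	for (i = tmp = 0; i < 6; i++, tmp += 2)
		mac[i] = (hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);

	for (i = 0; i < 6; i++)
		printf("%02x%c", mac[i], i == 5 ? '\n' : ':');
	return 0;
}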
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 9d64186050f3..abe0ff53daf3 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -664,8 +664,13 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, | |||
664 | while (len) { | 664 | while (len) { |
665 | u32 buf_size; | 665 | u32 buf_size; |
666 | 666 | ||
667 | buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ? | 667 | if (len < VMXNET3_MAX_TX_BUF_SIZE) { |
668 | VMXNET3_MAX_TX_BUF_SIZE : len; | 668 | buf_size = len; |
669 | dw2 |= len; | ||
670 | } else { | ||
671 | buf_size = VMXNET3_MAX_TX_BUF_SIZE; | ||
672 | /* spec says that for TxDesc.len, 0 == 2^14 */ | ||
673 | } | ||
669 | 674 | ||
670 | tbi = tq->buf_info + tq->tx_ring.next2fill; | 675 | tbi = tq->buf_info + tq->tx_ring.next2fill; |
671 | tbi->map_type = VMXNET3_MAP_SINGLE; | 676 | tbi->map_type = VMXNET3_MAP_SINGLE; |
@@ -673,13 +678,13 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, | |||
673 | skb->data + buf_offset, buf_size, | 678 | skb->data + buf_offset, buf_size, |
674 | PCI_DMA_TODEVICE); | 679 | PCI_DMA_TODEVICE); |
675 | 680 | ||
676 | tbi->len = buf_size; /* this automatically convert 2^14 to 0 */ | 681 | tbi->len = buf_size; |
677 | 682 | ||
678 | gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; | 683 | gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; |
679 | BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); | 684 | BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); |
680 | 685 | ||
681 | gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); | 686 | gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); |
682 | gdesc->dword[2] = cpu_to_le32(dw2 | buf_size); | 687 | gdesc->dword[2] = cpu_to_le32(dw2); |
683 | gdesc->dword[3] = 0; | 688 | gdesc->dword[3] = 0; |
684 | 689 | ||
685 | dev_dbg(&adapter->netdev->dev, | 690 | dev_dbg(&adapter->netdev->dev, |
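The vmxnet3 rework above makes the descriptor length quirk explicit instead of relying on truncation: TxDesc.len is a 14-bit field in which 0 encodes 2^14 bytes, so for a full 16384-byte buffer the driver now leaves the length bits clear rather than OR-ing in a value that would wrap. A sketch of the encoding; the constant mirrors the driver's VMXNET3_MAX_TX_BUF_SIZE.

#include <stdio.h>

#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14)	/* 16384 */

/* Fold a buffer length into the descriptor's dword2, whose low
 * 14 bits hold the length and where 0 means 2^14. */
static unsigned int encode_len(unsigned int dw2, unsigned int len)
{
	if (len < VMXNET3_MAX_TX_BUF_SIZE)
		dw2 |= len;
	/* else: len == 16384 is encoded as 0, so leave the bits clear */
	return dw2;
}

int main(void)
{
	printf("len 1000  -> 0x%x\n", encode_len(0, 1000));	/* 0x3e8 */
	printf("len 16384 -> 0x%x\n", encode_len(0, 16384));	/* 0x0 */
	return 0;
}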
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 762a6a7763fe..2121c735cabd 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
@@ -68,10 +68,10 @@ | |||
68 | /* | 68 | /* |
69 | * Version numbers | 69 | * Version numbers |
70 | */ | 70 | */ |
71 | #define VMXNET3_DRIVER_VERSION_STRING "1.0.13.0-k" | 71 | #define VMXNET3_DRIVER_VERSION_STRING "1.0.14.0-k" |
72 | 72 | ||
73 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ | 73 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ |
74 | #define VMXNET3_DRIVER_VERSION_NUM 0x01000B00 | 74 | #define VMXNET3_DRIVER_VERSION_NUM 0x01000E00 |
75 | 75 | ||
76 | 76 | ||
77 | /* | 77 | /* |
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h index 2d7c96d7e865..eb80243e22df 100644 --- a/drivers/net/wimax/i2400m/i2400m-usb.h +++ b/drivers/net/wimax/i2400m/i2400m-usb.h | |||
@@ -152,6 +152,7 @@ enum { | |||
152 | /* Device IDs */ | 152 | /* Device IDs */ |
153 | USB_DEVICE_ID_I6050 = 0x0186, | 153 | USB_DEVICE_ID_I6050 = 0x0186, |
154 | USB_DEVICE_ID_I6050_2 = 0x0188, | 154 | USB_DEVICE_ID_I6050_2 = 0x0188, |
155 | USB_DEVICE_ID_I6250 = 0x0187, | ||
155 | }; | 156 | }; |
156 | 157 | ||
157 | 158 | ||
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 0d5081d77dc0..d3365ac85dde 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c | |||
@@ -491,6 +491,7 @@ int i2400mu_probe(struct usb_interface *iface, | |||
491 | switch (id->idProduct) { | 491 | switch (id->idProduct) { |
492 | case USB_DEVICE_ID_I6050: | 492 | case USB_DEVICE_ID_I6050: |
493 | case USB_DEVICE_ID_I6050_2: | 493 | case USB_DEVICE_ID_I6050_2: |
494 | case USB_DEVICE_ID_I6250: | ||
494 | i2400mu->i6050 = 1; | 495 | i2400mu->i6050 = 1; |
495 | break; | 496 | break; |
496 | default: | 497 | default: |
@@ -739,6 +740,7 @@ static | |||
739 | struct usb_device_id i2400mu_id_table[] = { | 740 | struct usb_device_id i2400mu_id_table[] = { |
740 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, | 741 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, |
741 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) }, | 742 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) }, |
743 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) }, | ||
742 | { USB_DEVICE(0x8086, 0x0181) }, | 744 | { USB_DEVICE(0x8086, 0x0181) }, |
743 | { USB_DEVICE(0x8086, 0x1403) }, | 745 | { USB_DEVICE(0x8086, 0x1403) }, |
744 | { USB_DEVICE(0x8086, 0x1405) }, | 746 | { USB_DEVICE(0x8086, 0x1405) }, |
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index abff8934db13..9c38fc331dca 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c | |||
@@ -97,7 +97,6 @@ static iw_stats *ray_get_wireless_stats(struct net_device *dev); | |||
97 | static const struct iw_handler_def ray_handler_def; | 97 | static const struct iw_handler_def ray_handler_def; |
98 | 98 | ||
99 | /***** Prototypes for raylink functions **************************************/ | 99 | /***** Prototypes for raylink functions **************************************/ |
100 | static int asc_to_int(char a); | ||
101 | static void authenticate(ray_dev_t *local); | 100 | static void authenticate(ray_dev_t *local); |
102 | static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type); | 101 | static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type); |
103 | static void authenticate_timeout(u_long); | 102 | static void authenticate_timeout(u_long); |
@@ -1717,24 +1716,6 @@ static void authenticate_timeout(u_long data) | |||
1717 | } | 1716 | } |
1718 | 1717 | ||
1719 | /*===========================================================================*/ | 1718 | /*===========================================================================*/ |
1720 | static int asc_to_int(char a) | ||
1721 | { | ||
1722 | if (a < '0') | ||
1723 | return -1; | ||
1724 | if (a <= '9') | ||
1725 | return (a - '0'); | ||
1726 | if (a < 'A') | ||
1727 | return -1; | ||
1728 | if (a <= 'F') | ||
1729 | return (10 + a - 'A'); | ||
1730 | if (a < 'a') | ||
1731 | return -1; | ||
1732 | if (a <= 'f') | ||
1733 | return (10 + a - 'a'); | ||
1734 | return -1; | ||
1735 | } | ||
1736 | |||
1737 | /*===========================================================================*/ | ||
1738 | static int parse_addr(char *in_str, UCHAR *out) | 1719 | static int parse_addr(char *in_str, UCHAR *out) |
1739 | { | 1720 | { |
1740 | int len; | 1721 | int len; |
@@ -1754,14 +1735,14 @@ static int parse_addr(char *in_str, UCHAR *out) | |||
1754 | i = 5; | 1735 | i = 5; |
1755 | 1736 | ||
1756 | while (j > 0) { | 1737 | while (j > 0) { |
1757 | if ((k = asc_to_int(in_str[j--])) != -1) | 1738 | if ((k = hex_to_bin(in_str[j--])) != -1) |
1758 | out[i] = k; | 1739 | out[i] = k; |
1759 | else | 1740 | else |
1760 | return 0; | 1741 | return 0; |
1761 | 1742 | ||
1762 | if (j == 0) | 1743 | if (j == 0) |
1763 | break; | 1744 | break; |
1764 | if ((k = asc_to_int(in_str[j--])) != -1) | 1745 | if ((k = hex_to_bin(in_str[j--])) != -1) |
1765 | out[i] += k << 4; | 1746 | out[i] += k << 4; |
1766 | else | 1747 | else |
1767 | return 0; | 1748 | return 0; |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 147bb1a69aba..a75ed3083a6a 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -295,7 +295,7 @@ claw_driver_group_store(struct device_driver *ddrv, const char *buf, | |||
295 | int err; | 295 | int err; |
296 | err = ccwgroup_create_from_string(claw_root_dev, | 296 | err = ccwgroup_create_from_string(claw_root_dev, |
297 | claw_group_driver.driver_id, | 297 | claw_group_driver.driver_id, |
298 | &claw_ccw_driver, 3, buf); | 298 | &claw_ccw_driver, 2, buf); |
299 | return err ? err : count; | 299 | return err ? err : count; |
300 | } | 300 | } |
301 | 301 | ||
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index d79892782a2b..d1257768be90 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -188,8 +188,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, | |||
188 | qeth_is_enabled6(c, f) : qeth_is_enabled(c, f)) | 188 | qeth_is_enabled6(c, f) : qeth_is_enabled(c, f)) |
189 | 189 | ||
190 | #define QETH_IDX_FUNC_LEVEL_OSD 0x0101 | 190 | #define QETH_IDX_FUNC_LEVEL_OSD 0x0101 |
191 | #define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108 | 191 | #define QETH_IDX_FUNC_LEVEL_IQD 0x4108 |
192 | #define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108 | ||
193 | 192 | ||
194 | #define QETH_MODELLIST_ARRAY \ | 193 | #define QETH_MODELLIST_ARRAY \ |
195 | {{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \ | 194 | {{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \ |
@@ -741,6 +740,7 @@ struct qeth_card { | |||
741 | struct qeth_qdio_info qdio; | 740 | struct qeth_qdio_info qdio; |
742 | struct qeth_perf_stats perf_stats; | 741 | struct qeth_perf_stats perf_stats; |
743 | int use_hard_stop; | 742 | int use_hard_stop; |
743 | int read_or_write_problem; | ||
744 | struct qeth_osn_info osn_info; | 744 | struct qeth_osn_info osn_info; |
745 | struct qeth_discipline discipline; | 745 | struct qeth_discipline discipline; |
746 | atomic_t force_alloc_skb; | 746 | atomic_t force_alloc_skb; |
@@ -748,6 +748,7 @@ struct qeth_card { | |||
748 | struct qdio_ssqd_desc ssqd; | 748 | struct qdio_ssqd_desc ssqd; |
749 | debug_info_t *debug; | 749 | debug_info_t *debug; |
750 | struct mutex conf_mutex; | 750 | struct mutex conf_mutex; |
751 | struct mutex discipline_mutex; | ||
751 | }; | 752 | }; |
752 | 753 | ||
753 | struct qeth_card_list_struct { | 754 | struct qeth_card_list_struct { |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index b7019066c303..3a5a18a0fc28 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -262,6 +262,7 @@ static int qeth_issue_next_read(struct qeth_card *card) | |||
262 | QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! " | 262 | QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! " |
263 | "rc=%i\n", dev_name(&card->gdev->dev), rc); | 263 | "rc=%i\n", dev_name(&card->gdev->dev), rc); |
264 | atomic_set(&card->read.irq_pending, 0); | 264 | atomic_set(&card->read.irq_pending, 0); |
265 | card->read_or_write_problem = 1; | ||
265 | qeth_schedule_recovery(card); | 266 | qeth_schedule_recovery(card); |
266 | wake_up(&card->wait_q); | 267 | wake_up(&card->wait_q); |
267 | } | 268 | } |
@@ -382,6 +383,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) | |||
382 | qeth_put_reply(reply); | 383 | qeth_put_reply(reply); |
383 | } | 384 | } |
384 | spin_unlock_irqrestore(&card->lock, flags); | 385 | spin_unlock_irqrestore(&card->lock, flags); |
386 | atomic_set(&card->write.irq_pending, 0); | ||
385 | } | 387 | } |
386 | EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); | 388 | EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); |
387 | 389 | ||
@@ -1076,6 +1078,7 @@ static int qeth_setup_card(struct qeth_card *card) | |||
1076 | card->state = CARD_STATE_DOWN; | 1078 | card->state = CARD_STATE_DOWN; |
1077 | card->lan_online = 0; | 1079 | card->lan_online = 0; |
1078 | card->use_hard_stop = 0; | 1080 | card->use_hard_stop = 0; |
1081 | card->read_or_write_problem = 0; | ||
1079 | card->dev = NULL; | 1082 | card->dev = NULL; |
1080 | spin_lock_init(&card->vlanlock); | 1083 | spin_lock_init(&card->vlanlock); |
1081 | spin_lock_init(&card->mclock); | 1084 | spin_lock_init(&card->mclock); |
@@ -1084,6 +1087,7 @@ static int qeth_setup_card(struct qeth_card *card) | |||
1084 | spin_lock_init(&card->ip_lock); | 1087 | spin_lock_init(&card->ip_lock); |
1085 | spin_lock_init(&card->thread_mask_lock); | 1088 | spin_lock_init(&card->thread_mask_lock); |
1086 | mutex_init(&card->conf_mutex); | 1089 | mutex_init(&card->conf_mutex); |
1090 | mutex_init(&card->discipline_mutex); | ||
1087 | card->thread_start_mask = 0; | 1091 | card->thread_start_mask = 0; |
1088 | card->thread_allowed_mask = 0; | 1092 | card->thread_allowed_mask = 0; |
1089 | card->thread_running_mask = 0; | 1093 | card->thread_running_mask = 0; |
@@ -1383,12 +1387,7 @@ static void qeth_init_func_level(struct qeth_card *card) | |||
1383 | { | 1387 | { |
1384 | switch (card->info.type) { | 1388 | switch (card->info.type) { |
1385 | case QETH_CARD_TYPE_IQD: | 1389 | case QETH_CARD_TYPE_IQD: |
1386 | if (card->ipato.enabled) | 1390 | card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; |
1387 | card->info.func_level = | ||
1388 | QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT; | ||
1389 | else | ||
1390 | card->info.func_level = | ||
1391 | QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT; | ||
1392 | break; | 1391 | break; |
1393 | case QETH_CARD_TYPE_OSD: | 1392 | case QETH_CARD_TYPE_OSD: |
1394 | case QETH_CARD_TYPE_OSN: | 1393 | case QETH_CARD_TYPE_OSN: |
@@ -1662,6 +1661,10 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1662 | 1661 | ||
1663 | QETH_CARD_TEXT(card, 2, "sendctl"); | 1662 | QETH_CARD_TEXT(card, 2, "sendctl"); |
1664 | 1663 | ||
1664 | if (card->read_or_write_problem) { | ||
1665 | qeth_release_buffer(iob->channel, iob); | ||
1666 | return -EIO; | ||
1667 | } | ||
1665 | reply = qeth_alloc_reply(card); | 1668 | reply = qeth_alloc_reply(card); |
1666 | if (!reply) { | 1669 | if (!reply) { |
1667 | return -ENOMEM; | 1670 | return -ENOMEM; |
@@ -1733,6 +1736,9 @@ time_err: | |||
1733 | spin_unlock_irqrestore(&reply->card->lock, flags); | 1736 | spin_unlock_irqrestore(&reply->card->lock, flags); |
1734 | reply->rc = -ETIME; | 1737 | reply->rc = -ETIME; |
1735 | atomic_inc(&reply->received); | 1738 | atomic_inc(&reply->received); |
1739 | atomic_set(&card->write.irq_pending, 0); | ||
1740 | qeth_release_buffer(iob->channel, iob); | ||
1741 | card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; | ||
1736 | wake_up(&reply->wait_q); | 1742 | wake_up(&reply->wait_q); |
1737 | rc = reply->rc; | 1743 | rc = reply->rc; |
1738 | qeth_put_reply(reply); | 1744 | qeth_put_reply(reply); |
@@ -1990,7 +1996,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, | |||
1990 | QETH_DBF_TEXT(SETUP, 2, "olmlimit"); | 1996 | QETH_DBF_TEXT(SETUP, 2, "olmlimit"); |
1991 | dev_err(&card->gdev->dev, "A connection could not be " | 1997 | dev_err(&card->gdev->dev, "A connection could not be " |
1992 | "established because of an OLM limit\n"); | 1998 | "established because of an OLM limit\n"); |
1993 | rc = -EMLINK; | 1999 | iob->rc = -EMLINK; |
1994 | } | 2000 | } |
1995 | QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); | 2001 | QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); |
1996 | return rc; | 2002 | return rc; |
@@ -2489,6 +2495,10 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, | |||
2489 | qeth_prepare_ipa_cmd(card, iob, prot_type); | 2495 | qeth_prepare_ipa_cmd(card, iob, prot_type); |
2490 | rc = qeth_send_control_data(card, IPA_CMD_LENGTH, | 2496 | rc = qeth_send_control_data(card, IPA_CMD_LENGTH, |
2491 | iob, reply_cb, reply_param); | 2497 | iob, reply_cb, reply_param); |
2498 | if (rc == -ETIME) { | ||
2499 | qeth_clear_ipacmd_list(card); | ||
2500 | qeth_schedule_recovery(card); | ||
2501 | } | ||
2492 | return rc; | 2502 | return rc; |
2493 | } | 2503 | } |
2494 | EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); | 2504 | EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); |
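The qeth_core_main.c hunks above wire up a fail-fast path for broken channels: a failed read ccw sets the new read_or_write_problem flag, qeth_send_control_data() then refuses further commands with -EIO (releasing the buffer instead of leaking it), and an -ETIME from an IPA command clears the pending list and schedules recovery, with the flag cleared again once the card reinitializes. A heavily simplified userspace sketch of that state machine; the types, helpers, and the place the flag is set are stand-ins.

#include <stdio.h>

#define EIO   5
#define ETIME 62

struct card {
	int read_or_write_problem;	/* set on channel error, cleared by recovery */
};

static int send_control_data(struct card *c, int simulate_timeout)
{
	if (c->read_or_write_problem)
		return -EIO;		/* channel known bad: fail fast */
	return simulate_timeout ? -ETIME : 0;
}

static int send_ipa_cmd(struct card *c, int simulate_timeout)
{
	int rc = send_control_data(c, simulate_timeout);

	if (rc == -ETIME) {
		c->read_or_write_problem = 1;	/* stands in for the error path */
		printf("clearing pending cmds, scheduling recovery\n");
	}
	return rc;
}

int main(void)
{
	struct card c = { 0 };

	printf("rc=%d\n", send_ipa_cmd(&c, 1));	/* -62: timed out, recovery kicked */
	printf("rc=%d\n", send_ipa_cmd(&c, 0));	/* -5: short-circuited while broken */
	c.read_or_write_problem = 0;		/* recovery completed */
	printf("rc=%d\n", send_ipa_cmd(&c, 0));	/* 0 */
	return 0;
}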
@@ -3413,7 +3423,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, | |||
3413 | { | 3423 | { |
3414 | struct qeth_ipa_cmd *cmd; | 3424 | struct qeth_ipa_cmd *cmd; |
3415 | struct qeth_set_access_ctrl *access_ctrl_req; | 3425 | struct qeth_set_access_ctrl *access_ctrl_req; |
3416 | int rc; | ||
3417 | 3426 | ||
3418 | QETH_CARD_TEXT(card, 4, "setaccb"); | 3427 | QETH_CARD_TEXT(card, 4, "setaccb"); |
3419 | 3428 | ||
@@ -3440,7 +3449,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, | |||
3440 | card->gdev->dev.kobj.name, | 3449 | card->gdev->dev.kobj.name, |
3441 | access_ctrl_req->subcmd_code, | 3450 | access_ctrl_req->subcmd_code, |
3442 | cmd->data.setadapterparms.hdr.return_code); | 3451 | cmd->data.setadapterparms.hdr.return_code); |
3443 | rc = 0; | ||
3444 | break; | 3452 | break; |
3445 | } | 3453 | } |
3446 | case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: | 3454 | case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: |
@@ -3454,7 +3462,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, | |||
3454 | 3462 | ||
3455 | /* ensure isolation mode is "none" */ | 3463 | /* ensure isolation mode is "none" */ |
3456 | card->options.isolation = ISOLATION_MODE_NONE; | 3464 | card->options.isolation = ISOLATION_MODE_NONE; |
3457 | rc = -EOPNOTSUPP; | ||
3458 | break; | 3465 | break; |
3459 | } | 3466 | } |
3460 | case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: | 3467 | case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: |
@@ -3469,7 +3476,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, | |||
3469 | 3476 | ||
3470 | /* ensure isolation mode is "none" */ | 3477 | /* ensure isolation mode is "none" */ |
3471 | card->options.isolation = ISOLATION_MODE_NONE; | 3478 | card->options.isolation = ISOLATION_MODE_NONE; |
3472 | rc = -EOPNOTSUPP; | ||
3473 | break; | 3479 | break; |
3474 | } | 3480 | } |
3475 | case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: | 3481 | case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: |
@@ -3483,7 +3489,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, | |||
3483 | 3489 | ||
3484 | /* ensure isolation mode is "none" */ | 3490 | /* ensure isolation mode is "none" */ |
3485 | card->options.isolation = ISOLATION_MODE_NONE; | 3491 | card->options.isolation = ISOLATION_MODE_NONE; |
3486 | rc = -EPERM; | ||
3487 | break; | 3492 | break; |
3488 | } | 3493 | } |
3489 | default: | 3494 | default: |
@@ -3497,12 +3502,11 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, | |||
3497 | 3502 | ||
3498 | /* ensure isolation mode is "none" */ | 3503 | /* ensure isolation mode is "none" */ |
3499 | card->options.isolation = ISOLATION_MODE_NONE; | 3504 | card->options.isolation = ISOLATION_MODE_NONE; |
3500 | rc = 0; | ||
3501 | break; | 3505 | break; |
3502 | } | 3506 | } |
3503 | } | 3507 | } |
3504 | qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); | 3508 | qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); |
3505 | return rc; | 3509 | return 0; |
3506 | } | 3510 | } |
3507 | 3511 | ||
3508 | static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, | 3512 | static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, |
@@ -3744,15 +3748,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
3744 | /* skip 4 bytes (data_len struct member) to get req_len */ | 3748 | /* skip 4 bytes (data_len struct member) to get req_len */ |
3745 | if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) | 3749 | if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) |
3746 | return -EFAULT; | 3750 | return -EFAULT; |
3747 | ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL); | 3751 | ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr)); |
3748 | if (!ureq) { | 3752 | if (IS_ERR(ureq)) { |
3749 | QETH_CARD_TEXT(card, 2, "snmpnome"); | 3753 | QETH_CARD_TEXT(card, 2, "snmpnome"); |
3750 | return -ENOMEM; | 3754 | return PTR_ERR(ureq); |
3751 | } | ||
3752 | if (copy_from_user(ureq, udata, | ||
3753 | req_len + sizeof(struct qeth_snmp_ureq_hdr))) { | ||
3754 | kfree(ureq); | ||
3755 | return -EFAULT; | ||
3756 | } | 3755 | } |
3757 | qinfo.udata_len = ureq->hdr.data_len; | 3756 | qinfo.udata_len = ureq->hdr.data_len; |
3758 | qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); | 3757 | qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); |
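qeth_snmp_command() above collapses the kmalloc()-plus-copy_from_user() pair into memdup_user(), which allocates and copies in one step and reports failure through the ERR_PTR/IS_ERR/PTR_ERR convention. A userspace approximation follows; the stand-in returns NULL where the kernel helper would return an ERR_PTR.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-in: the kernel memdup_user() returns
 * ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT) instead of NULL. */
static void *memdup_user(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p)
		return NULL;
	memcpy(p, src, len);	/* kernel: copy_from_user(), may fault */
	return p;
}

int main(void)
{
	const char udata[] = "snmp request header";
	char *ureq = memdup_user(udata, sizeof(udata));

	if (!ureq)
		return 1;
	printf("copied: %s\n", ureq);
	free(ureq);
	return 0;
}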
@@ -3971,6 +3970,7 @@ retriable: | |||
3971 | else | 3970 | else |
3972 | goto retry; | 3971 | goto retry; |
3973 | } | 3972 | } |
3973 | card->read_or_write_problem = 0; | ||
3974 | rc = qeth_mpc_initialize(card); | 3974 | rc = qeth_mpc_initialize(card); |
3975 | if (rc) { | 3975 | if (rc) { |
3976 | QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); | 3976 | QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); |
@@ -4353,16 +4353,18 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev) | |||
4353 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | 4353 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); |
4354 | 4354 | ||
4355 | QETH_DBF_TEXT(SETUP, 2, "removedv"); | 4355 | QETH_DBF_TEXT(SETUP, 2, "removedv"); |
4356 | if (card->discipline.ccwgdriver) { | ||
4357 | card->discipline.ccwgdriver->remove(gdev); | ||
4358 | qeth_core_free_discipline(card); | ||
4359 | } | ||
4360 | 4356 | ||
4361 | if (card->info.type == QETH_CARD_TYPE_OSN) { | 4357 | if (card->info.type == QETH_CARD_TYPE_OSN) { |
4362 | qeth_core_remove_osn_attributes(&gdev->dev); | 4358 | qeth_core_remove_osn_attributes(&gdev->dev); |
4363 | } else { | 4359 | } else { |
4364 | qeth_core_remove_device_attributes(&gdev->dev); | 4360 | qeth_core_remove_device_attributes(&gdev->dev); |
4365 | } | 4361 | } |
4362 | |||
4363 | if (card->discipline.ccwgdriver) { | ||
4364 | card->discipline.ccwgdriver->remove(gdev); | ||
4365 | qeth_core_free_discipline(card); | ||
4366 | } | ||
4367 | |||
4366 | debug_unregister(card->debug); | 4368 | debug_unregister(card->debug); |
4367 | write_lock_irqsave(&qeth_core_card_list.rwlock, flags); | 4369 | write_lock_irqsave(&qeth_core_card_list.rwlock, flags); |
4368 | list_del(&card->list); | 4370 | list_del(&card->list); |
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 2eb022ff2610..42fa783a70c8 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c | |||
@@ -411,7 +411,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, | |||
411 | if (!card) | 411 | if (!card) |
412 | return -EINVAL; | 412 | return -EINVAL; |
413 | 413 | ||
414 | mutex_lock(&card->conf_mutex); | 414 | mutex_lock(&card->discipline_mutex); |
415 | if (card->state != CARD_STATE_DOWN) { | 415 | if (card->state != CARD_STATE_DOWN) { |
416 | rc = -EPERM; | 416 | rc = -EPERM; |
417 | goto out; | 417 | goto out; |
@@ -433,6 +433,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, | |||
433 | if (card->options.layer2 == newdis) | 433 | if (card->options.layer2 == newdis) |
434 | goto out; | 434 | goto out; |
435 | else { | 435 | else { |
436 | card->info.mac_bits = 0; | ||
436 | if (card->discipline.ccwgdriver) { | 437 | if (card->discipline.ccwgdriver) { |
437 | card->discipline.ccwgdriver->remove(card->gdev); | 438 | card->discipline.ccwgdriver->remove(card->gdev); |
438 | qeth_core_free_discipline(card); | 439 | qeth_core_free_discipline(card); |
@@ -445,7 +446,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, | |||
445 | 446 | ||
446 | rc = card->discipline.ccwgdriver->probe(card->gdev); | 447 | rc = card->discipline.ccwgdriver->probe(card->gdev); |
447 | out: | 448 | out: |
448 | mutex_unlock(&card->conf_mutex); | 449 | mutex_unlock(&card->discipline_mutex); |
449 | return rc ? rc : count; | 450 | return rc ? rc : count; |
450 | } | 451 | } |
451 | 452 | ||
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 32d07c2dcc67..830d63524d61 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -860,8 +860,6 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) | |||
860 | unregister_netdev(card->dev); | 860 | unregister_netdev(card->dev); |
861 | card->dev = NULL; | 861 | card->dev = NULL; |
862 | } | 862 | } |
863 | |||
864 | qeth_l2_del_all_mc(card); | ||
865 | return; | 863 | return; |
866 | } | 864 | } |
867 | 865 | ||
@@ -935,6 +933,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
935 | enum qeth_card_states recover_flag; | 933 | enum qeth_card_states recover_flag; |
936 | 934 | ||
937 | BUG_ON(!card); | 935 | BUG_ON(!card); |
936 | mutex_lock(&card->discipline_mutex); | ||
938 | mutex_lock(&card->conf_mutex); | 937 | mutex_lock(&card->conf_mutex); |
939 | QETH_DBF_TEXT(SETUP, 2, "setonlin"); | 938 | QETH_DBF_TEXT(SETUP, 2, "setonlin"); |
940 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); | 939 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
@@ -1012,6 +1011,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
1012 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); | 1011 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); |
1013 | out: | 1012 | out: |
1014 | mutex_unlock(&card->conf_mutex); | 1013 | mutex_unlock(&card->conf_mutex); |
1014 | mutex_unlock(&card->discipline_mutex); | ||
1015 | return 0; | 1015 | return 0; |
1016 | 1016 | ||
1017 | out_remove: | 1017 | out_remove: |
@@ -1025,6 +1025,7 @@ out_remove: | |||
1025 | else | 1025 | else |
1026 | card->state = CARD_STATE_DOWN; | 1026 | card->state = CARD_STATE_DOWN; |
1027 | mutex_unlock(&card->conf_mutex); | 1027 | mutex_unlock(&card->conf_mutex); |
1028 | mutex_unlock(&card->discipline_mutex); | ||
1028 | return rc; | 1029 | return rc; |
1029 | } | 1030 | } |
1030 | 1031 | ||
@@ -1040,6 +1041,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, | |||
1040 | int rc = 0, rc2 = 0, rc3 = 0; | 1041 | int rc = 0, rc2 = 0, rc3 = 0; |
1041 | enum qeth_card_states recover_flag; | 1042 | enum qeth_card_states recover_flag; |
1042 | 1043 | ||
1044 | mutex_lock(&card->discipline_mutex); | ||
1043 | mutex_lock(&card->conf_mutex); | 1045 | mutex_lock(&card->conf_mutex); |
1044 | QETH_DBF_TEXT(SETUP, 3, "setoffl"); | 1046 | QETH_DBF_TEXT(SETUP, 3, "setoffl"); |
1045 | QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); | 1047 | QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); |
@@ -1060,6 +1062,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, | |||
1060 | /* let user_space know that device is offline */ | 1062 | /* let user_space know that device is offline */ |
1061 | kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); | 1063 | kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); |
1062 | mutex_unlock(&card->conf_mutex); | 1064 | mutex_unlock(&card->conf_mutex); |
1065 | mutex_unlock(&card->discipline_mutex); | ||
1063 | return 0; | 1066 | return 0; |
1064 | } | 1067 | } |
1065 | 1068 | ||
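Note the lock nesting the two hunks above establish for the l2 discipline (the l3 hunks below mirror it): discipline_mutex is always taken outside conf_mutex, so a layer2 discipline switch can never interleave with a set_online/set_offline in progress. Schematically:

	mutex_lock(&card->discipline_mutex);	/* outer: excludes discipline switches */
	mutex_lock(&card->conf_mutex);		/* inner: protects configuration */
	/* ... bring the card online or offline ... */
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);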
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 8447d233d0b3..e705b27ec7dc 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h | |||
@@ -64,5 +64,6 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, | |||
64 | const u8 *); | 64 | const u8 *); |
65 | int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types); | 65 | int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types); |
66 | int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types); | 66 | int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types); |
67 | int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); | ||
67 | 68 | ||
68 | #endif /* __QETH_L3_H__ */ | 69 | #endif /* __QETH_L3_H__ */ |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 61d348e51920..e22ae248f613 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -195,7 +195,7 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) | |||
195 | } | 195 | } |
196 | } | 196 | } |
197 | 197 | ||
198 | static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, | 198 | int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, |
199 | struct qeth_ipaddr *addr) | 199 | struct qeth_ipaddr *addr) |
200 | { | 200 | { |
201 | struct qeth_ipato_entry *ipatoe; | 201 | struct qeth_ipato_entry *ipatoe; |
@@ -3354,6 +3354,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) | |||
3354 | { | 3354 | { |
3355 | struct qeth_card *card = dev_get_drvdata(&cgdev->dev); | 3355 | struct qeth_card *card = dev_get_drvdata(&cgdev->dev); |
3356 | 3356 | ||
3357 | qeth_l3_remove_device_attributes(&cgdev->dev); | ||
3358 | |||
3357 | qeth_set_allowed_threads(card, 0, 1); | 3359 | qeth_set_allowed_threads(card, 0, 1); |
3358 | wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); | 3360 | wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); |
3359 | 3361 | ||
@@ -3367,7 +3369,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) | |||
3367 | card->dev = NULL; | 3369 | card->dev = NULL; |
3368 | } | 3370 | } |
3369 | 3371 | ||
3370 | qeth_l3_remove_device_attributes(&cgdev->dev); | ||
3371 | qeth_l3_clear_ip_list(card, 0, 0); | 3372 | qeth_l3_clear_ip_list(card, 0, 0); |
3372 | qeth_l3_clear_ipato_list(card); | 3373 | qeth_l3_clear_ipato_list(card); |
3373 | return; | 3374 | return; |
@@ -3380,6 +3381,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3380 | enum qeth_card_states recover_flag; | 3381 | enum qeth_card_states recover_flag; |
3381 | 3382 | ||
3382 | BUG_ON(!card); | 3383 | BUG_ON(!card); |
3384 | mutex_lock(&card->discipline_mutex); | ||
3383 | mutex_lock(&card->conf_mutex); | 3385 | mutex_lock(&card->conf_mutex); |
3384 | QETH_DBF_TEXT(SETUP, 2, "setonlin"); | 3386 | QETH_DBF_TEXT(SETUP, 2, "setonlin"); |
3385 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); | 3387 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
@@ -3461,6 +3463,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3461 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); | 3463 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); |
3462 | out: | 3464 | out: |
3463 | mutex_unlock(&card->conf_mutex); | 3465 | mutex_unlock(&card->conf_mutex); |
3466 | mutex_unlock(&card->discipline_mutex); | ||
3464 | return 0; | 3467 | return 0; |
3465 | out_remove: | 3468 | out_remove: |
3466 | card->use_hard_stop = 1; | 3469 | card->use_hard_stop = 1; |
@@ -3473,6 +3476,7 @@ out_remove: | |||
3473 | else | 3476 | else |
3474 | card->state = CARD_STATE_DOWN; | 3477 | card->state = CARD_STATE_DOWN; |
3475 | mutex_unlock(&card->conf_mutex); | 3478 | mutex_unlock(&card->conf_mutex); |
3479 | mutex_unlock(&card->discipline_mutex); | ||
3476 | return rc; | 3480 | return rc; |
3477 | } | 3481 | } |
3478 | 3482 | ||
@@ -3488,6 +3492,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, | |||
3488 | int rc = 0, rc2 = 0, rc3 = 0; | 3492 | int rc = 0, rc2 = 0, rc3 = 0; |
3489 | enum qeth_card_states recover_flag; | 3493 | enum qeth_card_states recover_flag; |
3490 | 3494 | ||
3495 | mutex_lock(&card->discipline_mutex); | ||
3491 | mutex_lock(&card->conf_mutex); | 3496 | mutex_lock(&card->conf_mutex); |
3492 | QETH_DBF_TEXT(SETUP, 3, "setoffl"); | 3497 | QETH_DBF_TEXT(SETUP, 3, "setoffl"); |
3493 | QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); | 3498 | QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); |
@@ -3508,6 +3513,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, | |||
3508 | /* let user_space know that device is offline */ | 3513 | /* let user_space know that device is offline */ |
3509 | kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); | 3514 | kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); |
3510 | mutex_unlock(&card->conf_mutex); | 3515 | mutex_unlock(&card->conf_mutex); |
3516 | mutex_unlock(&card->discipline_mutex); | ||
3511 | return 0; | 3517 | return 0; |
3512 | } | 3518 | } |
3513 | 3519 | ||
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index fb5318b30e99..67cfa68dcf1b 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c | |||
@@ -479,6 +479,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, | |||
479 | struct device_attribute *attr, const char *buf, size_t count) | 479 | struct device_attribute *attr, const char *buf, size_t count) |
480 | { | 480 | { |
481 | struct qeth_card *card = dev_get_drvdata(dev); | 481 | struct qeth_card *card = dev_get_drvdata(dev); |
482 | struct qeth_ipaddr *tmpipa, *t; | ||
482 | char *tmp; | 483 | char *tmp; |
483 | int rc = 0; | 484 | int rc = 0; |
484 | 485 | ||
@@ -497,8 +498,21 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, | |||
497 | card->ipato.enabled = (card->ipato.enabled)? 0 : 1; | 498 | card->ipato.enabled = (card->ipato.enabled)? 0 : 1; |
498 | } else if (!strcmp(tmp, "1")) { | 499 | } else if (!strcmp(tmp, "1")) { |
499 | card->ipato.enabled = 1; | 500 | card->ipato.enabled = 1; |
501 | list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { | ||
502 | if ((tmpipa->type == QETH_IP_TYPE_NORMAL) && | ||
503 | qeth_l3_is_addr_covered_by_ipato(card, tmpipa)) | ||
504 | tmpipa->set_flags |= | ||
505 | QETH_IPA_SETIP_TAKEOVER_FLAG; | ||
506 | } | ||
507 | |||
500 | } else if (!strcmp(tmp, "0")) { | 508 | } else if (!strcmp(tmp, "0")) { |
501 | card->ipato.enabled = 0; | 509 | card->ipato.enabled = 0; |
510 | list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { | ||
511 | if (tmpipa->set_flags & | ||
512 | QETH_IPA_SETIP_TAKEOVER_FLAG) | ||
513 | tmpipa->set_flags &= | ||
514 | ~QETH_IPA_SETIP_TAKEOVER_FLAG; | ||
515 | } | ||
502 | } else | 516 | } else |
503 | rc = -EINVAL; | 517 | rc = -EINVAL; |
504 | out: | 518 | out: |
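The ipato_enable hunk above walks card->ip_tbd_list with list_for_each_entry_safe(), whose second argument caches the next entry so the current one could be unlinked mid-walk. A generic sketch of the iterator (the struct and flag are illustrative, not qeth types):

#include <linux/list.h>

struct item {
	struct list_head entry;
	unsigned int set_flags;
};

static void flag_all(struct list_head *head, unsigned int flag)
{
	struct item *pos, *tmp;		/* tmp holds the lookahead pointer */

	list_for_each_entry_safe(pos, tmp, head, entry)
		pos->set_flags |= flag;	/* pos may safely be list_del()'d here */
}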
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 7a104e2de3fa..f11e6bb5b036 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -533,7 +533,6 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) | |||
533 | vhost_net_enable_vq(n, vq); | 533 | vhost_net_enable_vq(n, vq); |
534 | } | 534 | } |
535 | 535 | ||
536 | done: | ||
537 | mutex_unlock(&vq->mutex); | 536 | mutex_unlock(&vq->mutex); |
538 | 537 | ||
539 | if (oldsock) { | 538 | if (oldsock) { |
diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h new file mode 100644 index 000000000000..72b713ab57e9 --- /dev/null +++ b/include/linux/can/platform/flexcan.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Marc Kleine-Budde <kernel@pengutronix.de> | ||
3 | * | ||
4 | * This file is released under the GPLv2 | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #ifndef __CAN_PLATFORM_FLEXCAN_H | ||
9 | #define __CAN_PLATFORM_FLEXCAN_H | ||
10 | |||
11 | /** | ||
12 | * struct flexcan_platform_data - flex CAN controller platform data | ||
13 | * @transceiver_switch: called to power the transceiver on or off | ||
14 | * | ||
15 | */ | ||
16 | struct flexcan_platform_data { | ||
17 | void (*transceiver_switch)(int enable); | ||
18 | }; | ||
19 | |||
20 | #endif /* __CAN_PLATFORM_FLEXCAN_H */ | ||
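A board file would supply the callback roughly as follows; the GPIO number and function names here are invented for illustration, not taken from any in-tree board:

#include <linux/gpio.h>
#include <linux/can/platform/flexcan.h>

#define BOARD_CAN_XCVR_GPIO	42	/* assumption: board-specific line */

static void board_can_xcvr_switch(int enable)
{
	gpio_set_value(BOARD_CAN_XCVR_GPIO, enable ? 1 : 0);
}

static struct flexcan_platform_data board_flexcan_pdata = {
	.transceiver_switch = board_can_xcvr_switch,
};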
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 3d7a6687d247..848480bc2bf9 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h | |||
@@ -127,6 +127,20 @@ static inline void random_ether_addr(u8 *addr) | |||
127 | } | 127 | } |
128 | 128 | ||
129 | /** | 129 | /** |
130 | * dev_hw_addr_random - Create random MAC and set device flag | ||
131 | * @dev: pointer to net_device structure | ||
132 | * @hwaddr: Pointer to a six-byte array that receives the random Ethernet address | ||
133 | * | ||
134 | * Generate random MAC to be used by a device and set addr_assign_type | ||
135 | * so the state can be read by sysfs and be used by udev. | ||
136 | */ | ||
137 | static inline void dev_hw_addr_random(struct net_device *dev, u8 *hwaddr) | ||
138 | { | ||
139 | dev->addr_assign_type |= NET_ADDR_RANDOM; | ||
140 | random_ether_addr(hwaddr); | ||
141 | } | ||
142 | |||
143 | /** | ||
130 | * compare_ether_addr - Compare two Ethernet addresses | 144 | * compare_ether_addr - Compare two Ethernet addresses |
131 | * @addr1: Pointer to a six-byte array containing the Ethernet address | 145 | * @addr1: Pointer to a six-byte array containing the Ethernet address |
132 | * @addr2: Pointer to the other six-byte array containing the Ethernet address | 146 | * @addr2: Pointer to the other six-byte array containing the Ethernet address
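The intended use of the dev_hw_addr_random() helper added above is the probe-time fallback when no burned-in address is usable; a sketch (the surrounding driver code is illustrative):

	/* after attempting to read dev->dev_addr from EEPROM/registers */
	if (!is_valid_ether_addr(dev->dev_addr))
		dev_hw_addr_random(dev, dev->dev_addr);	/* also flags NET_ADDR_RANDOM */

Userspace can then read addr_assign_type from sysfs (see the net-sysfs.c hunk below) and, for example, have udev persist a stable name only for NET_ADDR_PERM devices.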
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index e24ce6ea1fa3..35280b302290 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h | |||
@@ -72,6 +72,8 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, | |||
72 | } | 72 | } |
73 | } | 73 | } |
74 | 74 | ||
75 | extern void macvlan_common_setup(struct net_device *dev); | ||
76 | |||
75 | extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | 77 | extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev, |
76 | struct nlattr *tb[], struct nlattr *data[], | 78 | struct nlattr *tb[], struct nlattr *data[], |
77 | int (*receive)(struct sk_buff *skb), | 79 | int (*receive)(struct sk_buff *skb), |
diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h index da0341b8ca0a..14ba4452296e 100644 --- a/include/linux/ks8842.h +++ b/include/linux/ks8842.h | |||
@@ -25,10 +25,14 @@ | |||
25 | * struct ks8842_platform_data - Platform data of the KS8842 network driver | 25 | * struct ks8842_platform_data - Platform data of the KS8842 network driver |
26 | * @macaddr: The MAC address of the device, set to all zeros to use the one in | 26 | * @macaddr: The MAC address of the device, set to all zeros to use the one in
27 | * the chip. | 27 | * the chip. |
28 | * @rx_dma_channel: The DMA channel to use for RX, -1 for none. | ||
29 | * @tx_dma_channel: The DMA channel to use for TX, -1 for none. | ||
28 | * | 30 | * |
29 | */ | 31 | */ |
30 | struct ks8842_platform_data { | 32 | struct ks8842_platform_data { |
31 | u8 macaddr[ETH_ALEN]; | 33 | u8 macaddr[ETH_ALEN]; |
34 | int rx_dma_channel; | ||
35 | int tx_dma_channel; | ||
32 | }; | 36 | }; |
33 | 37 | ||
34 | #endif | 38 | #endif |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b6262898ece0..1bca6171b1aa 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -66,6 +66,11 @@ struct wireless_dev; | |||
66 | #define HAVE_FREE_NETDEV /* free_netdev() */ | 66 | #define HAVE_FREE_NETDEV /* free_netdev() */ |
67 | #define HAVE_NETDEV_PRIV /* netdev_priv() */ | 67 | #define HAVE_NETDEV_PRIV /* netdev_priv() */ |
68 | 68 | ||
69 | /* hardware address assignment types */ | ||
70 | #define NET_ADDR_PERM 0 /* address is permanent (default) */ | ||
71 | #define NET_ADDR_RANDOM 1 /* address is generated randomly */ | ||
72 | #define NET_ADDR_STOLEN 2 /* address is stolen from other device */ | ||
73 | |||
69 | /* Backlog congestion levels */ | 74 | /* Backlog congestion levels */ |
70 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ | 75 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ |
71 | #define NET_RX_DROP 1 /* packet dropped */ | 76 | #define NET_RX_DROP 1 /* packet dropped */ |
@@ -919,6 +924,7 @@ struct net_device { | |||
919 | 924 | ||
920 | /* Interface address info. */ | 925 | /* Interface address info. */ |
921 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ | 926 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ |
927 | unsigned char addr_assign_type; /* hw address assignment type */ | ||
922 | unsigned char addr_len; /* hardware address length */ | 928 | unsigned char addr_len; /* hardware address length */ |
923 | unsigned short dev_id; /* for shared network cards */ | 929 | unsigned short dev_id; /* for shared network cards */ |
924 | 930 | ||
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index fbc8cb0d48c3..58d44491880f 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -282,6 +282,7 @@ enum rtattr_type_t { | |||
282 | RTA_SESSION, /* no longer used */ | 282 | RTA_SESSION, /* no longer used */ |
283 | RTA_MP_ALGO, /* no longer used */ | 283 | RTA_MP_ALGO, /* no longer used */ |
284 | RTA_TABLE, | 284 | RTA_TABLE, |
285 | RTA_MARK, | ||
285 | __RTA_MAX | 286 | __RTA_MAX |
286 | }; | 287 | }; |
287 | 288 | ||
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f5aa87e1e0c8..d89876b806a0 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -202,10 +202,11 @@ struct skb_shared_info { | |||
202 | */ | 202 | */ |
203 | atomic_t dataref; | 203 | atomic_t dataref; |
204 | 204 | ||
205 | skb_frag_t frags[MAX_SKB_FRAGS]; | ||
206 | /* Intermediate layers must ensure that destructor_arg | 205 | /* Intermediate layers must ensure that destructor_arg |
207 | * remains valid until skb destructor */ | 206 | * remains valid until skb destructor */ |
208 | void * destructor_arg; | 207 | void * destructor_arg; |
208 | /* must be last field, see pskb_expand_head() */ | ||
209 | skb_frag_t frags[MAX_SKB_FRAGS]; | ||
209 | }; | 210 | }; |
210 | 211 | ||
211 | /* We divide dataref into two halves. The higher 16 bits hold references | 212 | /* We divide dataref into two halves. The higher 16 bits hold references |
diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h index 7e582061b230..3bed61d379a8 100644 --- a/include/net/irda/irda.h +++ b/include/net/irda/irda.h | |||
@@ -53,10 +53,6 @@ typedef __u32 magic_t; | |||
53 | #ifndef IRDA_ALIGN | 53 | #ifndef IRDA_ALIGN |
54 | # define IRDA_ALIGN __attribute__((aligned)) | 54 | # define IRDA_ALIGN __attribute__((aligned)) |
55 | #endif | 55 | #endif |
56 | #ifndef IRDA_PACK | ||
57 | # define IRDA_PACK __attribute__((packed)) | ||
58 | #endif | ||
59 | |||
60 | 56 | ||
61 | #ifdef CONFIG_IRDA_DEBUG | 57 | #ifdef CONFIG_IRDA_DEBUG |
62 | 58 | ||
diff --git a/include/net/irda/irlap_frame.h b/include/net/irda/irlap_frame.h index 641f88e848bd..6b1dc4f8eca5 100644 --- a/include/net/irda/irlap_frame.h +++ b/include/net/irda/irlap_frame.h | |||
@@ -85,7 +85,7 @@ struct discovery_t; | |||
85 | struct disc_frame { | 85 | struct disc_frame { |
86 | __u8 caddr; /* Connection address */ | 86 | __u8 caddr; /* Connection address */ |
87 | __u8 control; | 87 | __u8 control; |
88 | } IRDA_PACK; | 88 | } __packed; |
89 | 89 | ||
90 | struct xid_frame { | 90 | struct xid_frame { |
91 | __u8 caddr; /* Connection address */ | 91 | __u8 caddr; /* Connection address */ |
@@ -96,41 +96,41 @@ struct xid_frame { | |||
96 | __u8 flags; /* Discovery flags */ | 96 | __u8 flags; /* Discovery flags */ |
97 | __u8 slotnr; | 97 | __u8 slotnr; |
98 | __u8 version; | 98 | __u8 version; |
99 | } IRDA_PACK; | 99 | } __packed; |
100 | 100 | ||
101 | struct test_frame { | 101 | struct test_frame { |
102 | __u8 caddr; /* Connection address */ | 102 | __u8 caddr; /* Connection address */ |
103 | __u8 control; | 103 | __u8 control; |
104 | __le32 saddr; /* Source device address */ | 104 | __le32 saddr; /* Source device address */ |
105 | __le32 daddr; /* Destination device address */ | 105 | __le32 daddr; /* Destination device address */ |
106 | } IRDA_PACK; | 106 | } __packed; |
107 | 107 | ||
108 | struct ua_frame { | 108 | struct ua_frame { |
109 | __u8 caddr; | 109 | __u8 caddr; |
110 | __u8 control; | 110 | __u8 control; |
111 | __le32 saddr; /* Source device address */ | 111 | __le32 saddr; /* Source device address */ |
112 | __le32 daddr; /* Dest device address */ | 112 | __le32 daddr; /* Dest device address */ |
113 | } IRDA_PACK; | 113 | } __packed; |
114 | 114 | ||
115 | struct dm_frame { | 115 | struct dm_frame { |
116 | __u8 caddr; /* Connection address */ | 116 | __u8 caddr; /* Connection address */ |
117 | __u8 control; | 117 | __u8 control; |
118 | } IRDA_PACK; | 118 | } __packed; |
119 | 119 | ||
120 | struct rd_frame { | 120 | struct rd_frame { |
121 | __u8 caddr; /* Connection address */ | 121 | __u8 caddr; /* Connection address */ |
122 | __u8 control; | 122 | __u8 control; |
123 | } IRDA_PACK; | 123 | } __packed; |
124 | 124 | ||
125 | struct rr_frame { | 125 | struct rr_frame { |
126 | __u8 caddr; /* Connection address */ | 126 | __u8 caddr; /* Connection address */ |
127 | __u8 control; | 127 | __u8 control; |
128 | } IRDA_PACK; | 128 | } __packed; |
129 | 129 | ||
130 | struct i_frame { | 130 | struct i_frame { |
131 | __u8 caddr; | 131 | __u8 caddr; |
132 | __u8 control; | 132 | __u8 control; |
133 | } IRDA_PACK; | 133 | } __packed; |
134 | 134 | ||
135 | struct snrm_frame { | 135 | struct snrm_frame { |
136 | __u8 caddr; | 136 | __u8 caddr; |
@@ -138,7 +138,7 @@ struct snrm_frame { | |||
138 | __le32 saddr; | 138 | __le32 saddr; |
139 | __le32 daddr; | 139 | __le32 daddr; |
140 | __u8 ncaddr; | 140 | __u8 ncaddr; |
141 | } IRDA_PACK; | 141 | } __packed; |
142 | 142 | ||
143 | void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb); | 143 | void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb); |
144 | void irlap_send_discovery_xid_frame(struct irlap_cb *, int S, __u8 s, | 144 | void irlap_send_discovery_xid_frame(struct irlap_cb *, int S, __u8 s, |
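IRDA_PACK was a private spelling of the packed attribute; the tree-wide __packed macro from the compiler headers expands to the same thing, so the on-the-wire frame layouts are unchanged:

/* effectively, from the kernel's compiler headers: */
#define __packed __attribute__((packed))

struct dm_frame {	/* exactly 2 bytes, no padding */
	__u8 caddr;
	__u8 control;
} __packed;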
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index f85fc8a140dc..b0787a1dea90 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -419,7 +419,7 @@ struct ieee80211_tx_rate { | |||
419 | s8 idx; | 419 | s8 idx; |
420 | u8 count; | 420 | u8 count; |
421 | u8 flags; | 421 | u8 flags; |
422 | } __attribute__((packed)); | 422 | } __packed; |
423 | 423 | ||
424 | /** | 424 | /** |
425 | * struct ieee80211_tx_info - skb transmit information | 425 | * struct ieee80211_tx_info - skb transmit information |
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h index ceac661cdfd5..cfe2943690ff 100644 --- a/include/net/tc_act/tc_mirred.h +++ b/include/net/tc_act/tc_mirred.h | |||
@@ -9,6 +9,7 @@ struct tcf_mirred { | |||
9 | int tcfm_ifindex; | 9 | int tcfm_ifindex; |
10 | int tcfm_ok_push; | 10 | int tcfm_ok_push; |
11 | struct net_device *tcfm_dev; | 11 | struct net_device *tcfm_dev; |
12 | struct list_head tcfm_list; | ||
12 | }; | 13 | }; |
13 | #define to_mirred(pc) \ | 14 | #define to_mirred(pc) \ |
14 | container_of(pc, struct tcf_mirred, common) | 15 | container_of(pc, struct tcf_mirred, common) |
diff --git a/net/Kconfig b/net/Kconfig index b3250944cde9..e24fa0873f32 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
@@ -32,7 +32,7 @@ config WANT_COMPAT_NETLINK_MESSAGES | |||
32 | config COMPAT_NETLINK_MESSAGES | 32 | config COMPAT_NETLINK_MESSAGES |
33 | def_bool y | 33 | def_bool y |
34 | depends on COMPAT | 34 | depends on COMPAT |
35 | depends on WIRELESS_EXT || WANT_COMPAT_NETLINK_MESSAGES | 35 | depends on WEXT_CORE || WANT_COMPAT_NETLINK_MESSAGES |
36 | help | 36 | help |
37 | This option makes it possible to send different netlink messages | 37 | This option makes it possible to send different netlink messages |
38 | to tasks depending on whether the task is a compat task or not. To | 38 | to tasks depending on whether the task is a compat task or not. To |
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c index 4b04d25b6a3f..eb1602022ac0 100644 --- a/net/caif/cfrfml.c +++ b/net/caif/cfrfml.c | |||
@@ -193,7 +193,7 @@ out: | |||
193 | 193 | ||
194 | static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt) | 194 | static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt) |
195 | { | 195 | { |
196 | caif_assert(!cfpkt_getlen(pkt) < rfml->fragment_size); | 196 | caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size); |
197 | 197 | ||
198 | /* Add info for MUX-layer to route the packet out. */ | 198 | /* Add info for MUX-layer to route the packet out. */ |
199 | cfpkt_info(pkt)->channel_id = rfml->serv.layer.id; | 199 | cfpkt_info(pkt)->channel_id = rfml->serv.layer.id; |
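The caif change fixes an operator-precedence bug: logical NOT binds tighter than the comparison, so the old assertion parsed as ((!cfpkt_getlen(pkt)) < rfml->fragment_size), i.e. (0 or 1) < fragment_size, which holds for any fragment_size above 1 and so asserted nothing. Spelled out:

	/* old, broken: !len yields 0 or 1, then compares that to fragment_size */
	caif_assert(!cfpkt_getlen(pkt) < rfml->fragment_size);

	/* new: states the intended invariant directly */
	caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size);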
diff --git a/net/core/dev.c b/net/core/dev.c index 6e1b4370781c..b74fcd3e9365 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1484,6 +1484,7 @@ static inline void net_timestamp_check(struct sk_buff *skb) | |||
1484 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | 1484 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) |
1485 | { | 1485 | { |
1486 | skb_orphan(skb); | 1486 | skb_orphan(skb); |
1487 | nf_reset(skb); | ||
1487 | 1488 | ||
1488 | if (!(dev->flags & IFF_UP) || | 1489 | if (!(dev->flags & IFF_UP) || |
1489 | (skb->len > (dev->mtu + dev->hard_header_len))) { | 1490 | (skb->len > (dev->mtu + dev->hard_header_len))) { |
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 646ef3bc7200..36e603c78ce9 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c | |||
@@ -347,9 +347,9 @@ static struct notifier_block dropmon_net_notifier = { | |||
347 | 347 | ||
348 | static int __init init_net_drop_monitor(void) | 348 | static int __init init_net_drop_monitor(void) |
349 | { | 349 | { |
350 | int cpu; | ||
351 | int rc, i, ret; | ||
352 | struct per_cpu_dm_data *data; | 350 | struct per_cpu_dm_data *data; |
351 | int cpu, rc; | ||
352 | |||
353 | printk(KERN_INFO "Initalizing network drop monitor service\n"); | 353 | printk(KERN_INFO "Initalizing network drop monitor service\n"); |
354 | 354 | ||
355 | if (sizeof(void *) > 8) { | 355 | if (sizeof(void *) > 8) { |
@@ -357,21 +357,12 @@ static int __init init_net_drop_monitor(void) | |||
357 | return -ENOSPC; | 357 | return -ENOSPC; |
358 | } | 358 | } |
359 | 359 | ||
360 | if (genl_register_family(&net_drop_monitor_family) < 0) { | 360 | rc = genl_register_family_with_ops(&net_drop_monitor_family, |
361 | dropmon_ops, | ||
362 | ARRAY_SIZE(dropmon_ops)); | ||
363 | if (rc) { | ||
361 | printk(KERN_ERR "Could not create drop monitor netlink family\n"); | 364 | printk(KERN_ERR "Could not create drop monitor netlink family\n"); |
362 | return -EFAULT; | 365 | return rc; |
363 | } | ||
364 | |||
365 | rc = -EFAULT; | ||
366 | |||
367 | for (i = 0; i < ARRAY_SIZE(dropmon_ops); i++) { | ||
368 | ret = genl_register_ops(&net_drop_monitor_family, | ||
369 | &dropmon_ops[i]); | ||
370 | if (ret) { | ||
371 | printk(KERN_CRIT "Failed to register operation %d\n", | ||
372 | dropmon_ops[i].cmd); | ||
373 | goto out_unreg; | ||
374 | } | ||
375 | } | 366 | } |
376 | 367 | ||
377 | rc = register_netdevice_notifier(&dropmon_net_notifier); | 368 | rc = register_netdevice_notifier(&dropmon_net_notifier); |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index d2b596537d41..af4dfbadf2a0 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -95,6 +95,7 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, | |||
95 | } | 95 | } |
96 | 96 | ||
97 | NETDEVICE_SHOW(dev_id, fmt_hex); | 97 | NETDEVICE_SHOW(dev_id, fmt_hex); |
98 | NETDEVICE_SHOW(addr_assign_type, fmt_dec); | ||
98 | NETDEVICE_SHOW(addr_len, fmt_dec); | 99 | NETDEVICE_SHOW(addr_len, fmt_dec); |
99 | NETDEVICE_SHOW(iflink, fmt_dec); | 100 | NETDEVICE_SHOW(iflink, fmt_dec); |
100 | NETDEVICE_SHOW(ifindex, fmt_dec); | 101 | NETDEVICE_SHOW(ifindex, fmt_dec); |
@@ -295,6 +296,7 @@ static ssize_t show_ifalias(struct device *dev, | |||
295 | } | 296 | } |
296 | 297 | ||
297 | static struct device_attribute net_class_attributes[] = { | 298 | static struct device_attribute net_class_attributes[] = { |
299 | __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL), | ||
298 | __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), | 300 | __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), |
299 | __ATTR(dev_id, S_IRUGO, show_dev_id, NULL), | 301 | __ATTR(dev_id, S_IRUGO, show_dev_id, NULL), |
300 | __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias), | 302 | __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias), |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 24a19debda1b..10a1ea72010d 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -1434,18 +1434,12 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1434 | i += len; | 1434 | i += len; |
1435 | 1435 | ||
1436 | for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) { | 1436 | for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) { |
1437 | if (*v >= '0' && *v <= '9') { | 1437 | int value; |
1438 | *m *= 16; | 1438 | |
1439 | *m += *v - '0'; | 1439 | value = hex_to_bin(*v); |
1440 | } | 1440 | if (value >= 0) |
1441 | if (*v >= 'A' && *v <= 'F') { | 1441 | *m = *m * 16 + value; |
1442 | *m *= 16; | 1442 | |
1443 | *m += *v - 'A' + 10; | ||
1444 | } | ||
1445 | if (*v >= 'a' && *v <= 'f') { | ||
1446 | *m *= 16; | ||
1447 | *m += *v - 'a' + 10; | ||
1448 | } | ||
1449 | if (*v == ':') { | 1443 | if (*v == ':') { |
1450 | m++; | 1444 | m++; |
1451 | *m = 0; | 1445 | *m = 0; |
@@ -1476,18 +1470,12 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1476 | i += len; | 1470 | i += len; |
1477 | 1471 | ||
1478 | for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) { | 1472 | for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) { |
1479 | if (*v >= '0' && *v <= '9') { | 1473 | int value; |
1480 | *m *= 16; | 1474 | |
1481 | *m += *v - '0'; | 1475 | value = hex_to_bin(*v); |
1482 | } | 1476 | if (value >= 0) |
1483 | if (*v >= 'A' && *v <= 'F') { | 1477 | *m = *m * 16 + value; |
1484 | *m *= 16; | 1478 | |
1485 | *m += *v - 'A' + 10; | ||
1486 | } | ||
1487 | if (*v >= 'a' && *v <= 'f') { | ||
1488 | *m *= 16; | ||
1489 | *m += *v - 'a' + 10; | ||
1490 | } | ||
1491 | if (*v == ':') { | 1479 | if (*v == ':') { |
1492 | m++; | 1480 | m++; |
1493 | *m = 0; | 1481 | *m = 0; |
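hex_to_bin() (declared in <linux/kernel.h>) returns the 0-15 value of a hex digit or a negative value for anything else, which is what lets the two pktgen loops above collapse three per-range branches into one call. A standalone, slightly hardened sketch of the same parsing loop (the extra bound check on ':' is an addition, not pktgen code):

#include <linux/kernel.h>
#include <linux/types.h>

static void parse_mac(const char *v, u8 mac[6])
{
	u8 *m = mac;

	for (*m = 0; *v && m < mac + 6; v++) {
		int value = hex_to_bin(*v);

		if (value >= 0)
			*m = *m * 16 + value;	/* shift in one nibble */
		if (*v == ':' && m < mac + 5) {
			m++;			/* advance to the next octet */
			*m = 0;
		}
	}
}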
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 76d33ca5f037..3a2513f0d0c3 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -817,7 +817,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, | |||
817 | memcpy(data + nhead, skb->head, skb->tail - skb->head); | 817 | memcpy(data + nhead, skb->head, skb->tail - skb->head); |
818 | #endif | 818 | #endif |
819 | memcpy(data + size, skb_end_pointer(skb), | 819 | memcpy(data + size, skb_end_pointer(skb), |
820 | sizeof(struct skb_shared_info)); | 820 | offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); |
821 | 821 | ||
822 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | 822 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
823 | get_page(skb_shinfo(skb)->frags[i].page); | 823 | get_page(skb_shinfo(skb)->frags[i].page); |
@@ -843,7 +843,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, | |||
843 | skb->network_header += off; | 843 | skb->network_header += off; |
844 | if (skb_mac_header_was_set(skb)) | 844 | if (skb_mac_header_was_set(skb)) |
845 | skb->mac_header += off; | 845 | skb->mac_header += off; |
846 | skb->csum_start += nhead; | 846 | /* Only adjust this if it actually is csum_start rather than csum */ |
847 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
848 | skb->csum_start += nhead; | ||
847 | skb->cloned = 0; | 849 | skb->cloned = 0; |
848 | skb->hdr_len = 0; | 850 | skb->hdr_len = 0; |
849 | skb->nohdr = 0; | 851 | skb->nohdr = 0; |
@@ -930,7 +932,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, | |||
930 | copy_skb_header(n, skb); | 932 | copy_skb_header(n, skb); |
931 | 933 | ||
932 | off = newheadroom - oldheadroom; | 934 | off = newheadroom - oldheadroom; |
933 | n->csum_start += off; | 935 | if (n->ip_summed == CHECKSUM_PARTIAL) |
936 | n->csum_start += off; | ||
934 | #ifdef NET_SKBUFF_DATA_USES_OFFSET | 937 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
935 | n->transport_header += off; | 938 | n->transport_header += off; |
936 | n->network_header += off; | 939 | n->network_header += off; |
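The offsetof() expression above is what the skbuff.h reordering earlier (frags[] moved to be the last member of skb_shared_info) makes legal: copying offsetof(struct skb_shared_info, frags[nr_frags]) bytes takes every fixed field plus exactly the fragment descriptors in use. The general shape of the idiom, with an invented struct:

#include <stddef.h>

struct hdr_plus_array {
	int a, b;			/* fixed fields first */
	struct { void *p; } slots[16];	/* variable-use array must be last */
};

/* bytes covering the fixed fields plus the first n slots */
size_t used_bytes(unsigned int n)
{
	return offsetof(struct hdr_plus_array, slots[n]);
}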
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 64d0875f5192..3a43cf36db87 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -469,7 +469,7 @@ struct arp_payload { | |||
469 | __be32 src_ip; | 469 | __be32 src_ip; |
470 | u_int8_t dst_hw[ETH_ALEN]; | 470 | u_int8_t dst_hw[ETH_ALEN]; |
471 | __be32 dst_ip; | 471 | __be32 dst_ip; |
472 | } __attribute__ ((packed)); | 472 | } __packed; |
473 | 473 | ||
474 | #ifdef DEBUG | 474 | #ifdef DEBUG |
475 | static void arp_print(struct arp_payload *payload) | 475 | static void arp_print(struct arp_payload *payload) |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 562ce92de2a6..3f56b6e6c6aa 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -2878,6 +2878,9 @@ static int rt_fill_info(struct net *net, | |||
2878 | if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0) | 2878 | if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0) |
2879 | goto nla_put_failure; | 2879 | goto nla_put_failure; |
2880 | 2880 | ||
2881 | if (rt->fl.mark) | ||
2882 | NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark); | ||
2883 | |||
2881 | error = rt->dst.error; | 2884 | error = rt->dst.error; |
2882 | expires = rt->dst.expires ? rt->dst.expires - jiffies : 0; | 2885 | expires = rt->dst.expires ? rt->dst.expires - jiffies : 0; |
2883 | if (rt->peer) { | 2886 | if (rt->peer) { |
@@ -2933,6 +2936,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2933 | __be32 src = 0; | 2936 | __be32 src = 0; |
2934 | u32 iif; | 2937 | u32 iif; |
2935 | int err; | 2938 | int err; |
2939 | int mark; | ||
2936 | struct sk_buff *skb; | 2940 | struct sk_buff *skb; |
2937 | 2941 | ||
2938 | err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy); | 2942 | err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy); |
@@ -2960,6 +2964,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2960 | src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0; | 2964 | src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0; |
2961 | dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0; | 2965 | dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0; |
2962 | iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0; | 2966 | iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0; |
2967 | mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0; | ||
2963 | 2968 | ||
2964 | if (iif) { | 2969 | if (iif) { |
2965 | struct net_device *dev; | 2970 | struct net_device *dev; |
@@ -2972,6 +2977,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2972 | 2977 | ||
2973 | skb->protocol = htons(ETH_P_IP); | 2978 | skb->protocol = htons(ETH_P_IP); |
2974 | skb->dev = dev; | 2979 | skb->dev = dev; |
2980 | skb->mark = mark; | ||
2975 | local_bh_disable(); | 2981 | local_bh_disable(); |
2976 | err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); | 2982 | err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); |
2977 | local_bh_enable(); | 2983 | local_bh_enable(); |
@@ -2989,6 +2995,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2989 | }, | 2995 | }, |
2990 | }, | 2996 | }, |
2991 | .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0, | 2997 | .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0, |
2998 | .mark = mark, | ||
2992 | }; | 2999 | }; |
2993 | err = ip_route_output_key(net, &rt, &fl); | 3000 | err = ip_route_output_key(net, &rt, &fl); |
2994 | } | 3001 | } |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e81155d2f251..ab70a3fbcafa 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1763,7 +1763,10 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev) | |||
1763 | 1763 | ||
1764 | idev = ipv6_find_idev(dev); | 1764 | idev = ipv6_find_idev(dev); |
1765 | if (!idev) | 1765 | if (!idev) |
1766 | return NULL; | 1766 | return ERR_PTR(-ENOBUFS); |
1767 | |||
1768 | if (idev->cnf.disable_ipv6) | ||
1769 | return ERR_PTR(-EACCES); | ||
1767 | 1770 | ||
1768 | /* Add default multicast route */ | 1771 | /* Add default multicast route */ |
1769 | addrconf_add_mroute(dev); | 1772 | addrconf_add_mroute(dev); |
@@ -2132,8 +2135,9 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx, | |||
2132 | if (!dev) | 2135 | if (!dev) |
2133 | return -ENODEV; | 2136 | return -ENODEV; |
2134 | 2137 | ||
2135 | if ((idev = addrconf_add_dev(dev)) == NULL) | 2138 | idev = addrconf_add_dev(dev); |
2136 | return -ENOBUFS; | 2139 | if (IS_ERR(idev)) |
2140 | return PTR_ERR(idev); | ||
2137 | 2141 | ||
2138 | scope = ipv6_addr_scope(pfx); | 2142 | scope = ipv6_addr_scope(pfx); |
2139 | 2143 | ||
@@ -2380,7 +2384,7 @@ static void addrconf_dev_config(struct net_device *dev) | |||
2380 | } | 2384 | } |
2381 | 2385 | ||
2382 | idev = addrconf_add_dev(dev); | 2386 | idev = addrconf_add_dev(dev); |
2383 | if (idev == NULL) | 2387 | if (IS_ERR(idev)) |
2384 | return; | 2388 | return; |
2385 | 2389 | ||
2386 | memset(&addr, 0, sizeof(struct in6_addr)); | 2390 | memset(&addr, 0, sizeof(struct in6_addr)); |
@@ -2471,7 +2475,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev) | |||
2471 | ASSERT_RTNL(); | 2475 | ASSERT_RTNL(); |
2472 | 2476 | ||
2473 | idev = addrconf_add_dev(dev); | 2477 | idev = addrconf_add_dev(dev); |
2474 | if (!idev) { | 2478 | if (IS_ERR(idev)) { |
2475 | printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); | 2479 | printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); |
2476 | return; | 2480 | return; |
2477 | } | 2481 | } |
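addrconf_add_dev() now reports why it failed by encoding an errno in the returned pointer; ERR_PTR()/IS_ERR()/PTR_ERR() from <linux/err.h> are the standard trio for this. A minimal sketch with an invented lookup function:

#include <linux/err.h>

struct widget { int id; };

static struct widget *widget_get(struct widget *cache, int id)
{
	if (id < 0)
		return ERR_PTR(-EINVAL);	/* errno hidden in the pointer */
	if (!cache)
		return ERR_PTR(-ENOBUFS);
	return cache;
}

/* caller:
 *	w = widget_get(cache, id);
 *	if (IS_ERR(w))
 *		return PTR_ERR(w);	-- recovers the errno
 */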
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index dab6b8efe5fa..29ac8e1a509e 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -627,7 +627,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta) | |||
627 | skb->dev = sta->sdata->dev; | 627 | skb->dev = sta->sdata->dev; |
628 | skb->protocol = eth_type_trans(skb, sta->sdata->dev); | 628 | skb->protocol = eth_type_trans(skb, sta->sdata->dev); |
629 | memset(skb->cb, 0, sizeof(skb->cb)); | 629 | memset(skb->cb, 0, sizeof(skb->cb)); |
630 | netif_rx(skb); | 630 | netif_rx_ni(skb); |
631 | } | 631 | } |
632 | 632 | ||
633 | static void sta_apply_parameters(struct ieee80211_local *local, | 633 | static void sta_apply_parameters(struct ieee80211_local *local, |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 8648a9922aab..2cbf380377d5 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1406,7 +1406,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1406 | struct netlink_sock *nlk = nlk_sk(sk); | 1406 | struct netlink_sock *nlk = nlk_sk(sk); |
1407 | int noblock = flags&MSG_DONTWAIT; | 1407 | int noblock = flags&MSG_DONTWAIT; |
1408 | size_t copied; | 1408 | size_t copied; |
1409 | struct sk_buff *skb, *frag __maybe_unused = NULL; | 1409 | struct sk_buff *skb; |
1410 | int err; | 1410 | int err; |
1411 | 1411 | ||
1412 | if (flags&MSG_OOB) | 1412 | if (flags&MSG_OOB) |
@@ -1441,7 +1441,21 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1441 | kfree_skb(skb); | 1441 | kfree_skb(skb); |
1442 | skb = compskb; | 1442 | skb = compskb; |
1443 | } else { | 1443 | } else { |
1444 | frag = skb_shinfo(skb)->frag_list; | 1444 | /* |
1445 | * Before setting frag_list to NULL, we must get a | ||
1446 | * private copy of skb if shared (because of MSG_PEEK) | ||
1447 | */ | ||
1448 | if (skb_shared(skb)) { | ||
1449 | struct sk_buff *nskb; | ||
1450 | |||
1451 | nskb = pskb_copy(skb, GFP_KERNEL); | ||
1452 | kfree_skb(skb); | ||
1453 | skb = nskb; | ||
1454 | err = -ENOMEM; | ||
1455 | if (!skb) | ||
1456 | goto out; | ||
1457 | } | ||
1458 | kfree_skb(skb_shinfo(skb)->frag_list); | ||
1445 | skb_shinfo(skb)->frag_list = NULL; | 1459 | skb_shinfo(skb)->frag_list = NULL; |
1446 | } | 1460 | } |
1447 | } | 1461 | } |
@@ -1478,10 +1492,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1478 | if (flags & MSG_TRUNC) | 1492 | if (flags & MSG_TRUNC) |
1479 | copied = skb->len; | 1493 | copied = skb->len; |
1480 | 1494 | ||
1481 | #ifdef CONFIG_COMPAT_NETLINK_MESSAGES | ||
1482 | skb_shinfo(skb)->frag_list = frag; | ||
1483 | #endif | ||
1484 | |||
1485 | skb_free_datagram(sk, skb); | 1495 | skb_free_datagram(sk, skb); |
1486 | 1496 | ||
1487 | if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) | 1497 | if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) |
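The rule the af_netlink fix applies: skb metadata may only be rewritten on a private skb, and MSG_PEEK can leave the receive queue holding a second reference. skb_shared() tests the users count and pskb_copy() duplicates the header and linear data; annotated, the new sequence is:

	if (skb_shared(skb)) {			/* another ref (MSG_PEEK) exists */
		struct sk_buff *nskb = pskb_copy(skb, GFP_KERNEL);

		kfree_skb(skb);			/* drop our ref on the shared skb */
		if (!nskb)
			return -ENOMEM;		/* (the code above jumps to out) */
		skb = nskb;			/* private copy, safe to mutate */
	}
	kfree_skb(skb_shinfo(skb)->frag_list);
	skb_shinfo(skb)->frag_list = NULL;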
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index aa4308afcc7f..26ed3e8587c2 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -303,6 +303,7 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops) | |||
303 | errout: | 303 | errout: |
304 | return err; | 304 | return err; |
305 | } | 305 | } |
306 | EXPORT_SYMBOL(genl_register_ops); | ||
306 | 307 | ||
307 | /** | 308 | /** |
308 | * genl_unregister_ops - unregister generic netlink operations | 309 | * genl_unregister_ops - unregister generic netlink operations |
@@ -337,6 +338,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops) | |||
337 | 338 | ||
338 | return -ENOENT; | 339 | return -ENOENT; |
339 | } | 340 | } |
341 | EXPORT_SYMBOL(genl_unregister_ops); | ||
340 | 342 | ||
341 | /** | 343 | /** |
342 | * genl_register_family - register a generic netlink family | 344 | * genl_register_family - register a generic netlink family |
@@ -405,6 +407,7 @@ errout_locked: | |||
405 | errout: | 407 | errout: |
406 | return err; | 408 | return err; |
407 | } | 409 | } |
410 | EXPORT_SYMBOL(genl_register_family); | ||
408 | 411 | ||
409 | /** | 412 | /** |
410 | * genl_register_family_with_ops - register a generic netlink family | 413 | * genl_register_family_with_ops - register a generic netlink family |
@@ -485,6 +488,7 @@ int genl_unregister_family(struct genl_family *family) | |||
485 | 488 | ||
486 | return -ENOENT; | 489 | return -ENOENT; |
487 | } | 490 | } |
491 | EXPORT_SYMBOL(genl_unregister_family); | ||
488 | 492 | ||
489 | static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | 493 | static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) |
490 | { | 494 | { |
@@ -873,11 +877,7 @@ static int __init genl_init(void) | |||
873 | for (i = 0; i < GENL_FAM_TAB_SIZE; i++) | 877 | for (i = 0; i < GENL_FAM_TAB_SIZE; i++) |
874 | INIT_LIST_HEAD(&family_ht[i]); | 878 | INIT_LIST_HEAD(&family_ht[i]); |
875 | 879 | ||
876 | err = genl_register_family(&genl_ctrl); | 880 | err = genl_register_family_with_ops(&genl_ctrl, &genl_ctrl_ops, 1); |
877 | if (err < 0) | ||
878 | goto problem; | ||
879 | |||
880 | err = genl_register_ops(&genl_ctrl, &genl_ctrl_ops); | ||
881 | if (err < 0) | 881 | if (err < 0) |
882 | goto problem; | 882 | goto problem; |
883 | 883 | ||
@@ -899,11 +899,6 @@ problem: | |||
899 | 899 | ||
900 | subsys_initcall(genl_init); | 900 | subsys_initcall(genl_init); |
901 | 901 | ||
902 | EXPORT_SYMBOL(genl_register_ops); | ||
903 | EXPORT_SYMBOL(genl_unregister_ops); | ||
904 | EXPORT_SYMBOL(genl_register_family); | ||
905 | EXPORT_SYMBOL(genl_unregister_family); | ||
906 | |||
907 | static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group, | 902 | static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group, |
908 | gfp_t flags) | 903 | gfp_t flags) |
909 | { | 904 | { |
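genl_register_family_with_ops() registers the family and a whole ops array in one call and unwinds the family registration if any op fails, which is why both drop_monitor and the genetlink controller above can drop their hand-rolled loops. A usage sketch (the family name and ops are illustrative):

static struct genl_family my_family = {
	.id	 = GENL_ID_GENERATE,
	.name	 = "my_family",
	.version = 1,
	.maxattr = 0,
};

static struct genl_ops my_ops[] = {
	/* { .cmd = MY_CMD_FOO, .doit = my_foo_doit }, */
};

static int __init my_genl_init(void)
{
	return genl_register_family_with_ops(&my_family, my_ops,
					     ARRAY_SIZE(my_ops));
}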
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index a16b0175f890..11f195af2da0 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -33,6 +33,7 @@ | |||
33 | static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1]; | 33 | static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1]; |
34 | static u32 mirred_idx_gen; | 34 | static u32 mirred_idx_gen; |
35 | static DEFINE_RWLOCK(mirred_lock); | 35 | static DEFINE_RWLOCK(mirred_lock); |
36 | static LIST_HEAD(mirred_list); | ||
36 | 37 | ||
37 | static struct tcf_hashinfo mirred_hash_info = { | 38 | static struct tcf_hashinfo mirred_hash_info = { |
38 | .htab = tcf_mirred_ht, | 39 | .htab = tcf_mirred_ht, |
@@ -47,7 +48,9 @@ static inline int tcf_mirred_release(struct tcf_mirred *m, int bind) | |||
47 | m->tcf_bindcnt--; | 48 | m->tcf_bindcnt--; |
48 | m->tcf_refcnt--; | 49 | m->tcf_refcnt--; |
49 | if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) { | 50 | if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) { |
50 | dev_put(m->tcfm_dev); | 51 | list_del(&m->tcfm_list); |
52 | if (m->tcfm_dev) | ||
53 | dev_put(m->tcfm_dev); | ||
51 | tcf_hash_destroy(&m->common, &mirred_hash_info); | 54 | tcf_hash_destroy(&m->common, &mirred_hash_info); |
52 | return 1; | 55 | return 1; |
53 | } | 56 | } |
@@ -134,8 +137,10 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est, | |||
134 | m->tcfm_ok_push = ok_push; | 137 | m->tcfm_ok_push = ok_push; |
135 | } | 138 | } |
136 | spin_unlock_bh(&m->tcf_lock); | 139 | spin_unlock_bh(&m->tcf_lock); |
137 | if (ret == ACT_P_CREATED) | 140 | if (ret == ACT_P_CREATED) { |
141 | list_add(&m->tcfm_list, &mirred_list); | ||
138 | tcf_hash_insert(pc, &mirred_hash_info); | 142 | tcf_hash_insert(pc, &mirred_hash_info); |
143 | } | ||
139 | 144 | ||
140 | return ret; | 145 | return ret; |
141 | } | 146 | } |
@@ -164,9 +169,14 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, | |||
164 | m->tcf_bstats.packets++; | 169 | m->tcf_bstats.packets++; |
165 | 170 | ||
166 | dev = m->tcfm_dev; | 171 | dev = m->tcfm_dev; |
172 | if (!dev) { | ||
173 | printk_once(KERN_NOTICE "tc mirred: target device is gone\n"); | ||
174 | goto out; | ||
175 | } | ||
176 | |||
167 | if (!(dev->flags & IFF_UP)) { | 177 | if (!(dev->flags & IFF_UP)) { |
168 | if (net_ratelimit()) | 178 | if (net_ratelimit()) |
169 | pr_notice("tc mirred to Houston: device %s is gone!\n", | 179 | pr_notice("tc mirred to Houston: device %s is down\n", |
170 | dev->name); | 180 | dev->name); |
171 | goto out; | 181 | goto out; |
172 | } | 182 | } |
@@ -230,6 +240,28 @@ nla_put_failure: | |||
230 | return -1; | 240 | return -1; |
231 | } | 241 | } |
232 | 242 | ||
243 | static int mirred_device_event(struct notifier_block *unused, | ||
244 | unsigned long event, void *ptr) | ||
245 | { | ||
246 | struct net_device *dev = ptr; | ||
247 | struct tcf_mirred *m; | ||
248 | |||
249 | if (event == NETDEV_UNREGISTER) | ||
250 | list_for_each_entry(m, &mirred_list, tcfm_list) { | ||
251 | if (m->tcfm_dev == dev) { | ||
252 | dev_put(dev); | ||
253 | m->tcfm_dev = NULL; | ||
254 | } | ||
255 | } | ||
256 | |||
257 | return NOTIFY_DONE; | ||
258 | } | ||
259 | |||
260 | static struct notifier_block mirred_device_notifier = { | ||
261 | .notifier_call = mirred_device_event, | ||
262 | }; | ||
263 | |||
264 | |||
233 | static struct tc_action_ops act_mirred_ops = { | 265 | static struct tc_action_ops act_mirred_ops = { |
234 | .kind = "mirred", | 266 | .kind = "mirred", |
235 | .hinfo = &mirred_hash_info, | 267 | .hinfo = &mirred_hash_info, |
@@ -250,12 +282,17 @@ MODULE_LICENSE("GPL"); | |||
250 | 282 | ||
251 | static int __init mirred_init_module(void) | 283 | static int __init mirred_init_module(void) |
252 | { | 284 | { |
285 | int err = register_netdevice_notifier(&mirred_device_notifier); | ||
286 | if (err) | ||
287 | return err; | ||
288 | |||
253 | pr_info("Mirror/redirect action on\n"); | 289 | pr_info("Mirror/redirect action on\n"); |
254 | return tcf_register_action(&act_mirred_ops); | 290 | return tcf_register_action(&act_mirred_ops); |
255 | } | 291 | } |
256 | 292 | ||
257 | static void __exit mirred_cleanup_module(void) | 293 | static void __exit mirred_cleanup_module(void) |
258 | { | 294 | { |
295 | unregister_netdevice_notifier(&mirred_device_notifier); | ||
259 | tcf_unregister_action(&act_mirred_ops); | 296 | tcf_unregister_action(&act_mirred_ops); |
260 | } | 297 | } |
261 | 298 | ||
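One note on the init ordering above: the notifier is registered before tcf_register_action(), but nothing unregisters it if the action registration fails. A variant that unwinds on failure (a sketch, not part of the patch):

static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops);
	if (err)				/* undo the notifier on failure */
		unregister_netdevice_notifier(&mirred_device_notifier);
	return err;
}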