Diffstat (limited to 'drivers/net/cxgb4vf/cxgb4vf_main.c')

 -rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c | 322
 1 file changed, 191 insertions(+), 131 deletions(-)
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 7b6d07f50c71..e71c08e547e4 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -280,9 +280,7 @@ static void name_msix_vecs(struct adapter *adapter)
 		const struct port_info *pi = netdev_priv(dev);
 		int qs, msi;
 
-		for (qs = 0, msi = MSIX_NIQFLINT;
-		     qs < pi->nqsets;
-		     qs++, msi++) {
+		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
 			snprintf(adapter->msix_info[msi].desc, namelen,
 				 "%s-%d", dev->name, qs);
 			adapter->msix_info[msi].desc[namelen] = 0;
@@ -309,7 +307,7 @@ static int request_msix_queue_irqs(struct adapter *adapter)
 	/*
 	 * Ethernet queues.
 	 */
-	msi = MSIX_NIQFLINT;
+	msi = MSIX_IQFLINT;
 	for_each_ethrxq(s, rxq) {
 		err = request_irq(adapter->msix_info[msi].vec,
 				  t4vf_sge_intr_msix, 0,
@@ -337,7 +335,7 @@ static void free_msix_queue_irqs(struct adapter *adapter)
 	int rxq, msi;
 
 	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
-	msi = MSIX_NIQFLINT;
+	msi = MSIX_IQFLINT;
 	for_each_ethrxq(s, rxq)
 		free_irq(adapter->msix_info[msi++].vec,
 			 &s->ethrxq[rxq].rspq);
@@ -527,7 +525,7 @@ static int setup_sge_queues(struct adapter *adapter)
 	 * brought up at which point lots of things get nailed down
 	 * permanently ...
 	 */
-	msix = MSIX_NIQFLINT;
+	msix = MSIX_IQFLINT;
 	for_each_port(adapter, pidx) {
 		struct net_device *dev = adapter->port[pidx];
 		struct port_info *pi = netdev_priv(dev);
@@ -748,11 +746,22 @@ static int cxgb4vf_open(struct net_device *dev)
 	/*
 	 * Note that this interface is up and start everything up ...
 	 */
-	dev->real_num_tx_queues = pi->nqsets;
-	set_bit(pi->port_id, &adapter->open_device_map);
-	link_start(dev);
+	netif_set_real_num_tx_queues(dev, pi->nqsets);
+	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
+	if (err)
+		goto err_unwind;
+	err = link_start(dev);
+	if (err)
+		goto err_unwind;
+
 	netif_tx_start_all_queues(dev);
+	set_bit(pi->port_id, &adapter->open_device_map);
 	return 0;
+
+err_unwind:
+	if (adapter->open_device_map == 0)
+		adapter_down(adapter);
+	return err;
 }
 
 /*
@@ -761,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev)
  */
 static int cxgb4vf_stop(struct net_device *dev)
 {
-	int ret;
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
 
 	netif_tx_stop_all_queues(dev);
 	netif_carrier_off(dev);
-	ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+	t4vf_enable_vi(adapter, pi->viid, false, false);
 	pi->link_cfg.link_ok = 0;
 
 	clear_bit(pi->port_id, &adapter->open_device_map);
@@ -811,40 +819,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
 }
 
 /*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of addrss pointers and
+ * return the number collected.
  */
-static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
-					       const u8 **addr,
-					       unsigned int maxaddrs)
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+							const u8 **addr,
+							unsigned int offset,
+							unsigned int maxaddrs)
 {
+	unsigned int index = 0;
 	unsigned int naddr = 0;
 	const struct netdev_hw_addr *ha;
 
-	for_each_dev_addr(dev, ha) {
-		addr[naddr++] = ha->addr;
-		if (naddr >= maxaddrs)
-			break;
-	}
+	for_each_dev_addr(dev, ha)
+		if (index++ >= offset) {
+			addr[naddr++] = ha->addr;
+			if (naddr >= maxaddrs)
+				break;
+		}
 	return naddr;
 }
 
 /*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of addrss pointers and
+ * return the number collected.
  */
-static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
-					       const u8 **addr,
-					       unsigned int maxaddrs)
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+							const u8 **addr,
+							unsigned int offset,
+							unsigned int maxaddrs)
 {
+	unsigned int index = 0;
 	unsigned int naddr = 0;
 	const struct netdev_hw_addr *ha;
 
-	netdev_for_each_mc_addr(ha, dev) {
-		addr[naddr++] = ha->addr;
-		if (naddr >= maxaddrs)
-			break;
-	}
+	netdev_for_each_mc_addr(ha, dev)
+		if (index++ >= offset) {
+			addr[naddr++] = ha->addr;
+			if (naddr >= maxaddrs)
+				break;
+		}
 	return naddr;
 }
 
@@ -857,16 +873,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
 	u64 mhash = 0;
 	u64 uhash = 0;
 	bool free = true;
-	u16 filt_idx[7];
+	unsigned int offset, naddr;
 	const u8 *addr[7];
-	int ret, naddr = 0;
+	int ret;
 	const struct port_info *pi = netdev_priv(dev);
 
 	/* first do the secondary unicast addresses */
-	naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-	if (naddr > 0) {
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
 		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-					  naddr, addr, filt_idx, &uhash, sleep);
+					  naddr, addr, NULL, &uhash, sleep);
 		if (ret < 0)
 			return ret;
 
@@ -874,12 +894,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
 	}
 
 	/* next set up the multicast addresses */
-	naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-	if (naddr > 0) {
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
 		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-					  naddr, addr, filt_idx, &mhash, sleep);
+					  naddr, addr, NULL, &mhash, sleep);
 		if (ret < 0)
 			return ret;
+		free = false;
 	}
 
 	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
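
The three hunks above exist because the on-stack addr[] array holds only seven pointers, so the old code only ever handed the first seven unicast (or multicast) addresses to t4vf_alloc_mac_filt(). With the new offset parameter, set_addr_filters() walks the whole list in batches of at most ARRAY_SIZE(addr) entries, passing NULL where the now-unused filter-index array used to go. A minimal sketch of the batching idiom using the driver's own helpers (illustrative restatement only, not part of the patch):

    const u8 *addr[7];
    unsigned int offset = 0, naddr;
    int ret;

    do {
            /* collect the next batch of at most ARRAY_SIZE(addr) addresses */
            naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
                                                 ARRAY_SIZE(addr));
            if (naddr == 0)
                    break;                  /* list exhausted */
            /* program this batch of exact-match MAC filters */
            ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
                                      naddr, addr, NULL, &uhash, sleep);
            if (ret < 0)
                    return ret;
            offset += naddr;                /* resume after what was consumed */
    } while (1);

A 20-entry address list, for example, is now programmed in three calls (7 + 7 + 6 addresses) instead of being truncated at seven.
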
@@ -1100,18 +1125,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
 	return 0;
 }
 
-/*
- * Return a TX Queue on which to send the specified skb.
- */
-static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-	/*
-	 * XXX For now just use the default hash but we probably want to
-	 * XXX look at other possibilities ...
-	 */
-	return skb_tx_hash(dev, skb);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  * Poll all of our receive queues. This is called outside of normal interrupt
@@ -1154,7 +1167,8 @@ static int cxgb4vf_get_settings(struct net_device *dev,
 
 	cmd->supported = pi->link_cfg.supported;
 	cmd->advertising = pi->link_cfg.advertising;
-	cmd->speed = netif_carrier_ok(dev) ? pi->link_cfg.speed : -1;
+	ethtool_cmd_speed_set(cmd,
+			      netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
 	cmd->duplex = DUPLEX_FULL;
 
 	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -1313,37 +1327,22 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev,
 }
 
 /*
- * Return whether RX Checksum Offloading is currently enabled for the device.
- */
-static u32 cxgb4vf_get_rx_csum(struct net_device *dev)
-{
-	struct port_info *pi = netdev_priv(dev);
-
-	return (pi->rx_offload & RX_CSO) != 0;
-}
-
-/*
- * Turn RX Checksum Offloading on or off for the device.
+ * Identify the port by blinking the port's LED.
  */
-static int cxgb4vf_set_rx_csum(struct net_device *dev, u32 csum)
+static int cxgb4vf_phys_id(struct net_device *dev,
+			   enum ethtool_phys_id_state state)
 {
+	unsigned int val;
 	struct port_info *pi = netdev_priv(dev);
 
-	if (csum)
-		pi->rx_offload |= RX_CSO;
+	if (state == ETHTOOL_ID_ACTIVE)
+		val = 0xffff;
+	else if (state == ETHTOOL_ID_INACTIVE)
+		val = 0;
 	else
-		pi->rx_offload &= ~RX_CSO;
-	return 0;
-}
-
-/*
- * Identify the port by blinking the port's LED.
- */
-static int cxgb4vf_phys_id(struct net_device *dev, u32 id)
-{
-	struct port_info *pi = netdev_priv(dev);
+		return -EINVAL;
 
-	return t4vf_identify_port(pi->adapter, pi->viid, 5);
+	return t4vf_identify_port(pi->adapter, pi->viid, val);
 }
 
 /*
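
The hunk above moves the driver from the old ethtool .phys_id hook, which received the requested blink duration as a plain u32, to the .set_phys_id interface: the core now calls the handler with ETHTOOL_ID_ACTIVE when identification should start and ETHTOOL_ID_INACTIVE when it should stop, and this driver lets the firmware do the actual blinking (val = 0xffff to start, 0 to stop), returning -EINVAL for the per-blink ON/OFF states it does not use. A hypothetical handler of the same shape for some other device (led_blink_start/led_blink_stop are made-up helpers, not part of this driver):

    static int example_set_phys_id(struct net_device *dev,
                                   enum ethtool_phys_id_state state)
    {
            switch (state) {
            case ETHTOOL_ID_ACTIVE:
                    return led_blink_start(dev);    /* hypothetical helper */
            case ETHTOOL_ID_INACTIVE:
                    return led_blink_stop(dev);     /* hypothetical helper */
            default:
                    return -EINVAL;                 /* ON/OFF not needed here */
            }
    }

The matching ethtool_ops wiring (.phys_id replaced by .set_phys_id) appears in a later hunk of this diff.
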
@@ -1355,6 +1354,8 @@ struct queue_port_stats {
 	u64 rx_csum;
 	u64 vlan_ex;
 	u64 vlan_ins;
+	u64 lro_pkts;
+	u64 lro_merged;
 };
 
 /*
@@ -1392,6 +1393,8 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
 	"RxCsumGood ",
 	"VLANextractions ",
 	"VLANinsertions ",
+	"GROPackets ",
+	"GROMerged ",
 };
 
 /*
@@ -1441,6 +1444,8 @@ static void collect_sge_port_stats(const struct adapter *adapter,
 		stats->rx_csum += rxq->stats.rx_cso;
 		stats->vlan_ex += rxq->stats.vlan_ex;
 		stats->vlan_ins += txq->vlan_ins;
+		stats->lro_pkts += rxq->stats.lro_pkts;
+		stats->lro_merged += rxq->stats.lro_merged;
 	}
 }
 
@@ -1537,16 +1542,9 @@ static void cxgb4vf_get_wol(struct net_device *dev,
 }
 
 /*
- * Set TCP Segmentation Offloading feature capabilities.
+ * TCP Segmentation Offload flags which we support.
  */
-static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
-{
-	if (tso)
-		dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
-	else
-		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
-	return 0;
-}
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
 
 static struct ethtool_ops cxgb4vf_ethtool_ops = {
 	.get_settings = cxgb4vf_get_settings,
@@ -1558,19 +1556,14 @@ static struct ethtool_ops cxgb4vf_ethtool_ops = {
 	.get_coalesce = cxgb4vf_get_coalesce,
 	.set_coalesce = cxgb4vf_set_coalesce,
 	.get_pauseparam = cxgb4vf_get_pauseparam,
-	.get_rx_csum = cxgb4vf_get_rx_csum,
-	.set_rx_csum = cxgb4vf_set_rx_csum,
-	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-	.set_sg = ethtool_op_set_sg,
 	.get_link = ethtool_op_get_link,
 	.get_strings = cxgb4vf_get_strings,
-	.phys_id = cxgb4vf_phys_id,
+	.set_phys_id = cxgb4vf_phys_id,
 	.get_sset_count = cxgb4vf_get_sset_count,
 	.get_ethtool_stats = cxgb4vf_get_ethtool_stats,
 	.get_regs_len = cxgb4vf_get_regs_len,
 	.get_regs = cxgb4vf_get_regs,
 	.get_wol = cxgb4vf_get_wol,
-	.set_tso = cxgb4vf_set_tso,
 };
 
 /*
@@ -2016,7 +2009,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
 {
 	int i;
 
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
 	/*
 	 * Debugfs support is best effort.
@@ -2035,9 +2028,9 @@ static int __devinit setup_debugfs(struct adapter *adapter)
  * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
  * it to our caller to tear down the directory (debugfs_root).
  */
-static void __devexit cleanup_debugfs(struct adapter *adapter)
+static void cleanup_debugfs(struct adapter *adapter)
 {
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
 	/*
 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2053,7 +2046,7 @@ static void __devexit cleanup_debugfs(struct adapter *adapter)
  * adapter parameters we're going to be using and initialize basic adapter
  * hardware support.
  */
-static int adap_init0(struct adapter *adapter)
+static int __devinit adap_init0(struct adapter *adapter)
 {
 	struct vf_resources *vfres = &adapter->params.vfres;
 	struct sge_params *sge_params = &adapter->params.sge;
@@ -2072,6 +2065,22 @@ static int adap_init0(struct adapter *adapter)
 	}
 
 	/*
+	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+	 * 2.6.31 and later we can't call pci_reset_function() in order to
+	 * issue an FLR because of a self-deadlock on the device semaphore.
+	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+	 * cases where they're needed -- for instance, some versions of KVM
+	 * fail to reset "Assigned Devices" when the VM reboots. Therefore we
+	 * use the firmware based reset in order to reset any per function
+	 * state.
+	 */
+	err = t4vf_fw_reset(adapter);
+	if (err < 0) {
+		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+		return err;
+	}
+
+	/*
 	 * Grab basic operational parameters. These will predominantly have
 	 * been set up by the Physical Function Driver or will be hard coded
 	 * into the adapter. We just have to live with them ... Note that
@@ -2243,6 +2252,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
 {
 	struct sge *s = &adapter->sge;
 	int q10g, n10g, qidx, pidx, qs;
+	size_t iqe_size;
 
 	/*
 	 * We should not be called till we know how many Queue Sets we can
@@ -2287,6 +2297,13 @@ static void __devinit cfg_queues(struct adapter *adapter)
 	s->ethqsets = qidx;
 
 	/*
+	 * The Ingress Queue Entry Size for our various Response Queues needs
+	 * to be big enough to accommodate the largest message we can receive
+	 * from the chip/firmware; which is 64 bytes ...
+	 */
+	iqe_size = 64;
+
+	/*
 	 * Set up default Queue Set parameters ... Start off with the
 	 * shortest interrupt holdoff timer.
 	 */
@@ -2294,7 +2311,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
 		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
 		struct sge_eth_txq *txq = &s->ethtxq[qs];
 
-		init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES);
+		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
 		rxq->fl.size = 72;
 		txq->q.size = 1024;
 	}
@@ -2303,8 +2320,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
 	 * The firmware event queue is used for link state changes and
 	 * notifications of TX DMA completions.
 	 */
-	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512,
-		  L1_CACHE_BYTES);
+	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
 
 	/*
 	 * The forwarded interrupt queue is used when we're in MSI interrupt
@@ -2320,7 +2336,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
 	 * any time ...
 	 */
 	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
-		  L1_CACHE_BYTES);
+		  iqe_size);
 }
 
 /*
@@ -2414,7 +2430,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
 	.ndo_get_stats = cxgb4vf_get_stats,
 	.ndo_set_rx_mode = cxgb4vf_set_rxmode,
 	.ndo_set_mac_address = cxgb4vf_set_mac_addr,
-	.ndo_select_queue = cxgb4vf_select_queue,
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_do_ioctl = cxgb4vf_do_ioctl,
 	.ndo_change_mtu = cxgb4vf_change_mtu,
@@ -2443,17 +2458,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 	struct net_device *netdev;
 
 	/*
-	 * Vet our module parameters.
-	 */
-	if (msi != MSI_MSIX && msi != MSI_MSI) {
-		dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
-			" (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
-			MSI_MSI);
-		err = -EINVAL;
-		goto err_out;
-	}
-
-	/*
 	 * Print our driver banner the first time we're called to initialize a
 	 * device.
 	 */
@@ -2462,7 +2466,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 		version_printed = 1;
 	}
 
-
 	/*
 	 * Initialize generic PCI device state.
 	 */
@@ -2595,20 +2598,19 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 	 * it.
 	 */
 	pi->xact_addr_filt = -1;
-	pi->rx_offload = RX_CSO;
 	netif_carrier_off(netdev);
-	netif_tx_stop_all_queues(netdev);
 	netdev->irq = pdev->irq;
 
-	netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
-			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-			    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
-			    NETIF_F_GRO);
+	netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
+	netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
+		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_HIGHDMA;
+	netdev->features = netdev->hw_features |
+		NETIF_F_HW_VLAN_RX;
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
-	netdev->vlan_features =
-		(netdev->features &
-		 ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX));
 
 #ifdef HAVE_NET_DEVICE_OPS
 	netdev->netdev_ops = &cxgb4vf_netdev_ops;
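
This hunk moves the driver onto the hw_features model: the bits in netdev->hw_features are the ones userspace may toggle (so ethtool -K ethX rx off now flips NETIF_F_RXCSUM, taking over from the driver-private get_rx_csum/set_rx_csum hooks deleted earlier in this diff), while netdev->features is the set currently enabled; NETIF_F_HW_VLAN_RX and the DAC-dependent NETIF_F_HIGHDMA are enabled without being advertised as toggleable. A minimal generic sketch of the split (assumed flag names, not this driver's exact set):

    /* offloads the user may switch on and off with ethtool -K */
    netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
    /* what is enabled right now: the toggleable set plus always-on bits */
    netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
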
@@ -2622,7 +2624,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 	netdev->do_ioctl = cxgb4vf_do_ioctl;
 	netdev->change_mtu = cxgb4vf_change_mtu;
 	netdev->set_mac_address = cxgb4vf_set_mac_addr;
-	netdev->select_queue = cxgb4vf_select_queue;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	netdev->poll_controller = cxgb4vf_poll_controller;
 #endif
@@ -2668,11 +2669,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 	/*
 	 * Set up our debugfs entries.
 	 */
-	if (cxgb4vf_debugfs_root) {
+	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
 		adapter->debugfs_root =
 			debugfs_create_dir(pci_name(pdev),
 					   cxgb4vf_debugfs_root);
-		if (adapter->debugfs_root == NULL)
+		if (IS_ERR_OR_NULL(adapter->debugfs_root))
 			dev_warn(&pdev->dev, "could not create debugfs"
 				 " directory");
 		else
@@ -2706,7 +2707,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 	cfg_queues(adapter);
 
 	/*
-	 * Print a short notice on the existance and configuration of the new
+	 * Print a short notice on the existence and configuration of the new
 	 * VF network device ...
 	 */
 	for_each_port(adapter, pidx) {
@@ -2727,7 +2728,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 	 */
 
 err_free_debugfs:
-	if (adapter->debugfs_root) {
+	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
 		cleanup_debugfs(adapter);
 		debugfs_remove_recursive(adapter->debugfs_root);
 	}
@@ -2759,7 +2760,6 @@ err_release_regions:
 err_disable_device:
 	pci_disable_device(pdev);
 
-err_out:
 	return err;
 }
 
@@ -2797,7 +2797,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
 	/*
 	 * Tear down our debugfs entries.
 	 */
-	if (adapter->debugfs_root) {
+	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
 		cleanup_debugfs(adapter);
 		debugfs_remove_recursive(adapter->debugfs_root);
 	}
@@ -2831,6 +2831,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
 }
 
 /*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+	struct adapter *adapter;
+	int pidx;
+
+	adapter = pci_get_drvdata(pdev);
+	if (!adapter)
+		return;
+
+	/*
+	 * Disable all Virtual Interfaces. This will shut down the
+	 * delivery of all ingress packets into the chip for these
+	 * Virtual Interfaces.
+	 */
+	for_each_port(adapter, pidx) {
+		struct net_device *netdev;
+		struct port_info *pi;
+
+		if (!test_bit(pidx, &adapter->registered_device_map))
+			continue;
+
+		netdev = adapter->port[pidx];
+		if (!netdev)
+			continue;
+
+		pi = netdev_priv(netdev);
+		t4vf_enable_vi(adapter, pi->viid, false, false);
+	}
+
+	/*
+	 * Free up all Queues which will prevent further DMA and
+	 * Interrupts allowing various internal pathways to drain.
+	 */
+	t4vf_free_sge_resources(adapter);
+}
+
+/*
  * PCI Device registration data structures.
  */
 #define CH_DEVICE(devid, idx) \
@@ -2841,6 +2881,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
 	CH_DEVICE(0x4800, 0),	/* T440-dbg */
 	CH_DEVICE(0x4801, 0),	/* T420-cr */
 	CH_DEVICE(0x4802, 0),	/* T422-cr */
+	CH_DEVICE(0x4803, 0),	/* T440-cr */
+	CH_DEVICE(0x4804, 0),	/* T420-bch */
+	CH_DEVICE(0x4805, 0),	/* T440-bch */
+	CH_DEVICE(0x4806, 0),	/* T460-ch */
+	CH_DEVICE(0x4807, 0),	/* T420-so */
+	CH_DEVICE(0x4808, 0),	/* T420-cx */
+	CH_DEVICE(0x4809, 0),	/* T420-bt */
+	CH_DEVICE(0x480a, 0),	/* T404-bt */
 	{ 0, }
 };
 
@@ -2855,6 +2903,7 @@ static struct pci_driver cxgb4vf_driver = {
 	.id_table = cxgb4vf_pci_tbl,
 	.probe = cxgb4vf_pci_probe,
 	.remove = __devexit_p(cxgb4vf_pci_remove),
+	.shutdown = __devexit_p(cxgb4vf_pci_shutdown),
 };
 
 /*
@@ -2864,14 +2913,25 @@ static int __init cxgb4vf_module_init(void)
 {
 	int ret;
 
+	/*
+	 * Vet our module parameters.
+	 */
+	if (msi != MSI_MSIX && msi != MSI_MSI) {
+		printk(KERN_WARNING KBUILD_MODNAME
+		       ": bad module parameter msi=%d; must be %d"
+		       " (MSI-X or MSI) or %d (MSI)\n",
+		       msi, MSI_MSIX, MSI_MSI);
+		return -EINVAL;
+	}
+
 	/* Debugfs support is optional, just warn if this fails */
 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
-	if (!cxgb4vf_debugfs_root)
+	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 		printk(KERN_WARNING KBUILD_MODNAME ": could not create"
 		       " debugfs entry, continuing\n");
 
 	ret = pci_register_driver(&cxgb4vf_driver);
-	if (ret < 0)
+	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 		debugfs_remove(cxgb4vf_debugfs_root);
 	return ret;
 }
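
The IS_ERR_OR_NULL() conversions here and in the earlier debugfs hunks cover the case where CONFIG_DEBUG_FS is compiled out: the debugfs_create_dir() stub then returns ERR_PTR(-ENODEV) rather than NULL, so a plain NULL test would treat that error cookie as a usable dentry and could dereference it later. A minimal sketch of the pattern, outside this driver:

    struct dentry *root;

    root = debugfs_create_dir("example", NULL);
    if (IS_ERR_OR_NULL(root))
            /* NULL on creation failure, ERR_PTR(-ENODEV) when debugfs is compiled out */
            pr_warn("example: debugfs unavailable, continuing without it\n");
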